diff --git "a/2177.jsonl" "b/2177.jsonl" new file mode 100644--- /dev/null +++ "b/2177.jsonl" @@ -0,0 +1,346 @@ +{"seq_id":"431941929","text":"flour = int(input('How much flour for 25 cakes: '))\nbutter = int(input('How much butter for 25 cakes: '))\nsugar = int(input('How much sugar for 25 cakes: '))\n\ndef recipe(flour, butter, sugar):\n flour_per_cake = flour / 25\n butter_per_cake = butter / 25\n sugar_per_cake = sugar / 25\n\n print(f\"You need {flour_per_cake * 35} flour for 35 cakes\")\n print(f\"You need {butter_per_cake * 35} butter for 35 cakes\")\n print(f\"You need {sugar_per_cake * 35} sugar for 35 cakes\")\n\nrecipe(flour, butter, sugar)","sub_path":"maths-examples/cake-recipe.py","file_name":"cake-recipe.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"485154972","text":"__author__ = 'Yule Jin'\nfrom pandas import DataFrame\nfrom pandas import Series\nimport numpy as np\nimport pandas as pd\n\n\n## calculate libor rate and suvival rate's vascicek models\nclass VasicekAffine:\n def __init__(self, x = None, y= None, datelist = None):\n if x==None and y==None and datelist == None:\n return\n self.datelist = datelist\n self.y = y\n self.x = x\n self.Q = []\n self.r = []\n self.h=0\n ##function used to calculate the survival rate based on Vasicek model\n def calQ(self):\n tenors = self.datelist\n a = self.x[0]\n theta = self.x[1]\n sigma = self.x[2]\n lambda0 = self.x[3]\n time0 = self.datelist[0]\n self.Q = pd.DataFrame(np.ones([1, 1]))\n for k in np.arange(len(tenors)):\n i = (tenors[k]-time0).days/365.0\n B = 1/a * (1-np.exp(-a*i))\n A = (theta-sigma*sigma/(2*a*a))*(i-B)+ sigma*sigma/(4*a)*B*B\n self.Q.loc[k,:] = np.exp(-A-B*lambda0)\n self.Q.index = tenors\n return self.Q\n ## function used to calculate the interest rate based on Vasicek model\n def calR(self):\n tenors = self.datelist\n kappa = self.y[0]\n mu = self.y[1]\n sigmaR = self.y[2]\n lambdaR0 = self.y[3]\n time0 = self.datelist[0]\n length = len(tenors)\n #self.r = pd.DataFrame(np.ones([1,1]))\n self.r=np.zeros(np.shape(tenors))\n for self.h in range(0,length-1):\n j = (tenors[self.h]-time0).days/365.0\n B_r = 1/kappa * (1-np.exp(-kappa*j))\n A_r = (mu - sigmaR*sigmaR/(2*kappa*kappa))*(j-B_r) + sigmaR*sigmaR/(4*kappa)*B_r*B_r\n #self.r.loc[h,:] = np.exp(-A_r-B_r*lambdaR0)\n self.r[self.h]=(np.exp(-A_r-B_r*lambdaR0))\n #self.r.index = tenors\n return self.r\n","sub_path":"MonteCarloRutgers/MonteCarloSimulators/Vasicek/VasicekAffine.py","file_name":"VasicekAffine.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"567888309","text":"from flask import render_template\nfrom . 
import portfolio\nfrom ..models import About, Item\n\n\n@portfolio.route('/')\ndef index():\n about = About.query.first()\n item_list = Item.query.order_by(Item.date_of_publication.desc()).all()\n return render_template(\n 'portfolio/index.html',\n about=about,\n items=item_list)\n\n\n@portfolio.route('/portfolio/')\ndef product(item_slug):\n return render_template('portfolio/item.html', item_slug=item_slug)\n\n\n# FOR DEV ONLY\n\n\ndef shutdown_server():\n from flask import request\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with werkzeug server')\n func()\n\n\n@portfolio.route('/sd')\ndef shutdown():\n shutdown_server()\n return 'server shutting down'\n","sub_path":"app/portfolio/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"629353840","text":"#!/usr/bin/env python\n\nimport webapp2\nimport logging\nfrom google.appengine.api import mail\nfrom google.appengine.ext.webapp.mail_handlers import InboundMailHandler\nfrom models import ProfileManager, Announcement\nimport os\n\nclass EmailHandler(InboundMailHandler):\n def post(self):\n # We always want to return successfully because retrying\n # an invalid message isn't going to do any good.\n try:\n message = mail.InboundEmailMessage(self.request.body.lstrip())\n self.receive(message)\n except mail.InvalidEmailError:\n self.response.out.write(\"INVALID EMAIL\")\n\n def receive(self, message):\n token = None\n\n # the recipient address is determined by the url, not the\n # content of the email message.\n received_at = self.request.path.split(\"/\")[-1]\n token = received_at.split(\"@\")[0]\n\n profile = ProfileManager.findByToken(token)\n\n if not profile:\n self.response.out.write(\"NO PROFILE\")\n logging.info(\"Profile not found\")\n return\n\n announcement = Announcement(type=\"email\", params={\"message\": message,\n \"profile\": profile})\n announcement.send()\n\n self.response.set_status(200)\n self.response.out.write(\"OK\")\n\napp = webapp2.WSGIApplication([(EmailHandler.mapping())],\n debug=True)\n","sub_path":"appengine/emailhandler.py","file_name":"emailhandler.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"329586172","text":"import numpy as np\r\nfrom pygame.locals import *\r\nimport pygame\r\nimport random as rd\r\n\r\nfrom snake_env import Render, Map\r\nimport time\r\n\r\nclass IA():\r\n\r\n def __init__(self, agent_id):\r\n self.name = \"IA_keyboard\"\r\n self.id = agent_id\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n def act(self, state):\r\n while True:\r\n pygame.event.pump()\r\n keys = pygame.key.get_pressed()\r\n if (keys[K_RIGHT]):\r\n return 0\r\n if (keys[K_UP]):\r\n return 1\r\n if (keys[K_LEFT]):\r\n return 2\r\n if (keys[K_DOWN]):\r\n return 3\r\n\r\n def dead(self):\r\n return\r\n","sub_path":"IA/IA_keyboard.py","file_name":"IA_keyboard.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"59711651","text":"\"\"\"\nThis should be moved to a separate repo later\n\"\"\"\n\nimport abc\nimport numpy as np\nimport rbdl\nimport Model\nimport time\nimport message_filters\nfrom GaitCore.Core import Point\nfrom GaitCore.Core import utilities\nfrom std_msgs.msg import Float32MultiArray\nfrom threading import Thread\nfrom . 
import Model\nfrom GaitCore.Bio import Leg, Joint\nimport rospy\nfrom ambf_msgs.msg import RigidBodyState, SensorState\nfrom GaitAnaylsisToolkit.LearningTools.Runner import TPGMMRunner\nfrom geometry_msgs.msg import Point32\nfrom sensor_msgs.msg import PointCloud\nfrom os.path import dirname, join\n\n\n\nclass Exoskeleton(Model.Model):\n\n def __init__(self, client, model_name, joints, mass, height):\n super(Exoskeleton, self).__init__(client, model_name=model_name, joint_names=joints)\n self._handle = self._client.get_obj_handle('ExoHip')\n self.left_foot = self._client.get_obj_handle('ExoLeftFoot')\n self.right_foot = self._client.get_obj_handle('ExoRightFoot')\n # self.left_foot.set_pos(0.24, -0.55, -0.24)\n # self.left_foot.set_rpy(0.0, 0.0, 0.0)\n # self.right_foot.set_pos(-0.24, -0.55, -0.24)\n # self.right_foot.set_rpy(0.0, 0.0, 0.0)\n\n # Update to current\n self.prox = {}\n self.prox[\"LeftSideProx\"] = rospy.Publisher('left_leg', PointCloud, queue_size=10)\n self.prox[\"RightSideProx\"] = rospy.Publisher('right_leg', PointCloud, queue_size=10)\n time.sleep(4)\n self._mass = mass\n self._height = height\n\n self.rbdl_model = self.dynamic_model()\n left_joints = {}\n right_joints = {}\n\n for joint in (left_joints, right_joints):\n for output in [\"Hip\", \"Knee\", \"Ankle\"]:\n angle = Point.Point(0, 0, 0)\n force = Point.Point(0, 0, 0)\n moment = Point.Point(0, 0, 0)\n power = Point.Point(0, 0, 0)\n joint[output] = Joint.Joint(angle, moment, power, force)\n\n self._left_leg = Leg.Leg(left_joints[\"Hip\"], left_joints[\"Knee\"], left_joints[\"Ankle\"])\n self._right_leg = Leg.Leg(right_joints[\"Hip\"], right_joints[\"Knee\"], right_joints[\"Ankle\"])\n\n self._state = (self._q, self._qd)\n\n # START ATTEMPT\n # self._left_thigh_sensorF = Point.Point(0, 0, 0)\n # self._left_thigh_sensorB = Point.Point(0, 0, 0)\n # self._left_shank_sensorF = Point.Point(0, 0, 0)\n # self._left_shank_sensorB = Point.Point(0, 0, 0)\n # self._right_thigh_sensorF = Point.Point(0, 0, 0)\n # self._right_thigh_sensorB = Point.Point(0, 0, 0)\n # self._right_shank_sensorF = Point.Point(0, 0, 0)\n # self._right_shank_sensorB = Point.Point(0, 0, 0)\n rospy.Subscriber(\"/ambf/env/LeftSideProx/State\", SensorState, self.prox_callback)\n rospy.Subscriber(\"/ambf/env/RightSideProx/State\", SensorState, self.prox_callback)\n\n rospy.Subscriber(\"/ambf/env/LeftFootProx/State\", SensorState, self.left_foot_prox_callback)\n rospy.Subscriber(\"/ambf/env/RightFootProx/State\", SensorState, self.right_foot_prox_callback)\n\n self._left_thigh_sensorF_sub = message_filters.Subscriber(\"/ambf/env/FrontSensorLeftThigh/State\", RigidBodyState)\n self._left_thigh_sensorB_sub = message_filters.Subscriber(\"/ambf/env/BackSensorLeftThigh/State\", RigidBodyState)\n self._left_shank_sensorF_sub = message_filters.Subscriber(\"/ambf/env/FrontSensorLeftShank/State\", RigidBodyState)\n self._left_shank_sensorB_sub = message_filters.Subscriber(\"/ambf/env/BackSensorLeftShank/State\", RigidBodyState)\n self._right_thigh_sensorF_sub = message_filters.Subscriber(\"/ambf/env/FrontSensorRightThigh/State\", RigidBodyState)\n self._right_thigh_sensorB_sub = message_filters.Subscriber(\"/ambf/env/BackSensorRightThigh/State\", RigidBodyState)\n self._right_shank_sensorF_sub = message_filters.Subscriber(\"/ambf/env/FrontSensorRightShank/State\", RigidBodyState)\n self._right_shank_sensorB_sub = message_filters.Subscriber(\"/ambf/env/BackSensorRightShank/State\", RigidBodyState)\n self._leg_sensor_ls = [self._left_thigh_sensorF_sub, 
self._left_thigh_sensorB_sub,\n self._left_shank_sensorF_sub, self._left_shank_sensorB_sub,\n self._right_thigh_sensorF_sub, self._right_thigh_sensorB_sub,\n self._right_shank_sensorF_sub, self._right_shank_sensorB_sub]\n self._leg_sensor_cb = message_filters.TimeSynchronizer(self._leg_sensor_ls, 1)\n self._leg_sensor_cb.registerCallback(self.leg_sensor_callback)\n\n self._left_foot_force_sensor = []\n self._right_foot_force_sensor = []\n\n self._left_foot_force_sensor.append(Point.Point(0, 0, 0))\n self._left_foot_force_sensor.append(Point.Point(0, 0, 0))\n self._left_foot_force_sensor.append(Point.Point(0, 0, 0))\n\n self._right_foot_force_sensor.append(Point.Point(0, 0, 0))\n self._right_foot_force_sensor.append(Point.Point(0, 0, 0))\n self._right_foot_force_sensor.append(Point.Point(0, 0, 0))\n\n self._left_foot_sensor1_sub = message_filters.Subscriber(\"/ambf/env/SensorLeftFoot1Tab/State\", RigidBodyState)\n self._left_foot_sensor2_sub = message_filters.Subscriber(\"/ambf/env/SensorLeftFoot2Tab/State\", RigidBodyState)\n self._left_foot_sensor3_sub = message_filters.Subscriber(\"/ambf/env/SensorLeftFoot3Tab/State\", RigidBodyState)\n self._right_foot_sensor1_sub = message_filters.Subscriber(\"/ambf/env/SensorRightFoot1Tab/State\", RigidBodyState)\n self._right_foot_sensor2_sub = message_filters.Subscriber(\"/ambf/env/SensorRightFoot2Tab/State\", RigidBodyState)\n self._right_foot_sensor3_sub = message_filters.Subscriber(\"/ambf/env/SensorRightFoot3Tab/State\", RigidBodyState)\n self._foot_sensor_ls = [self._left_foot_sensor1_sub, self._left_foot_sensor2_sub, self._left_foot_sensor3_sub,\n self._right_foot_sensor1_sub, self._right_foot_sensor2_sub, self._right_foot_sensor3_sub]\n self._foot_sensor_cb = message_filters.TimeSynchronizer(self._foot_sensor_ls, 1)\n self._foot_sensor_cb.registerCallback(self.foot_sensor_callback)\n self._right_foot_prox = SensorState()\n self._left_foot_prox = SensorState()\n self._updater.start()\n\n def left_foot_prox_callback(self, msg):\n self._left_foot_prox = msg\n\n def right_foot_prox_callback(self, msg):\n self._right_foot_prox = msg\n\n def check_left_foot_collision(self):\n return any(self._left_foot_prox.triggered)\n\n def check_foot_collision(self):\n return any(self._left_foot_prox.triggered)\n\n def get_right_foot_collision_distance(self):\n dist = self._right_foot_prox.measurement\n radius = self._right_foot_prox.range[0]\n return dist[0] - radius\n\n def get_left_foot_collision_distance(self):\n dist = self._left_foot_prox.measurement\n radius = self._left_foot_prox.range[0]\n return dist[0] - radius\n\n def prox_callback(self, msg):\n\n pos = msg.pose\n name = msg.name.data\n ranges = msg.measurement\n radius = msg.range[0]\n parent_name = msg.parent_name\n sensed_objects = msg.sensed_objects\n theta = np.linspace(-np.pi, -0.70 * np.pi, 75)\n cloud = PointCloud()\n for rad, angle, obj in zip(ranges, theta, sensed_objects):\n point = Point32()\n if \"Rob\" in obj.data:\n r = 0\n else:\n r = rad\n point.y = (radius - r) * np.cos(angle) + pos.position.y\n point.z = (radius - r) * np.sin(angle) + pos.position.z\n point.x = pos.position.x\n cloud.points.append(point)\n\n cloud.header.stamp = rospy.Time.now()\n cloud.header.frame_id = \"/exoskeleton\"\n self.prox[name].publish(cloud)\n\n def leg_sensor_callback(self, flt, blt, fls, bls, frt, brt, frs, brs):\n force_flt = Point.Point(flt.wrench.force.x, flt.wrench.force.y, flt.wrench.force.z)\n force_blt = Point.Point(blt.wrench.force.x, blt.wrench.force.y, blt.wrench.force.z)\n force_fls = 
Point.Point(fls.wrench.force.x, fls.wrench.force.y, fls.wrench.force.z)\n force_bls = Point.Point(bls.wrench.force.x, bls.wrench.force.y, bls.wrench.force.z)\n force_frt = Point.Point(frt.wrench.force.x, frt.wrench.force.y, frt.wrench.force.z)\n force_brt = Point.Point(brt.wrench.force.x, brt.wrench.force.y, brt.wrench.force.z)\n force_frs = Point.Point(frs.wrench.force.x, frs.wrench.force.y, frs.wrench.force.z)\n force_brs = Point.Point(brs.wrench.force.x, brs.wrench.force.y, brs.wrench.force.z)\n\n self._left_leg.hip.force = force_flt\n self._left_leg.knee.force = force_fls\n self._right_leg.hip.force = force_frt\n self._right_leg.knee.force = force_frs\n\n # self._left_thigh_sensorF = force_flt\n # self._left_thigh_sensorB = force_blt\n # self._left_shank_sensorF = force_fls\n # self._left_shank_sensorB = force_bls\n # self._right_thigh_sensorF = force_frt\n # self._right_thigh_sensorB = force_brt\n # self._right_shank_sensorF = force_frs\n # self._right_shank_sensorB = force_brs\n\n def foot_sensor_callback(self, lf1, lf2, lf3, rf1, rf2, rf3):\n force_lf1 = Point.Point(lf1.wrench.force.x, lf1.wrench.force.y, lf1.wrench.force.z)\n force_lf2 = Point.Point(lf2.wrench.force.x, lf2.wrench.force.y, lf2.wrench.force.z)\n force_lf3 = Point.Point(lf3.wrench.force.x, lf3.wrench.force.y, lf3.wrench.force.z)\n force_rf1 = Point.Point(rf1.wrench.force.x, rf1.wrench.force.y, rf1.wrench.force.z)\n force_rf2 = Point.Point(rf2.wrench.force.x, rf2.wrench.force.y, rf2.wrench.force.z)\n force_rf3 = Point.Point(rf3.wrench.force.x, rf3.wrench.force.y, rf3.wrench.force.z)\n\n self.left_foot_force_sensor = [force_lf1, force_lf2, force_lf3]\n self.right_foot_force_sensor = [force_rf1, force_rf2, force_rf3]\n\n def calculate_dynamics(self, qdd):\n tau = np.asarray([0.0] * self._joint_num)\n rbdl.InverseDynamics(self.rbdl_model, self.q[0:6], self.qd[0:6], qdd[0:6], tau)\n return tau\n\n def grav(self, q ):\n tau = np.asarray([0.0] * self._joint_num)\n qd = qdd = np.asarray([0.0] * self._joint_num)\n rbdl.InverseDynamics(self.rbdl_model, q, qd, qdd, tau)\n return tau\n\n def dynamic_model(self):\n # add in mass and height params\n model = rbdl.Model()\n bodies = {}\n mass = {}\n com = {}\n inertia = {}\n bodies[\"right\"] = {}\n bodies[\"left\"] = {}\n segments = [\"thigh\", \"shank\", \"foot\"]\n\n mass[\"hip\"] = 2.37\n mass[\"right_thigh\"] = 2.11\n mass[\"left_thigh\"] = 2.11\n mass[\"right_shank\"] = 1.28\n mass[\"left_shank\"] = 1.28\n mass[\"right_foot\"] = 0.86\n mass[\"left_foot\"] = 0.86\n parent_dist = {}\n\n parent_dist[\"hip\"] = np.array([0.0, 0.0, 0.0])\n\n parent_dist[\"left_thigh\"] = np.array([0.237, -0.124, -0.144])\n parent_dist[\"left_shank\"] = np.array([0.033, -0.03, -0.436])\n parent_dist[\"left_foot\"] = np.array([0.02, -0.027, -0.39])\n\n parent_dist[\"right_thigh\"] = np.array([-0.237, -0.124, -0.144])\n parent_dist[\"right_shank\"] = np.array([0.033, -0.03, -0.436])\n parent_dist[\"right_foot\"] = np.array([0.02, -0.027, -0.39])\n\n\n\n inertia[\"hip\"] = np.diag([ 0.0,0.0,0.0])\n\n inertia[\"left_thigh\"] = np.diag([0.0, 0.0, 0.07])\n inertia[\"left_shank\"] = np.diag([0.18, 0.18, 0.0])\n inertia[\"left_foot\"] = np.diag([0.07, 0.07, 0.0])\n\n inertia[\"right_thigh\"] = np.diag([0.0, 0.00, 0.07])\n inertia[\"right_shank\"] = np.diag([0.18, 0.18, 0.0])\n inertia[\"right_foot\"] = np.diag([0.07, 0.07, 0.0])\n\n com[\"hip\"] = np.array([0.00, -0.02, 0.18])\n com[\"left_thigh\"] = np.array([0.02, 0.01, -0.09])\n com[\"left_shank\"] = np.array([-0.02, -0.007, 0.06])\n com[\"left_foot\"] = 
np.array([0.08, -0.06, 0.04])\n\n com[\"right_thigh\"] = np.array([-0.02, 0.01, -0.09])\n com[\"right_shank\"] = np.array([0.02, -0.007, 0.06])\n com[\"right_foot\"] = np.array([0.08, -0.06, 0.04])\n\n hip_body = rbdl.Body.fromMassComInertia(mass[\"hip\"], com[\"hip\"], inertia[\"hip\"])\n for segs in segments:\n bodies[\"right_\" + segs] = rbdl.Body.fromMassComInertia(mass[\"right_\" + segs], com[\"right_\" + segs], inertia[\"right_\" + segs])\n bodies[\"left_\" + segs] = rbdl.Body.fromMassComInertia(mass[\"left_\" + segs], com[\"left_\" + segs], inertia[\"left_\" + segs])\n\n xtrans = rbdl.SpatialTransform()\n xtrans.r = np.array([0.0, 0.0, 0.0])\n xtrans.E = np.eye(3)\n\n self.hip = model.AddBody(0, xtrans, rbdl.Joint.fromJointType(\"JointTypeFixed\"), hip_body,\"hip\")\n joint_rot_z = rbdl.Joint.fromJointType(\"JointTypeRevoluteX\")\n\n xtrans.r = parent_dist[\"left_thigh\"]\n self.left_thigh = model.AddBody(self.hip, xtrans, joint_rot_z, bodies[\"left_thigh\"], \"left_thigh\")\n xtrans.E = np.eye(3)\n xtrans.r = parent_dist[\"left_shank\"]\n self.left_shank = model.AddBody(self.left_thigh, xtrans, joint_rot_z, bodies[\"left_shank\"], \"left_shank\")\n xtrans.r = parent_dist[\"left_foot\"]\n self.left_foot = model.AddBody(self.left_shank, xtrans, joint_rot_z, bodies[\"left_foot\"], \"left_foot\")\n\n xtrans.r = parent_dist[\"right_thigh\"]\n self.right_thigh = model.AddBody(self.hip, xtrans, joint_rot_z, bodies[\"right_thigh\"], \"right_thigh\")\n xtrans.E = np.eye(3)\n xtrans.r = parent_dist[\"right_shank\"]\n self.right_shank = model.AddBody(self.right_thigh, xtrans, joint_rot_z, bodies[\"right_shank\"], \"right_shank\")\n xtrans.r = parent_dist[\"right_foot\"]\n self.right_foot = model.AddBody(self.right_shank, xtrans, joint_rot_z, bodies[\"right_foot\"], \"right_foot\")\n\n model.gravity = np.array([0, 0, -9.81])\n\n # constraint_set_right = rbdl.ConstraintSet()\n # constraint_set_left = rbdl.ConstraintSet()\n # constraint_set_both = rbdl.ConstraintSet()\n #\n # constraint_set_right.AddContactConstraint(id_r, heel_point, np.array([1., 0., 0.]), \"right_heel_x\")\n # constraint_set_right.AddContactConstraint(id_r, heel_point, np.array([0., 1., 0.]), \"right_heel_y\")\n #\n # constraint_set_left.AddContactConstraint(id_l, heel_point, np.array([1., 0., 0.]), \"left_heel_x\")\n # constraint_set_left.AddContactConstraint(id_l, heel_point, np.array([0., 1., 0.]), \"left_heel_y\")\n #\n # constraint_set_both.AddContactConstraint(id_r, heel_point, np.array([1., 0., 0.]), \"right_heel_x\")\n # constraint_set_both.AddContactConstraint(id_r, heel_point, np.array([0., 1., 0.]), \"right_heel_y\")\n # constraint_set_both.AddContactConstraint(id_r, heel_point, np.array([0., 0., 1.]), \"right_heel_z\")\n #\n # constraint_set_both.AddContactConstraint(id_l, heel_point, np.array([1., 0., 0.]), \"left_heel_x\")\n # constraint_set_both.AddContactConstraint(id_l, heel_point, np.array([0., 1., 0.]), \"left_heel_y\")\n # constraint_set_both.AddContactConstraint(id_l, heel_point, np.array([0., 0., 1.]), \"left_heel_z\")\n #\n # constraint_set_right.Bind(model)\n # constraint_set_left.Bind(model)\n # constraint_set_both.Bind(model)\n\n x = []\n y = []\n return model\n\n def fk(self):\n fk = {}\n\n point_local = np.array([0.0, 0.0, 0.0])\n\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.left_thigh, point_local)\n fk[\"left_hip\"] = Point.Point(data[0], data[1], data[2])\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.left_shank, point_local)\n fk[\"left_knee\"] = 
Point.Point(data[0], data[1], data[2])\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.left_foot, point_local)\n fk[\"left_ankle\"] = Point.Point(data[0], data[1], data[2])\n\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.right_thigh, point_local)\n fk[\"right_hip\"] = Point.Point(data[0], data[1], data[2])\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.right_shank, point_local)\n fk[\"right_knee\"] = Point.Point(data[0], data[1], data[2])\n data = rbdl.CalcBodyToBaseCoordinates(self.rbdl_model, self.q, self.right_foot, point_local)\n fk[\"right_ankle\"] = Point.Point(data[0], data[1], data[2])\n\n q_left = self.get_left_leg().ankle.angle.z\n q_right = self.get_right_leg().ankle.angle.z\n fk[\"left_toe\"] = Point.Point(0, 0, 0)\n fk[\"left_toe\"].x = fk[\"left_ankle\"].x - 0.8 * (8.0 / 100.0) * self._height * np.cos(q_left)\n fk[\"left_toe\"].y = fk[\"left_ankle\"].y - 0.8 * (8.0 / 100.0) * self._height * np.cos(q_left)\n fk[\"left_toe\"].z = fk[\"left_ankle\"].z - 0.05 + 0.8 * (8.0 / 100.0) * self._height * np.sin(q_left)\n\n fk[\"left_heel\"] = Point.Point(0, 0, 0)\n fk[\"left_heel\"].x = fk[\"left_ankle\"].x + 0.2 * (8.0 / 100.0) * self._height * np.cos(q_left)\n fk[\"left_heel\"].y = fk[\"left_ankle\"].y + 0.2 * (8.0 / 100.0) * self._height * np.cos(q_left)\n fk[\"left_heel\"].z = fk[\"left_ankle\"].z - 0.05 + 0.2 * (8.0 / 100.0) * self._height * np.sin(q_left)\n\n fk[\"right_toe\"] = Point.Point(0, 0, 0)\n fk[\"right_toe\"].x = fk[\"right_ankle\"].x - 0.8 * (8.0 / 100.0) * 1.57 * np.cos(q_right)\n fk[\"right_toe\"].y = fk[\"right_ankle\"].y - 0.8 * (8.0 / 100.0) * 1.57 * np.cos(q_right)\n fk[\"right_toe\"].z = fk[\"right_ankle\"].z - 0.05 + 0.8 * (8.0 / 100.0) * self._height * np.sin(q_right)\n\n fk[\"right_heel\"] = Point.Point(0, 0, 0)\n fk[\"right_heel\"].x = fk[\"right_ankle\"].x + 0.2 * (8.0 / 100.0) * self._height * np.cos(q_right)\n fk[\"right_heel\"].y = fk[\"right_ankle\"].y + 0.2 * (8.0 / 100.0) * self._height * np.cos(q_right)\n fk[\"right_heel\"].z = fk[\"right_ankle\"].z - 0.05 + 0.2 * (8.0 / 100.0) * self._height * np.sin(q_right)\n\n return fk\n\n def stance_trajectory(self, tf=2, dt=0.01):\n hip = Model.get_traj(0.0, -0.5, 0.0, 0.0, tf, dt)\n knee = Model.get_traj(0.0, 0.50, 0.0, 0., tf, dt)\n ankle = Model.get_traj(-0.349, -0.2, 0.0, 0.0, tf, dt)\n return hip, knee, ankle\n\n def walk_init_trajectory(self, tf=2, dt=0.01):\n hip = Model.get_traj(0.0, 0.3234, 0.0, 0.0, tf, dt)\n knee = Model.get_traj(0.0, 0.815, 0.0, 0., tf, dt)\n ankle = Model.get_traj(-0.349, 0.07, 0.0, 0.0, tf, dt)\n return hip, knee, ankle\n\n def standing_to_sitting_trajectory(self, ip, tf=2, dt=0.01):\n # exo_hip = self.handle.get_pos()\n # exo_hip_x = Model.get_traj(exo_hip.x, exo_hip.x, 0.0, 0.0, tf, dt)\n # exo_hip_y = Model.get_traj(exo_hip.y, exo_hip.y + .5, 0.0, 0.0, tf, dt)\n # exo_hip_z = Model.get_traj(exo_hip.z, exo_hip.z - .25, 0.0, 0.0, tf, dt)\n hip = Model.get_traj(-0.5, -1.35, 0.0, 0.0, tf, dt)\n knee = Model.get_traj(0.5, 1.54, 0.0, 0., tf, dt)\n ankle = Model.get_traj(-0.2, -0.04, 0.0, 0.0, tf, dt)\n return hip, knee, ankle #, exo_hip_x, exo_hip_y, exo_hip_z,\n\n def sitting_to_standing_trajectory(self, ip, tf=2, dt=0.01):\n # -0.66\n #.77\n #-0.32\n hip = Model.get_traj(-1.35, -0.5, 0.0, 0.0, tf, dt)\n knee = Model.get_traj(1.54, 0.5, 0.0, 0.0, tf, dt)\n ankle = Model.get_traj(-0.04, -0.2, 0.0, 0.0, tf, dt)\n return hip, knee, ankle\n\n def get_runner(self):\n project_root = dirname(dirname(__file__))\n 
config_path = join(project_root, 'config/gotozero.pickle')\n return TPGMMRunner.TPGMMRunner(config_path)\n\n def get_walker(self):\n project_root = dirname(dirname(__file__))\n config_path = join(project_root, 'config/walk2.pickle')\n return TPGMMRunner.TPGMMRunner(config_path)\n\n def linearize(self):\n pass\n\n def state(self, q, qd ):\n self.get_left_leg.hip.angle.z = q[0]\n self.get_left_leg.knee.angle.z = q[1]\n self.get_left_leg.ankle.angle.z = q[2]\n\n self.get_right_leg.hip.angle.z = q[3]\n self.get_right_leg.knee.angle.z = q[4]\n self.get_right_leg.ankle.angle.z = q[5]\n\n def get_right_leg(self):\n \"\"\"\n :return:\n \"\"\"\n return self._right_leg\n\n def get_left_leg(self):\n \"\"\"\n :return:\n \"\"\"\n return self._left_leg\n\n\n # def get_leg_sensors(self):\n # left_leg_sensors = [self._left_leg.hip.force, self._left_leg.knee.force]\n # right_leg_sensors = [self._right_leg.hip.force, self._right_leg.knee.force]\n # return left_leg_sensors, right_leg_sensors\n\n @property\n def left_foot_force_sensor(self):\n return self._left_foot_force_sensor\n\n @property\n def right_foot_force_sensor(self):\n return self._right_foot_force_sensor\n\n @left_foot_force_sensor.setter\n def left_foot_force_sensor(self, value):\n self._left_foot_force_sensor = value\n\n @right_foot_force_sensor.setter\n def right_foot_force_sensor(self, value):\n self._right_foot_force_sensor = value\n\n # def get_foot_sensors(self):\n # left_foot_sensors = [self._left_foot_sensor1, self._left_foot_sensor2, self._left_foot_sensor3]\n # right_foot_sensors = [self._right_foot_sensor1, self._right_foot_sensor2, self._right_foot_sensor3]\n # return left_foot_sensors, right_foot_sensors\n\n\n def leg_inverse_kinimatics(self, toe, hip_location):\n\n l1 = 436.0\n l2 = 390.0\n l3 = 98.0\n l4 = 217.0\n\n x = toe[0] - hip_location[0] - abs(l4)\n y = toe[1] - hip_location[1] + abs(l3)\n\n num = x*x + y*y - l1**2 - l2**2\n dem = 2*l1*l2\n\n q2 = np.arctan2(-np.sqrt(1 - (num / dem)**2), (num / dem))\n q2 = np.nan_to_num(q2)\n q1 = -(np.nan_to_num(np.arctan2(y, x) - np.arctan2(l2*np.sin(q2), l1 + l2*np.cos(q2))) + 0.5*np.pi)\n q3 = -(np.nan_to_num(2*np.pi - q1 - q2) - 2*np.pi) + 0.75*np.pi\n\n return [q1, -q2, q3]\n\n","sub_path":"Model/Exoskeleton.py","file_name":"Exoskeleton.py","file_ext":"py","file_size_in_byte":22100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"477959418","text":"isPrimeList = []\ndef primes_sieve2(limit):\n global isPrimeList\n isPrimeList = [True] * limit\n isPrimeList[0] = isPrimeList[1] = False\n\n for (i, isprime) in enumerate(isPrimeList):\n if isprime:\n yield i\n for n in range(i*i, limit, i): # Mark factors non-prime\n isPrimeList[n] = False\n","sub_path":"primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"187311725","text":"class Solution(object):\n def removeInvalidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n \"\"\"\n Analysis:\n this is a backtrack problem \n \"\"\"\n left, right = 0, 0\n for c in s:\n if c ==\"(\":\n left += 1\n elif c == \")\":\n if left != 0:\n left -= 1\n else:\n right += 1\n res = set()\n self.backtrack(s, 0, left, right, 0, \"\", res)\n return list(res)\n \n def backtrack(self, s, i, left, right, pair, path, res):\n if i == len(s):\n if pair == 0 and left == 0 and right == 0:\n res.add(path)\n return\n \n if s[i] == \"(\":\n if left > 0:\n 
self.backtrack(s, i+1, left-1, right, pair, path, res)\n self.backtrack(s, i+1, left, right, pair+1, path+s[i], res)\n elif s[i] == \")\":\n if right > 0:\n self.backtrack(s, i+1, left, right-1, pair, path, res)\n if pair > 0:\n self.backtrack(s, i+1, left, right, pair-1, path+s[i], res)\n else:\n self.backtrack(s, i+1, left, right, pair, path+s[i], res)\n\n","sub_path":"backtrack_recursion/301. Remove Invalid Parentheses.py","file_name":"301. Remove Invalid Parentheses.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"454383657","text":"import pandas as pd\nsnps=pd.read_csv(\"/srv1/scratch/media/gecco/snps.hammock.gz\",header=None,sep='\\t')\noutf=open('snps.formatted.bed','w')\nfor index,row in snps.iterrows():\n if index%100000==0:\n print(index) \n chrom=row[0]\n start=str(row[1])\n end=str(row[2])\n metadata=row[3].split(',')\n name=metadata[1].split(':')[1].strip('\"')\n pval=str(round(float(metadata[2].split(':')[1].split('[')[1]),2))\n beta=str(round(float(metadata[3].split(']')[0]),2))\n freq=str(round(float(metadata[6].split('=')[1]),2))\n out='\\t'.join([chrom,start,end,name,pval,beta,freq])\n #print(out)\n outf.write(out+'\\n')\noutf.close()\n\n","sub_path":"format.snp.names.py","file_name":"format.snp.names.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"153536294","text":"import networkx as nx\nimport numpy as np\nfrom drawGraph import networker\n\ndata_path = './selected_edges.csv'\n\nnetworker = networker(data_path)\n# networker.data_loader()\n# edges = networker.edges\n# networker.build_graph()\n\nclass Subsets(object):\n\n def __init__(self,networker):\n self.networker = networker\n self.networker.data_loader()\n self.edges = networker.edges\n self.networker.build_graph()\n\n\n def get_subsets(self, whole_set):\n self.whole_set = whole_set\n output = [[]]\n for i in whole_set:\n output.extend([subset + [i] for subset in output])\n subsets_list = [set(x) for x in output[1:-1]]\n return subsets_list\n def get_path_subedges_set(self,edges,subset):\n '''\n alphas: coefficient list\n '''\n alphas = np.zeros((len(self.whole_set),)).tolist()\n if len(subset)==1:\n return alphas\n else:\n edges_subset = []\n for i in edges:\n if i[0] in subset and i[1] in subset:\n edges_subset.append(i)\n if len(edges_subset)==0:\n return alphas\n else:\n self.networker.edges = edges_subset\n self.networker.build_graph()\n G_adjacency = self.networker.build_adjacency()\n for i in range(len(subset)):\n alphas[i] = (np.sum(G_adjacency**i)-np.trace(G_adjacency))/2\n return alphas\n def get_listOfAlphas(self,edges,whole_set):\n subsets_list = self.get_subsets(whole_set)\n alphas_list = []\n for i in subsets_list:\n alphas_list.append(self.get_path_subedges_set(edges,i))\n return subsets_list,alphas_list\n\n\n\n\n \n\nif __name__ == \"__main__\":\n\n whole_set = [1,2,3]\n subsets = Subsets(networker)\n subsets_list = subsets.get_subsets(whole_set)\n print(subsets_list)\n\n # whole_set = nx.nodes(networker.graph)\n # print(whole_set)\n # subsets = Subsets()\n # subsets_list = subsets.get_subsets(whole_set)\n # print(subsets_list)\n\n","sub_path":"subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"91379929","text":"from __future__ import unicode_literals\n\nimport 
inspect\nimport os\nimport shutil\nimport tempfile\n\nimport numpy\nfrom raster.models import Legend, LegendEntry, LegendSemantics, RasterLayer\n\nfrom django.core.files import File\nfrom django.test import TestCase\nfrom raster_aggregation.models import AggregationLayer\nfrom raster_aggregation.tasks import aggregation_layer_parser\n\n\nclass RasterAggregationTestCase(TestCase):\n\n def setUp(self):\n # Instantiate Django file instances with nodes and links\n self.pwd = os.path.dirname(\n os.path.abspath(\n inspect.getfile(inspect.currentframe())\n )\n )\n\n self.rasterfile = File(open(os.path.join(self.pwd, 'data/raster.tif.zip'), 'rb'), name='raster.tif.zip')\n shapefile = File(open(os.path.join(self.pwd, 'data/shapefile.zip'), 'rb'), name='shapefile.zip')\n\n self.media_root = tempfile.mkdtemp()\n\n with self.settings(MEDIA_ROOT=self.media_root):\n # Create raster layer\n self.rasterlayer = RasterLayer.objects.create(\n name='Raster data',\n description='Small raster for testing',\n datatype='ca',\n nodata='0',\n rasterfile=self.rasterfile\n )\n self.empty_rasterlayer = RasterLayer.objects.create(\n name='Raster data',\n description='Small raster for testing',\n datatype='ca',\n nodata='0',\n rasterfile=self.rasterfile\n )\n self.empty_rasterlayer.rastertile_set.all().delete()\n\n # Create aggregation layer\n self.agglayer = AggregationLayer.objects.create(\n name='My Aggregation Layer',\n name_column='Name',\n shapefile=shapefile\n )\n\n # Push aggregation layer parsing.\n aggregation_layer_parser(self.agglayer.id)\n\n # Create legend semantics.\n sem1 = LegendSemantics.objects.create(name='Earth')\n sem2 = LegendSemantics.objects.create(name='Wind')\n sem3 = LegendSemantics.objects.create(name='Fire')\n\n # Create legends.\n self.legend_float = Legend.objects.create(title='Float key legend')\n LegendEntry.objects.create(semantics=sem1, expression='4', color='#123456', legend=self.legend_float, code='1')\n LegendEntry.objects.create(semantics=sem2, expression='2', color='#654321', legend=self.legend_float, code='2')\n\n self.legend_exp = Legend.objects.create(title='Expression key legend')\n LegendEntry.objects.create(semantics=sem3, expression='(x >= 2) & (x < 5)', color='#123456', legend=self.legend_exp, code='1')\n\n # Compute expected totals from numpy value count\n self.expected = {}\n for tile in self.rasterlayer.rastertile_set.filter(tilez=11):\n val, counts = numpy.unique(tile.rast.bands[0].data(), return_counts=True)\n for pair in zip(val, counts):\n if str(pair[0]) in self.expected:\n self.expected[str(pair[0])] += pair[1]\n else:\n self.expected[str(pair[0])] = pair[1]\n\n # Pop the nodata value, aggregation values are computed on masked arrays\n self.expected.pop('0')\n\n def tearDown(self):\n shutil.rmtree(self.media_root)\n","sub_path":"tests/aggregation_testcase.py","file_name":"aggregation_testcase.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"316517975","text":"import requests, bs4\n\n\nsearchUrl = \"http://cafe.naver.com/CafeMemberNetworkArticleList.nhn?clubid=29066286&search.clubid=29066286&search.writerid=syk3428&search.page=%d\"\narticleUrl = \"http://cafe.naver.com/ArticleRead.nhn?clubid=29066286&articleid=%d&networkMemberId=syk3428&networkSearchKey=Article&networkSearchType=7&networkSearchPage=8\"\n# clubid : Cafe Number // search.clubid : Cafe Number\n# search.writerid : UserID\n# search.page : page\n\ncookies = {\"NNB\" : \"RVDVUJ2TQBZVS\",\n \"nx_ssl\" : 
\"2\",\n \"npic\" : \"v4ZNclZNaAhMntlMt99PwE4WNPWWpRjlQBN/Km51e3lStSGnh+YJ/ZILpcv5k7MACA==\",\n \"ncvid\" : \"#vid#_123.212.39.53tPgf\",\n \"nci4\" : \"f7c927081b55308cd13f786f6291eb8c2d7e9e783073966a987a6187f29745bc42bc04d50145328eb069439d4a83207cadada0817d46200237f651a7697ee3134b45486f4e7f324342654071504a5a795d632f5f56715465156568486c5d1f6867437a4b78777e597c4d0f0f090e0a72767301022500317b14656466087d600d7e6d27\",\n \"nid_inf\" : \"865173077\",\n \"NID_AUT\" : \"e4q0SpuRHXfE/Szbkw/bbwQKuLEepKQTuTv9R90s+zUnpLTVPJGGelI6boaDynhQ\",\n \"NID_SES\" : \"AAABXJC5eO5yuHHlaiB2LF2x7ntRn/3UEfpUsgGaHXkktrOryCxcfr1Wbd4dYuuwhJ/TF1mi8O3q8uPvDNjBLJ8jGQ2VsfTe7WE6ajr2DAAEEQD4w0p9jgqTgvt1q9tEL5bW/1/SD2GB5yWNeNW5pdCEoK4yxkRHmvCITM5ww3Z12XcW7pzxh22lCMZvlTtG+C4WhGJfH/1taCSfDIP3cIeMLD6+Os0jEkKIvRjQ/PzgAwu4ZK4gNrid4IM67BlqF2tegieOVHqcM97bszpNZ/xDWYaR1gONdjIfYjf9bHZ5SBZYjqTpc8z9yYs2nd1Hb0xEUv0QtzAmuEF4T6BEwen37Eftk4WCXvFOzhCmZj63/NiRBbUXXZlRj7ZIV8dzpLzKaHs1MqOfxbB5MBljD95wTOpoSv86+nv9qQPj0SjS/jEx5Lnu1m7D4sOtfjaZCUEQlrYtg7l+VCFbJW4dfHpkw9o=\",\n \"ncu\" : \"83b7022338756cd0dd1b4a207ab2eff9be\",\n \"ncmc4\" : \"89b75976652b4ef2af4106111ce69db61e44bd297200c72ae62c1fef7063a777b6708c7bf9c5540078c2b759e309c676e1fbdee9252c7354688b0be769b1\",\n \"personaconmain|bc1916\" : \"EAE11B32508CF3CE4A030B7C3217AD76C41080A269BB226E\",\n \"personacon|bc1916\" : \"3FFFAF7E3BDAD5901B3C51E6429837F350F7A6ED245C4E8CA43DE7D4FDC9F3E6\",\n \"ncvc2\" : \"97a316372c6114b8987116271ef286e55817f7124215f429d51577\",\n \"JSESSIONID\" : \"4468AA33551B4DC8334E57CC52A23D6E\"}\n\nclass Selecter:\n articleDate = '#post_%d > div > div.tit-box > div.fr > table > tbody > tr > td.m-tcol-c.date'\n articleNo = 'm-tcol-c list-count'\n bodyXpath = '//*[@id=\"tbody\"]/div'\n body = 'tbody m-tcol-c'\n lastPage = 'on'\n title = 'board-list'\n qna = 'list-blog border-sub'\n qnaDate = 'm-tcol-c date'\n\nframeReq = requests.get(searchUrl%999999999, cookies=cookies)\n\nprint(\" Frame Body \", frameReq.status_code)\n\n\n\nsoup = bs4.BeautifulSoup(frameReq.text, \"html.parser\")\nlastPage = int(soup.find(\"td\", attrs={\"class\":Selecter.lastPage}).a.text)\n\n\narticleNum = {}\nfor n in range(1, lastPage+1):\n soup = bs4.BeautifulSoup(requests.get(searchUrl%n, cookies=cookies).text, \"html.parser\")\n num = [ tag.text for tag in soup.find_all(\"span\", attrs={\"class\":Selecter.articleNo})]\n #titles = [ tag.text.replace(\"\\n\",\"\").replace(\"\\xa0\",\"\") for tag in soup.find_all(\"td\", attrs={\"class\":Selecter.title})]\n titles = [ tag.text for tag in soup.find_all(\"td\", attrs={\"class\":Selecter.title})]\n\n for n in range(len(num)):\n articleNum[int(num[n])] = titles[n]\n\nfor no in articleNum.keys():\n title = articleNum[no]\n print(no)\n if \"A.\\n\\n\" in title: # IF A\n soup = bs4.BeautifulSoup(requests.get(articleUrl%no, cookies=cookies).text, \"html.parser\")\n qna = soup.find(\"div\", attrs={\"class\":Selecter.qna, \"id\":\"post_%d\"%no})\n date = qna.find(\"td\", attrs={\"class\":Selecter.qnaDate}).text\n bodyLen = len(qna.find(\"div\", attrs={\"class\":\"tbody m-tcol-c\",\"id\":\"tbody\"}).text.replace(\"\\n\",\"\").replace(\"\\r\",\"\").replace(\"\\t\",\"\"))\n\n articleNum[no] = {}\n articleNum[no]['type'] = 'A'\n articleNum[no]['title'] = title\n articleNum[no]['date'] = date\n articleNum[no]['bodySize'] = bodyLen\n del soup, qna, date, bodyLen, title\n\n elif \"Q.\\n\\n\" in title: # IF Q\n soup = bs4.BeautifulSoup(requests.get(articleUrl%no, cookies=cookies).text, \"html.parser\")\n date = soup.find(\"td\", 
attrs={\"class\":Selecter.qnaDate}).text\n bodyLen = len(soup.find(\"div\", attrs={\"class\": Selecter.body, \"id\": \"tbody\"}).text)\n articleNum[no] = {}\n articleNum[no]['type'] = 'Q'\n articleNum[no]['title'] = title\n articleNum[no]['date'] = date\n articleNum[no]['bodySize'] = bodyLen\n del soup, date, bodyLen, title\n\n else:\n soup = bs4.BeautifulSoup(requests.get(articleUrl % no, cookies=cookies).text, \"html.parser\")\n date = soup.select(Selecter.articleDate%no)[0].text\n bodyLen = len(soup.find(\"div\", attrs={\"class\":Selecter.body, \"id\":\"tbody\"}).text.replace(\"건의사항 작성 전 잠깐!1. 본 공지사항을 읽어주세요: http://cafe.naver.com/kkutukorea/6402. 단어추가 신청은 \\\"단어추가요청\\\" 게시판에서 신청해주세요.:: 해당 되는 내용을 굵은 글씨로 표시해주세요. ::[ 끄투코리아 항목 ]시스템 / 버그 제보 / 게임 유형 / 특수 규칙모드 / 의상 및 휘장 / 아이템 / 기타[ 공식카페 항목 ]게시판 / 시스템 / 디자인 / 기타[ 공식 디스코드 항목 ]채팅 및 통화방 / 시스템 / 기타  [ 이외의 항목 ]기타\",\"\"))\n articleNum[no] = {}\n articleNum[no]['type'] = 'normal'\n articleNum[no]['title'] = title\n articleNum[no]['date'] = date\n articleNum[no]['bodySize'] = bodyLen\n del soup, date, bodyLen, title","sub_path":"R3turn-ExtraScripts/카페 게시글디텍터/body.py","file_name":"body.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"443470843","text":"import sys\nclass Node:\n\n def __init__(self, name, number):\n self.name = name\n self.number = number\n self.visited = False\n self.adjacenciesList = []\n self.predecessor = None\n self.mindistance = sys.maxsize\n\n def __lt__(self, other):\n return self.mindistance < other.mindistance\n","sub_path":"HW3_2/NodeClass.py","file_name":"NodeClass.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"160311508","text":"n=int(input())\nd={}\ny='No idea'\nfor i in range(n-1):\n\ta,b=[int(i) for i in input().split()]\n\tif d.get(a,-1)==-1:\n\t\td[a]=[]\n\td[a].append(b)\n\t\nfor i in d:\n\ta=i\n\tb=d[i] # a list\n\tfor i in b:\n\t\tif d.get(i,-1)!=-1:\n\t\t\tfor k in d[i]:\n\t\t\t\td[a].append(k)\nfor i in d:\n\tprint(str(i),':',str(d[i]))\nq=int(input())\nfor i in range(q):\n\ta,b=[int(i) for i in input().split()]\n\tchk=0\n\tif (d.get(a,-1)==-1 or b not in d[a]) and (d.get(b,-1)==-1 or a not in d[b]): # a->b and b->a\n\t\tprint(y)\n\telse:\n\t\tif d.get(a,-1)!=-1 and b in d[a]:\n\t\t\tprint(str(a),'taught',str(b))\n\t\telse:\n\t\t\tprint(str(b),'taught',str(a))","sub_path":"locmay/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"21946097","text":"try:\n import debug_settings\nexcept:\n pass\n\n\nimport unittest\nimport torch\nimport os\nimport numpy as np\nimport gym\nfrom gym import spaces\nimport matplotlib\nimport time\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\n\n# BARK imports\nfrom bark.runtime.commons.parameters import ParameterServer\n\n# BARK-ML imports\nfrom bark_ml.environments.blueprints import \\\n DiscreteHighwayBlueprint, DiscreteMergingBlueprint\nfrom bark_ml.environments.single_agent_runtime import SingleAgentRuntime\nimport bark_ml.environments.gym\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import FQFAgent, IQNAgent\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.model import IQN\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.network import initialize_weights_he\nfrom bark_ml.observers.nearest_state_observer 
import NearestAgentsObserver\nfrom bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML\n\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss,\\\n calculate_supervised_classification_quantile_loss, calculate_huber_loss, \\\n evaluate_quantile_at_action, get_margin_loss, update_params\n\n\ndef set_grad(var):\n def hook(grad):\n var.grad = grad\n return hook\n\nclass TestDQN(nn.Module):\n\n def __init__(self, num_channels, hidden=4, embedding_dim=1):\n super(TestDQN, self).__init__()\n\n self.net = nn.Sequential(\n torch.nn.Linear(num_channels, hidden),\n torch.nn.Linear(hidden, embedding_dim),\n ).apply(initialize_weights_he)\n\n self.embedding_dim = embedding_dim\n\n def forward(self, states):\n batch_size = states.shape[0]\n # Calculate embeddings of states.\n state_embedding = self.net(states)\n assert state_embedding.shape == (batch_size, self.embedding_dim)\n\n return state_embedding\n\n\nclass TestCosineEmbeddingNet(nn.Module):\n\n def __init__(self, num_cosines=4, embedding_dim=1, noisy_net=False):\n super(TestCosineEmbeddingNet, self).__init__()\n linear = nn.Linear\n\n self.net = nn.Sequential(linear(num_cosines, embedding_dim), nn.ReLU())\n self.num_cosines = num_cosines\n self.embedding_dim = embedding_dim\n\n def forward(self, taus):\n batch_size = taus.shape[0]\n N = taus.shape[1]\n\n # Calculate i * \\pi (i=1,...,N).\n i_pi = np.pi * torch.arange(start=1,\n end=self.num_cosines + 1,\n dtype=taus.dtype,\n device=taus.device).view(\n 1, 1, self.num_cosines)\n\n # Calculate cos(i * \\pi * \\tau).\n cosines = torch.cos(taus.view(batch_size, N, 1) * i_pi).view(\n batch_size * N, self.num_cosines)\n\n # Calculate embeddings of taus.\n tau_embeddings = self.net(cosines).view(batch_size, N, self.embedding_dim)\n\n return tau_embeddings\n\n\nclass TestQuantileNet(nn.Module):\n\n def __init__(self, num_actions, embedding_dim=1, noisy_net=False):\n super(TestQuantileNet, self).__init__()\n linear = nn.Linear\n\n self.net = nn.Sequential(\n linear(embedding_dim, 4),\n nn.ReLU(),\n linear(4, num_actions),\n )\n self.num_actions = num_actions\n self.embedding_dim = embedding_dim\n self.noisy_net = noisy_net\n\n def forward(self, state_embeddings, tau_embeddings):\n assert state_embeddings.shape[0] == tau_embeddings.shape[0]\n assert state_embeddings.shape[1] == tau_embeddings.shape[2]\n\n # NOTE: Because variable taus correspond to either \\tau or \\hat \\tau\n # in the paper, N isn't neccesarily the same as fqf.N.\n batch_size = state_embeddings.shape[0]\n N = tau_embeddings.shape[1]\n\n # Reshape into (batch_size, 1, embedding_dim).\n state_embeddings = state_embeddings.view(batch_size, 1, self.embedding_dim)\n\n # Calculate embeddings of states and taus.\n embeddings = (state_embeddings * tau_embeddings).view(\n batch_size * N, self.embedding_dim)\n\n # Calculate quantile values.\n quantiles = self.net(embeddings)\n\n return quantiles.view(batch_size, N, self.num_actions)\n\n\nclass TestIQN(IQN):\n\n def __init__(self, num_channels, num_actions, params, num_cosines, noisy_net):\n super(TestIQN, self).__init__(num_channels, num_actions, params, num_cosines, noisy_net)\n self.K = 64\n self.N = 64\n self.N_dash = 64\n self.embedding_dim = 1\n # Feature extractor of DQN.\n self.dqn_net = TestDQN(num_channels=num_channels,\n embedding_dim=self.embedding_dim,\n hidden=4)\n # Cosine embedding network.\n self.cosine_net = TestCosineEmbeddingNet(num_cosines=num_cosines,\n embedding_dim=self.embedding_dim,\n noisy_net=noisy_net)\n # Quantile 
network.\n self.quantile_net = TestQuantileNet(num_actions=num_actions,\n embedding_dim=self.embedding_dim,\n noisy_net=noisy_net)\n\n\nclass LossTests(unittest.TestCase):\n\n def test_quantile_huber_loss(self):\n td_errors = torch.zeros((1, 2, 1))\n td_errors[:, 0, :] = 0.0\n td_errors[:, 1, :] = 2.0\n taus = torch.rand(1, 2)\n kappa = 1.0\n quantile_huber_loss = calculate_huber_loss(td_errors, kappa=kappa).squeeze()\n assert quantile_huber_loss[0] == 0.0\n assert quantile_huber_loss[1] == kappa * (td_errors[0, 1, 0] - 0.5 * kappa)\n\n def test_supervised_margin_loss(self):\n expert_margin = 0.8\n supervised_loss_weight = 0.5\n num_actions = 2\n batch_size = 1\n state_size = 1\n params = ParameterServer()\n states = torch.rand((batch_size, state_size))\n next_states = torch.rand((batch_size, state_size))\n actions = torch.zeros((batch_size, 1), dtype=torch.int64)\n actions[states >= 0.5] = 1.0\n is_demos = torch.zeros((batch_size, 1))\n is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0\n state_shape = spaces.Box(low=np.zeros(state_size), high=np.zeros(state_size))\n test_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params, \n num_cosines=4, noisy_net=False)\n taus = torch.rand(batch_size, test_iqn.N)\n state_embeddings = test_iqn.dqn_net(states)\n next_state_embeddings = test_iqn.dqn_net(next_states)\n\n supervised_classification_loss = calculate_supervised_classification_quantile_loss(actions,\n states, test_iqn, taus, state_embeddings, next_state_embeddings, is_demos, \n num_actions, 'cpu', supervised_loss_weight, expert_margin)\n resampled_batch_margin_loss = get_margin_loss(actions, num_actions, is_demos, expert_margin, 'cpu')\n recalculated_quantiles = test_iqn.calculate_quantiles(taus, state_embeddings=state_embeddings)\n recalculated_q = recalculated_quantiles.mean(dim=1)\n\n recalculated_loss = calculate_expert_loss(recalculated_q, resampled_batch_margin_loss, is_demos, \n actions, supervised_loss_weight * is_demos.squeeze())\n assert recalculated_loss.mean () == supervised_classification_loss\n\n def test_supervised_margin_loss_zero_states(self):\n params = ParameterServer()\n states = torch.zeros((1, 4))\n state_shape = spaces.Box(low=np.zeros(4), high=np.zeros(4))\n test_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=2, params=params, \n num_cosines=4, noisy_net=False)\n state_embeddings = test_iqn.dqn_net(states)\n assert(torch.all(state_embeddings == 0.0))\n \n def test_supervised_margin_loss_states(self):\n num_actions = 2\n params = ParameterServer()\n batch_size = 512\n state_size = 1\n state_shape = spaces.Box(low=np.zeros(state_size), high=np.zeros(state_size))\n online_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params, \n num_cosines=4, noisy_net=False)\n target_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params, \n num_cosines=4, noisy_net=False)\n optim = Adam(online_iqn.parameters(),\n lr=5.5e-3,\n eps=1e-2 / batch_size)\n online_iqn.train()\n target_iqn.train()\n states = torch.rand((batch_size, state_size))\n actions = torch.zeros((batch_size, 1), dtype=torch.int64)\n actions[states >= 0.5] = 1.0\n online_iqn.sample_noise()\n loss = Variable(requires_grad=True)\n\n is_demos = torch.zeros((batch_size, 1))\n is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0\n for i in range(100):\n is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0\n next_states = torch.rand((batch_size, state_size))\n state_embeddings = online_iqn.dqn_net(states)\n 
next_state_embeddings = target_iqn.dqn_net(states=next_states)\n\n # sample tau random quantiles from online network\n taus = torch.rand(batch_size, 4)\n current_sa_quantiles = evaluate_quantile_at_action(\n online_iqn.calculate_quantiles(taus,\n state_embeddings=state_embeddings),\n actions\n )\n current_q_values = online_iqn.calculate_q(states=states)\n online_iqn.sample_noise()\n next_q = online_iqn.calculate_q(states=next_states)\n next_actions = torch.argmax(next_q, dim=1, keepdim=True)\n tau_dashes = torch.rand(batch_size, 4)\n target_sa_quantiles = evaluate_quantile_at_action(\n target_iqn.calculate_quantiles(\n tau_dashes, next_state_embeddings\n ), next_actions\n ).transpose(1, 2)\n td_errors = target_sa_quantiles - current_sa_quantiles\n supervised_classification_loss = calculate_supervised_classification_quantile_loss(\n actions, states, online_iqn, tau_dashes, state_embeddings, next_state_embeddings, is_demos,\n num_actions, 'cpu', 0.5, 0.8\n )\n loss = supervised_classification_loss\n gradients = update_params(optim, loss, [online_iqn], retain_graph=True, count=i)\n states = next_states\n actions = next_actions\n if i % 25 == 0:\n target_iqn.load_state_dict(online_iqn.state_dict())\n assert loss == 0.0\n \n \nif __name__ == \"__main__\":\n unittest.main()","sub_path":"bark_ml/library_wrappers/lib_fqf_iqn_qrdqn/tests/test_loss_functions.py","file_name":"test_loss_functions.py","file_ext":"py","file_size_in_byte":10607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"352827867","text":"from backend.models import ExpertRating, Video, UserPreferences\r\nfrom backend.rating_fields import VIDEO_FIELDS\r\nfrom helpers import test_username, login, logout, create_test_video, TIME_WAIT, open_more_menu\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By # noqa: E402\r\nfrom selenium.webdriver.support import expected_conditions as EC # noqa: E402\r\nfrom selenium.webdriver.support.ui import WebDriverWait # noqa: E402\r\n\r\n\r\ndef test_my_ratings(driver, django_db_blocker):\r\n # creating a video\r\n\r\n login(driver)\r\n\r\n with django_db_blocker.unblock():\r\n me = UserPreferences.objects.get(user__username=test_username)\r\n video_id1 = create_test_video()\r\n video_id2 = create_test_video()\r\n video_1 = Video.objects.get(video_id=video_id1)\r\n video_2 = Video.objects.get(video_id=video_id2)\r\n ExpertRating.objects.create(video_1=video_1, video_2=video_2,\r\n **{k: 50 for k in VIDEO_FIELDS},\r\n user=me)\r\n\r\n open_more_menu(driver)\r\n\r\n WebDriverWait(driver, TIME_WAIT).until(\r\n EC.visibility_of_element_located((By.ID, 'video_details_menu')))\r\n\r\n print(\"Going to the details page\")\r\n expert_interface_btn = driver.find_element_by_id('video_details_menu')\r\n expert_interface_btn.click()\r\n\r\n WebDriverWait(driver, TIME_WAIT).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, 'video_id_text_field')))\r\n\r\n elem = driver.find_element_by_class_name('video_id_text_field')\r\n elem = elem.find_element_by_tag_name('input')\r\n\r\n elem.clear()\r\n elem.send_keys(video_id1, Keys.HOME)\r\n if elem.get_attribute('value') != video_id1:\r\n elem.send_keys(3 * [Keys.DELETE])\r\n\r\n WebDriverWait(driver, TIME_WAIT).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, 'button_video_ratings')))\r\n\r\n # opening ratings page\r\n driver.find_element_by_class_name('button_video_ratings').click()\r\n\r\n WebDriverWait(driver, TIME_WAIT).until(\r\n 
EC.presence_of_element_located((By.ID, 'id_my_ratings')))\r\n\r\n WebDriverWait(driver, TIME_WAIT).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, 'video_rating_video')))\r\n\r\n # have only 1 rating\r\n assert len(driver.find_elements_by_class_name('video_rating_video')) == 1\r\n\r\n # rerate\r\n driver.find_elements_by_class_name('video_rating_rerate')[0].click()\r\n\r\n # on the right page\r\n assert driver.current_url.split('/')[-2:] == [video_id1, video_id2]\r\n\r\n logout(driver)\r\n","sub_path":"integration_test/test_my_ratings.py","file_name":"test_my_ratings.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"256845341","text":"import torch\nimport numpy as np\nimport warnings\nimport torch.utils.data as Data\nfrom sklearn.preprocessing import StandardScaler\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport fire\n\nclass Configuration(object):\n def __init__(self):\n # data\n self.data_path = './nasdaq100_padding.csv'\n\n # hyperparameters\n self.epochs = 5000\n self.lr = 0.00005\n self.batch_size = 64\n self.weight_decay = 0.00005\n self.dropout = 0.1\n self.method = 'adam'\n self.encoder_hidden_size = 128\n self.decoder_hidden_size = 128\n self.conv_size = 128\n self.kernel_size = 2\n self.dropout=0.1\n\n # model\n self.T = 10\n self.use_gpu = True\n self.input_size = 81 # for nasdaq data\n self.num_workers = 4\n\n def _setting(self, kwargs=None):\n if kwargs:\n for k, v in kwargs.items():\n if not hasattr(self, k):\n warnings.warn('warning: Configuration has no attribute %s' % str(k))\n setattr(self, k, v)\n if self.use_gpu and torch.cuda.is_available():\n self.device = torch.device('cuda')\n else:\n self.device = torch.device('cpu')\n self._print()\n\n def _print(self):\n print('-'*10, '>'*3, 'User Config', '<'*3, '-'*10)\n for k, v in self.__dict__.items():\n if not k.startswith('_'):\n print(k, '=', v)\n print('-'*37)\n\n\nclass NasdaqDataset(Data.Dataset):\n def __init__(self, opt, is_train=True,):\n self.T = opt.T\n dat = pd.read_csv(opt.data_path)\n self.X = self.preprocess_X_data(dat.loc[:, [x for x in dat.columns.tolist() if x != 'NDX']])\n # self.X = dat.loc[:, [x for x in dat.columns.tolist() if x != 'NDX']].values\n # self.y, self.y_mean, self.y_std = self.preprocess_y_data(np.array(dat.NDX))\n self.scale = np.max(np.array(dat.NDX))\n self.y, self.y_mean, self.y_std = self.preprocess_y_data(np.array(dat.NDX))\n self.is_train = is_train\n self.train_len = int(0.7 * len(self.X))\n self.test_len = len(self.X) - self.train_len\n self.device = opt.device\n\n def __getitem__(self, item):\n if self.is_train:\n self.return_x = self.X[item: item + self.T, :]\n self.return_y_history = self.y[item: item + self.T - 1]\n self.return_y_target = self.y[item + self.T]\n else:\n self.return_x = self.X[self.train_len + item: self.train_len + item + self.T, :]\n self.return_y_history = self.y[self.train_len + item: self.train_len + item + self.T - 1]\n self.return_y_target = self.y[self.train_len + item + self.T]\n return self.return_x, self.return_y_history,\\\n self.return_y_target\n\n def __len__(self):\n if self.is_train:\n return self.train_len - self.T\n else:\n return self.test_len - self.T\n\n def preprocess_X_data(self, raw_data):\n scaler = StandardScaler().fit(raw_data)\n prerocess_data = scaler.transform(raw_data)\n return prerocess_data\n\n def preprocess_y_data(self, raw_data):\n return (raw_data - np.mean(raw_data)) / np.std(raw_data), 
np.mean(raw_data), np.std(raw_data)\n\ndef main(**kwargs):\n opt = Configuration()\n opt._setting(kwargs)\n\nif __name__ == '__main__':\n fire.Fire(main)","sub_path":"darnn-based/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"628236010","text":"# -*- coding: utf-8 -*-\n\"\"\"Setup/installation tests for this package.\"\"\"\n\nfrom collective.behavior.banner.testing import IntegrationTestCase\nfrom plone import api\n\n\nclass TestIntegration(IntegrationTestCase):\n \"\"\"Test integration of collective.behavior.banner into Plone.\"\"\"\n\n def setUp(self):\n \"\"\"Custom shared utility setup for tests.\"\"\"\n self.portal = self.layer['portal']\n self.installer = api.portal.get_tool('portal_quickinstaller')\n\n def test_css_available(self):\n cssreg = getattr(self.portal, 'portal_css')\n stylesheets_ids = cssreg.getResourceIds()\n self.failUnless(\"++resource++collective.behavior.banner/slider.css\" in stylesheets_ids)\n self.failUnless(\"++resource++collective.behavior.banner/banner.css\" in stylesheets_ids)\n","sub_path":"src/collective/behavior/banner/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"165347651","text":"import os\nimport sys\nimport json\nimport base64\nimport pathlib\nimport pandas as pd\nfrom io import StringIO\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\n\nsys.path.append(str(pathlib.Path(__file__).parent.parent.resolve() / 'aws_lambda'))\nsys.path.append(str(pathlib.Path(__file__).parent.parent.resolve()))\n\nfrom lambda_function import lambda_handler_norm, lambda_handler_clean\n\n\ndef test_norm_post():\n path_table = \"tests/test_small.csv\"\n mp_encoder = MultipartEncoder(\n fields={'file': ('filename', open(path_table, \"rb\"), 'text/csv'),\n })\n body = mp_encoder.to_string()\n print('form-data is :')\n print(body[:100])\n body = base64.b64encode(body)\n event = dict(httpMethod='POST',\n path='/norm',\n pathParameters=dict(table_ref_name='caradisiac'),\n headers={'content-type': mp_encoder.content_type},\n body=body)\n resp = lambda_handler_norm(event, None)\n body = resp['body']\n assert resp['statusCode'] == 200\n df_in = pd.read_csv(path_table, header=None)\n df_out = pd.read_csv(StringIO(body), header=None)\n print(df_in)\n print(df_out)\n assert df_in.shape[0] == df_out.shape[0]\n assert (df_in.shape[1] + 1) == df_out.shape[1]\n assert 'renault' in body, 'There is no renault in predictions {}'.format(body)\n\n\ndef test_norm_get():\n event = dict(httpMethod='GET',\n path='/norm/caradisiac',\n pathParameters=dict(table_ref_name='caradisiac'),\n queryStringParameters=dict(\n marque='renault',\n modele='clio')\n )\n resp = lambda_handler_norm(event, None)\n body = resp['body']\n print(body)\n assert resp['statusCode'] == 200\n assert 'modele' in json.loads(resp['body']).keys()\n assert 'renault' in json.loads(resp['body'])['marque'].lower()\n\n\ndef test_clean_get():\n event = dict(httpMethod='GET',\n path='/clean/caradisiac?marque=renault&modele=clioo',\n pathParameters=dict(table_ref_name='caradisiac'),\n queryStringParameters=dict(\n marque='renault',\n modele='clio')\n )\n resp = lambda_handler_clean(event, None)\n body = resp['body']\n print(body)\n assert resp['statusCode'] == 200\n assert 'modele' in json.loads(resp['body']).keys()\n assert 'renault' 
in json.loads(resp['body'])['marque'].lower()\n","sub_path":"tests/test_lambda.py","file_name":"test_lambda.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"56036888","text":"# Code originally from https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/transformer_chatbot.ipynb\nimport tensorflow as tf\n\nMAX_POSITION = 1000\n\nclass PositionalEncoding(tf.keras.layers.Layer):\n def __init__(self, d_model, position=None):\n super(PositionalEncoding, self).__init__()\n if position is None:\n position = MAX_POSITION\n self.pos_encoding = self.positional_encoding(position, d_model)\n\n def get_angles(self, position, i, d_model):\n angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))\n return position * angles\n\n def positional_encoding(self, position, d_model):\n angle_rads = self.get_angles(\n position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],\n i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],\n d_model=d_model,\n )\n # apply sin to even index in the array\n sines = tf.math.sin(angle_rads[:, 0::2])\n # apply cos to odd index in the array\n cosines = tf.math.cos(angle_rads[:, 1::2])\n\n pos_encoding = tf.concat([sines, cosines], axis=-1)\n pos_encoding = pos_encoding[tf.newaxis, ...]\n return tf.cast(pos_encoding, tf.float32)\n\n def call(self, inputs):\n return inputs + self.pos_encoding[:, : tf.shape(inputs)[1], :]\n\n","sub_path":"positional_encoding.py","file_name":"positional_encoding.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"149557755","text":"from django.shortcuts import render, redirect\nfrom .models import Orders, StatusOrder\nfrom database.models import Medicines, Category\nfrom .forms import OrdersForm, OrdersFilterForm\nfrom datetime import date\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef order_index(request):\n orders = Orders.objects.order_by('registration_date')\n\n active = StatusOrder.objects.get(status=\"Активен\")\n delivered = StatusOrder.objects.get(status=\"Доставлен\")\n expired = StatusOrder.objects.get(status=\"Просрочен\")\n\n for u in orders:\n\n entry = Orders.objects.get(pk=u.id)\n\n if u.received == delivered:\n continue\n elif u.extradition_date < date.today():\n stat = expired\n entry.received = stat\n elif u.extradition_date > date.today():\n stat = active\n entry.received = stat\n \n entry.save()\n\n filter = OrdersFilterForm(request.GET)\n\n if filter.is_valid():\n orders = Orders.objects.all()\n if filter.cleaned_data['status']:\n orders = orders.filter(received__exact=filter.cleaned_data['status'])\n if filter.cleaned_data['find_category']:\n orders = orders.filter(medicines__category__exact=filter.cleaned_data['find_category'])\n if filter.cleaned_data['find_medicines']:\n orders = orders.filter(medicines__exact=filter.cleaned_data['find_medicines'])\n\n return render(request, 'orders/orders.html', {'orders': orders, 'filter': filter }) \n\n@login_required\ndef add_order(request):\n if request.method == 'POST':\n form = OrdersForm(request.POST)\n if form.is_valid():\n item = Medicines.objects.get(pk=request.POST['medicines'])\n item.count = item.count - 1\n item.popularity = item.popularity + 1\n item.save()\n form.save()\n return redirect('orders')\n\n form = OrdersForm()\n \n \n return render(request, 'orders/add_order.html', { 'form': form 
})\n\n@login_required\ndef order_get(request, id):\n try:\n order = Orders.objects.get(id=id)\n stat = StatusOrder.objects.get(status=\"Доставлен\")\n order.received = stat\n order.save()\n return redirect('orders')\n except Orders.DoesNotExist:\n return redirect('orders')\n\n@login_required\ndef order_delete(request, id):\n try:\n order = Orders.objects.get(id=id)\n order.delete()\n return redirect('orders')\n except Orders.DoesNotExist:\n return redirect('orders')","sub_path":"pharmacy/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"167015870","text":"#import standard libraries\nimport os, shutil\nimport warnings\n\n#set path to java file\nos.environ['CLASSPATH'] = \"../jar/beats-0.1-SNAPSHOT-jar-with-dependencies.jar\"\n\n#import third-party libraries\nimport numpy as np\nfrom jnius import autoclass\n\n\nclass Beats():\n \"\"\"\n This class is responsible for wrapping the beats simulator, which is\n written in Java.\n \"\"\"\n def __init__(self, scenario_file='', param=None):\n \"\"\"\n Class constructor. Initializes object with\n\n --input--\n path_to_input: path to xml file containing the scenario data.\n \"\"\"\n self.java_beats = None\n\n #paths\n self.scenario = ''\n self.output_folder = ''\n\n #state attributes\n self.initialized=False\n self.simulation_done=False\n\n #simulation attributes\n self.sim_dt=0.0\n self.out_dt=0.0\n\n self.time = None\n self.density = None\n self.inflow = None\n self.outflow = None\n\n\n def __repr__(self):\n repr_output = ('scenario: {0}\\noutput_folder: {1}\\ninitialized: {2}\\nsimulation_done: {3}\\n'\n 'sim_dt: {4}\\nout_dt: {5}\\ntime: {6}\\ndensity_veh: {7}\\noutflow_veh: {8}\\n'\n 'inflow_veh: {9}\\n ').format(self.scenario, self.output_folder,\n self.initialized, self.simulation_done,\n self.sim_dt, self.out_dt, self.time,\n self.density, self.outflow, self.inflow)\n return repr_output\n\n\n def create_scenario(self, scenario_file):\n \"\"\"\n This method will initialize a beats object by reading a xml scenario file.\n This is done by calling a Java class in.\n\n --input--\n cfgfile: path to the xml file containing the scenario.\n\n param: dictionary containing the simulation's parameters.\n \"\"\"\n\n if not os.path.isfile(scenario_file):\n raise RuntimeError('Scenario file not found.')\n\n if not self.scenario:\n warnings.warn('Overwriting scenario file.', RuntimeWarning)\n\n\n # copy scenario to tmp folder\n scenario = open('../tmp/temp_scenario.xml', 'w') # open file\n\n shutil.copyfile(scenario_file, scenario.name) # copy scenario to tmp file\n\n scenario.close() # close file\n\n\n # record path to scenario\n self.scenario = scenario.name\n\n # load beats scenario using java implemented function.\n # Jaxb is a Java class with a method for reading xml files and creating scenarios from it.\n Jaxb = autoclass('edu.berkeley.path.beats.Jaxb')\n self.java_beats = Jaxb.create_scenario_from_xml(scenario.name)\n\n #set self.initialized to false\n self.initialized = False\n\n\n def initialize_beats(self, param=None):\n\n # build and write properties file\n prop_file_name = self.create_properties_file(param)\n\n # initialize beats by calling Java modules\n simulator = autoclass('edu.berkeley.path.beats.simulator.BeatsProperties')\n beatsProperties = simulator(prop_file_name)\n\n try:\n self.java_beats.initialize_with_properties(beatsProperties)\n except:\n raise RuntimeError('Error in initializing the BeATS 
scenario')\n\n self.initialized = True\n\n\n def create_properties_file(self, param=None):\n \"\"\"\n This method creates a properties file from a dictionary of\n simulation parameters.\n\n --input--\n param: dictionary of simulation parameters. We expect the following\n keys to be in it:\n -> 'SIM_DT': simulation time step. If not given, 5 seconds is used.\n -> 'OUTPUT_DT': output time step. If not given, 300 seconds is used.\n -> 'OUTPUT_PREFIX': prefix to be added to output files. if not given,\n we use '../output/'.\n\n \"\"\"\n\n if not param:\n param = {}\n\n if os.path.isfile('../tmp/prop_file.properties'):\n warnings.warn('Overwriting existing properties file.', RuntimeWarning)\n\n # create prop file\n prop_file = open('../tmp/prop_file.properties', 'w')\n\n # setting simulation's time step.\n if 'SIM_DT' not in param:\n print('Missing SIM_DT, using 5 seconds.')\n param['SIM_DT'] = 5.0\n\n self.sim_dt = float(param['SIM_DT'])\n\n # setting output's time step.\n if 'OUTPUT_DT' not in param:\n print('Missing OUTPUT_DT, using 300 seconds.')\n param['OUTPUT_DT'] = 300.0\n\n self.out_dt = float(param['OUTPUT_DT'])\n\n #setting output's prefix\n if 'OUTPUT_PREFIX' not in param:\n print('Saving output in default folder: ../output/')\n param['OUTPUT_PREFIX'] = '../output/'\n\n self.output_folder = param['OUTPUT_PREFIX']\n\n\n # write to file\n for key in param:\n if isinstance(param[key], str):\n prop_file.write('{0} = {1}\\n'.format(key, param[key].replace('\\\\', '\\\\\\\\')))\n else:\n prop_file.write('{0} = {1}\\n'.format(key, param[key]))\n\n\n # close and return properties file path\n prop_file.close()\n return prop_file.name\n\n\n def run_beats(self, duration, param=None):\n\n # initialize\n if not self.initialized:\n warnings.warn('Object not initialized. Initializing with default parameters.', RuntimeWarning)\n self.initialize_beats(param)\n\n if self.simulation_done:\n warnings.warn('Overwriting previous output files.', RuntimeWarning)\n\n # run beats\n self.java_beats.set.end_time(duration - self.sim_dt) # NOTE: this should be \"duration\", but for a bug in BeATS\n self.java_beats.run()\n self.simulation_done = True\n\n # load the result\n #self.load_simulation_output(self.output_folder)\n","sub_path":"python_beats/code/Beats.py","file_name":"Beats.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"595097256","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom pathlib import Path as path\nfrom psutil._common import bytes2human\nimport errno\n\n\nclass Questao_03():\n \"\"\"\n This program lists the files in a given directory.\n \"\"\"\n\n def __init__(self):\n \"\"\" Constructor \"\"\"\n self.search_path = path.home() / 'Documents'\n self.error = ''\n self.list_dir = list()\n self.list_file = list()\n print('===' * 25, 'Questão 03'.center(75), '===' * 25, sep='\\n')\n self.init_class()\n self.process_data()\n\n def init_class(self):\n \"\"\" This function receives the input data from users. \"\"\"\n\n if os.path.exists(self.search_path):\n self.list_dir = os.listdir(self.search_path)\n else:\n self.search_path = input(str('Digite o caminho de um diretório: '))\n os.chdir(self.search_path)\n self.list_dir = os.listdir(os.chdir(self.search_path))\n\n def process_data(self):\n \"\"\" This function process the input data from init_class. 
\"\"\"\n for item in self.list_dir:\n if os.path.isfile(os.path.join(self.search_path, item)):\n self.list_file.append({\n 'name': item,\n 'size': os.stat(os.path.join(self.search_path, item)).st_size\n })\n self.list_file.sort(key=lambda x: x['size'], reverse=True)\n elif os.path.isdir(os.path.join(self.search_path, item)):\n pass\n else:\n self.error = FileNotFoundError(errno.ENOENT, os.strerror(\n errno.ENOENT), os.path.basename(item))\n with open('questao03.txt', 'w') as _file:\n _file.write(str(self.list_file))\n\n def print_result(self):\n \"\"\" This is a printer! It prints. \"\"\"\n if self.error:\n print(self.error,\n '---' * 25, 'Aluno: Francisco Camello'.rjust(75), sep=\"\\n\")\n else:\n print('Conteúdo do diretório: \\n {}'.format(os.path.abspath(self.search_path)),\n '{} {:<10} {:^10}'.format(' '*2, 'Tamanho', 'Arquivo'), sep=\"\\n\")\n for _file in self.list_file:\n print('{} {:<10} {:^10}'.format(\n ' '*2, bytes2human(_file['size']), _file['name']))\n print('---' * 25, 'O arquivo questao03.txt foi criado com sucesso!')\n\n\nQuestao_03().print_result()\n","sub_path":"questao03.py","file_name":"questao03.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"371043856","text":"\n\nimport threading\nimport time, random\nimport os\n\ndef thrd_handler(name):\n\tprint ('thread (%s) starts in process(%s)' % (name, os.getpid()))\n\ttime.sleep(random.random())\n\tprint ('thread (%s) finished in process(%s)' % (name, os.getpid()))\n\nt = threading.Thread (target = thrd_handler, name = 'thread in ex21', args = ('test0',) )\nprint ('Pid is %s before.' % os.getpid())\nt.start()\nt.join()\n","sub_path":"ex21_threading.py","file_name":"ex21_threading.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"538852670","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('main', '0037_auto_20150222_2008'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FavoriteCourse',\n fields=[\n ('favorite_course_id', models.AutoField(serialize=False, primary_key=True)),\n ('modified_date', models.DateTimeField(auto_now=True)),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('course', models.ForeignKey(related_name=b'favorited_by', to='main.Course')),\n ('user', models.ForeignKey(related_name=b'favorite_courses', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-modified_date',),\n 'db_table': 'favorite_courses',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FavoriteTopic',\n fields=[\n ('favorite_topic_id', models.AutoField(serialize=False, primary_key=True)),\n ('modified_date', models.DateTimeField(auto_now=True)),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('topic', models.ForeignKey(related_name=b'favorited_by', to='main.Topic')),\n ('user', models.ForeignKey(related_name=b'favorite_topics', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-modified_date',),\n 'db_table': 'favorite_topics',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FavoriteUnit',\n fields=[\n ('favorite_unit_id', models.AutoField(serialize=False, primary_key=True)),\n ('modified_date', 
models.DateTimeField(auto_now=True)),\n ('created_date', models.DateTimeField(auto_now_add=True)),\n ('unit', models.ForeignKey(related_name=b'favorited_by', to='main.Unit')),\n ('user', models.ForeignKey(related_name=b'favorite_units', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-modified_date',),\n 'db_table': 'favorite_units',\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='favoriteunit',\n unique_together=set([('user', 'unit')]),\n ),\n migrations.AlterUniqueTogether(\n name='favoritetopic',\n unique_together=set([('user', 'topic')]),\n ),\n migrations.AlterUniqueTogether(\n name='favoritecourse',\n unique_together=set([('user', 'course')]),\n ),\n migrations.AlterField(\n model_name='favoritelesson',\n name='user',\n field=models.ForeignKey(related_name=b'favorite_lessons', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"main/migrations/0038_auto_20150223_1145.py","file_name":"0038_auto_20150223_1145.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"599335468","text":"#! /usr/bin/python -u\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nimport sys\nimport json\nimport struct\n\nsys.stderr = open('/tmp/test', 'wb')\n\nsid = SentimentIntensityAnalyzer()\n\n\ndef getMessage():\n rawLength = sys.stdin.read(4)\n if len(rawLength) == 0:\n sys.exit(0)\n messageLength = struct.unpack('@I', rawLength)[0]\n message = sys.stdin.read(messageLength)\n return json.loads(message)\n\n\ndef encodeMessage(messageContent):\n encodedContent = json.dumps(messageContent)\n encodedLength = struct.pack('@I', len(encodedContent))\n return {'length': encodedLength, 'content': encodedContent}\n\n\ndef sendMessage(encodedMessage):\n sys.stdout.write(encodedMessage['length'])\n sys.stdout.write(encodedMessage['content'])\n sys.stdout.flush()\n\n\ndef pic_polarity(polarity):\n if polarity >= 0.5:\n result = 'veryPositive.png'\n elif polarity > 0:\n result = 'neutral.png'\n elif polarity > -0.3:\n result = 'negative.png'\n else:\n result = 'veryNegative.png'\n\n return result\n\n\ndef pic_objectivity(objectivity):\n if objectivity == 'obj':\n return 'objective.png'\n else:\n return 'subjective.png'\n\n\nwhile True:\n receivedMessage = getMessage()\n scores = []\n for line in receivedMessage[0].splitlines():\n line = line.strip()\n score = sid.polarity_scores(line)['compound']\n if score:\n scores.append((score, len(line)))\n average = (sum([score * weight for score, weight in scores]) /\n sum([weight for score, weight in scores]))\n sendMessage(encodeMessage(pic_polarity(average)))\n","sub_path":"rating.py","file_name":"rating.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"432501290","text":"from rest_framework import generics\n\nfrom ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF\nfrom ipam import filters\n\nfrom extras.api.views import CustomFieldModelAPIView\nfrom . 
import serializers\n\n\n#\n# VRFs\n#\n\nclass VRFListView(CustomFieldModelAPIView, generics.ListAPIView):\n \"\"\"\n List all VRFs\n \"\"\"\n queryset = VRF.objects.select_related('tenant').prefetch_related('custom_field_values__field')\n serializer_class = serializers.VRFSerializer\n filter_class = filters.VRFFilter\n\n\nclass VRFDetailView(CustomFieldModelAPIView, generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single VRF\n \"\"\"\n queryset = VRF.objects.select_related('tenant').prefetch_related('custom_field_values__field')\n serializer_class = serializers.VRFSerializer\n\n\n#\n# Roles\n#\n\nclass RoleListView(generics.ListAPIView):\n \"\"\"\n List all roles\n \"\"\"\n queryset = Role.objects.all()\n serializer_class = serializers.RoleSerializer\n\n\nclass RoleDetailView(generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single role\n \"\"\"\n queryset = Role.objects.all()\n serializer_class = serializers.RoleSerializer\n\n\n#\n# RIRs\n#\n\nclass RIRListView(generics.ListAPIView):\n \"\"\"\n List all RIRs\n \"\"\"\n queryset = RIR.objects.all()\n serializer_class = serializers.RIRSerializer\n\n\nclass RIRDetailView(generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single RIR\n \"\"\"\n queryset = RIR.objects.all()\n serializer_class = serializers.RIRSerializer\n\n\n#\n# Aggregates\n#\n\nclass AggregateListView(CustomFieldModelAPIView, generics.ListAPIView):\n \"\"\"\n List aggregates (filterable)\n \"\"\"\n queryset = Aggregate.objects.select_related('rir').prefetch_related('custom_field_values__field')\n serializer_class = serializers.AggregateSerializer\n filter_class = filters.AggregateFilter\n\n\nclass AggregateDetailView(CustomFieldModelAPIView, generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single aggregate\n \"\"\"\n queryset = Aggregate.objects.select_related('rir').prefetch_related('custom_field_values__field')\n serializer_class = serializers.AggregateSerializer\n\n\n#\n# Prefixes\n#\n\nclass PrefixListView(CustomFieldModelAPIView, generics.ListAPIView):\n \"\"\"\n List prefixes (filterable)\n \"\"\"\n queryset = Prefix.objects.select_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role')\\\n .prefetch_related('custom_field_values__field')\n serializer_class = serializers.PrefixSerializer\n filter_class = filters.PrefixFilter\n\n\nclass PrefixDetailView(CustomFieldModelAPIView, generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single prefix\n \"\"\"\n queryset = Prefix.objects.select_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role')\\\n .prefetch_related('custom_field_values__field')\n serializer_class = serializers.PrefixSerializer\n\n\n#\n# IP addresses\n#\n\nclass IPAddressListView(CustomFieldModelAPIView, generics.ListAPIView):\n \"\"\"\n List IP addresses (filterable)\n \"\"\"\n queryset = IPAddress.objects.select_related('vrf__tenant', 'tenant', 'interface__device', 'nat_inside')\\\n .prefetch_related('nat_outside', 'custom_field_values__field')\n serializer_class = serializers.IPAddressSerializer\n filter_class = filters.IPAddressFilter\n\n\nclass IPAddressDetailView(CustomFieldModelAPIView, generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single IP address\n \"\"\"\n queryset = IPAddress.objects.select_related('vrf__tenant', 'tenant', 'interface__device', 'nat_inside')\\\n .prefetch_related('nat_outside', 'custom_field_values__field')\n serializer_class = serializers.IPAddressSerializer\n\n\n#\n# VLAN groups\n#\n\nclass VLANGroupListView(generics.ListAPIView):\n \"\"\"\n List all VLAN groups\n \"\"\"\n queryset = VLANGroup.objects.select_related('site')\n 
serializer_class = serializers.VLANGroupSerializer\n filter_class = filters.VLANGroupFilter\n\n\nclass VLANGroupDetailView(generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single VLAN group\n \"\"\"\n queryset = VLANGroup.objects.select_related('site')\n serializer_class = serializers.VLANGroupSerializer\n\n\n#\n# VLANs\n#\n\nclass VLANListView(CustomFieldModelAPIView, generics.ListAPIView):\n \"\"\"\n List VLANs (filterable)\n \"\"\"\n queryset = VLAN.objects.select_related('site', 'group', 'tenant', 'role')\\\n .prefetch_related('custom_field_values__field')\n serializer_class = serializers.VLANSerializer\n filter_class = filters.VLANFilter\n\n\nclass VLANDetailView(CustomFieldModelAPIView, generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single VLAN\n \"\"\"\n queryset = VLAN.objects.select_related('site', 'group', 'tenant', 'role')\\\n .prefetch_related('custom_field_values__field')\n serializer_class = serializers.VLANSerializer\n\n\n#\n# Services\n#\n\nclass ServiceListView(generics.ListAPIView):\n \"\"\"\n List services (filterable)\n \"\"\"\n queryset = Service.objects.select_related('device').prefetch_related('ipaddresses')\n serializer_class = serializers.ServiceSerializer\n filter_class = filters.ServiceFilter\n\n\nclass ServiceDetailView(generics.RetrieveAPIView):\n \"\"\"\n Retrieve a single service\n \"\"\"\n queryset = Service.objects.select_related('device').prefetch_related('ipaddresses')\n serializer_class = serializers.ServiceSerializer\n","sub_path":"netbox/ipam/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"147675881","text":"import math\n\nT_g = 285. + 273.15 ; T_w = 38. + 273.15\nHg = 47. ; Di = 4. ; rho_w = 995.7; gravty = 9.81\nPres = 750. # (= B in mm-Hg)\nT_air = [4.5 + 273.15, 32.5 + 273.15]\nh=[]\n\n# Calculating maximum and minimum theoretical puff based on outside air temperature\nn = len(T_air)\nfor i in range(n):\n hh = .464 * gravty * Pres * Hg * (1./T_air[i] - 1./ T_g)\n h.append(hh)\nprint('(a) Min/MaxTheoretical Puff:', min(h), 'Pa and ', max(h), 'Pa')\nprint ('')\nprint('(b) ')\n\n# Calculating Plume rise at 1 km from the facility using Briggs equation (assume average air temp of 18 C)\nT_air_aver = 18. + 273.15 ; Gas_Velocity = 15.; Wind_Velocity = 6.5\nF = gravty * Gas_Velocity * (Di/2.)**2 * (T_g-T_air_aver)/T_g\n#F = 379.1\nprint(' Buoyancy Flux: %5.4f' %(F) , 'm^4/s^3')\ncorrection_wind_speed = False#True\nif correction_wind_speed:\n a = 0.209 # assuming neutral atmospheric conditions\n Wind_Velocity = Wind_Velocity * (Hg/10.)**a\n print(' Corected wind speed velocity: %5.4f' %(Wind_Velocity) )\n Xf = 2.16 * F**.4 * Hg**.6\n print(' Distance to full plume rise: %5.4f' %(Xf), 'm')\n DeltaH = 1.6 * F**(1./3.)*Xf**(2./3.)/Wind_Velocity\n print(' Plume rise at 1 km: %5.4f' %DeltaH, 'm')\n \n # Calculating concentration at 1 km (optional)\n sigma_y = 8. ; sigma_z = 5. # Turner diagram assuming neutral conditions\n Q_CO2 = 1. 
# mass flow rate of CO2\n H = Hg + DeltaH ; pi = 3.1415\n print(' Total height of the plume: %5.4f' %H, 'm')\n Conc = Q_CO2/(pi * sigma_y*sigma_z*Wind_Velocity)*math.exp(-.5*(H/sigma_z)**2)\n print(' Concentration of the plume at 1 km from the release: %5.4e' %(Conc), 'micrograms/m^3')\nelse:\n Xf = 2.16 * F**.4 * Hg**.6\n print(' Distance to full plume rise: %5.4f' %(Xf), 'm')\n DeltaH = 1.6 * F**(1./3.)*Xf**(2./3.)/Wind_Velocity\n print(' Plume rise at 1 km: %5.4f' %DeltaH, 'm')\n \n # Calculating concentration at 1 km (optional)\n sigma_y = 8. ; sigma_z = 5. # Turner diagram assuming neutral conditions\n Q_CO2 = 1. # mass flow rate of CO2\n H = Hg + DeltaH ; pi = 3.1415\n print(' Total height of the plume: %5.4f' %H, 'm')\n Conc = Q_CO2/(pi * sigma_y*sigma_z*Wind_Velocity)*math.exp(-.5*(H/sigma_z)**2)\n print(' Concentration of the plume at 1 km from the release: %5.4e' %(Conc), 'micrograms/m^3')","sub_path":"Exams/EX501U/code/Q2ExDec.py","file_name":"Q2ExDec.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"170780408","text":"#!/usr/bin/python3\n# -*- coding: utf8 -*-\n\n# Copyright (c) 2022 Baidu, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis is a simple case of using Hadamard gate and CNOT gate.\nResults will be fetched from a local program.\n\"\"\"\n\nimport sys\nfrom pprint import pprint\n\nsys.path.append('../..')\nfrom QCompute import *\n\nmatchSdkVersion('Python 3.3.3')\n\n# Create environment\nenv = QEnv()\n# Choose backend Baidu local simulator\nenv.backend(BackendName.LocalBaiduSim2)\n\nq = env.Q.createList(2)\n\n# Apply a Hadamard gate on the 0th qubit firstly.\nH(q[0])\n\n# Apply a CX gate to generate an entangle quantum state\nCX(q[0], q[1])\n\n# Measure with the computational basis\nMeasureZ(*env.Q.toListPair())\n\n# Commit the task with 1024 shots\ntaskResult = env.commit(1024, fetchMeasure=True)\n\npprint(taskResult)\n","sub_path":"Example/Level_1/HCNOT_Local.py","file_name":"HCNOT_Local.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"608084371","text":"from user_metrics.models import MetricItem, MetricDay\n\nfrom django.core.management.base import NoArgsCommand\nfrom django.db.models.aggregates import Count\nimport datetime\n\nclass Command(NoArgsCommand):\n help = \"Aggregate Application Metrics\"\n\n requires_model_validation = True\n\n def handle_noargs(self, **options):\n \"\"\" Aggregate Metrics by User \"\"\"\n\n items = MetricItem.objects.extra(select={'day': \"DATE_TRUNC('day', visit_time)\"}).\\\n values('day', 'metric', 'user', 'user_object_id').order_by().annotate(\n count=Count('id'), unique_count=Count('visitor_id', distinct=True))\n\n for item in items:\n day, create = MetricDay.objects.get_or_create(\n date_up=item['day'].date(),\n metric_id=item['metric'],\n user_id=item['user'],\n 
user_object_id=item['user_object_id']\n )\n\n day.count = item['count']\n day.unique_count = item['unique_count']\n\n day.save()\n\n MetricItem.objects.filter(visit_time__lt=datetime.date.today()).delete()\n","sub_path":"user_metrics/management/commands/metrics_aggregate_by_user.py","file_name":"metrics_aggregate_by_user.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"471678604","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 24 19:00:29 2021\n\n@author: em812\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom tierpsytools.analysis.statistical_tests import univariate_tests, get_effect_sizes\nfrom tierpsytools import AUX_FILES_DIR\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport pdb\n\nfilepath = Path(AUX_FILES_DIR) / 'feature_groups.csv'\ndefault_groups = pd.read_csv(filepath, index_col=0)\n\nclass tierpsy_fingerprints():\n\n def __init__(self, bluelight=True, test='ANOVA', comparison_type='multiclass',\n multitest_method='fdr_by', significance_threshold=0.05,\n test_results=None, groups=None):\n self.bluelight=bluelight\n self.test = test\n self.comparison_type = comparison_type\n self.multitest_method='fdr_by'\n self.p_threshold = significance_threshold\n if groups is None:\n self.groups = default_groups\n else:\n self.groups = self._parse_groups(groups)\n if test_results is not None:\n self.test_results = self._parse_results(test_results)\n return\n\n def fit(self, X, y, control='N2', n_jobs=-1):\n \"\"\"\n Get the tierpsy fingerprint:\n Run univariate statistical tests if not provided as input\n Create profile: group and subgroup features and get the stat summaries\n for each group/subgroup\n\n Parameters\n ----------\n X : pandas dataframe\n features matrix.\n y : pandas series or numpy array\n the labels defining the sample groups to compare (must contain control label).\n control : str, optional\n The control group label. The default is 'N2'.\n n_jobs : int or -1, optional\n number of jobs for parallel processing. Used only if univariate tests\n are ran. 
The default is -1 (all available cores).\n\n Returns\n -------\n None.\n\n \"\"\"\n if not hasattr(self, 'test_results'):\n self._run_univariate_tests(X, y, control='N2', n_jobs=-1)\n self._create_profile()\n return\n\n\n def get_representative_features(self, merge_bluelight=False):\n if not hasattr(self, '_profile'):\n raise ValueError('You must fit the instance first.')\n\n if not self.bluelight:\n return self._feature_profile(self._profile, bluelight=None)\n\n if merge_bluelight:\n return self._feature_profile(self.get_profile(merge_bluelight=True), bluelight=None)\n else:\n represent_feat = {}\n for blue,profile in self._profile.items():\n represent_feat[blue] = self._feature_profile(profile, bluelight=blue)\n return represent_feat\n\n\n def get_profile(self, feat_set=None, merge_bluelight=False):\n if not hasattr(self, '_profile'):\n raise ValueError('You must fit the instance first.')\n\n if not self.bluelight:\n return self._profile\n\n if merge_bluelight:\n return self._get_merged_profile()\n else:\n return self._profile\n\n def plot_fingerprints(\n self, merge_bluelight, fig=None, ax=None, plot_colorbar=True,\n title=None):\n\n if merge_bluelight:\n data = self.get_profile(merge_bluelight=merge_bluelight)\n\n if fig is None and ax is None:\n fig, ax = plt.subplots(figsize=(20,5))\n g = self._plot_one_fingerprint(data, title, ax)\n\n if plot_colorbar:\n c = g.figure.colorbar(g) #, fig.colorbar(im, cax=cbar_ax)\n c.set_label('ratio of significant features') #, labelpad=-40, y=1.15, rotation=0)\n\n plt.tight_layout()\n else:\n profile = self.get_profile(merge_bluelight=merge_bluelight)\n\n n_plots = len(profile.keys())\n if fig is None and ax is None:\n fig, ax = plt.subplots(n_plots, 1, figsize=(20,5*n_plots))\n\n for i, (blue, data) in enumerate(profile.items()):\n g = self._plot_one_fingerprint(data, blue, ax[i])\n\n plt.tight_layout()\n\n if plot_colorbar:\n fig.subplots_adjust(bottom=0.2)\n cbar_ax = fig.add_axes([0.2, 0.01, 0.6, 0.008])\n c = plt.colorbar(g, orientation='horizontal', cax=cbar_ax) # fig.colorbar(im, cax=cbar_ax)\n c.set_label('ratio of significant features') #, labelpad=-40, y=1.15, rotation=0)\n\n return g\n\n def _plot_one_fingerprint(self, data, title, ax):\n \"\"\"\n Plots a signle heatmap with the effect sizes of a set of representative\n features (fingerprint). 
The set comes from a single bluelight condition\n or the merged profile.\n\n Parameters\n ----------\n data : pandas dataframe\n the results from a single bluelight condition or the merges profile.\n title : str\n the title of the plot/subplot.\n ax : matplotlib axes object\n the axes object to plot the heatmap in.\n\n Returns\n -------\n g : seaborn figure object\n the heatmap figure object.\n\n \"\"\"\n import seaborn as sns\n\n data = data[data['best_group']]\n data = data.sort_index(level=0)\n\n pal = sns.color_palette(\"Reds\", int(data['n_significant'].max())+1) #'Reds'\n xs = np.linspace(0,1,int(data['n_significant'].max())+1)\n pal = {n:c for n,c in zip(xs, pal)}\n lut = data['n_significant_ratio'].apply(lambda x: xs[np.argmin(np.abs(xs-x))])\n lut = lut.map(pal)\n\n norm = plt.Normalize(0, 1)\n sm = plt.cm.ScalarMappable(cmap=\"Reds\", norm=norm)\n sm.set_array([])\n\n g= sns.barplot(x=data.index.get_level_values(0), y=data['effect_size_50th'], ax=ax,\n yerr=data['effect_size_50th_ci'], palette=lut, dodge=False)\n ax.set_title(title)\n\n labels = ax.get_xticklabels()\n ax.set_xticklabels(labels, rotation=90)\n ax.tick_params(axis='both', which='major', labelsize=6)\n ax.tick_params(axis='both', which='minor', labelsize=6)\n\n return g\n\n def _parse_groups(self, groups):\n if isinstance(groups, pd.DataFrame):\n assert 'group_label' in groups\n elif isinstance(groups, dict):\n df = pd.DataFrame(index=np.concatenate([x for x in groups.values()]))\n for grp, fts in groups.items():\n df.loc[fts] = grp\n return groups\n\n def _parse_results(self, test_results):\n if not isinstance(test_results, pd.DataFrame):\n raise ValueError('test_results must be a dataframe.')\n else:\n assert np.all(np.isin(['p-value', 'effect_size'], test_results.columns))\n return test_results\n\n def _feature_profile(self, profile, bluelight=None):\n\n feature_profile = pd.DataFrame(\n index=profile[profile['best_group']].index.get_level_values(0),\n columns=['feature', 'p-value', 'effect_size'])\n\n for idx in profile[profile['best_group']].index:\n group, subgroup = idx[0], idx[1]\n if len(idx)==3:\n bluelight=idx[2]\n fts = self._subgroup_features(\n group, subgroup, bluelight=bluelight, feat_set=self.test_results.index)\n res = self.test_results.loc[fts]\n mean = profile.loc[idx, 'effect_size_mean']\n std = profile.loc[idx, 'effect_size_std']\n if np.isnan(std):\n std = 0\n mask = (res['effect_size']>=mean-1.96*std) & (res['effect_size']<=mean+1.96*std)\n\n feature_profile.loc[group, 'feature'] = res.loc[mask, 'p-value'].idxmin()\n feature_profile.loc[group, ['p-value', 'effect_size']] = res.loc[\n feature_profile.loc[group, 'feature'], ['p-value', 'effect_size']]\n\n return feature_profile\n\n def _get_merged_profile(self):\n if not self.bluelight:\n return self._profile\n\n merged_profile = [x.assign(bluelight=blue)\n for blue,x in self._profile.items()]\n merged_profile = pd.concat(\n [x.set_index('bluelight', append=True) for x in merged_profile],\n axis=0)\n\n merged_profile = self._mark_best_subgroup(merged_profile)\n return merged_profile\n\n def _run_univariate_tests(self, X, y, control='N2', n_jobs=-1):\n\n stats, pvals, _ = univariate_tests(\n X, y, control=control, test=self.test,\n comparison_type=self.comparison_type,\n multitest_correction=self.multitest_method,\n n_jobs=n_jobs)\n\n effects = get_effect_sizes(\n X, y, control=control, test=self.test,\n comparison_type=self.comparison_type)\n\n test_res = pd.DataFrame(pvals.min(axis=1), columns=['p-value'])\n\n # In most cases, the pvals and 
effects have the same shape\n # (when we do group-by-group comparisons, we get group-by-group\n # effect sizes too, and when we do multi-class comparisons we get one\n # effect size).\n # But for the Kruskal-Wallis case, we cannot get one effect size for the\n # test, so we get group-by-group effect sizes instead and keep the max.\n # In this case pvals has only one column, but effects has more than one\n # columns\n if pvals.shape==effects.shape:\n test_res['effect_size'] = effects.values[pvals.isin(pvals.min(axis=1)).values]\n else:\n test_res['effect_size'] = effects.max(axis=1)\n\n self.test_results = test_res\n\n return\n\n def _profile_info(self, res, groupby, n_boot=1000):\n \"\"\"\n Gets stat summaries for effect sizes and number of significant features\n for each subgroup and stores them in the profile dataframe.\n\n Parameters\n ----------\n res : pandas dataframe\n Dataframe with the results of the univariate statistical tests\n (p-values and effect sizes). It also includes the group and subgroup\n labels for each feature.\n groupby : list of strings\n The group and subgroup labels and the bluelight label if applicable.\n n_boot : int, optional\n number of bootstrap samples for the stats estimates. The default is 1000.\n\n Returns\n -------\n profile : pandas dataframe\n summary stats of effect sizes and number of significant features\n per features subgroup.\n\n \"\"\"\n from tierpsytools.analysis.statistical_tests import bootstrapped_ci as boot_ci\n grouped_res = res.groupby(by=groupby)\n\n profile = grouped_res.agg(\n effect_size_mean= ('effect_size', lambda x: np.mean(x)),\n effect_size_mean_ci = ('effect_size', lambda x: np.diff(boot_ci(x, np.mean, n_boot))/2),\n effect_size_10th = ('effect_size', lambda x: np.quantile(x, 0.1)),\n effect_size_50th= ('effect_size', lambda x: np.median(x)),\n effect_size_50th_ci = ('effect_size', lambda x: np.diff(boot_ci(x, np.median, n_boot))/2),\n effect_size_90th= ('effect_size', lambda x: np.quantile(x, 0.9)),\n effect_size_std= ('effect_size', lambda x: np.std(x)),\n n_significant = ('p-value', lambda x: (x 0:\r\n n.append( (ra, ca - 1) )\r\n else:\r\n if ra > 1:\r\n n.append( (ra - 1, s[ra - 1]) );\r\n\r\n # right neighbor:\r\n # forward one char on same line, except after last char (and\r\n # not on last line) neighbor is beginning of next line\r\n if ca < s[ra]:\r\n n.append( (ra, ca + 1) )\r\n else:\r\n if ra < f:\r\n n.append( (ra + 1, 0) )\r\n\r\n # up neighbor:\r\n # on the first line, no neighbor; otherwise previous line,\r\n # either the same position or the line length\r\n if ra > 1:\r\n if ca > s[ra - 1]:\r\n n.append( (ra - 1, s[ra - 1]) )\r\n else:\r\n n.append( (ra - 1, ca) )\r\n\r\n # down neighbor:\r\n # on the last line, no neighbor; otherwise next line,\r\n # either the same position or the line length\r\n if ra < f:\r\n if ca > s[ra + 1]:\r\n n.append( (ra + 1, s[ra + 1]) )\r\n else:\r\n n.append( (ra + 1, ca) )\r\n\r\n return n\r\n\r\n\r\n# get the number of scenarios in the input\r\nnscenarios = int(input())\r\n\r\nfor t in range(nscenarios):\r\n # read all the input for a scenario\r\n f = int(input())\r\n # we want s index to be the line number which is 1-based,\r\n # so insert a dummy element at beginning of the list\r\n s = [0]\r\n s.extend(list(map(int, input().split())))\r\n rc, cc = map(int, (input().split()))\r\n rm, cm = map(int, (input().split()))\r\n # horizontal positions can be 0 through s[i] inclusive, so\r\n # we'll need to allocate 1 more than max s[i] value\r\n smax = 1 + max(s)\r\n\r\n # bfs 
setup\r\n visited = [[False for c in range(smax + 1)] for r in range(f + 1)]\r\n distance = [[oo for c in range(smax + 1)] for r in range(f + 1)]\r\n\r\n # queue for bfs (breadth-first search algorithm)\r\n q = []\r\n\r\n # start bfs\r\n visited[rc][cc] = True\r\n q.append( (rc, cc) )\r\n distance[rc][cc] = 0\r\n\r\n while q:\r\n (ra, ca) = q.pop(0)\r\n if (ra, ca) == (rm, cm):\r\n break\r\n for (rn, cn) in valid_neighbors(f, s, ra, ca):\r\n if not visited[rn][cn]:\r\n visited[rn][cn] = True\r\n q.append((rn, cn))\r\n distance[rn][cn] = 1 + distance[ra][ca]\r\n\r\n # output distance (number of keypresses)\r\n print(distance[rm][cm])\r\n\r\n","sub_path":"dblab/problem_set/2017/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"500289378","text":"from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove,\n InlineKeyboardButton, InlineKeyboardMarkup)\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler, CallbackQueryHandler)\nimport traceback\nimport time\n\nimport request\nimport state_machine\nfrom db import requests_col, merchants_col\nfrom data import (CATEGORY_1, CATEGORY_2, CITY_1, CITY_2, CITY_3, AMOUNT_1, AMOUNT_2, AMOUNT_3, \n PERIOD_1, PERIOD_2, PERIOD_3, PRICE_1, PRICE_2, PRICE_3, WHEN_1, WHEN_2, WHEN_3 )\n\n\ndef start_keyboard(update):\n func_keyboard = ['Сдать', 'Снять']\n start_keyboard = ['Поделиться', 'Инфо']\n merchants_keyboard = ['Кабинет']\n\n user = update.message.from_user\n \"\"\"\n if merchants_col.find_one({'seller_id':user.id}):\n return [merchants_keyboard]\n else:\n keyboard = []\n keyboard.append(merchants_keyboard)\n keyboard.append(start_keyboard)\n return keyboard\n \"\"\"\n keyboard = []\n keyboard.append(func_keyboard)\n keyboard.append(start_keyboard)\n if merchants_col.find_one({'seller_id':user.id}):\n keyboard.append(merchants_keyboard)\n return keyboard\n\ndef get_info(bot, update):\n text = \"\"\"\nЭтот бот позволяет сдать или снять комнату/квартиру. Чтоб отправить запрос нажмите кнопку \"Снять\" и\n следуя вопросам бота введите параметры (количество комнат, дату и так далее). 
Чтобы сдать жилье\n нажмите соответсвенно кнопку \"Сдать\" и указав город вы зарегистрируетесь и будете получать запросы по\n указаному городу (или по всем городам категории \"Другой город\")\"\"\"\n\n update.message.reply_text(text)\n","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"573040642","text":"# local imports\nfrom utilities import SHEDS, PROFILES, WRAPPERS, SERVICES\nfrom Toolbox import Toolbox, InvalidToolbox\n\n# pipeline imports\nfrom context import activeUser\nfrom aws import dynamo\n\n# python imports\nfrom copy import deepcopy\n\n# facility management\nimport hfx.facilityApi as facilityApi\n\n\nACTIVE_SHED = None\n\n\ndef activeShed():\n global ACTIVE_SHED\n\n if not ACTIVE_SHED:\n ACTIVE_SHED = ToolShed()\n\n return ACTIVE_SHED\n\n\nclass ToolShed(object):\n SHED = dict(\n user_id=str(None),\n load={},\n toolboxes=[],\n private={},\n development={},\n release=str(None)\n )\n\n PROFILE = dict(\n user_id=str(None)\n )\n\n def __init__(self, user=None):\n super(ToolShed, self).__init__()\n\n self.user = activeUser() if not user else user\n\n self.load = {}\n self.toolboxes = {}\n self.wrappers = [{'name': wrapper['name'], 'link': wrapper['link']} for wrapper in WRAPPERS.scan()['Items']]\n self.development = {}\n self.release = 'latest'\n self.private = {}\n\n # load up shed data\n self.loadShed()\n\n # set up\n def setUpShed(self):\n \"\"\"\n Set up a new shed for the current user.\n \"\"\"\n for data in [[self.SHED, SHEDS], [self.PROFILE, PROFILES]]:\n template = deepcopy(data[0])\n template['user_id'] = self.user\n\n if data[1] == PROFILES and self.user.isdigit():\n continue\n\n data[1].put_item(Item=template)\n\n template = deepcopy(self.SHED)\n template['user_id'] = self.user\n return template\n\n # loaders\n def loadShed(self):\n \"\"\"\n Loads data about the users shed from the database.\n \"\"\"\n request = SHEDS.get_item(Key={'user_id': self.user})\n if 'Item' not in request:\n shedData = self.setUpShed()\n else:\n shedData = request['Item']\n\n self.cleanStaleData(shedData)\n\n def cleanStaleData(self, shedData):\n \"\"\"\n Clean out old toolboxes\n \"\"\"\n # copied shed data reference\n referenceData = deepcopy(shedData)\n\n toolboxes = {}\n privateToolboxes = {}\n\n # loop over toolboxes.\n for toolboxId in referenceData['toolboxes']:\n\n toolbox = Toolbox(toolboxId)\n\n try:\n toolboxes[toolbox.id] = toolbox\n except InvalidToolbox:\n shedData['toolboxes'].remove(toolboxId)\n\n if int(toolboxId) in referenceData['load']:\n del referenceData['load'][int(toolboxId)]\n\n\n # clean out stale private toolboxes.\n for privateToolbox in referenceData['private'].values():\n\n toolbox = Toolbox(privateToolbox['id'])\n\n try:\n toolbox.loadData(referenceData)\n privateToolboxes[toolbox.id] = toolbox\n except InvalidToolbox:\n # remove the invalid toolbox\n del shedData['private'][str(int(privateToolbox['id']))]\n\n if int(privateToolbox['id']) in referenceData['load']:\n del referenceData['load'][int(privateToolbox['id'])]\n\n continue\n\n # update the database if there were data changes.\n if shedData != referenceData:\n SHEDS.put_item(Item=shedData)\n\n # set up object data\n self.setColumn('load', shedData)\n self.setColumn('release', shedData)\n self.setColumn('development', shedData)\n self.private = privateToolboxes\n self.toolboxes = toolboxes\n\n # setters\n def setToolbox(self, toolbox, value):\n \"\"\"\n Activate a 
toolbox to be loaded during next session.\n \"\"\"\n self.load[str(toolbox)] = value\n\n # clean up load settings\n for box in [box for box in self.load]:\n if int(box) in self.toolboxes:\n continue\n\n if str(box) in self.private:\n continue\n\n if str(box) in self.development:\n continue\n\n del self.load[box]\n\n def setEnvironmentVariable(self, key, value=None):\n \"\"\"\n Set an environment variable that tools will pass down to the client.\n \"\"\"\n if not value:\n if key in self.development:\n del self.development[key]\n\n return\n\n self.development[key] = value\n\n def setColumn(self, column, data):\n try:\n setattr(self, column, data[column])\n except KeyError:\n return\n\n def addItem(self, id):\n \"\"\"\n Add an item to the tool shed.\n \"\"\"\n id = int(id)\n\n if id not in self.private and id not in self.toolboxes:\n self.toolboxes[id] = Toolbox(id)\n\n # deletes\n def removeItem(self, id):\n \"\"\"\n Remove an item from the tool shed.\n \"\"\"\n id = int(id)\n\n if id in self.private:\n del self.private[id]\n\n if id in self.toolboxes:\n del self.toolboxes[id]\n\n # saving\n def save(self):\n \"\"\"\n Save this shed to the database.\n \"\"\"\n private = {}\n toolboxes = []\n\n for privateBox in self.private.values():\n if isinstance(privateBox, Toolbox):\n private[str(privateBox.id)] = privateBox.data\n else:\n private[str(privateBox['id'])] = privateBox\n\n for box in self.toolboxes.values():\n if isinstance(box, Toolbox):\n toolboxes.append(str(box.id))\n else:\n toolboxes.append(str(box['id']))\n\n SHEDS.put_item(\n Item={\n 'user_id': self.user,\n 'load': self.load,\n 'private': private,\n 'toolboxes': toolboxes,\n 'development': self.development,\n 'release': self.release\n }\n )\n\n def delete(self):\n SHEDS.delete_item(Key={'user_id': self.user})\n\n # package\n @property\n def data(self):\n\n delivery = {\n 'load': self.load,\n 'toolboxes': {},\n 'services': [],\n 'wrappers': self.wrappers,\n 'development': self.development,\n 'release': self.release,\n 'user': self.user,\n 'facility': None\n }\n\n for toolboxSetName in ['toolboxes', 'private']:\n toolboxSet = getattr(self, toolboxSetName)\n for toolbox in toolboxSet.values():\n toolbox.data['is'] = toolboxSetName if toolboxSetName != 'toolboxes' else 'public'\n\n toolbox.data['load'] = False\n if str(toolbox.id) in self.load:\n if self.load[str(toolbox.id)]:\n toolbox.data['load'] = True\n\n delivery['toolboxes'][toolbox.id] = toolbox.data\n\n for service in SERVICES.scan()['Items']:\n delivery['services'].append(service)\n\n # handle facility context if there is one\n if facilityApi.activeFacility():\n\n # recursion block\n if self.user != facilityApi.activeFacility().data['facility_id']:\n facilityShed = ToolShed(str(facilityApi.activeFacility().data['facility_id']))\n\n data = facilityShed.data\n delivery['facility'] = {}\n delivery['facility']['id'] = facilityApi.activeFacility().data['facility_id']\n\n for key in ['development', 'load', 'toolboxes']:\n delivery['facility'][key] = data[key]\n\n farm = facilityApi.activeFacility().data['deadline_repository_address']\n delivery['facility']['farm'] = None\n if farm and 'dns' in farm:\n delivery['facility']['farm'] = farm['dns']\n\n\n return dynamo.replaceDecimals(delivery)","sub_path":"packages/hfx/toolsApi/Toolshed.py","file_name":"Toolshed.py","file_ext":"py","file_size_in_byte":7878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"409097148","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\n\"\"\"\nLast edited on Thu Mar 8 18:04:56 2018\n\n@author: twweng with modifications by karunyas\n\nThe basic script of using pytorch to train a 2-layer MLP. \n\n\"\"\"\n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\n# F is a library of functions, e.g. the loss function, the relu, etc.\nimport torch.nn.functional as F \nimport torch.optim as optim\n# transforms can transform the input data (for each image)\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\n\n## define nn model (the cnn model)\n## we can actually use two nn.Sequential to define the convolution layer and then FC layer (the linear layer with ReLU)\n## e.g. the AlexNet in torchvision: https://github.com/pytorch/vision/blob/master/torchvision/models/alexnet.py\n#class Net(nn.Module):\n# def __init__(self):\n# super(Net, self).__init__()\n# self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n# self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n# self.conv2_drop = nn.Dropout2d()\n# self.fc1 = nn.Linear(320, 50)\n# self.fc2 = nn.Linear(50, 10)\n\n# def forward(self, x):\n# x = F.relu(F.max_pool2d(self.conv1(x), 2))\n# x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n# x = x.view(-1, 320)\n# x = F.relu(self.fc1(x))\n# x = F.dropout(x, training=self.training)\n# x = self.fc2(x)\n# return F.log_softmax(x, dim=1)\n\n\n## define training function\ndef train(epoch):\n model.train()\n print('size of train_loader = {}, N = {}'.format(len(train_loader), N))\n for batch_idx, (data, target) in enumerate(train_loader):\n #print('batch_idx = {}'.format(batch_idx))\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n \n # Because our current model is linear, so we need to flatten the data to (batch_size, 784). For the \"Net\" model, they use convolutional layer first, so their input is (batch_size, 1,28,28)\n # Alternatively, we can add a lambda function in the transforms of the DataLoader (e.g. add such a line: transforms.Lambda(lambda x: x.view(-1)) to reshape each image) for our model)\n if batch_idx == len(train_loader)-1: # the last batch\n #print('data size = {}, target = {}'.format(data.shape, target.shape)) \n # the last batch of data will not be batch_size if total number of data is not a multiple of batch size, so let data.view to decide first \n data = data.view(-1,784) # 784 for mnist\n else:\n data = data.view(N,-1)\n \n # Forward pass: output is the predicted output, target is the true label\n output = model(data)\n # compute the loss\n loss = loss_fn(output,target)\n ## loss = F.nll_loss(output, target)\n\n # Before the backward pass, use the optimizer object to zero all of the\n # gradients for the variables it will update (which are the learnable weights\n # of the model)\n optimizer.zero_grad()\n \n # Backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # Calling the step function on an Optimizer makes an update to its parameters\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.data[0]))\n\n## define test function\ndef test():\n model.eval()\n test_loss = 0\n correct = 0\n for data, target in test_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n \n data, target = Variable(data, volatile=True), Variable(target)\n \n data = data.view(1000,-1)\n output = model(data)\n \n if args.loss == 'nll_loss':\n test_loss += F.nll_loss(output, target, size_average=False).data[0] # sum up batch loss\n elif args.loss == 'cross_entropy':\n test_loss += F.cross_entropy(output, target, size_average=False).data[0] # sum up batch loss\n \n pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n print(\"length\", len(test_loader.dataset))\n test_loss /= len(test_loader.dataset)\n print('\\nTest loss = {}'.format(test_loss))\n print('\\nTest set: Average loss: {:.4f}'.format(test_loss))\n print('Accuracy: {}/{}'.format(correct, len(test_loader.dataset)))\n print('percentage ({:.0f}%)\\n'.format(100 * int(correct) / len(test_loader.dataset)))\n\n\n\n## main function\nif __name__ == \"__main__\":\n \n \n # parse the training settings: \n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=100, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--loss',choices=['nll_loss','cross_entropy'], default='cross_entropy')\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n \n # seed, argument setting\n torch.manual_seed(args.seed)\n if args.cuda:\n torch.cuda.manual_seed(args.seed)\n \n kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n \n ## download dataset \n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n \n \n # N is batch size; D_in is input dimension;\n # H is hidden dimension; D_out is output dimension.\n N, D_in, H1, H2, D_out = args.batch_size, 784, 300, 100, 10\n \n if args.loss == 'nll_loss':\n print('using nll_loss!')\n loss_fn = nn.NLLLoss()\n model = nn.Sequential(\n nn.Linear(D_in, H1),\n nn.ReLU(),\n nn.Linear(H1, H2),\n nn.ReLU(),\n nn.Linear(H2, D_out),\n nn.LogSoftmax(dim=1)\n )\n\n elif 
args.loss == 'cross_entropy':\n print('using cross_entropy loss')\n loss_fn = nn.CrossEntropyLoss()\n model = nn.Sequential(\n nn.Linear(D_in, H1),\n nn.ReLU(),\n nn.Linear(H1, H2),\n nn.ReLU(),\n nn.Linear(H2, D_out),\n nn.LogSoftmax(dim=1)\n )\n\n if args.cuda:\n model.cuda() \n \n # Use the optim package to define an Optimizer that will update the weights of\n # the model for us. Here we will use Adam; the optim package contains many other\n # optimization algoriths. The first argument to the Adam constructor tells the\n # optimizer which Variables it should update.\n learning_rate = 1e-4\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n ## Tune the learning rate -- switch to 1e-5 with for loop and compare\n ## Do cross validation -- split training set as validation set to tune learning rate\n\n ## Do the same thing for 300, 500 of hidden nodes for loop\n \n for epoch in range(1, args.epochs+1):\n train(epoch)\n test()\n\n","sub_path":"Karunya/train_3layer_withnn.py","file_name":"train_3layer_withnn.py","file_ext":"py","file_size_in_byte":8880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"93363820","text":"from flask import (Flask, send_from_directory, abort,\n render_template, request, flash, redirect, g, jsonify)\nfrom random import choice, random\nfrom uuid import uuid4\nimport json\nimport logging\nfrom hashlib import sha256\nimport shutil\nimport socket\nimport os\nfrom time import strftime\n\nfrom raven.contrib.flask import Sentry\n\napp = Flask(__name__)\nsentry = Sentry(app)\n\n\ndelta = 0\n\n\ndef map_ips(ip, default):\n return get_ips().get(ip, default)\n\n\ndef get_ips():\n with open('addresses.json') as fp:\n return json.load(fp)\n\n\ndef md5_to_file(md5):\n with open('webms/md5.txt', 'r') as fp:\n strings = fp.read()\n fp.close()\n if strings is not None:\n strings = strings.split('\\n')\n for string in strings:\n string = string.split(' ')\n if md5 == string[0]:\n return string[1].split('/')[1]\n return False\n\n\ndef get_ip():\n if (request.environ.get('HTTP_X_REAL_IP')):\n return request.environ.get('HTTP_X_REAL_IP')\n elif request.environ.get('X-Forwarded-For'):\n return request.environ.get('X-Forwarded-For')\n else:\n return request.access_route[0]\n\n\ndef add_log(webm, action):\n global delta\n ip = get_user()\n string = strftime('%Y-%m-%d %H:%M:%S ' + ip + ' ' + action)\n with open('webms/metadata/' + webm, 'a') as logfile:\n logfile.write(string + '\\n')\n print(str(delta) + ' ' + string + ' http://webm.website/' + webm)\n\n\ndef get_user():\n user = get_ip()\n user = map_ips(user, user)\n return user\n\n\ndef set_user(ip, user):\n ips = get_ips()\n blacklist = [\n ' ',\n 'good',\n 'demote',\n 'held',\n 'censure',\n 'affirm',\n 'feature',\n '.'\n ]\n if user in ips.values():\n return False\n\n if any(substr in user for substr in blacklist):\n return False\n\n ips[ip] = user\n\n with open('addresses.json', 'w') as fp:\n fp.write(json.dumps(ips))\n return True\n\n\ndef ban_user():\n with open('bans.txt', 'a') as fp:\n fp.write(get_ip()+'\\n')\n\n\ndef user_banned():\n bans = open('bans.txt').read().splitlines()\n if get_ip() in bans:\n return True\n else:\n return False\n\n\n@app.route('/settings/request-ban')\ndef request_ban():\n ban_user()\n return send_from_directory('webms', 'neil.jpg'), 201\n\n\ndef get_user_censured(webm):\n log = get_log(webm)\n if log is not None:\n user = get_user()\n log = log.split('\\n')\n for line in log:\n if user in line:\n if 'censure' in line:\n return 
def is_unpromotable(webm):\n    if webm in get_best_webms():\n        return 'already featured'\n    if webm in get_vetoed_webms():\n        return 'this video has been vetoed'\n    user = get_user()\n    if user.startswith('94.119'):\n        return 'this shared IP address is banned'\n    if user.startswith('('):\n        return 'this shared IP address is banned'\n    log = get_log(webm)\n    if log is not None:\n        log = log.split('\\n')\n        for line in log:\n            if user in line:\n                if 'marked good' in line:\n                    return 'cannot feature own videos'\n                if 'demoted' in line:\n                    return 'you demoted this before!'\n                if 'held' in line:\n                    return 'you held this last time'\n    return False\n\n\ndef is_votable(webm):\n    user = get_user()\n    log = get_log(webm)\n    if log is not None:\n        log = log.split('\\n')\n        for line in log:\n            if user in line:\n                if 'marked good' in line:\n                    return 'cannot feature own videos'\n                if 'demoted' in line:\n                    return 'you demoted this before!'\n                if 'censure' in line:\n                    return 'you already censured'\n                if 'affirm' in line:\n                    return 'you already affirmed'\n                if 'featured' in line:\n                    return 'you featured this!'\n    return False\n\n\ndef get_log(webm):\n    try:\n        fp = open('webms/metadata/' + webm, 'r')\n        string = fp.read()\n        fp.close()\n        return string\n    except IOError:\n        return None\n\n\ndef get_name(webm):\n    return os.path.splitext(webm)[0]\n\n\ndef generate_webm_token(webm, salt=None):\n    # token format: 'sha256(secret + webm + salt):salt'\n    if not salt:\n        salt = uuid4().hex\n    return sha256(app.secret_key.encode(\n        ) + webm.encode() + salt.encode()).hexdigest() + ':' + salt\n\n\ndef get_all_webms():\n    return os.listdir('webms/all')\n\n\ndef get_good_webms():\n    return os.listdir('webms/good')\n\n\ndef get_music_webms():\n    return os.listdir('webms/music')\n\n\ndef get_best_webms():\n    return os.listdir('webms/best')\n\n\ndef get_vetoed_webms():\n    return os.listdir('webms/veto')\n\n\ndef get_bad_webms():\n    return os.listdir('webms/bad')\n\n\ndef get_safe_webms():\n    return list(set(get_all_webms()) - set(get_trash_webms()))\n\n\ndef get_quality_webms():\n    \"\"\"Allows whitelisting of reports to stop the top-tier webms being 403'd\"\"\"\n    return list(set(get_good_webms()).union(get_best_webms()))\n\n\ndef get_pending_webms():\n    return list(set(get_safe_webms()) - set(get_good_webms()) -\n                set(get_bad_webms()) - set(get_music_webms()))\n\n\ndef get_trash_webms():\n    return os.listdir('webms/trash')\n\n\ndef get_held_webms():\n    return os.listdir('webms/held')\n\n\ndef get_unheld_good_webms():\n    return list(set(get_good_webms()) - set(get_held_webms()))\n\n\ndef get_stats():\n    best = len(get_best_webms())\n    return {\n        'good': (len(get_good_webms()) - best),\n        'bad': len(get_bad_webms()),\n        'music': len(get_music_webms()),\n        'held': len(get_held_webms()),\n        'best': best,\n        'pending': len(get_pending_webms()),\n        'trash': len(get_trash_webms()),\n        'total': len(get_all_webms())\n    }\n\n\ndef delete_holding_queue():\n    shutil.rmtree('webms/held')\n    os.makedirs('webms/held')\n\n\n@app.route('/', subdomain='about')\n@app.route('/', subdomain='privacy')\ndef privacy():\n    return render_template(\n        'privacypolicy.html',\n        stats=get_stats(),\n        user=get_user())\n\n\n
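# serve_webm content-negotiates: a browser that prefers text/html is redirected to the HTML display page, while clients asking for video/webm get the raw file.\n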
@app.route('/<name>.webm')\n@app.route('/<name>.webm', subdomain='<domain>')\ndef serve_webm(name, domain=None):\n    if request.accept_mimetypes.best_match(\n            ['video/webm', 'text/html']) == 'text/html':\n        return redirect(name)\n    name = name + '.webm'\n    if name not in get_all_webms():\n        if metadata_exists(name):\n            abort(410, 'This webm has been deleted.')\n        else:\n            abort(404, 'Cannot find that webm!')\n\n    if name in get_trash_webms():\n        if name not in get_quality_webms():\n            add_log(name, 'was blocked from viewing')\n            abort(403, 'webm was reported')\n\n    add_log(name, 'viewed')\n    return send_from_directory('webms/all', name)\n\n\ndef metadata_exists(webm):\n    return os.path.isfile('webms/metadata/' + webm)\n\n\n@app.route('/<name>', subdomain='<domain>')\n@app.route('/<name>')\ndef show_webm(name, domain=None):\n    name = name + '.webm'\n    queue = 'pending'\n    token = None\n    if name not in get_all_webms():\n        if metadata_exists(name):\n            abort(410, \"This webm has been deleted\")\n        else:\n            abort(404, \"No webm exists or has existed with that name\")\n    elif name not in get_safe_webms():\n        if name not in get_quality_webms():\n            abort(403)\n    if name in get_best_webms():\n        queue = 'best'\n    elif name in get_music_webms():\n        queue = 'music'\n    elif name in get_good_webms():\n        queue = 'good'\n    elif name in get_bad_webms():\n        queue = 'bad'\n    token = generate_webm_token(name)\n\n    return render_template(\n        'display.html',\n        webm=name,\n        queue=queue,\n        token=token,\n        history=get_log(name))\n\n\n@app.route('/md5/<md5>')\ndef serve_md5(md5):\n    webm = md5_to_file(md5)\n    if webm:\n        return redirect(webm)\n    else:\n        abort(404, 'md5 match not found')\n\n\n@app.route('/', subdomain='pending')\n@app.route('/')\ndef serve_random():\n    try:\n        pending = get_pending_webms()\n        webm = choice(pending)\n    except IndexError:\n        abort(404, 'no webms to show!')\n        pass\n    if user_banned():\n        return send_from_directory('webms', 'neil.jpg'), 403\n    return render_template(\n        'display.html',\n        webm=webm,\n        token=generate_webm_token(webm),\n        count=len(pending),\n        history=get_log(webm),\n        stats=get_stats(),\n        unpromotable=is_unpromotable(webm),\n        user=get_user())\n\n\n@app.route('/', subdomain='decent')\ndef serve_good():\n    global delta\n    best = None\n    held = 0\n    try:\n        good = get_unheld_good_webms()\n        if len(good) == 0:\n            delete_holding_queue()\n            good = get_unheld_good_webms()\n        else:\n            held = len(get_held_webms())\n        webm = choice(good)\n        if webm in get_best_webms():\n            best = True\n    except IndexError:\n        abort(404, 'You need to promote some webms!')\n    return render_template(\n        'display.html',\n        webm=webm,\n        token=generate_webm_token(webm),\n        queue='good',\n        count=len(good),\n        best=best,\n        held=held,\n        unpromotable=is_unpromotable(webm),\n        stats=get_stats(),\n        history=get_log(webm),\n        debug=u'\\u0394'+str(delta),\n        user=get_user())\n\n\n@app.route('/', subdomain='new.decent')\ndef serve_unjudged_good():\n    global delta\n    best = None\n    held = 0\n    try:\n        good = get_unheld_good_webms()\n        if len(good) == 0:\n            delete_holding_queue()\n            good = get_unheld_good_webms()\n        else:\n            held = len(get_held_webms())\n        webm = choice(good)\n        if webm in get_best_webms():\n            best = True\n    except IndexError:\n        abort(404, 'You need to promote some webms!')\n    unpromotable = is_unpromotable(webm)\n    if unpromotable:\n        return redirect('/')\n    else:\n        return render_template(\n            'display.html',\n            webm=webm,\n            token=generate_webm_token(webm),\n            queue='good',\n            count=len(good),\n            best=best,\n            held=held,\n            unpromotable=is_unpromotable(webm),\n            stats=get_stats(),\n            history=get_log(webm),\n            debug=u'\\u0394'+str(delta),\n            user=get_user())\n\n\n@app.route('/', subdomain='good')\ndef redirect_to_held():\n    return redirect('//decent.' 
+ app.config['SERVER_NAME'])\n\n\n@app.route('/', subdomain='held')\ndef serve_held():\n try:\n good = get_held_webms()\n webm = choice(good)\n except IndexError:\n abort(404, 'There are no held webms.')\n return render_template(\n 'display.html',\n webm=webm,\n queue='decent',\n stats=get_stats(),\n history=get_log(webm))\n\n\n@app.route('/', subdomain='best')\ndef serve_best():\n try:\n webm = choice(get_best_webms())\n except IndexError:\n abort(404, 'You need to feature some webms!')\n if get_user_censured(webm):\n return redirect('/', 302)\n token = generate_webm_token(webm)\n return render_template(\n 'display.html',\n webm=webm,\n queue='best',\n token=token,\n unpromotable=is_votable(webm),\n user=get_user())\n\n\n@app.route('/', subdomain='top')\ndef serve_best_nocensor():\n try:\n webm = choice(get_best_webms())\n except IndexError:\n abort(404, 'There are no featured webms.')\n token = generate_webm_token(webm)\n return render_template(\n 'display.html',\n webm=webm,\n queue='best',\n token=token,\n history=get_log(webm),\n unpromotable=is_votable(webm))\n\n\n@app.route('/', subdomain='music')\ndef serve_music():\n try:\n webms = get_music_webms()\n webm = choice(webms)\n except IndexError:\n abort(404, 'You need to shunt some videos!')\n token = generate_webm_token(webm)\n return render_template(\n 'display.html',\n webm=webm,\n queue='music',\n token=token,\n history=get_log(webm),\n count=len(webms))\n\n\n@app.route('/', subdomain='index')\ndef serve_best_index():\n webms = get_best_webms()\n return render_template('index.html', webms=webms)\n\n\n@app.route('/', subdomain='bad')\ndef serve_bad():\n try:\n webms = get_bad_webms()\n webm = choice(webms)\n except IndexError:\n abort(404, 'No webms have been marked bad.')\n return render_template(\n 'display.html',\n webm=webm,\n token=generate_webm_token(webm),\n queue='bad',\n count=len(webms),\n stats=get_stats())\n\n\ndef mark_good(webm):\n global delta\n add_log(webm, 'marked good')\n delta += 1\n os.symlink('webms/all/' + webm, 'webms/good/' + webm)\n\n\ndef mark_bad(webm):\n global delta\n if random() > 0.8:\n # For a small percentage of \"bad\" moves, don't actually do it\n # That way, some webms get a second chance\n add_log(webm, 'marked bad (placebo)')\n else:\n delta -= 1\n add_log(webm, 'marked bad')\n os.symlink('webms/all/' + webm, 'webms/bad/' + webm)\n\n\ndef mark_ugly(webm):\n global delta\n delta -= 5\n add_log(webm, 'reported')\n os.symlink('webms/all/' + webm, 'webms/trash/' + webm)\n\n\ndef mark_veto(webm):\n add_log(webm, 'vetoed')\n os.symlink('webms/all/' + webm, 'webms/veto/' + webm)\n\n\ndef mark_hold(webm):\n add_log(webm, 'held')\n os.symlink('webms/all/' + webm, 'webms/held/' + webm)\n\n\ndef unmark_good(webm):\n global delta\n delta -= 1\n add_log(webm, 'demoted')\n os.unlink('webms/good/' + webm)\n\n\ndef unmark_bad(webm):\n global delta\n delta += 1\n add_log(webm, 'forgiven')\n os.unlink('webms/bad/' + webm)\n\n\ndef mark_music(webm):\n global delta\n delta += 3\n os.unlink('webms/good/' + webm)\n os.symlink('webms/all/' + webm, 'webms/music/' + webm)\n add_log(webm, 'shunted')\n\n\ndef unmark_music(webm):\n global delta\n delta -= 3\n os.unlink('webms/music/' + webm)\n os.symlink('webms/all/' + webm, 'webms/good/' + webm)\n add_log(webm, 'unshunted')\n\n\ndef mark_best(webm):\n global delta\n delta += 5\n add_log(webm, 'featured ****')\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(('http://best.webm.website/' + webm +\n ' has been marked as \"best\" by ' + 
get_user()).encode(),\n                ('saraneth.lon.fluv.net', 41337))\n    os.symlink('webms/all/' + webm, 'webms/best/' + webm)\n\n\n@app.route('/settings/change-nick', methods=['POST'])\ndef change_nick():\n    nick = request.form['nick']\n    user = get_user()\n\n    ips = get_ips()\n    if nick in ips.values():\n        abort(400, 'duplicate nickname')\n\n    if user in ips:\n        abort(403, 'you have already set a nickname')\n\n    if set_user(get_ip(), nick):\n        flash('Set nickname to ' + get_user())\n        return redirect('/')\n    else:\n        abort(400, 'unacceptable nickname')\n\n\n@app.route('/moderate', methods=['POST'])\n@app.route('/moderate', methods=['POST'], subdomain='<domain>')\ndef moderate_webm(domain=None):\n    webm = request.form['webm']\n    token = request.form['token'].split(':')\n    if not (token[0] + ':' + token[1] == generate_webm_token(webm, token[1])):\n        abort(400, 'token mismatch')\n\n    verdict = request.form['verdict']\n\n    status = None\n    try:\n        if verdict == 'good':\n            status = mark_good(webm)\n        elif verdict == 'bad':\n            status = mark_bad(webm)\n        elif verdict == 'shunt':\n            if webm in get_good_webms():\n                status = mark_music(webm)\n            else:\n                abort(400, 'can only shunt good webms')\n        elif verdict == 'unshunt':\n            if webm in get_music_webms():\n                status = unmark_music(webm)\n            else:\n                abort(400, 'can only unshunt if shunted!')\n        elif verdict == 'report':\n            status = mark_ugly(webm)\n        elif verdict == 'demote':\n            if webm in get_good_webms():\n                unmark_good(webm)\n                flash('Demoted ' + webm)\n                return redirect('/', 303)\n            else:\n                abort(400, 'can only demote good webms')\n        elif verdict == 'feature':\n            if is_unpromotable(webm):\n                abort(400, 'not allowed to feature')\n            if webm in get_good_webms():\n                mark_best(webm)\n                flash('Promoted ' + webm)\n                return redirect('/', 303)\n            else:\n                abort(400, 'can only feature good webms')\n        elif verdict == 'forgive':\n            if webm in get_bad_webms():\n                unmark_bad(webm)\n                flash('Forgave ' + webm)\n                return redirect('/', 303)\n            else:\n                abort(400, 'can only forgive bad webms')\n        elif verdict == 'keep' or verdict == 'hold':\n            if webm in get_unheld_good_webms():\n                mark_hold(webm)\n            return redirect('/')\n        elif verdict == 'veto' or verdict == 'nsfw':\n            if webm in get_good_webms():\n                if webm not in get_best_webms():\n                    mark_veto(webm)\n                    return redirect('/', 303)\n                else:\n                    abort(400, 'cannot veto things already in best')\n            else:\n                abort(400, 'can only veto good webms')\n        elif verdict == 'unsure':\n            # placebo\n            add_log(webm, 'skipped')\n            return redirect('/')\n        elif verdict == 'affirm' or verdict == 'censure':\n            if not is_votable(webm):\n                if webm in get_best_webms():\n                    add_log(webm, verdict)\n            else:\n                abort(400, is_votable(webm))\n        else:\n            abort(400, 'invalid verdict')\n\n        flash('Marked ' + webm + ' as ' + verdict)\n        return redirect('/', 303)\n    except OSError:  # file exists\n        flash('Unable to mark ' + webm + ' as ' + verdict)\n        return redirect('/')\n\n\n@app.route('/stats.json', subdomain='api')\ndef api():\n    return jsonify(get_stats())\n\n\n@app.errorhandler(404)\n@app.errorhandler(400)\n@app.errorhandler(403)\n@app.errorhandler(410)\ndef page_not_found(e):\n    return render_template('error.html', e=e), e.code\n\n\n@app.errorhandler(500)\ndef server_error(e):\n    return render_template(\n        'error.html', e=e, sentry=g.sentry_event_id,\n        dsn=sentry.client.get_public_dsn('https')), 500\n\nif __name__ == '__main__':\n\n    required_dirs = [\n        'webms',\n        'webms/all',\n        'webms/bad',\n        'webms/best',\n        'webms/good',\n        'webms/held',\n        'webms/metadata',\n        'webms/music',\n        'webms/trash',\n        'webms/veto',\n    ]\n    for directory in required_dirs:\n        if not 
os.path.exists(directory):\n os.makedirs(directory)\n\n # probably should make this persist\n app.config.update(\n SECRET_KEY=uuid4().hex,\n SERVER_NAME='webm.website',\n TEMPLATES_AUTO_RELOAD=True\n )\n\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.INFO)\n\n app.run(host='0.0.0.0', port=3000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"95811823","text":"import sys\r\nimport game\r\nimport stack\r\nimport queue\r\nimport random\r\nimport time\r\nimport heapq\r\nfrom game import Game\r\n\r\ngoal_list = [0,1,2,3,4,5,6,7,8]\r\n\r\ndef heapsort(q):\r\n i=-1\r\n temp=q\r\n new=[]\r\n \r\n while temp:\r\n heapq.heapify(temp)\r\n heapq.heappush(new,heapq.heappop(temp))\r\n #heapq.heapify(new)\r\n #print(str(i)+\" \"+str(temp[0].h))\r\n #heapq.heappop(q)\r\n #temp=q\r\n i=i+1 \r\n heapq.heapify(new) \r\n return new\r\n\r\n\r\ndef AStarSearchManhattan(StartState,Z):\r\n t0=time.process_time()\r\n Puzzle= game.Game(StartState,Z)\r\n #puzzle_state.heuristic()\r\n q=[Puzzle]\r\n heapq.heapify(q)\r\n explored = set() #to store already explored puzzle lists\r\n i=0\r\n \r\n while q[0]:\r\n #state=q[0]\r\n state=heapq.heappop(q)\r\n explored.add(tuple(state.list))\r\n if (state.list==goal_list):\r\n print(\"\\n SUCCESS!!!!!!!!!!\")\r\n WriteInFile(state,i,\"A star search,Manhattan Heuristic\",time.process_time()-t0,state.depth)\r\n return 1 \r\n children=state.expand() \r\n \r\n for states in children:\r\n old=states\r\n states=states.heuristic1()\r\n #states.expand()\r\n states.g=states.h+state.cost\r\n if (tuple(states.list) not in (explored)) and (states not in (q)): \r\n heapq.heappush(q,states)\r\n #q=heapsort(q)\r\n elif states in (q):\r\n #print(\"h \"+str(old.h)+\" \"+str(states.h))\r\n if old.h > states.h:\r\n old.h=states.h\r\n #heapq.heappop(q,old)\r\n heapq.heappush(q,old) \r\n \r\n q=heapsort(q)\r\n i = i+1\r\n print(\"\\n FAILURE!!!!!!!!!\")\r\n\r\n \r\ndef AStarSearchEuclidian(StartState,Z):\r\n t0=time.process_time()\r\n Puzzle = game.Game(StartState,Z)\r\n \r\n q=[Puzzle]\r\n heapq.heapify(q)\r\n explored = set() #to store already explored puzzle lists\r\n i=0\r\n \r\n while q[0]:\r\n #state=q[0]\r\n state=heapq.heappop(q)\r\n explored.add(tuple(state.list))\r\n if (state.list==goal_list):\r\n print(\"\\n SUCCESS!!!!!!!!!!\")\r\n WriteInFile(state,i,\"A star search,Eculidian Heuristic\",time.process_time()-t0,state.depth)\r\n return 1 \r\n children=state.expand() \r\n \r\n for states in children:\r\n old=states\r\n states=states.heuristic2()\r\n #states.expand()\r\n states.g=states.h+state.cost\r\n if (tuple(states.list) not in (explored)) and (states not in (q)): \r\n heapq.heappush(q,states)\r\n #q=heapsort(q)\r\n elif states in (q):\r\n #print(\"h \"+str(old.h)+\" \"+str(states.h))\r\n if old.h > states.h:\r\n old.h=states.h\r\n #heapq.heappop(q,old)\r\n heapq.heappush(q,old) \r\n \r\n q=heapsort(q)\r\n i = i+1\r\n print(\"\\n FAILURE!!!!!!!!!\")\r\n \r\n\r\n\r\ndef DFSSearch(StartState,Z):\r\n t0=time.process_time()\r\n Puzzle = game.Game(StartState,Z)\r\n stck = stack.Stack()\r\n\r\n depth_limit = 100\r\n i=0\r\n explored = set() #to store already explored puzzle lists\r\n stck.push(Puzzle)\r\n state = Puzzle\r\n\r\n while (state.cost < depth_limit) or (not stck.isEmpty()):\r\n state = stck.pop() \r\n print(\"iteration : \",i) \r\n ##print(state.list)\r\n explored.add(tuple(state.list))\r\n\r\n if(state.list == 
goal_list):\r\n            print(\"\\n SUCCESS!!!!!!!!!!\")\r\n            run_time = time.process_time()-t0  # elapsed time, computed before writing results\r\n            WriteInFile(state,i,\"DFS\",run_time,state.depth)\r\n            return 1\r\n\r\n        children = []\r\n        children.extend(state.expandDFS())\r\n        children.reverse() \r\n\r\n        if (state.cost < depth_limit):\r\n            for states in children:\r\n                if tuple(states.list) not in (explored.union(stck.state)):\r\n                    stck.push(states)\r\n\r\n        i = i+1\r\n    run_time=time.process_time()-t0\r\n    print(\"\\n FAILURE!!!!!!!!!\")\r\n\r\n\r\ndef BFSSearch(StartState,Z):\r\n    t0=time.process_time()\r\n    Puzzle = game.Game(StartState,Z)\r\n    \r\n    q = queue.Queue()\r\n    \r\n    q.enqueue(Puzzle)\r\n    explored = set() #to store already explored puzzle lists\r\n    i=0\r\n\r\n    while not q.isEmpty():\r\n        state = q.dequeue()\r\n        #print(\"iteration : \",i)\r\n        #print(state.list)\r\n        \r\n        explored.add(tuple(state.list))\r\n        \r\n        if(state.list == goal_list):\r\n            print(\"\\n SUCCESS!!!!!!!\")\r\n            run_time = time.process_time()-t0  # elapsed time, computed before writing results\r\n            WriteInFile(state,i,\"BFS\",run_time,state.depth)\r\n            return 1\r\n\r\n        children = state.expand()\r\n        \r\n        for states in children:\r\n            if tuple(states.list) not in (explored.union(q.state)):\r\n                q.enqueue(states)\r\n        \r\n        i = i+1\r\n    \r\n    run_time=time.process_time()-t0\r\n    print(\"\\n FAILURE!!!!!!!!\") \r\n\r\ndef WriteInFile(state,i,string,run_time,depth):\r\n    \r\n    cost = \"Path Cost : \" + str(state.cost)\r\n    nodes = \"Nodes Expanded :\" + str(i)\r\n    action_list = []\r\n    fofa=[]\r\n    x=[]\r\n    m=[]\r\n    g=[0,1,2,3,4,5,6,7,8]\r\n    for i in range(state.cost):\r\n        action_list.append(state.action)\r\n        state = state.p\r\n        fofa.append(state.list)\r\n    \r\n    action_list.reverse()\r\n    fofa.reverse()\r\n    \r\n    for f in fofa:\r\n        \r\n        x=f\r\n        print(\"\\n\")\r\n        for i in x[0:3]:\r\n            \r\n            i=str(i).replace('0',' ')\r\n            print(i, end = '|')\r\n            \r\n        print(\"\\n\" + \"------\") \r\n        for i in x[3:6]: \r\n            i=str(i).replace('0',' ')\r\n            print(i, end = '|')\r\n            \r\n        print(\"\\n\" + \"------\") \r\n        for i in x[6:9]:\r\n            i=str(i).replace('0',' ')\r\n            print(i, end = '|') \r\n        \r\n        # print(\"\\n\"+ str(f))\r\n        print(\"\\n\")\r\n        \r\n    print(\" |1|2|\"+\"\\n\"+\"------\"+\"\\n\"+ \"3|4|5|\"+\"\\n\"+\"------\"+\"\\n\"+\"6|7|8|\") \r\n    \r\n    Text = [cost+\"\\n\",nodes+\"\\n\"]\r\n    with open(\"ArtificialIntelligence.txt\",\"w\") as file1:\r\n        #file1.write(\"First State = \"+ StartState +\"\\n\")\r\n        file1.write(\"You Chose = \" + string +\"\\n\")\r\n        for line in Text:\r\n            \r\n            file1.write(line)\r\n        file1.write(\"Written Path = [ \")\r\n        for each in action_list:\r\n            file1.write(each+\", \")\r\n        file1.write( \"]\")\r\n        file1.write(\"\\n\"+\"Depth= \" + str(depth))\r\n        file1.write(\"\\n\"+ \"Run Time =\" + str(run_time))\r\n        \r\n        for f in fofa:\r\n            file1.write(\"\\n\"+ str(f))\r\n        \r\n        file1.write(\"\\n\" + str(g))\r\n    \r\n\r\ndef main():\r\n\r\n    goal= [0,1,2,3,4,5,6,7,8]\r\n    print(\"Please Enter your list of 9 non-repeated numbers\")\r\n    #StartState=goal\r\n    # random.shuffle(StartState)\r\n    \r\n    #StartState=[1,4,2,0,3,5,6,7,8]\r\n    #print (StartState)\r\n    \r\n    a = [int(x) for x in input().split(\",\")]\r\n    print(a)\r\n    \r\n    if(not len(a)==9):\r\n        print(\"Please Enter 9 numbers to play the game...\")\r\n        sys.exit() \r\n    \r\n    def FindDuplicates(in_list): \r\n        unique = set(in_list) \r\n        for each in unique: \r\n            count = in_list.count(each) \r\n            if count > 1: \r\n                print ('There are duplicates in this list' ) \r\n                sys.exit() \r\n    FindDuplicates(a)\r\n    \r\n    for i,state in enumerate(a):\r\n        if state == 0:\r\n            Z = i \r\n    alg=input(\"Please Enter the algorithm with which you want to play: \")\r\n    print(\"You 
Chose\"+ ' ' + alg)\r\n \r\n if alg == 'bfs' or alg=='BFS':\r\n #print(\"BFS\")\r\n BFSSearch(a,Z)\r\n \r\n if alg == 'dfs' or alg=='DFS':\r\n #print(\"DFS\")\r\n DFSSearch(a,Z) \r\n \r\n if alg == 'ast' or alg=='AST':\r\n #print(\"A*\")\r\n AStarSearchManhattan(a,Z)\r\n AStarSearchEuclidian(a,Z)\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n main()","sub_path":"artinteldemo.py","file_name":"artinteldemo.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"107829529","text":"import pandas as pd\nimport numpy as np\nimport pytest\nfrom pylabtools.pandas import read_csv_where_row_has_value\n\n@pytest.fixture(scope='session')\ndef csv_file(tmpdir_factory):\n N = 10\n index = range(N)\n even = [(n % 2 == 0) for n in range(N)]\n dataframe = pd.DataFrame({'even': even, 'values': index}, index=index) \n filename = str(tmpdir_factory.mktemp('data').join('data.csv'))\n dataframe.to_csv(filename, index=False)\n return filename\n\n\ndef test_read_csv_where_row_has_value__reads_correct_rows(csv_file):\n data = read_csv_where_row_has_value(csv_file, usecol='even', value=1)\n print(data)\n assert np.array_equal(data['values'].values, np.array([0, 2, 4, 6, 8]))\n\ndef test_read_csv_where_row_has_value__no_usecol_throws_exeption(csv_file):\n with pytest.raises(AssertionError):\n read_csv_where_row_has_value(csv_file, value=1)\n\ndef test_read_csv_where_row_has_value__no_value_throws_exeption(csv_file):\n with pytest.raises(AssertionError):\n read_csv_where_row_has_value(csv_file, usecol='even')\n \ndef test_read_csv_where_row_has_value__different_chunk_sizes(csv_file):\n data0 = read_csv_where_row_has_value(csv_file, usecol='even', value=1, chunksize=None)\n data1 = read_csv_where_row_has_value(csv_file, usecol='even', value=1, chunksize=1)\n print(data0)\n print(data1)\n assert data0.equals(data1)\n \n","sub_path":"tests/test_read_csv_where_row_has_value.py","file_name":"test_read_csv_where_row_has_value.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"422207614","text":"#!/usr/bin/python2.7\n#-*- coding: utf-8 -*-\n\nLienSondage = \"Data/SondageHabitudes.csv\"\nimport csv\nfrom random import sample, seed\nfrom Personne import *\nfrom math import sqrt\n\nclass Foyer:\n\n\tdef __init__(self, nombre_individu):\n\n\t\tself.temperature = 10.0\n\t\tself.liste_personne = []\n\n\t\tself.climatisation_presente = True\n\t\tself.chauffage = True\n\t\tself.frigo = Frigo(\"Frigo\",175+randrange(-25,25))\n\t\tself.frigo.allume=True\n\n\t\tself.nb_machine_a_laver = 0\n\t\tself.machine_a_laver = Appareil(\"Machine_a_laver\",470+randrange(-47,47))\n\t\tself.heure_jour_on_off_machine_a_laver = dict() #Dictionnaire permettant de stocker l'heure d'allumage de la machine\n\n\t\tself.nb_lave_vaisselle = 0\n\t\tself.lave_vaisselle = Appareil(\"Lave_vaisselle\",500+randrange(-50,50))\n\t\tself.heure_jour_on_off_lave_vaisselle = dict() #Dictionnaire permettant de stocker l'heure d'allumage de la machine\n\n\t\tself.nb_seche_linge = 0\n\t\tself.seche_linge = Appareil(\"Seche_linge\",1700+randrange(-170,170))\n\t\tself.heure_jour_on_off_seche_linge = dict() #Dictionnaire permettant de stocker l'heure d'allumage de la machine\n\t\tself.radiateur=Appareil(\"Radiateur 1\",750+randrange(-50,50))\n\n\t\tself.climatisation=Appareil(\"climatisation\",2500+randrange(-250,250))\n\n\t\tself.nombre_individu = 
nombre_individu\n\t\tself.ajouter_individu()\n\t\tself.habitude_foyer()\n\t\tself.surface_mur = sqrt(25*nombre_individu)*4*2.5\n\t\tself.epaisseur_mur = 0.20\n\t\tself.volume = 25*nombre_individu*2.5\n\t\tself.coef_isolation=1.5\n\n\tdef habitude_foyer(self):\n\t\tcpt_climatisation = 0\n\t\tcpt_chauffage = 0\n\n\t\tfor individu in self.liste_personne:\n\t\t\tself.nb_machine_a_laver = individu.machine_a_laver if (self.nb_machine_a_laver < individu.machine_a_laver) else self.nb_machine_a_laver\n\t\t\tself.nb_lave_vaisselle = individu.lave_vaisselle if (self.nb_lave_vaisselle < individu.lave_vaisselle) else self.nb_lave_vaisselle\n\t\t\tself.nb_seche_linge = individu.seche_linge if (self.nb_seche_linge < individu.seche_linge) else self.nb_seche_linge\n\t\t\tif individu.climatisation:\n\t\t\t\tcpt_climatisation += 1\n\t\t\tif individu.chauffage:\n\t\t\t\tcpt_chauffage += 1\n\n\t\tif(float(cpt_climatisation)/float(self.nombre_individu) > 0.5):\n\t\t\tself.climatisation_presente = True\n\t\tif(float(cpt_chauffage)/float(self.nombre_individu) > 0.5):\n\t\t\tself.chauffage = True\n\n\n\tdef choix_population(self):\n\t\twith open(LienSondage, 'rb') as FichierCsv:\n\t\t\tlignes = csv.reader(FichierCsv, delimiter=',')\n\t\t\tchoix = sample(range(1,284),self.nombre_individu) # population size; shifted by 1 to skip the csv header\n\t\t\tPopulationFoyer = []\n\t\t\tfor i, line in enumerate(lignes):\n\t\t\t\tif i in choix :\n\t\t\t\t\tPopulationFoyer.append(line[1:-1])\n\n\t\treturn PopulationFoyer\n\n\tdef ajouter_individu(self):\n\t\t# Ensures the same person image never appears twice within one household\n\t\tliste_image_personne_deja_presente=[]\n\n\t\tPopulationDisponible = self.choix_population()\n\t\t# print(PopulationDisponible)\n\t\tfor ligne in PopulationDisponible:\n\t\t\tnouvel_individu = Personne(liste_image_personne_deja_presente)\n\n
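\t\t\t# Survey columns come in groups of 7: indices 0-5 are 180-minute slots starting at 06:00 (stored under key 360 + index*180), and index 6 is the midnight slot (stored under key 0).\n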
\t\t\t# Television, working days\n\t\t\tfor index, var in enumerate(ligne[0:7]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.tv_h_jt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.tv_h_jt[360 + (index)*180]=int(var)\n\n\t\t\t# Computers, working days\n\t\t\tfor index, var in enumerate(ligne[7:14]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.pc_h_jt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.pc_h_jt[360 + (index)*180]=int(var)\n\n\t\t\t# Induction hobs, working days\n\t\t\tfor index, var in enumerate(ligne[14:21]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.pai_h_jt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.pai_h_jt[360 + (index)*180]=int(var)\n\n\t\t\t# Household appliances, working days\n\t\t\tfor index, var in enumerate(ligne[21:28]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.electro_h_jt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.electro_h_jt[360 + (index)*180]=int(var)\n\n\t\t\t# Bathroom appliances, working days\n\t\t\tfor index, var in enumerate(ligne[28:35]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tif 0 in nouvel_individu.electro_h_jt :\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jt[0]=nouvel_individu.electro_h_jt[0] + int(var)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 360 + (index)*180 in nouvel_individu.electro_h_jt :\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jt[360 + (index)*180]=nouvel_individu.electro_h_jt[360 + (index)*180] + int(var)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jt[360 + (index)*180]=int(var)\n\n\t\t\t# Television, non-working days\n\t\t\tfor index, var in enumerate(ligne[35:42]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.tv_h_jnt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.tv_h_jnt[360 + (index)*180]=int(var)\n\n\t\t\t# Computers, non-working days\n\t\t\tfor index, var in enumerate(ligne[42:49]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.pc_h_jnt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.pc_h_jnt[360 + (index)*180]=int(var)\n\n\t\t\t# Induction hobs, non-working days\n\t\t\tfor index, var in enumerate(ligne[49:56]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.pai_h_jnt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.pai_h_jnt[360 + (index)*180]=int(var)\n\n\t\t\t# Household appliances, non-working days\n\t\t\tfor index, var in enumerate(ligne[56:63]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tnouvel_individu.electro_h_jnt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tnouvel_individu.electro_h_jnt[360 + (index)*180]=int(var)\n\n\t\t\t# Bathroom appliances, non-working days\n\t\t\tfor index, var in enumerate(ligne[63:70]):\n\t\t\t\tif int(var)!=0:\n\t\t\t\t\tif index==6:\n\t\t\t\t\t\tif 0 in nouvel_individu.electro_h_jnt :\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jnt[0]=nouvel_individu.electro_h_jnt[0] + int(var)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jnt[0]=int(var)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif 360 + (index)*180 in nouvel_individu.electro_h_jnt :\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jnt[360 + (index)*180]=nouvel_individu.electro_h_jnt[360 + (index)*180] + int(var)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnouvel_individu.electro_h_jnt[360 + (index)*180]=int(var)\n\n\t\t\tnouvel_individu.machine_a_laver = int(ligne[70])\n\t\t\tnouvel_individu.lave_vaisselle = int(ligne[71])\n\t\t\tnouvel_individu.seche_linge = int(ligne[72])\n\n\t\t\tif(ligne[73]==\"Oui\"):\n\t\t\t\tnouvel_individu.climatisation = True\n\n\t\t\tif(ligne[74]==\"Oui\"):\n\t\t\t\tnouvel_individu.chauffage = True\n\n\t\t\tself.liste_personne.append(nouvel_individu)\n\t\t\tliste_image_personne_deja_presente.append(nouvel_individu.num_image)\n\n\nif __name__=='__main__':\n\tseed(3)\n\ta = Foyer(1)\n\tfor i in a.liste_personne :\n\t\ti.afficher()\n\t#a.CsvParse()","sub_path":"Tomaro_v2/Foyer.py","file_name":"Foyer.py","file_ext":"py","file_size_in_byte":6549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"168739972","text":"# USAGE\r\n# python lenet_mnist.py --save-model 1 --weights output/lenet_weights.hdf5\r\n# python lenet_mnist.py --load-model 1 --weights output/lenet_weights.hdf5\r\n\r\n# import the necessary packages\r\nfrom __future__ import division\r\nfrom CNNTest import CNNTest\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom keras.optimizers import SGD\r\nfrom keras.utils import np_utils\r\nfrom scipy import ndimage\r\nimport numpy as np\r\nimport argparse\r\nimport cv2\r\nimport helper\r\nimport dataset \r\n\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"--nbConv1\", type=int, default=100,\r\n                help=\"(optional) number of convolutions in the first layer\")\r\nap.add_argument(\"--szConv1\", type=int, default=7,\r\n                help=\"(optional) size of receptive field in the first layer\")\r\nap.add_argument(\"--nbConv2\", type=int, default=150,\r\n                help=\"(optional) number of convolutions in the second layer\")\r\nap.add_argument(\"--szConv2\", type=int, default=4,\r\n                help=\"(optional) size of receptive field in the second layer\")\r\nap.add_argument(\"--nbConv3\", type=int, default=250,\r\n                help=\"(optional) number of convolutions in the third layer\")\r\nap.add_argument(\"--szConv3\", type=int, default=4,\r\n                help=\"(optional) size of receptive field in the third layer\")\r\nap.add_argument(\"--activationFun\", type=str, 
default=\"relu\",\r\n help=\"(optional) number of convolution in the first layer\")\r\nap.add_argument(\"-e\", \"--epochs\", type=int, default=20,\r\n help=\"(optional) number of training epochs\")\r\nap.add_argument(\"-s\", \"--save-model\", type=int, default=-1,\r\n help=\"(optional) whether or not model should be saved to disk\")\r\nap.add_argument(\"-c\", \"--converge\", type=int, default=-1,\r\n help=\"(optional) active convergence\")\r\nap.add_argument(\"-l\", \"--load-model\", type=int, default=-1,\r\n help=\"(optional) whether or not pre-trained model should be loaded\")\r\nap.add_argument(\"-w\", \"--weights\", type=str,\r\n help=\"(optional) path to weights file\")\r\nargs = vars(ap.parse_args())\r\n\r\n\r\ndataset = dataset.get_gtsrb()\r\n\r\n# Separation des ensembles d'apprentissage et de validations\r\ndata = dataset.data[:, np.newaxis, :, :]\r\n(trainData, testData, trainLabels, testLabels) = train_test_split(\r\n data / (65536 * 255), dataset.label.astype(\"int\"), test_size=0.33)\r\n\r\ntrainLabels = np_utils.to_categorical(trainLabels, 43)\r\ntestLabels = np_utils.to_categorical(testLabels, 43)\r\n\r\n# initialize the optimizer and model\r\nprint(\"[INFO] compiling model\")\r\nopt = SGD(lr=0.01)\r\nmodel = CNNTest.build(width=28, height=28, depth=1, classes=43,\r\n weightsPath=args[\"weights\"] if args[\"load_model\"] > 0 else None,\r\n nbConv1=args[\"nbConv1\"], conv1size=args[\"szConv1\"],\r\n nbConv2=args[\"nbConv2\"], conv2size=args[\"szConv2\"],\r\n\t\t\t\t\tnbConv3=args[\"nbConv3\"], conv3size=args[\"szConv3\"],\r\n activationFun=args[\"activationFun\"]\r\n)\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,\r\n metrics=[\"accuracy\"])\r\n\r\n# only train and evaluate the model if we *are not* loading a\r\n# pre-existing model\r\nif args[\"load_model\"] < 0:\r\n print(\"[INFO] training...\")\r\n if args[\"converge\"] > 0:\r\n model.fit(trainData, trainLabels, batch_size=128, nb_epoch=args[\"epochs\"],\r\n verbose=1)\r\n else:\r\n model.fit(trainData, trainLabels, batch_size=128,\r\n nb_epoch=args[\"epochs\"], verbose=2,\r\n validation_data=(testData, testLabels))\r\n\r\n # show the accuracy on the testing set\r\n print(\"[INFO] evaluating...\")\r\n (loss, accuracy) = model.evaluate(testData, testLabels,\r\n batch_size=128, verbose=1)\r\n print(\"[INFO] accuracy: {:.2f}%\".format(accuracy * 100))\r\n\r\n# check to see if the model should be saved to file\r\nif args[\"save_model\"] > 0:\r\n print(\"[INFO] dumping weights to file...\")\r\n model.save_weights(args[\"weights\"], overwrite=True)\r\n\r\n# randomly select a few testing digits\r\nfor i in np.random.choice(np.arange(0, len(testLabels)), size=(10,)):\r\n # classify the digit\r\n probs = model.predict(testData[np.newaxis, i])\r\n prediction = probs.argmax(axis=1)\r\n\r\n image = (helper.rgb_to_img(testData[i][0] * 255 * 65536)).astype(np.uint8)\r\n image = cv2.resize(image, (96, 96), interpolation=cv2.INTER_LINEAR)\r\n cv2.putText(image, str(prediction[0]), (5, 20),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)\r\n \r\n # show the image and prediction\r\n print(\"[INFO] Predicted: {}, Actual: {}\".format(prediction[0],\r\n np.argmax(testLabels[i])))\r\n cv2.imshow(\"Digit\", image)\r\n cv2.waitKey(0)\r\n","sub_path":"src/CNNmain.py","file_name":"CNNmain.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"542396585","text":"from __future__ import absolute_import, print_function, 
unicode_literals\n\nfrom collections import defaultdict\n\nimport pyquery\nimport six\nfrom django.conf import settings\nfrom django.utils.html import escape\nfrom django_webtest import WebTestMixin\nfrom six import text_type\nfrom webtest.forms import Checkbox\n\nfrom .base import FuncBaseMixin\nfrom .exceptions import WebTestCantUseElement, WebTestMultipleElementsException, WebTestNoSuchElementException\nfrom .utils import BrowserSessionToken, CommonMixin, get_session_store\n\ntry:\n from django.urls import reverse\nexcept ImportError:\n from django.core.urlresolvers import reverse\n\n\ndef html_norm(html):\n return html.replace('"', '\"').replace(''', \"'\").replace(''', \"'\")\n\n\nclass FuncWebTestMixin(WebTestMixin, CommonMixin, FuncBaseMixin):\n\n def __init__(self, *args, **kwargs):\n super(FuncWebTestMixin, self).__init__(*args, **kwargs)\n self._all_last_responses = defaultdict(list)\n self._all_apps = []\n\n # Public Common API\n def assertTextAbsent(self, text):\n \"\"\"\n Asserts that the text is not present on the current page\n \"\"\"\n self.assertNotIn(html_norm(escape(text)),\n html_norm(self.last_response.content.decode('utf-8')))\n\n def assertTextPresent(self, text):\n \"\"\"\n Asserts that the text is present on the current page\n \"\"\"\n self.assertIn(html_norm(escape(text)),\n html_norm(self.last_response.content.decode('utf-8')))\n\n def back(self):\n \"\"\"\n Go back in the browser.\n \"\"\"\n self.last_responses.pop()\n\n @property\n def current_url(self):\n \"\"\"\n The current full URL\n \"\"\"\n return self.last_response.request.url\n\n def follow_link(self, css_selector):\n \"\"\"\n Follows the link specified in the CSS selector.\n \"\"\"\n elems = self._make_pq(self.last_response).find(css_selector)\n if len(elems) == 0:\n raise WebTestNoSuchElementException(\"Can't find element matching '{0}'\".format(css_selector))\n\n hrefs = []\n for e in elems:\n if 'href' in e.attrib:\n hrefs.append(e.attrib['href'])\n\n if not hrefs:\n raise WebTestCantUseElement(\"No href attribute found for '{0}'\".format(css_selector))\n\n if not all(h == hrefs[0] for h in hrefs):\n raise WebTestMultipleElementsException(\"Different href values for links '{0}': '{1}'\"\n .format(css_selector, ' ,'.join(hrefs)))\n self.get_literal_url(hrefs[0])\n\n def fill(self, data):\n \"\"\"\n Fills form inputs using the values in fields, which is a dictionary\n of CSS selectors to values.\n \"\"\"\n for selector, value in data.items():\n form, field_name = self._find_form_and_field_by_css_selector(self.last_response, selector)\n form[field_name] = value\n\n def fill_by_text(self, fields):\n \"\"\"\n Same as ``fill`` except the values are text captions. 
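For example, ``self.fill_by_text({'#id_choice': 'Red'})`` would pick the option whose visible caption is 'Red' (the selector and caption here are only illustrative).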
Useful for ``select`` elements.\n \"\"\"\n for selector, text in fields.items():\n form, field_name = self._find_form_and_field_by_css_selector(self.last_response, selector)\n self._fill_field_by_text(form, field_name, text)\n\n def get_url(self, name, *args, **kwargs):\n \"\"\"\n Gets the named URL, passing *args and **kwargs to Django's URL 'reverse' function.\n \"\"\"\n return self.get_literal_url(reverse(name, args=args, kwargs=kwargs))\n\n def get_literal_url(self, url, auto_follow=True, expect_errors=False):\n \"\"\"\n Gets the passed in URL, as a literal relative URL, without using reverse.\n \"\"\"\n return self._get_url_raw(url, auto_follow=auto_follow, expect_errors=expect_errors)\n\n def is_element_present(self, css_selector):\n \"\"\"\n Returns True if the element specified by the CSS selector is present on the current page,\n False otherwise.\n \"\"\"\n return len(self._make_pq(self.last_response).find(css_selector)) > 0\n\n @property\n def is_full_browser_test(self):\n \"\"\"\n True for Selenium tests, False for WebTest tests.\n \"\"\"\n return False\n\n def set_session_data(self, item_dict):\n \"\"\"\n Set a dictionary of items directly into the Django session.\n \"\"\"\n session = self._get_session()\n for name, value in item_dict.items():\n session[name] = text_type(value)\n session.save()\n\n def new_browser_session(self):\n \"\"\"\n Creates (and switches to) a new session that is separate from previous\n sessions. Returns a tuple (old_session_token, new_session_token). These\n values should be treated as opaque tokens that can be used with\n switch_browser_session.\n \"\"\"\n # WebTestMixin creates the instance as 'self.app', so we just just move\n # that value around.\n last_app = self.app\n self.renew_app()\n return (BrowserSessionToken(last_app),\n BrowserSessionToken(self.app))\n\n def switch_browser_session(self, session_token):\n \"\"\"\n Switch to the browser session indicated by the supplied token.\n Returns a tuple (old_session_token, new_session_token).\n \"\"\"\n last_app = self.app\n self.app = session_token.value\n return (BrowserSessionToken(last_app),\n BrowserSessionToken(self.app))\n\n def submit(self, css_selector, wait_for_reload=None, auto_follow=True, window_closes=None):\n \"\"\"\n Submit the form using the input given in the CSS selector\n \"\"\"\n form, field_name = self._find_form_and_field_by_css_selector(self.last_response,\n css_selector,\n require_name=False,\n filter_selector=\"input[type=submit], button\")\n response = form.submit(field_name)\n if auto_follow:\n while 300 <= response.status_int < 400:\n response = response.follow()\n self.last_responses.append(response)\n\n def value(self, css_selector):\n \"\"\"\n Returns the value of the form input specified in the CSS selector\n \"\"\"\n form, field_name = self._find_form_and_field_by_css_selector(self.last_response,\n css_selector,\n require_name=False)\n field = form[field_name]\n if isinstance(field, Checkbox):\n return field.checked\n else:\n return field.value\n\n # WebTest specific\n\n @property\n def last_response(self):\n \"\"\"\n Returns the last WebTest response received.\n \"\"\"\n return self.last_responses[-1]\n\n # Implementation methods - private\n @property\n def last_responses(self):\n return self._all_last_responses[self.app]\n\n def _set_cookie(self, name, value):\n if six.PY2:\n value = value.encode('utf-8')\n name = name.encode('utf-8')\n\n self.app.set_cookie(name, value)\n\n def _get_session(self):\n session_key = self.app.cookies.get(settings.SESSION_COOKIE_NAME, 
None)\n if session_key is None:\n # Create new\n session = get_session_store()\n self._set_cookie(settings.SESSION_COOKIE_NAME,\n session.session_key)\n else:\n session_key = session_key.strip('\"')\n session = get_session_store(session_key=session_key)\n return session\n\n def _get_url_raw(self, url, auto_follow=True, expect_errors=False):\n \"\"\"\n 'raw' method for getting URL - not compatible between FullBrowserTest and WebTestBase\n \"\"\"\n self.last_responses.append(self.app.get(url, auto_follow=auto_follow, expect_errors=expect_errors))\n return self.last_response\n\n def _find_form_and_field_by_css_selector(self, response, css_selector, filter_selector=None,\n require_name=True):\n pq = self._make_pq(response)\n items = pq.find(css_selector)\n\n found = []\n if filter_selector:\n items = items.filter(filter_selector)\n for item in items:\n form_elem = self._find_parent_form(item)\n if form_elem is None:\n raise WebTestCantUseElement(\"Can't find form for input {0}.\".format(css_selector))\n form = self._match_form_elem_to_webtest_form(form_elem, response)\n field = item.name if hasattr(item, 'name') else item.attrib.get('name', None)\n if field is None and require_name:\n raise WebTestCantUseElement(\n \"Element {0} needs 'name' attribute in order to use it\".format(css_selector))\n found.append((form, field))\n\n if len(found) > 1:\n if not all(f == found[0] for f in found):\n raise WebTestMultipleElementsException(\n \"Multiple elements found matching '{0}'\".format(css_selector))\n\n if len(found) > 0:\n return found[0]\n\n raise WebTestNoSuchElementException(\n \"Can't find element matching {0} in response {1}.\".format(css_selector, response))\n\n def _find_parent_form(self, elem):\n p = elem.getparent()\n if p is None:\n return None\n if p.tag == 'form':\n return p\n return self._find_parent_form(p)\n\n def _fill_field_by_text(self, form, field_name, text):\n field = form[field_name]\n if field.tag == 'select':\n for val, _, t in field.options:\n if t == text:\n form[field_name] = val\n break\n else:\n raise ValueError(\"No option matched '{0}'\".format(text))\n else:\n raise WebTestCantUseElement(\"Don't know how to 'fill_by_text' for elements of type '{0}'\"\n .format(field.tag))\n\n def _match_form_elem_to_webtest_form(self, form_elem, response):\n pq = self._make_pq(response)\n forms = pq('form')\n form_index = forms.index(form_elem)\n webtest_form = response.forms[form_index]\n form_sig = {'action': form_elem.attrib.get('action', ''),\n 'id': form_elem.attrib.get('id', ''),\n 'method': form_elem.attrib.get('method', '').lower(),\n }\n webtest_sig = {\n 'action': getattr(webtest_form, 'action', ''),\n 'id': getattr(webtest_form, 'id', ''),\n 'method': getattr(webtest_form, 'method', '').lower(),\n }\n webtest_sig = {k: v if v is not None else '' for k, v in webtest_sig.items()}\n assert form_sig == webtest_sig\n return webtest_form\n\n def _make_pq(self, response):\n # Cache to save parsing every time\n if not hasattr(self, '_pq_cache'):\n self._pq_cache = {}\n if response in self._pq_cache:\n return self._pq_cache[response]\n pq = pyquery.PyQuery(response.content)\n self._pq_cache[response] = pq\n return pq\n","sub_path":"django_functest/funcwebtest.py","file_name":"funcwebtest.py","file_ext":"py","file_size_in_byte":11403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"213081945","text":"#########################################\n#\n# Utility functions for the FMM\n#\n# (C) Ryan Pepper, 2018\n# University of 
Southampton, UK\n#\n#\n#########################################\nimport sympy as sp\nfrom sympy.polys.orderings import monomial_key\nfrom sympy.polys.monomials import itermonomials as sp_itermonomials\nimport functools\n\n\nq, x, y, z = sp.symbols('q x y z')\n\ndef TriangleNumbers(n):\n \"\"\"\n Returns the nth triangle number\n \"\"\"\n return int((n * (n + 1)) / 2)\n\n@functools.lru_cache(maxsize=None)\ndef Nterms(p):\n \"\"\"\n Determines the number of terms in a multipole expansion of order\n n\n \"\"\"\n if p < 0:\n return 0\n else:\n return int(sum([TriangleNumbers(i) for i in range(p + 2)]))\n\n\ndef new_itermonomials(symbols, lower_order, upper_order):\n monom_key = monomial_key('grevlex', symbols)\n monoms = itermonomials(symbols, upper_order)\n monoms = sorted(monoms, key=monom_key)\n new_monoms = []\n for monom in monoms:\n monom_dict = monom.as_powers_dict()\n order = 0\n for symbol in symbols:\n order += monom_dict[symbol]\n if order >= lower_order:\n new_monoms.append(monom)\n return set(new_monoms)\n\n\n\n\n\"\"\"Tools and arithmetics for monomials of distributed polynomials. \"\"\"\n\nfrom itertools import combinations_with_replacement, product\nfrom sympy.core import Mul, S\n\n#\n#\n# def itermonomials(variables, degree):\n# \"\"\"\n# Generate a set of monomials of the given total degree or less.\n#\n# Given a set of variables `V` and a total degree `N` generate\n# a set of monomials of degree at most `N`. The total number of\n# monomials in commutative variables is huge and is given by the\n# following formula:\n#\n# .. math::\n#\n# \\frac{(\\#V + N)!}{\\#V! N!}\n#\n# For example if we would like to generate a dense polynomial of\n# a total degree `N = 50` in 5 variables, assuming that exponents\n# and all of coefficients are 32-bit long and stored in an array we\n# would need almost 80 GiB of memory! 
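Concretely, for #V = 5 and N = 50 that count is (5 + 50)! / (5! 50!) = binomial(55, 5) = 3,478,761 monomials.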
Fortunately most polynomials,\n# that we will encounter, are sparse.\n#\n# Examples\n# ========\n#\n# Consider monomials in commutative variables `x` and `y`\n# and non-commutative variables `a` and `b`::\n#\n# >>> from sympy import symbols\n# >>> from sympy.polys.monomials import itermonomials\n# >>> from sympy.polys.orderings import monomial_key\n# >>> from sympy.abc import x, y\n#\n# >>> sorted(itermonomials([x, y], 2), key=monomial_key('grlex', [y, x]))\n# [1, x, y, x**2, x*y, y**2]\n#\n# >>> sorted(itermonomials([x, y], 3), key=monomial_key('grlex', [y, x]))\n# [1, x, y, x**2, x*y, y**2, x**3, x**2*y, x*y**2, y**3]\n#\n# >>> a, b = symbols('a, b', commutative=False)\n# >>> itermonomials([a, b, x], 2)\n# {1, a, a**2, b, b**2, x, x**2, a*b, b*a, x*a, x*b}\n#\n#\n# \"\"\"\n# if degree < 0:\n# return set()\n# if not variables or degree == 0:\n# return [S(1)]\n# # Force to list in case of passed tuple or other incompatible collection\n# variables = list(variables) + [S(1)]\n# if all(variable.is_commutative for variable in variables):\n# return [Mul(*item) for item in combinations_with_replacement(variables, degree)]\n# else:\n# return [Mul(*item) for item in product(variables, repeat=degree)]\n\n\n\ndef itermonomials(symbols, max_degree, min_degree=0):\n monoms = list(sp_itermonomials(symbols, max_degree))\n new_monoms = []\n for monom in monoms:\n monom_dict = monom.as_powers_dict()\n order = 0\n for symbol in symbols:\n order += monom_dict[symbol]\n if order >= min_degree:\n new_monoms.append(monom)\n return set(new_monoms)\n\n\ndef generate_mappings(order, symbols, key='grevlex', source_order=0):\n \"\"\"\n generate_mappings(order, symbols, key='grevlex'):\n\n Generates a set of mappings between three-tuples of\n indices denoting monomials and and array indices.\n Returns both the forward and backward maps.\n\n Inputs:\n order: int\n Maximum monomial order\n\n symbols: list\n List of sympy.Symbol type objects\n\n source_order: int\n Integer describing order of o\n\n Returns:\n dict:\n Forward mapping from n-tuple to array index.\n\n dict:\n Reversed version; mapping from array index to\n tuple mapping.\n\n Example:\n >>> x, y, z = sp.symbols('x y z')\n >>> map, rmap = generate_mappings(1, [x, y, z])\n >>> print(map):\n {(0, 0, 0): 0, (1, 0, 0): 1, (0, 1, 0): 2, (0, 0, 1): 3}\n >>> print(rmap):\n {0: (0, 0, 0), 1: (1, 0, 0), 2: (0, 1, 0), 3: (0, 0, 1)}\n \"\"\"\n if order < source_order:\n raise ValueError(\n \"source_order must be <= order for meaningful calculations to occur\"\n )\n\n x, y, z = symbols\n rsymbols = [z, y, x]\n\n monoms = itermonomials(symbols, order, source_order)\n if key:\n monom_key = monomial_key(key, rsymbols)\n monoms = sorted(monoms, key=monom_key)\n\n index_dict = {}\n rindex_dict = {}\n for i, monom in enumerate(monoms):\n d = monom.as_powers_dict()\n n = d[x], d[y], d[z]\n index_dict[n] = i\n rindex_dict[i] = n\n return index_dict, rindex_dict\n","sub_path":"fmmgen/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"481362317","text":"tests = int(input())\nnormal = []\ninter = []\nfor i in range(0,tests):\n normal.append(list(input()))\nfor i in range(0,len(normal[0])):\n for j in range(0,tests):\n if not normal[0][i] in normal[j]:\n break\n if j == tests-1:\n inter.append(normal[0][i])\ninter = list(set(inter))\nequ = inter[:]\ni = 0\nwhile i < len(equ):\n for j in range(0,tests):\n if normal[j].count(equ[i])>1 or 
equ[i]==normal[j][0] or equ[i]==normal[j][len(normal[j])-1]:\n equ.pop(i)\n i-=1\n break\n i+=1\nif len(equ) == 0 or equ==['b','c']:\n print(\"noway\")\nelse:\n if inter==['g', 'b'] or inter==['b', 'g']:\n print(\"a0\")\n print(\"b1\")\n print(\"c2\")\n print(\"d*\")\n print(\"f+\")\n print(\"g=\")\n elif inter==['c', 'e', 'd'] or inter==['d', 'e', 'c'] or inter ==['c','d','e']:\n print(\"a6\")\n print(\"b*\")\n print(\"d=\")\n print(\"f+\")\n else:\n print(normal)\n print(inter)\n print(equ)","sub_path":"Code/CodeRecords/2954/60592/300223.py","file_name":"300223.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"50200609","text":"# coding:utf-8\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm, TextInput, NumberInput, Textarea, Select\nfrom .models import *\nfrom django.forms.models import inlineformset_factory\nfrom product.models import *\nfrom django.utils.translation import ugettext_lazy as _\n\nclass SearchForm(forms.Form):\n\tfield = forms.CharField(widget=forms.TextInput(attrs={\n\t\t'class':'form-inputs'\n\t\t}), required=False, label='Buscar'\n\t)\n\terror_css_class = 'has-error'\n\n\nclass SearchandDateForm(forms.Form):\n\tfield = forms.CharField(widget=forms.TextInput(attrs={\n\t\t'class':'form-inputs'\n\t\t}), required=False, label='Buscar'\n\t)\n\tstart = forms.DateField(widget=forms.TextInput(attrs={\n\t\t'class':'form-inputs datepicker',\n\t\t'type':'date',\n\t\t'placeholder':'dd/mm/aaaa',\n\t\t'size':10}), required=False, label='Del'\n\t)\n\tend = forms.DateField(widget=forms.TextInput(attrs={\n\t\t'class':'form-inputs datepicker',\n\t\t'type':'date',\n\t\t'placeholder':'dd/mm/aaaa',\n\t\t'size':10}), required=False, label='Hasta'\n\t)\n\terror_css_class = 'has-error'\n\n\nclass Store_entryForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Store_entry\n\t\tfields = ['product', 'size']\n\t\tlabels = {\n\t\t\t'product': _('Producto'),\n\t\t\t'size': _('Cantidad'),\n\t\t}\n\t\twidgets = {\n\t\t\t'product':Select(attrs={'class':'form-inputs'}),\n\t\t\t'size':TextInput(attrs={'class':'form-inputs'}),\n\t\t}\n","sub_path":"jaed/devel/jaed/store/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"513774605","text":"from django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom .serializers import Producto, ProductoSerializer, Categoria, CategoriaSerializer\nfrom elasticsearch import Elasticsearch\n\n\n@receiver(post_save, sender=Producto, dispatch_uid=\"update_record_producto\")\ndef update_record_producto(sender, instance, **kwargs):\n producto = ProductoSerializer(instance)\n payload = {\n 'titulo': producto.data['titulo'],\n 'detalle': producto.data['detalle'],\n 'precio': producto.data['precio']\n }\n\n es = Elasticsearch()\n es.index(index=\"producto\", doc_type='producto', id=producto.data['id'], body=payload)\n\n\n@receiver(post_delete, sender=Producto, dispatch_uid=\"delete_record_producto\")\ndef delete_record_producto(sender, instance, *args, **kwargs):\n producto = ProductoSerializer(instance)\n es = Elasticsearch()\n es.delete(index=\"producto\", doc_type='producto', id=producto.data['id'], ignore=[400, 
404])\n","sub_path":"Loysa/api/api/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"344180080","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 18-10-11\n\n@author: Suspext\n\"\"\"\nimport re\nimport tensorflow as tf\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.training import moving_averages\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n__all__ = ['NetWork', 'Conv2D', 'Avg_pool', 'Max_pool', 'BN', 'FC']\nkey_type_list = ['conv', 'fc',\n 'maxpool', 'mp', 'maxp', # max pool\n 'avgpool', 'ap', 'avgp', # avg pool\n ]\nactivation_list = ['relu', 'softmax']\nBN_DECAT = 0.9997\nBN_ESPILON = 0.001\n\n\nclass NetWork(object):\n conv, pool, fc = [], [], []\n logits = None\n\n deep = 0\n\n weights_init_type = 0\n weights_decay = None\n biases_init_value = 0.1\n\n def __init__(self, inp_shape, nb_classes, y_mode='oh', mission='cls', X_dtype=tf.float32, y_dtype=tf.int32):\n self.HEIGHT, self.WIDTH, self.CHANNEL = inp_shape\n self.SHAPE = [self.HEIGHT, self.WIDTH, self.CHANNEL]\n self.MISSION = mission\n print('input :', self.SHAPE)\n self.NUM_CLASSES = nb_classes\n self.X_tf = tf.placeholder(X_dtype, [None, self.HEIGHT, self.WIDTH, self.CHANNEL], name='X')\n y_shape = [None, ] if y_mode == 'le' else [None, self.NUM_CLASSES]\n y_shape = [None, 5] if self.MISSION == 'loc' else y_shape\n self.y_tf = tf.placeholder(y_dtype, y_shape, name='y') # gt_bbox when location mission\n\n self.NAME = 'NetWork'\n\n def get_network(self, include_top=True, deep=0, pooling=None):\n ret = self.logits if include_top else self.conv[deep-1]\n if not include_top:\n size = ret.get_shape().as_list()[1:3]\n if pooling == 'max':\n ret = Max_pool(ret, size, size, isGlobal=True)\n elif pooling == 'avg':\n ret = Avg_pool(ret, size, size, isGlobal=True)\n return ret, self.y_tf\n\n def compile_weights(self, weights_init_type=0, weights_decay=None, biases_init_value=0.1):\n self.weights_init_type = weights_init_type\n self.weights_decay = weights_decay\n self.biases_init_value = biases_init_value\n\n def build(self, cnn_dict, x, save_tensor=False):\n \"\"\"\n only build the Conv, Pool, FC\n \"\"\"\n for key in cnn_dict.keys():\n key_type, layer_name = _process_key(key)\n p = [None] * 6\n for i in range(len(cnn_dict[key])):\n p[i] = cnn_dict[key][i]\n\n input_shape = x.get_shape().as_list()\n if key_type == 'fc': # channel, activation, keep_drop, flatten, bn\n s = input_shape[1] * input_shape[2] * input_shape[3] if len(input_shape) == 4 else input_shape[1]\n x = FC(x, [s, p[0]], activation=p[1], keep_prob=p[2], flatten=p[3], bn=p[4], name=layer_name,\n bias_init=self.biases_init_value, init_type=self.weights_init_type, decay=self.weights_decay)\n elif key_type == 'conv': # kernel_size, stribe, padding, channels, activation, bn\n x = Conv2D(x, [x.get_shape().as_list()[3], p[3]], p[0], strides=p[1], padding=p[2],\n activation=p[4] if len(p) > 4 else 'relu', bn=p[5], name=layer_name,\n bias_init=self.biases_init_value, init_type=self.weights_init_type, decay=self.weights_decay)\n elif key_type in ['maxpool', 'mp', 'maxp']: # kernel_size, stribe, padding, global\n x = Max_pool(x, p[0], strides=p[1], padding=p[2], name=layer_name, isGlobal=p[3])\n elif key_type in ['avgpool', 'ap', 'avgp']: # kernel_size, stribe, padding, global\n x = Avg_pool(x, p[0], strides=p[1], padding=p[2], name=layer_name, isGlobal=p[3])\n if save_tensor:\n 
self.save_tensor(x, key_type)\n return x\n\n def branch(self, branch_dict, x, shortcut=False, save_tensor=True, activation=None, conv=None, name='branches'):\n branches = []\n with tf.name_scope(name):\n for bd in branch_dict.keys():\n with tf.name_scope(bd):\n branches.append(self.build(branch_dict[bd], x))\n out = tf.concat(branches, axis=3)\n if shortcut:\n out = self.shortcut(x, out, activation, conv)\n if save_tensor:\n self.save_tensor(out)\n return out\n\n def shortcut(self, x, b, activation=None, conv=None):\n x = tf.identity(x) if conv is None else self.build({'conv': conv}, x)\n x = tf.add(b, x)\n if activation is not None:\n x = _activation(x, activation)\n return x\n\n # def bottleneck(self, x, shape, k, weight, name):\n # # BN ReLU conv1 BN ReLU conv3\n # with tf.name_scope(name):\n # x = BN(x, name='bn')\n # x = tf.nn.relu(x, name='relu')\n # x = self.build({'conv1': [],\n # 'conv2': []}, x, False)\n # x = Conv2D(x, [shape[2], k], [1, 1], activation='relu', bn=True,\n # bias_init=weight[2], init_type=weight[0], decay=weight[1], name='conv')\n # x = Conv2D(x, [k, k], [3, 3], activation=None, bn=False,\n # bias_init=weight[2], init_type=weight[0], decay=weight[1], name='conv')\n # return x\n\n def save_tensor(self, x, key='conv'):\n dtype, _ = _process_key(key)\n if dtype == 'fc':\n self.fc.append(x)\n self.deep += 1\n elif dtype in ['maxpool', 'mp', 'maxp', 'avgpool', 'ap', 'avgp']:\n if len(self.pool) < self.deep:\n self.pool += [None] * (self.deep - 1 - len(self.pool))\n self.pool.append(x)\n elif dtype == 'conv':\n self.conv.append(x)\n self.deep += 1\n print(key + str(self.deep), ':', x.get_shape().as_list()[1:4])\n\ndef _process_key(key):\n layer_name = key\n if '_' in key:\n key_type, layer_name = key.split('_')\n else:\n key_type = re.match('[a-z]+', key).group()\n return key_type, layer_name\n\ndef _variable_summaries(var, name):\n with tf.name_scope(name):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n tf.summary.histogram('histogram', var)\n\ndef _weights(shape, decay=None, dtype=tf.float32, name='weights', init_type=0, trainable=True):\n factor, mode, uniform = None, None, None\n if init_type == 0: # MSRA initializer\n factor, mode, uniform = 2.0, 'FAN_IN', False\n elif init_type == 1: # Convolutional Architecture for Fast Feature Embedding\n factor, mode, uniform = 1.0, 'FAN_IN', True\n elif init_type == 2: # To get understanding the difficulty of training deep feed forward neural networks\n factor, mode, uniform = 1.0, 'FAN_AVG', True\n elif init_type == 3: # xavier initializer\n factor, mode, uniform = 1.0, 'FAN_AVG', True\n with tf.name_scope(name) as scope:\n initial = tf.contrib.layers.variance_scaling_initializer(factor, mode=mode, uniform=uniform)\n weight = tf.get_variable(scope, shape, initializer=initial, dtype=dtype, trainable=trainable)\n # tf.add_to_collection(tf.GraphKeys.WEIGHTS, weight)\n # if decay is not None:\n # loss = tf.multiply(tf.nn.l2_loss(weight), decay, name=name + '_decay_loss')\n # tf.add_to_collection('weight_losses', loss)\n # _variable_summaries(weight, scope)\n return weight\n\ndef _biases(shape, init_value=0.0, dtype=tf.float32, name='biases'):\n with tf.name_scope(name) as scope:\n initial = tf.constant_initializer(init_value, dtype=dtype)\n biases = tf.get_variable(scope, shape, initializer=initial, dtype=dtype)\n # _variable_summaries(biases, scope)\n return biases\n\ndef _activation(x, activation):\n assert activation in activation_list\n if activation == 'relu':\n return tf.nn.relu(x, name='relu')\n elif 
activation == 'softmax':\n return tf.nn.softmax(x, name='softmax')\n\ndef _check(kernel_size, strides, padding):\n def _pool_size_check(size):\n assert type(size) in [list, int]\n if type(size) != list:\n return [1, size, size, 1]\n else:\n assert len(size) in [2, 4]\n if len(size) == 2:\n kernel = [1]\n kernel.extend(size)\n kernel.append(1)\n else:\n kernel = size\n return kernel\n assert padding in ['SAME', 'S', 'VALID', 'V']\n kernel_size = _pool_size_check(kernel_size)\n strides = _pool_size_check(strides)\n if padding == 'S':\n padding = 'SAME'\n if padding == 'V':\n padding = 'VALID'\n return kernel_size, strides, padding\n\ndef Conv2D(x, channels, kernel_size, strides=[1, 1], padding='SAME', activation='relu', bn=True,\n bias_init=None, bias_dtype=tf.float32, name='conv', **weights_args):\n scope_name = name + '_' + str(kernel_size[0]) + 'x' + str(kernel_size[1]) + '_' + str(strides[0]) + padding[0]\n k_size = kernel_size.copy()\n k_size.extend(channels)\n k_size, strides, padding = _check(k_size, strides, padding)\n with tf.name_scope(scope_name):\n conv_weights = _weights(k_size, name='weight', **weights_args)\n conv = tf.nn.conv2d(x, conv_weights, strides=strides, padding=padding)\n if bias_init is not None:\n conv_biases = _biases(channels[1], init_value=bias_init, dtype=bias_dtype, name='biases')\n conv = tf.nn.bias_add(conv, conv_biases)\n if bn is None or bn:\n conv = BN(conv, name='bn')\n if activation is not None:\n conv = _activation(conv, activation) # 设置activation可以为None,对应Block的最后一层\n return conv\n\ndef Max_pool(x, kernel_size, strides=[1, 1, 1, 1], padding='SAME', isGlobal=False, name='max_pool'):\n scope_name = name + '_' + str(kernel_size[0]) + 'x' + str(kernel_size[1]) + '_' + str(strides[0]) + padding[0]\n if isGlobal:\n scope_name = 'global_maximum_pooling'\n with tf.name_scope(scope_name) as scope:\n kernel_size, strides, padding = _check(kernel_size, strides, padding)\n x = tf.nn.max_pool(x, kernel_size, strides, padding, name=scope)\n if isGlobal:\n x = tf.reduce_max(x, axis=[1, 2])\n return x\n\ndef Avg_pool(x, kernel_size, strides=[1, 1, 1, 1], padding='SAME', isGlobal=False, name='avg_pool'):\n scope_name = name + '_' + str(kernel_size[0]) + 'x' + str(kernel_size[1]) + '_' + str(strides[0]) + padding[0]\n if isGlobal:\n scope_name = 'global_averager_pooling'\n with tf.name_scope(scope_name) as scope:\n kernel_size, strides, padding = _check(kernel_size, strides, padding)\n x = tf.nn.avg_pool(x, kernel_size, strides, padding, name=scope)\n if isGlobal:\n x = tf.reduce_mean(x, axis=[1, 2])\n return x\n\ndef FC(x, channels, activation='relu', keep_prob=None, flatten=False, bn=True,\n bias_init=None, bias_dtype=tf.float32, name='fc', **weights_args):\n with tf.name_scope(name):\n if flatten is not None and flatten:\n x = tf.reshape(x, [-1, channels[0]], name='flatten')\n fc_weights = _weights(channels, name='weight', **weights_args)\n fc = tf.matmul(x, fc_weights)\n if bias_init is not None:\n fc_biases = _biases(channels[1], init_value=bias_init, dtype=bias_dtype, name='biases')\n fc = tf.nn.bias_add(fc, fc_biases)\n if bn is None or bn:\n fc = BN(fc, name='bn')\n if activation is not None:\n fc = _activation(fc, activation)\n if keep_prob is not None:\n fc = tf.nn.dropout(fc, keep_prob)\n return fc\n\ndef BN(x, name='bn', trainable=True):\n with tf.name_scope(name):\n x_shape = x.get_shape()\n params_shape = x_shape[-1:]\n axes = list(range(len(x_shape) - 1))\n trainable = tf.convert_to_tensor(trainable, dtype='bool', name='is_training')\n\n with 
tf.name_scope('beta') as s:\n beta = tf.get_variable(s, params_shape, dtype=tf.float32, initializer=tf.zeros_initializer)\n with tf.name_scope('gamma') as s:\n gamma = tf.get_variable(s, params_shape, dtype=tf.float32, initializer=tf.ones_initializer)\n with tf.name_scope('moving_mean') as s:\n moving_mean = tf.get_variable(s, params_shape, dtype=tf.float32,\n initializer=tf.zeros_initializer, trainable=False) # False\n with tf.name_scope('moving_variance') as s:\n moving_variance = tf.get_variable(s, params_shape, dtype=tf.float32,\n initializer=tf.ones_initializer, trainable=False)\n\n # these ops will only be preformed when training.\n mean, variance = tf.nn.moments(x, axes=axes)\n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAT)\n update_moving_variance = moving_averages.assign_moving_average(moving_mean, variance, BN_DECAT)\n tf.add_to_collection('update_ops', update_moving_mean)\n tf.add_to_collection('update_ops', update_moving_variance)\n\n mean, variance = control_flow_ops.cond(trainable,\n lambda: (mean, variance),\n lambda: (moving_mean, moving_variance))\n x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_ESPILON)\n return x\n\n# TODO\ndef SPP(x, pool='avg'):\n assert pool in ['avg', 'max']\n return x\n\ndef multi_SPP(x):\n return x\n","sub_path":"Model/CNN/tf/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":13528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"301472672","text":"\nfrom ejecta import crater\nfrom geometry import sphere\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nfrom mpl_toolkits.axes_grid1 import host_subplot\nimport mpl_toolkits.axisartist as AA\nfrom matplotlib import pyplot as plot\n\nmatplotlib.rcParams.update({'font.size': 9})\n\n\nr, M, K_M, FD_M = crater.create_FD_M()\nr, V, K_V, FD_V = crater.create_FD_V()\nd = sphere.calculate_mass_diameter(M)\n\nfig, ax1 = plot.subplots()\nax2 = ax1.twinx()\n\nplot.title('Ejecta Distribution - INITIAL')\n\nax1.set_xlabel('Radius from impact center (m)')\nax1.set_ylabel('Mass (kg)', color='b')\nax1.plot(r, M)\n\nax2.set_ylabel('Velocity (m/s)', color='r')\nax2.plot(r, V, 'r')\n\nax1.grid(True)\nax2.grid(True)\nplot.savefig('ejecta.png')\n\nd, M, Mlog, V, Vlog, Nr, N, dlog, Nlog = crater.calculate_N()\n\nprint('>>> d = ' + str(d))\nprint('>>> N = ' + str(N))\nprint('>>> M = ' + str(M))\nprint('>>> V = ' + str(V))\n\nfig, ax1 = plot.subplots()\nax2 = ax1.twinx()\n\nplot.title('Particle per Diameter')\n\nax1.set_xlabel('diameter (m)')\nax1.set_ylabel('Particle Count (10^x)', color='g')\nax1.set_xlim((-0.05, 3.75))\nax1.plot(d[1:], Nlog, 'g', label='Particles')\n\nax2.set_ylabel('Mass (kg, 10^x)', color='b')\n# ax2.set_ylim((-0.05, 0.5))\nax2.plot(d, Mlog, 'b', label='Mass')\nax1.plot((0, 10), (0, 0), 'r--')\n\nax1.grid(True)\nax2.grid(True)\nplot.savefig('N.png')\n\n# ### PARTICLE DISTRIBUTION\n\nfig, ax1 = plot.subplots()\nax2 = ax1.twinx()\n\nplot.title('Particle Distribution')\n\nax1.set_xlabel('Particle Count (10^x)')\n\nax1.set_ylabel('Mass (kg, 10^x)', color='b')\nax2.set_ylabel('Velocity (m/s)', color='g')\n\nax1.plot(Nlog, Mlog[:-1])\nax2.plot(Nlog, V[:-1], 'g')\n\nax2.plot((0, 0), (-5, 250), 'r--')\nax2.plot((-10, 11), (0, 0), 'r--')\nax2.text(-6.5, 0.75, 'velocity th')\n\nax1.plot((-10, 11), (-9, -9), 'r--')\nax1.text(-6.5, -8.75, 'micrograms th')\nax1.text(-0.5, -11, '1 particle', rotation=90)\n\nax2.set_xlim((-7, 11))\nax2.set_ylim((-5, 
250))\n\nax1.grid(True)\nax2.grid(True)\nplot.savefig('NMV.png')\n\n","sub_path":"src/aim_ejecta.py","file_name":"aim_ejecta.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"340471739","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\n\nfrom .models import Film\n\n\nclass ModelTestCase(TestCase):\n\t\"\"\"\n\tTest for the models\n\t\"\"\"\n\tdef setUp(self):\n\t\tself.title = 'The Big Lebowski'\n\t\tself.summary = 'Some summary for this overrated movie'\n\t\tself.cost = 11\n\t\tself.status = 'a'\n\t\tself.film = Film(title=self.title, summary=self.summary, cost=self.cost, status=self.status)\n\n\tdef test_model_can_create_a_film(self):\n\t\told_films = Film.objects.count()\n\t\tself.film.save()\n\t\tnew_films = Film.objects.count()\n\t\tself.assertNotEqual(old_films, new_films)\n\n\tdef test_model_can_be_readable_in_the_admin(self):\n\t\tself.assertEqual(str(self.film), self.title)\n\n\nclass ViewTestCase(TestCase):\n\t\"\"\"\n\tTest for the api views\n\t\"\"\"\n\tdef setUp(self):\n\t\tuser = User.objects.create(username='testuser')\n\t\tself.client = APIClient()\n\t\tself.client.force_authenticate(user=user)\n\t\tself.film_data ={'title': 'test movie', 'summary': 'summary movie', 'cost': 1, 'status': 'a'}\n\t\tself.response = self.client.post(\n\t\t\t'/api/films/',\n\t\t\tself.film_data,\n\t\t\tformat='json')\n\n\tdef test_api_can_create_a_film(self):\n\t\tself.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n\n\tdef test_api_can_get_a_specific_film(self):\n\t\tfilm = Film.objects.get(id=1)\n\t\tresponse = self.client.get(\n\t\t\treverse('film-detail', kwargs={'pk': film.id}),\n\t\t\tformat='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\tdef test_api_can_update_a_film(self):\n\t\ttest_film = Film.objects.get()\n\t\tnew_film = {'title': 'New test title film', 'summary': 'test new summary', 'cost': 1, 'status': 'r'}\n\t\tres = self.client.put(\n\t\t\treverse('film-detail', kwargs={'pk': test_film.id}),\n\t\t\tnew_film,\n\t\t\tformat='json')\n\t\tself.assertEqual(res.status_code, status.HTTP_200_OK)\n\n\tdef test_api_can_delete_a_film(self):\n\t\ttest_film = Film.objects.get()\n\t\tresponse = self.client.delete(\n\t\t\treverse('film-detail', kwargs={'pk': test_film.id}),\n\t\t\tformat='json',\n\t\t\tfollow=True)\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)","sub_path":"catalog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"402843193","text":"def is_comment(item):\n    return isinstance(item,str) and item.startswith('#')\n\ndef execute(program):\n\n    #finds whether all lines are comment or not\n    while program :\n        item = program.pop()\n        if not is_comment(item):\n            program.append(item)\n            break\n    else:\n        print(\"All lines are comments\")\n        return\n\n    pending = []\n    while program:\n        item = program.pop()\n        if callable(item):\n            try:\n                result = item(*pending)\n            except Exception as e :\n                print(e)\n            pending = []\n            pending.append(result)\n        else:\n            pending.append(item)\n    else:\n        print(pending)\n\n    print(\"finished\")\n\n\nif __name__ == '__main__' :\n    import operator\n    program = list( reversed\n        (( '#A short program to add some constants' ,\n        5,\n        2,\n        operator.add,\n        3,\n        operator.mul)))\n\n    
execute(program)","sub_path":"Practise/StackPractise.py","file_name":"StackPractise.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"89343935","text":"\"\"\"\r\nEverything about GPIB control for the Ref Step algorithm.\r\nThe GUI should only allow one GPIB thread at a time.\r\nThread also writes raw data file to an excel sheet.\r\nLog files are written at event termination from graphframe.py.\r\n\"\"\"\r\n\r\nimport stuff\r\nimport csv\r\nimport time\r\nimport wx\r\nimport numpy as np\r\nimport gpib_inst\r\nimport openpyxl\r\n\r\nclass GPIBThreadF(stuff.WorkerThread):\r\n \"\"\"\r\n Ref step main thread\r\n \"\"\"\r\n def __init__(self, notify_window, EVT, param, data,start_time,OverideSafety):\r\n stuff.WorkerThread.__init__(self, notify_window, EVT, param, data,start_time,OverideSafety)\r\n #param[0] holds self.inst_bus, the chosen real or simulated visa\r\n self.inst_bus = param[0] #visa or simulated visa2\r\n self.grid = param[1]\r\n self.start_row = param[2]\r\n self.stop_row = param[3]\r\n self.dvm_nordgs_col = param[4]\r\n self.voltmeter = param[5]\r\n self.dvm_range_col = param[6]\r\n self.sourceX = param[7]\r\n self.sourceX_range_col = param[8]\r\n self.sourceX_setting_col = param[9]\r\n self.sourceS = param[10]\r\n self.sourceS_range_col = param[11]\r\n self.sourceS_setting_col = param[12]\r\n self.delay_col = param[13]\r\n self.Analysis_file_name = param[14]\r\n self.OverideSafety = OverideSafety\r\n self.MadeSafe = False\r\n\r\n time_bit = time.strftime(\"%Y.%m.%d.%H.%M.%S\",time.localtime())\r\n log_file_name = 'log_files/log.'+time_bit+\".txt\"\r\n self.raw_file_name = 'data/raw.'+time_bit\r\n self.wb = openpyxl.Workbook()\r\n self.sh = self.wb.active\r\n self.logfile = open(log_file_name,'w')\r\n first_line = [self.read_grid_cell(6, i) for i in range(self.grid.GetNumberCols())]\r\n first_line = first_line+['start time','end time','readings...']\r\n\r\n for i in range(6):\r\n self.sh.append( [self.read_grid_cell(i,column) for column in range(10)])\r\n #append empty things, but they can contain more useful info later\r\n #dates, instruments, start time finish time?\r\n #this skeeps the analysis sheets compatible with the normal input files.\r\n self.sh.append(first_line)\r\n #must be the last command:\r\n self.start()\r\n\r\n \r\n def PrintSave(self, text):\r\n \"\"\"\r\n Prints a string, and saves to the log file too.\r\n \"\"\"\r\n if self._want_abort: #stop the pointless printing if the thread wants to abort\r\n return\r\n else:\r\n self.logfile.write(str(text)+\"\\n\")\r\n print(str(text))\r\n \r\n def com(self, command,send_item=None):\r\n \"\"\"\r\n Combine the instrument command with an item (if one is specified).\r\n All commands go through this function, as it wraps each command\r\n with a check to see if the thread should abort. 
Saves having to\r\n surround each instrument send command with an if statement.\r\n Will also recieve the status of the command from the instrument\r\n class, and if it failes this will flag the thread to abort.\r\n \"\"\"\r\n if not self._want_abort:\r\n if send_item != None:\r\n sucess,val,string = command(send_item)\r\n self.PrintSave(string)\r\n if sucess == False:\r\n self._want_abort = 1\r\n return val\r\n else:\r\n sucess,val,string = command()\r\n self.PrintSave(string)\r\n if sucess == False:\r\n self._want_abort = 1\r\n return val\r\n else:\r\n return 0\r\n \r\n def MakeSafe(self):\r\n \"\"\"\r\n Make the instruments safe, bypass the self.com\r\n command in order to pass it directly to the instruments,\r\n that way recording weather or not the making safe worked\r\n for the instruments. Reports to GUI if it is unsafe.\r\n \"\"\"\r\n sucessX,valX,stringX = self.sourceX.make_safe()\r\n sucessS,valS,stringS = self.sourceS.make_safe()\r\n sucessM,valM,stringM = self.voltmeter.make_safe()\r\n self.PrintSave(\"Make safe sent. status is:\")\r\n self.PrintSave(\"SourceX {}\\nSourceS {}\\nMeter {}\".format(sucessX,sucessS,sucessM))\r\n if not all([sucessX,sucessS,sucessM]):\r\n wx.PostEvent(self._notify_window, stuff.ResultEvent(self.EVT, \"UNSAFE\"))\r\n else:\r\n wx.PostEvent(self._notify_window, stuff.ResultEvent(self.EVT, None))\r\n \r\n def Error_string_maker(self):\r\n \"\"\"\r\n Reads all errors in the instruments, and appends them together in a string.\r\n If there are errors during the running of the code, they will be printed\r\n on the left of the table as a warning flag.\r\n \"\"\"\r\n #somehow still prints \"0\" when the instruments have no error.\r\n string = \" \"\r\n #query instrument errors, and save individual error strings.\r\n self.com(self.sourceX.query_error)\r\n x_esr = str(self.com(self.sourceX.read_instrument))\r\n self.PrintSave('sourceX ESR = '+x_esr)\r\n if x_esr != self.sourceX.com['NoError']: string = 'Source X: '+x_esr\r\n self.com(self.sourceS.query_error)\r\n s_esr = str(self.com(self.sourceS.read_instrument))\r\n self.PrintSave('sourceS ESR = '+s_esr)\r\n if s_esr != self.sourceS.com['NoError']: string = string +' source S: '+s_esr\r\n self.com(self.voltmeter.query_error)\r\n m_esr = str(self.com(self.voltmeter.read_instrument))\r\n self.PrintSave('meter ESR = '+m_esr)\r\n if m_esr != self.voltmeter.com['NoError']: string = string +' meter: ' +m_esr\r\n self.PrintSave('')\r\n return string\r\n\r\n def set_grid_val(self,row,col,data):\r\n \"\"\" Set the value of a grid cell\"\"\"\r\n wx.CallAfter(self.grid.SetCellValue, row, col, str(data))\r\n \r\n def read_grid_cell(self,row,col):\r\n \"\"\" Read a grid cell\"\"\"\r\n value = 0\r\n if not self._want_abort:\r\n value = self.grid.GetCellValue(row, col)\r\n return value\r\n \r\n def end(self):\r\n \"\"\"\r\n Function to close files and post an event to the main grid,\r\n letting it know that the thread has ended.\r\n \"\"\"\r\n self.com(self.sourceS.Standby)\r\n self.com(self.sourceX.Standby)\r\n self.logfile.close()\r\n self.wb.save(filename = str(self.raw_file_name+'.xlsx'))\r\n wx.CallAfter(self.Analysis_file_name.SetValue,self.raw_file_name+'.xlsx')\r\n wx.PostEvent(self._notify_window, stuff.ResultEvent(self.EVT, 'GPIB ended'))\r\n\r\n def wait(self,wait_time):\r\n \"\"\"\r\n safely loop until time is up, checking for abort\r\n \"\"\"\r\n t=time.time()\r\n while time.time() abs(0.05*range_max):\r\n #taking abs to allow for negative ranging.\r\n #readings is more than 5% away from nominal, abort\r\n 
error_strings = error_strings+' reading '+str(i)+' is more than 5% from nominal.'\r\n self.PrintSave(\"Readings exceeded 5% from nominal, aborting. Check settle times, ranges, and connections\")\r\n self._want_abort = 1\r\n elif abs((a-nominal_reading)) > abs(0.001*range_max):\r\n #readings is more than 0.1% of range away from nominal, issue a warning\r\n error_strings = error_strings+' reading '+str(i)+' is more than 0.1% from nominal.'\r\n self.PrintSave(\"Readings exceeded 0.1% from nominal. Check settle times, ranges, and connections\")\r\n \r\n self.wait(self.voltmeter.measure_seperation) #wait if voltmeter needs to not be read continuously\r\n self.PrintSave(time.strftime(\"%Y.%m.%d.%H.%M.%S, \",time.localtime()) + repr(a) + '\\n')\r\n self.data.add_to_list(a)\r\n these_readings.append(a)\r\n return these_readings,error_strings\r\n \r\n def print_instrument_status(self,row):\r\n \"\"\"Read the status of all instruments, print to grid\"\"\"\r\n self.com(self.voltmeter.inst_status)\r\n self.set_grid_val(row, self.dvm_nordgs_col+3, str(self.com(self.voltmeter.read_instrument)))\r\n #force string of value as it needs to go into the grid and grid only takes in strings.\r\n \r\n self.com(self.sourceS.inst_status)\r\n self.set_grid_val(row, self.dvm_nordgs_col+4, str(self.com(self.sourceS.read_instrument)))\r\n \r\n self.com(self.sourceX.inst_status)\r\n self.set_grid_val(row, self.dvm_nordgs_col+5, str(self.com(self.sourceX.read_instrument)))\r\n\r\n def run(self):\r\n \"\"\"\r\n Main thread, reads through the table and executes commands.\r\n \"\"\"\r\n self.com(self.voltmeter.create_instrument)\r\n self.com(self.sourceX.create_instrument)\r\n self.com(self.sourceS.create_instrument)\r\n #If the initialisations failed, stop the thread immediatly.\r\n #There is no point running make safe if the instruments arent there.\r\n if self._want_abort:\r\n #Notify the window that the thread ended, so buttons are enabled.\r\n wx.PostEvent(self._notify_window, stuff.ResultEvent(self.EVT, 'GPIB ended'))\r\n return\r\n \r\n if self.OverideSafety == False: #Then do the safety checks.\r\n state = self.SafetyCheck()\r\n if state != 'clear':\r\n self._want_abort = True\r\n self.PrintSave('safety checks failed, making safe, aborting.')\r\n self.MakeSafe()\r\n \r\n #initialise instruments\r\n self.initialise_instruments()\r\n \r\n for row in range(self.start_row,self.stop_row + 1):\r\n if self._want_abort:\r\n break #Breaks and skips to the end, where it runs \"self.end()\".\r\n \r\n #do the row highlighting, force a refresh.\r\n self.PrintSave(\"Spread sheet row \"+str(int(row)+1))\r\n wx.CallAfter(self.grid.SelectRow,row)\r\n wx.CallAfter(self.grid.ForceRefresh)\r\n wx.CallAfter(self.grid.Update)\r\n\r\n #prepare ranges for sources\r\n self.set_source_ranges(row)\r\n #operate sources\r\n self.com(self.sourceS.Operate)\r\n self.com(self.sourceX.Operate)\r\n\r\n #read errors in meters. The error string later has more warnings appended to it. 
\r\n error_strings = self.Error_string_maker()\r\n \r\n #need to determine maximum of DVM range to find accuracy threshold\r\n range_max = self.set_meter_range(row)\r\n\r\n delay_time = int(float(self.read_grid_cell(row,self.delay_col)))\r\n #delay for settle time\r\n delay_time2 = float(self.read_grid_cell(row,self.delay_col+1))\r\n #delay for DVM between mesmnts\r\n nominal_reading = float(self.read_grid_cell(row,self.dvm_range_col+1))\r\n #nominal reading for comparison to actual reading\r\n \r\n self.com(self.voltmeter.MeasureSetup)\r\n \r\n #wait desired time for instruments to settle\r\n self.wait(delay_time)\r\n \r\n these_readings = [] #array for readings from a single table row.\r\n before_msmnt_time =time.time() #start time of measuremtns.\r\n nordgs = int(float(self.read_grid_cell(row, self.dvm_nordgs_col) ))\r\n #Do the readings\r\n these_readings,issues = self.do_readings(delay_time2,range_max,nominal_reading,nordgs)\r\n \r\n #Update the full error string\r\n error_strings = error_strings + issues\r\n #print the error report of this data sequence to the table\r\n self.set_grid_val(row, 0,str(error_strings))\r\n \r\n if len(these_readings)>1:\r\n data_stdev = np.std(these_readings)\r\n data_mean = np.mean(these_readings)\r\n else:\r\n data_stdev = 'No readings' #avoid infinity for stdev\r\n data_mean = 'No readings'\r\n\r\n #create array to send to csv data sheet\r\n after_msmnt_time = time.time()\r\n Time = time.localtime() #time at end of msmnt\r\n row_time = str(Time[0])+str(Time[1])+str(Time[2])+str(Time[3])+str(Time[4])\r\n \r\n #print results\r\n self.set_grid_val(row, self.dvm_nordgs_col + 6,repr(data_mean))\r\n self.set_grid_val(row, self.dvm_nordgs_col + 7,repr(data_stdev))\r\n \r\n self.print_instrument_status(row)\r\n \r\n #put csv reading at end so all values are on table.\r\n csv_line = [self.read_grid_cell(row, i) for i in range(self.grid.GetNumberCols())]\r\n csv_line = csv_line+[before_msmnt_time,after_msmnt_time]+these_readings\r\n self.sh.append(csv_line)\r\n \r\n self.end()\r\n \r\n\r\n def initialchecks(self,instrument):\r\n \"\"\"\r\n Short function used in the safety check routine, given an instrument it resets\r\n the instrument, does initialisation and reads errors. Will continuously read\r\n errors until the meter returns the no-error string. 
This can cause problems if\r\n the no-error string is inputed wrongly.\r\n \"\"\"\r\n self.com(instrument.reset_instrument) #reset voltmeter\r\n self.com(instrument.initialise_instrument)\r\n self.com(instrument.query_error)\r\n error = self.com(instrument.read_instrument)\r\n self.PrintSave(error)\r\n if error != instrument.com['NoError']:\r\n self.PrintSave('Error '+str(error)+' while resetting instrument, or queriying status')\r\n return 'failed'\r\n else: return 'clear'\r\n \r\n def CheckInstruments(self,*args):\r\n \"\"\"\r\n Given any number of instruments, will read through each and PrintSave errors.\r\n Each instrumetn is continuously interrogated until it returns its no-error string.\r\n \"\"\"\r\n for instrument in args:\r\n self.com(instrument.query_error)\r\n error = self.com(instrument.read_instrument)\r\n if error != instrument.com['NoError']:\r\n while error != instrument.com['NoError'] and not self._want_abort:\r\n #loop as there could be several errors\r\n self.PrintSave('Error '+str(error)+' in instrument '+instrument.com['label'])\r\n self.com(instrument.query_error)\r\n error = self.com(instrument.read_instrument)\r\n return 'failed'\r\n return 'clear'\r\n\r\n \r\n def SafetyCheck(self):\r\n \"\"\"\r\n A linear function for the determination of some basic safety settings.\r\n It can be overriden.\r\n Will check: errors, resetting, outputting zero volts, ouputting 1 volt.\r\n If this is sucessful it will also determine if the wires are connected the wrong way.\r\n \"\"\"\r\n #determine how long to wait for an output of 1V to settle. do this\r\n #by reading ranges that include 1V, and finding maximum delay\r\n\r\n #IS this a good settle time? Since now the instrument settle times are\r\n #no longer written with the particular ranges, I cant extract the user-specified\r\n #settle time for outputs of one volts.\r\n SettleTime = 5\r\n \r\n state = 'clear'\r\n state = self.initialchecks(self.voltmeter)\r\n if state == 'clear': state = self.initialchecks(self.sourceX)\r\n if state == 'clear': state = self.initialchecks(self.sourceS)\r\n if state == 'clear':\r\n self.com(self.sourceX.Standby)\r\n self.com(self.sourceS.Standby)\r\n state = self.CheckInstruments(self.sourceX,self.sourceS)\r\n if state == 'clear':\r\n self.com(self.sourceX.set_DCvalue,0)\r\n self.com(self.sourceS.set_DCvalue,0)\r\n state = self.CheckInstruments(self.sourceX,self.sourceS)\r\n if state == 'clear':\r\n self.PrintSave('testing voltage is setting correctly, and zeros')\r\n \r\n self.com(self.sourceX.Operate)\r\n self.com(self.sourceS.Operate)\r\n if self.voltmeter.com[\"SingleMsmntSetup\"] != 'SKIP':\r\n #only send the extra command if the voltmeter has somethings to be sent\r\n self.com(self.voltmeter.SingleMsmntSetup)\r\n self.wait(SettleTime) #let instrumetns settle\r\n reading = float(self.com(self.voltmeter.read_instrument))\r\n if reading>5e-3:\r\n self.PrintSave('Expect reading to be near zero (less than 5e-3), but it is '+str(reading)+'.')\r\n state = 'failed'\r\n self.PrintSave(state)\r\n \r\n if state == 'clear':\r\n state = self.CheckInstruments(self.sourceX,self.sourceS)\r\n self.PrintSave('testing voltage setting correctly')\r\n self.com(self.sourceX.Standby)\r\n #find correct range for 1volt\r\n for r in self.sourceX.range[::-1]:\r\n if r[1]>=1:\r\n r_use = r\r\n self.com(self.sourceX.set_DCrange,r_use[2])\r\n self.com(self.sourceX.set_DCvalue,1)\r\n self.com(self.sourceX.Operate)\r\n if self.voltmeter.com[\"SingleMsmntSetup\"] != 'SKIP':\r\n #only send the extra command if the voltmeter 
has somethings to be sent\r\n self.com(self.voltmeter.SingleMsmntSetup)\r\n self.wait(SettleTime) #let instrumetns settle\r\n reading = float(self.com(self.voltmeter.read_instrument))\r\n if round(reading,2)!= 1.00:\r\n self.PrintSave('Source X set to 1, expect reading to be near 1, but it is '+str(reading)+' check output load on X and SettleTime')\r\n if round(reading,2) == -1:\r\n self.PrintSave('reading negative, check if wires are the right way')\r\n state = 'failed'\r\n return state\r\n state = self.CheckInstruments(self.sourceX,self.sourceS)\r\n #I suppose there is no point testing the wiring direction, since if it was correct\r\n #Before it must be correct now.\r\n if state == 'clear':\r\n self.com(self.sourceX.Standby)\r\n self.com(self.sourceX.set_DCvalue,0)\r\n self.com(self.sourceX.Operate)\r\n self.com(self.sourceS.Standby)\r\n for r in self.sourceS.range[::-1]:\r\n if r[1]>=1:\r\n r_use = r\r\n self.com(self.sourceS.set_DCrange,r_use[2])\r\n self.com(self.sourceS.set_DCvalue,1)\r\n self.com(self.sourceS.Operate)\r\n if self.voltmeter.com[\"SingleMsmntSetup\"] != 'SKIP':\r\n #only send the extra command if the voltmeter has somethings to be sent\r\n self.com(self.voltmeter.SingleMsmntSetup)\r\n self.wait(SettleTime) #let instrumetns settle\r\n reading = float(self.com(self.voltmeter.read_instrument))\r\n if round(reading,2)!= -1.00:\r\n self.PrintSave('Set source S to 1, expect reading to be near -1, but it is '+str(reading)+' output load on S and SettleTime')\r\n state = 'failed'\r\n return state\r\n state = self.CheckInstruments(self.sourceX,self.sourceS)\r\n \r\n return state\r\n \r\n","sub_path":"modules/gpib_data.py","file_name":"gpib_data.py","file_ext":"py","file_size_in_byte":23503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"381914083","text":"# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dlrm.data.datasets import SyntheticDataset\nfrom dlrm.data.factories import create_synthetic_datasets\nfrom dlrm.data.utils import write_dataset_to_disk, get_categorical_feature_sizes\nfrom absl import app, flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"num_numerical_features\", 13,\n \"Number of numerical features in the dataset. 
Defaults to 13 for the Criteo Terabyte Dataset\")\nflags.DEFINE_integer(\"synthetic_dataset_num_entries\",\n default=int(32768 * 1024), # 1024 batches for single-GPU training by default\n help=\"Number of samples per epoch for the synthetic dataset\")\nflags.DEFINE_list(\"synthetic_dataset_table_sizes\", default=','.join(26 * [str(10 ** 5)]),\n help=\"Embedding table sizes to use with the synthetic dataset\")\nflags.DEFINE_string(\"synthetic_dataset_dir\", default=\"/tmp/dlrm_synthetic_data\",\n help=\"Destination of the saved synthetic dataset\")\n\n\ndef main(argv):\n table_sizes = [int(s) for s in FLAGS.synthetic_dataset_table_sizes]\n train_dataset = SyntheticDataset(\n num_entries=FLAGS.synthetic_dataset_num_entries,\n numerical_features=FLAGS.num_numerical_features,\n categorical_feature_sizes=table_sizes\n )\n test_dataset = SyntheticDataset(\n num_entries=FLAGS.synthetic_dataset_num_entries,\n numerical_features=FLAGS.num_numerical_features,\n categorical_feature_sizes=table_sizes\n )\n\n write_dataset_to_disk(\n FLAGS.synthetic_dataset_dir,\n train_dataset,\n test_dataset,\n FLAGS.synthetic_dataset_table_sizes\n )\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"PyTorch/Recommendation/DLRM/dlrm/scripts/prepare_synthetic_dataset.py","file_name":"prepare_synthetic_dataset.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"493799436","text":"import numpy as np\nimport pandas as pd\nimport sqlite3\nimport random\nimport statsmodels.api as sm\nfrom statsmodels.tools import eval_measures\nfrom sklearn.metrics import r2_score\nfrom util import all_variable_names_in_df, train_test_split, RANDOM_SEED\n\n\ndef regression(train_df, test_df, ind_var_names: list, dep_var_name: str):\n \"\"\"\n Implement Linear Regression using StatsModel.\n\n inputs:\n - train_df: a Pandas DataFrame, containing all the training samples\n - test_df: a Pandas DataFrame, containing all the testing samples\n - ind_var_names: a list of strings of independent variable columns\n that we want to include in the model\n - dep_var_name: the name of the dependent variable of our model\n \n\n outpus:\n - mse_train: the mean-squared error of the model (trained on the training\n data), evaluated on the training dataset\n - mse_test: the mean-squared error of the model (trained on the training\n data), evaluated on the testing dataset\n - rsquared_val: the r-squared value of the model (trained on the training\n data), evaluated on the testing dataset\n \"\"\"\n ## Stencil: Error check whether the input that you provided to the function is correct or not\n # Do not modify\n for df in [train_df, test_df]:\n assert all_variable_names_in_df(ind_var_names + [dep_var_name], df)\n\n # Construct X_train, X_test, y_train, y_test from train_df and test_df, where\n # X_train is a numpy array of all the independent variable instances from train_df,\n # y_train is a numpy array of all the dependent variable instances from train_df,\n # and the same applies to X_test and y_test from test_df.\n # Hint: Look up (1) how to select a Pandas DataFrame B with a subset of columns from a given DataFrame A,\n # and (2) how to use Pandas .to_numpy() function.\n \n train_ind_var_df = train_df[ind_var_names]\n train_dep_var_df = train_df[dep_var_name]\n test_ind_var_df = test_df[ind_var_names]\n test_dep_var_df = test_df[dep_var_name]\n # construct training data and convert to numpy array\n X_train = train_ind_var_df.to_numpy()\n y_train = 
train_dep_var_df.to_numpy()\n # construct testing data and convert to numpy array\n X_test = test_ind_var_df.to_numpy()\n y_test = test_dep_var_df.to_numpy()\n\n # Using statsmodel, fit a linear regression model to the training dataset\n # You may checkout statsmodel's documentation here: https://www.statsmodels.org/stable/regression.html\n # spector_data = sm.datasets.spector.load(as_pandas=False)\n X_train = sm.add_constant(X_train)\n train_mod = sm.OLS(y_train, X_train)\n train_res = train_mod.fit()\n\n # Add constant for testing independent variable\n X_test = sm.add_constant(X_test)\n\n # Using statsmodel's eval_measures MSE calculation function,\n # calculate the Mean-squared Error of the model above (on the training dataset)\n train_mse = eval_measures.mse(y_train, train_res.predict(X_train))\n\n # Similarly, calculate the Mean-squared Error of the model above (on the testing dataset)\n test_mse = eval_measures.mse(y_test, train_res.predict(X_test))\n\n # Calculate the *test* R-squared value (using sklearn's r2_score function)\n test_r2 = r2_score(y_test, train_res.predict(X_test))\n\n # Print out the summary to see more information as needed\n print(train_res.summary())\n\n # Replace these values with whatever you found!\n mse_train, mse_test, rsquared_val = train_mse, test_mse, test_r2\n \n # And return them! :)\n return mse_train, mse_test, rsquared_val\n \n\ndef main():\n # Load the data from the bike-sharing.csv file into a Pandas DataFrame. Do not change\n # the variable name /data/\n # Hint: Look at the Pandas' read_csv function\n conn = sqlite3.connect('../data-cleaning/data.db')\n c = conn.cursor()\n\n dfgames = pd.read_sql_query(\"select * from games_final_cat;\", conn)\n \n print(\"Columns: \", dfgames.columns)\n\n IND_VAR_NAMES = ['elo_diff', 'age_diff', 'time_since_gm_diff']\n\n DEP_VAR_NAME = \"result\"\n\n dfnew = dfgames[[DEP_VAR_NAME] + IND_VAR_NAMES]\n\n\n for col in [DEP_VAR_NAME] + IND_VAR_NAMES:\n dfnew[col] = dfnew[col].astype(int)\n\n\n print(dfnew)\n # Using the imported train_test_split function (from util.py), create the train_df and\n # test_df that will be passed into regression.\n split = train_test_split(dfgames)\n train_df, test_df = split[0], split[1]\n\n # Call regression and perform other calculations as you deem necessary to answer the\n # questions posed for this section.\n print(regression(train_df, test_df, IND_VAR_NAMES, DEP_VAR_NAME))\n\n############ DON'T MODIFY BELOW THIS LINE ############\n\nif __name__ == \"__main__\":\n main()","sub_path":"analysis/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"270739018","text":"# -*- coding: utf-8 -*-\nfrom django.urls import reverse\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Attendee\n\n\ndef _get_attendee(attendee_id):\n try:\n return Attendee.objects.get(pk=attendee_id)\n except Attendee.DoesNotExist:\n return None\n\n\ndef event_ajax_handler(event, request):\n action = request.POST['action']\n administrating_user = request.user\n\n if action == 'attended':\n attendee = _get_attendee(request.POST['attendee_id'])\n if not attendee:\n return {'message': 'Fant ingen påmeldte med oppgitt ID (%s).' 
% request.POST['attendee_id'],\n 'status': 400}\n return handle_attended(attendee)\n elif action == 'paid':\n attendee = _get_attendee(request.POST['attendee_id'])\n if not attendee:\n return {'message': 'Fant ingen påmeldte med oppgitt ID (%s).' % request.POST['attendee_id'],\n 'status': 400}\n return handle_paid(attendee)\n elif action == 'add_attendee':\n return handle_add_attendee(event, request.POST['user_id'])\n elif action == 'remove_attendee':\n return handle_remove_attendee(event, request.POST['attendee_id'], administrating_user)\n else:\n raise NotImplementedError\n\n\ndef handle_attended(attendee):\n \"\"\"\n Toggle attending-status of an attendee between attending and not attending\n :param attendee_id: ID of attendee wanted to toggle\n :return:\n \"\"\"\n attendee.attended = not attendee.attended\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_paid(attendee):\n \"\"\"\n Toggle paid status of an attendee between paid and not paid\n :param attendee_id: ID of attendee wanted to toggle\n :return:\n \"\"\"\n attendee.paid = not attendee.paid\n attendee.save()\n\n return {'message': 'OK', 'status': 200}\n\n\ndef handle_add_attendee(event, user_id):\n resp = {}\n if event.attendance_event.number_of_seats_taken >= event.attendance_event.max_capacity:\n if not event.attendance_event.waitlist:\n return 'Det er ingen ledige plasser på %s.' % event.title\n\n user = User.objects.filter(pk=user_id)\n if user.count() != 1:\n return 'Fant ingen bruker med oppgitt ID (%s).' % user_id\n user = user[0]\n if Attendee.objects.filter(user=user, event=event.attendance_event).count() != 0:\n return '%s er allerede påmeldt %s.' % (user.get_full_name(), event.title)\n\n attendee = Attendee(user=user, event=event.attendance_event)\n attendee.save()\n\n resp['message'] = '%s ble meldt på %s' % (user.get_full_name(), event)\n resp['attendees'] = []\n\n for number, a in enumerate(attendee.event.attending_attendees_qs):\n resp['attendees'].append({\n 'number': number+1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n resp['waitlist'] = []\n for number, a in enumerate(attendee.event.waitlist_qs):\n resp['waitlist'].append({\n 'number': number+1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n return resp\n\n\ndef handle_remove_attendee(event, attendee_id, admin_user):\n resp = {}\n attendee = Attendee.objects.filter(pk=attendee_id)\n if attendee.count() != 1:\n return 'Fant ingen påmeldte med oppgitt ID (%s).' 
% attendee_id\n attendee = attendee[0]\n attendee.unattend(admin_user)\n resp['message'] = '%s ble fjernet fra %s' % (attendee.user.get_full_name(), attendee.event)\n resp['attendees'] = []\n for number, a in enumerate(attendee.event.attending_attendees_qs):\n resp['attendees'].append({\n 'number': number+1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n resp['waitlist'] = []\n for number, a in enumerate(attendee.event.waitlist_qs):\n resp['waitlist'].append({\n 'number': number+1,\n 'id': a.id,\n 'first_name': a.user.first_name,\n 'last_name': a.user.last_name,\n 'paid': a.paid,\n 'extras': str(a.extras),\n 'attended': a.attended,\n 'link': reverse('dashboard_attendee_details', kwargs={'attendee_id': a.id})\n })\n return resp\n","sub_path":"apps/events/dashboard/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"482066447","text":"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks for ChooseFastestBranchDataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.benchmarks import benchmark_base\nfrom tensorflow.python.data.experimental.ops import optimization\nfrom tensorflow.python.data.ops import dataset_ops\n\n\nclass ChooseFastestBranchBenchmark(benchmark_base.DatasetBenchmarkBase):\n \"\"\"Benchmarks for ChooseFastestBranchDatast.\"\"\"\n\n def make_benchmark_datasets(self):\n\n dataset = dataset_ops.Dataset.range(1000**2).repeat()\n\n def branch_0(dataset):\n return dataset.map(lambda x: x + 1).batch(100)\n\n def branch_1(dataset):\n return dataset.batch(100).map(lambda x: x + 1)\n\n map_batch_dataset = branch_0(dataset)\n batch_map_dataset = branch_1(dataset)\n choose_fastest_dataset = optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access\n dataset, [branch_0, branch_1],\n ratio_numerator=100)\n return map_batch_dataset, batch_map_dataset, choose_fastest_dataset\n\n def benchmarkChooseFastest(self):\n map_batch, batch_map, choose_fastest = self.make_benchmark_datasets()\n\n def benchmark(dataset, name):\n self.run_and_report_benchmark(dataset, 5000, name, iters=1)\n\n benchmark(map_batch, \"map_batch_dataset\")\n benchmark(batch_map, \"batch_map_dataset\")\n benchmark(choose_fastest, \"choose_fastest_dataset\")\n\n def benchmarkChooseFastestFirstNIterations(self):\n\n map_batch, batch_map, choose_fastest = self.make_benchmark_datasets()\n\n def benchmark(dataset, name):\n self.run_and_report_benchmark(\n dataset, num_elements=10, name=\"%s_first_10\" % name, iters=5)\n\n benchmark(map_batch, 
\"map_batch_dataset\")\n benchmark(batch_map, \"batch_map_dataset\")\n benchmark(choose_fastest, \"choose_fastest_dataset\")\n\n\nif __name__ == \"__main__\":\n benchmark_base.test.main()\n","sub_path":"tensorflow/python/data/experimental/benchmarks/choose_fastest_branch_benchmark.py","file_name":"choose_fastest_branch_benchmark.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"172303590","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='event-home'),\n path('event_list/', views.event_list, name='event-event_list'),\n path('top-rated/', views.top_rated_list, name='event-top-rated'),\n path('event//', views.view_event, name='event-view'),\n path('event//users', views.users_attend, name='event-users'),\n path('event/delete-comment//',\n views.delete_comment, name='delete_comment'),\n path('event/report-comment//',\n views.report_comment, name='report_comment'),\n path('event/choose-comment//',\n views.choose_comment, name='choose_comment'),\n path('recommended/', views.recommended_event_list,\n name='event-recommended_list'),\n path('myevents/', views.my_events, name='event-my_events'),\n path('myevents/past', views.my_events_past, name='event-my_events_past'),\n path('myevents/all', views.my_events_all, name='event-my_events_all'),\n path('myevents//delete', views.remove_my_event,\n name='event-remove_my_event'),\n path('calendar/', views.calendar, name='event-calendar'),\n]\n","sub_path":"event_manager/event/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"301220256","text":"from PyQt5 import QtWidgets\nfrom kenttaeditori import Kenttaeditori\nfrom pelinohjain import Pelinohjain\nfrom maaston_lukija import Maaston_lukija\nfrom yksikoiden_lukija import Yksikoiden_lukija\nfrom kartan_lukija import Kartan_lukija\nfrom kayttoliittyman_lukija import Kayttoliittyman_lukija\nfrom pelaa_valikko import Pelaa_valikko\nfrom pelitilanteen_lukija import Pelitilanteen_lukija\nimport sys\n\n\nclass Paavalikko(QtWidgets.QMainWindow):\n \n def __init__(self):\n super(Paavalikko, self).__init__()\n self.__kayttoliittyman_lukija = Kayttoliittyman_lukija()\n if self.__kayttoliittyman_lukija.koko != 0:\n self.__scene_size = self.__kayttoliittyman_lukija.koko\n else:\n # siltä varalta että tiedoston lukeminen epäonnistuu, määritellään oletuskoko\n self.__scene_size = 880\n self.setCentralWidget(QtWidgets.QWidget())\n self.__paa_layout = QtWidgets.QVBoxLayout()\n self.centralWidget().setLayout(self.__paa_layout)\n\n self.setGeometry(0, 0, self.__scene_size + 420, self.__scene_size + 20)\n self.setWindowTitle('Strategiapeli')\n self.show()\n\n # widgetit\n self.__virheteksti = QtWidgets.QLabel(\"\")\n self.__virheteksti_kartat = QtWidgets.QLabel(\"\")\n self.__virheteksti_lataus = QtWidgets.QLabel(\"\")\n self.__jatka_nappi = QtWidgets.QPushButton(\"JATKA PELIÄ\")\n self.__jatka_nappi.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n self.__pelaa_nappi = QtWidgets.QPushButton(\"PELAA\")\n self.__pelaa_nappi.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n self.__kenttaeditori_nappi = QtWidgets.QPushButton(\"KENTTÄEDITORI\")\n self.__kenttaeditori_nappi.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n self.__poistu_nappi = 
QtWidgets.QPushButton(\"POISTU\")\n self.__poistu_nappi.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\n\n self.__virheteksti.setStyleSheet(\"font: 20pt Arial\")\n self.__jatka_nappi.setStyleSheet(\"font: 10pt Arial\")\n self.__virheteksti_kartat.setStyleSheet(\"font: 20pt Arial\")\n self.__virheteksti_lataus.setStyleSheet(\"font: 20pt Arial\")\n self.__pelaa_nappi.setStyleSheet(\"font: 10pt Arial\")\n self.__kenttaeditori_nappi.setStyleSheet(\"font: 10pt Arial\")\n self.__poistu_nappi.setStyleSheet(\"font: 10pt Arial\")\n\n # nappien yhdistäminen\n self.__jatka_nappi.clicked.connect(self.__jatka)\n self.__pelaa_nappi.clicked.connect(self.__pelaa)\n self.__kenttaeditori_nappi.clicked.connect(self.__kenttaeditori)\n self.__poistu_nappi.clicked.connect(self.__poistu)\n\n # nappi widgetit\n self.__paa_layout.addWidget(self.__virheteksti, 1)\n self.__paa_layout.addWidget(self.__virheteksti_kartat, 1)\n self.__paa_layout.addWidget(self.__virheteksti_lataus, 1)\n self.__paa_layout.addWidget(self.__jatka_nappi, 2)\n self.__paa_layout.addWidget(self.__pelaa_nappi, 2)\n self.__paa_layout.addWidget(self.__kenttaeditori_nappi, 2)\n self.__paa_layout.addWidget(self.__poistu_nappi, 2)\n\n # kenttäeditori\n self.kenttaeditori = None\n self.__pelaa_valikko = None\n\n self.__virheelliset_kartat = []\n\n # tiedostojen lukijat\n self.__maastojen_lukija = Maaston_lukija()\n self.__yksikoiden_lukija = Yksikoiden_lukija()\n self.__kartan_lukija = Kartan_lukija(self)\n self.kartan_lukija.lue_kaikki_kartat()\n self.__pelitilanteen_lukija = Pelitilanteen_lukija()\n self.lue_tallennus()\n self.__pelinohjain = None\n\n # virheet\n if not self.__kayttoliittyman_lukija.lukeminen_onnistui:\n self.__virhe_lukemisessa(\"kayttoliittyma\")\n if not self.__yksikoiden_lukija.lukeminen_onnistui:\n self.__virhe_lukemisessa(\"yksikot\")\n if not self.__maastojen_lukija.lukeminen_onnistui:\n self.__virhe_lukemisessa(\"maastot\")\n self.__nayta_virheelliset_kartat()\n\n # keskelle liikuttaminen\n if self.kayttoliittyman_lukija.x != 0 and self.kayttoliittyman_lukija.y != 0:\n res_x = self.kayttoliittyman_lukija.x\n res_y = self.kayttoliittyman_lukija.y\n else:\n res_x = 1920\n res_y = 1080\n self.move(int(res_x / 2) - int(self.frameSize().width() / 2),\n int(res_y / 2) - int(self.frameSize().height() / 2))\n\n @property\n def maastojen_lukija(self):\n return self.__maastojen_lukija\n\n @property\n def yksikoiden_lukija(self):\n return self.__yksikoiden_lukija\n\n @property\n def kartan_lukija(self):\n return self.__kartan_lukija\n\n @property\n def kayttoliittyman_lukija(self):\n return self.__kayttoliittyman_lukija\n\n @property\n def scene_size(self):\n return self.__scene_size\n\n @property\n def pelaa_valikko(self):\n return self.__pelaa_valikko\n\n def lisaa_virheellinen_kartta(self, nimi):\n self.__virheelliset_kartat.append(nimi)\n\n def __kriittinen_virhe(self):\n self.__pelaa_nappi.setEnabled(False)\n self.__kenttaeditori_nappi.setEnabled(False)\n\n def __virhe_lukemisessa(self, tyyppi):\n # virheen sattuessa näytetään päävalikossa virheteksti\n if tyyppi == \"kayttoliittyma\":\n self.__kriittinen_virhe()\n self.__virheteksti.setText(\"Käyttöliittymän tietojen lukemisessa tapahtui virhe.\\n\"\n \"Korjaa tiedosto ja avaa ohjelma uudestaan\")\n elif tyyppi == \"yksikot\":\n self.__kriittinen_virhe()\n self.__virheteksti.setText(\"Yksiköiden tietojen lukemisessa tapahtui virhe.\\n\"\n \"Korjaa tiedosto ja avaa ohjelma uudestaan\")\n elif tyyppi == \"maastot\":\n self.__kriittinen_virhe()\n 
self.__virheteksti.setText(\"Maastojen tietojen lukemisessa tapahtui virhe.\\n\"\n \"Korjaa tiedosto ja avaa ohjelma uudestaan\")\n elif tyyppi == \"tilanne\":\n self.__jatka_nappi.setEnabled(False)\n self.__virheteksti_lataus.setText(\"Pelitilanteen lukeminen epäonnistui\")\n\n def __nayta_virheelliset_kartat(self):\n # näytetään virheelliset kartat päävalikossa\n if len(self.__virheelliset_kartat) > 0:\n teksti = \"Virheelliset kartat:\\n\"\n for kartta in self.__virheelliset_kartat:\n teksti += kartta + \"\\n\"\n self.__virheteksti_kartat.setText(teksti)\n\n # kutsutaan, kun tallennettu peli ladataan\n def __jatka(self):\n # luodaan pelinohjain ja kartta ilman yksiköitä\n self.__pelinohjain = Pelinohjain(self.__kartan_nimi, self, False)\n\n # lisätään yksiköt, muutetaan niiden elämä ja energia sopivaksi, lisätään tilavaikutukset\n for yksikko in self.__tilanne:\n tiedot = yksikko[0]\n x = tiedot[0]\n y = tiedot[1]\n self.__pelinohjain.kartta.lisaa_yksikko(self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y], tiedot[3],\n self.yksikoiden_lukija.yksikot[tiedot[3]], tiedot[2])\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.ominaisuudet.nyk_elama = tiedot[4]\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.ominaisuudet.nyk_energia = tiedot[5]\n if tiedot[6] == \"kylla\":\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.liikuttu()\n if tiedot[7] == \"kylla\":\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.hyokatty()\n # tilavaikutusten lisäys\n hyokkaysvaikutus = yksikko[1]\n vaikutukset = yksikko[2]\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.hyokkays_vaikutus = hyokkaysvaikutus\n for v in vaikutukset:\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].yksikko.lisaa_tilavaikutus(v.kesto,\n v.hyokkaysbonus, v.puolustusbonus, v.liikkumisbonus, v.verenvuoto, v.taintuminen, v.loppuvaikutus)\n\n # tehdään pelin jatkamiseksi tarvittavat toiminnot\n self.__pelinohjain.kartta.etsi_yksikot()\n self.__pelinohjain.kartta.palauta_pelaajan_toimivat_yksikot()\n # tarkistus joudutaan tekemään jostain syystä kahdesti\n self.__pelinohjain.kartta.tarkista_toimivat_yksikot()\n self.__pelinohjain.kartta.tarkista_toimivat_yksikot()\n for yksikko in self.__pelinohjain.kartta.pelaajan_yksikot:\n yksikko.grafiikka.palauta_vari()\n yksikko.grafiikka.elamapalkki.paivita_koko()\n yksikko.grafiikka.elamapalkki.paivita_tilavaikutukset()\n yksikko.grafiikka.paivita_tooltip()\n for yksikko in self.__pelinohjain.kartta.tietokoneen_yksikot:\n yksikko.grafiikka.elamapalkki.paivita_koko()\n yksikko.grafiikka.elamapalkki.paivita_tilavaikutukset()\n yksikko.grafiikka.paivita_tooltip()\n self.__pelinohjain.kayttoliittyma.tyhjenna_valinta()\n self.__pelinohjain.kayttoliittyma.laita_napit_kayttoon()\n self.__pelinohjain.kayttoliittyma.paivita_nappien_aktiivisuus()\n\n # kiilojen lisäys\n for kiila in self.__kiilat:\n x = kiila[0]\n y = kiila[1]\n tiedot = self.yksikoiden_lukija.yksikot[\"jousimiehet\"][1]\n self.__pelinohjain.kartta.ruudut_koordinaateilla[x][y].luo_kiilat(\n float(tiedot['kyky2_bonus']), float(tiedot['kyky2_bonus_ratsuvaki']))\n\n def __pelaa(self):\n if self.__pelaa_valikko is None:\n self.__pelaa_valikko = Pelaa_valikko(self)\n self.__pelaa_valikko.show()\n self.hide()\n\n def __kenttaeditori(self):\n if self.kenttaeditori is None:\n self.kenttaeditori = Kenttaeditori(self)\n self.kenttaeditori.show()\n self.hide()\n\n def __poistu(self):\n sys.exit()\n\n def lue_tallennus(self):\n if 
self.__pelitilanteen_lukija.lue_pelitilanne(self.__kartan_lukija.kartat, self.__yksikoiden_lukija.yksikot) \\\n                is not None:\n            self.__kartan_nimi, self.__tilanne, self.__kiilat = self.__pelitilanteen_lukija.lue_pelitilanne\\\n                (self.__kartan_lukija.kartat, self.__yksikoiden_lukija.yksikot)\n            self.__jatka_nappi.setEnabled(True)\n        else:\n            self.__jatka_nappi.setEnabled(False)\n            if not self.__pelitilanteen_lukija.lukeminen_onnistui:\n                self.__virhe_lukemisessa(\"tilanne\")\n            else:\n                self.__virheteksti_lataus.setText(\"\")\n\n    def poista_pelinohjain(self):\n        if self.__pelinohjain is not None:\n            self.__pelinohjain.kayttoliittyma.deleteLater()\n            self.__pelinohjain = None\n","sub_path":"koodi/paavalikko.py","file_name":"paavalikko.py","file_ext":"py","file_size_in_byte":10955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"420446325","text":"# coding=utf-8\n# @Time : 2021/6/10 9:15\n# @Author : zyk\n# @Email : zhangyongke1105@163.com\n# @File : log.py\n# @Software: PyCharm\nimport logging\nfrom logging.handlers import RotatingFileHandler\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\nDATE_FORMAT = \"%Y/%m/%d %H:%M:%S %p\"\n\nlogging.basicConfig(filename=r'F:\\workplace\\GitWork\\python_project\\logs\\my.log', level=logging.DEBUG, format=LOG_FORMAT, datefmt=DATE_FORMAT)\n\nlogging.debug(\"This is a debug log.\")\nlogging.info(\"This is a info log.\")\nlogging.warning(\"This is a warning log.\")\nlogging.error(\"This is a error log.\")\nlogging.critical(\"This is a critical log.\")\n","sub_path":"framwork_study/selenium/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"570859825","text":"import Lesson3_part2\n# from Lesson3_part2 import div\n# try:\n#     div(10, 0)\n# except TypeError.:\n#     print(\"TypeError\")\n# except ZeroDivisionError.:\n#     print(\"ZeroDivisionError\")\n# except Exception:\n#     print(e.args)\n#     print(\"division error\")\n\n# from Lesson3 import check_website\nimport requests\n# MissingSchema is raised by requests when the URL is missing its scheme (e.g. \"http://\")\nfrom requests.exceptions import MissingSchema\n\ndef check_website(url):\n    req = requests.get(url)\n    print(req)\n    if req.status_code == 200:\n        print(\"is ok\")\n\n\ntry:\n    url = input()\n    check_website(url)\nexcept MissingSchema:\n    print(\"you have a schema issue\")\n    raise RuntimeError(\"Github is a bad website\")\n","sub_path":"pythonProjecttest/exception3.py","file_name":"exception3.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"36244972","text":"#===============================================================================\n# Copyright 2011 Jake Ross\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#===============================================================================\n\n\n\n#========== standard library imports ==========\nimport os\n\ndef unique_dir(root, base):\n    p = os.path.join(root, '{}001'.format(base))\n    i = 2\n    while os.path.exists(p):\n        p = 
os.path.join(root, '{}{:03n}'.format(base, i))\n i += 1\n\n os.mkdir(p)\n\n return p\n\ndef unique_path(root, base, extension='txt'):\n '''\n\n '''\n if extension:\n if '.' not in extension:\n extension = '.{}'.format(extension)\n else:\n extension = ''\n\n p = os.path.join(root, '{}001{}'.format(base, extension))\n cnt = 1\n i = 2\n while os.path.exists(p):\n p = os.path.join(root, '{}{:03n}{}'.format(base, i, extension))\n i += 1\n cnt += 1\n\n return p, cnt\n\ndef str_to_bool(a):\n '''\n '''\n\n tks = ['true', 't', 'yes', 'y', '1', 'ok']\n fks = ['false', 'f', 'no', 'n', '0']\n\n if a is not None:\n a = str(a).strip().lower()\n\n if a in tks:\n return True\n elif a in fks:\n return False\n\ndef parse_xy(p, delimiter=','):\n '''\n '''\n data = parse_file(p)\n if data:\n func = lambda i, data: [float(l.split(delimiter)[i]) for l in data]\n\n return func(0, data), func(1, data)\n\ndef commented_line(l):\n '''\n '''\n if l[:1] == '#':\n return True\n else:\n return False\n\ndef parse_file(p, delimiter=None):\n '''\n '''\n if os.path.exists(p) and os.path.isfile(p):\n with open(p, 'U') as file:\n r = filetoarray(file)\n if delimiter:\n r = [ri.split(delimiter) for ri in r]\n\n return r\n\n\ndef parse_setupfile(p):\n '''\n '''\n\n file = parse_file(p)\n if file:\n return [line.split(',') for line in file]\n\ndef parse_canvasfile(p, kw):\n '''\n \n '''\n # kw=['origin','valvexy','valvewh','opencolor','closecolor']\n\n if os.path.exists(p) and os.path.isfile(p):\n with open(p, 'r') as file:\n indices = {}\n i = 0\n f = filetoarray(file)\n count = 1\n for i in range(len(f)):\n if f[i][:1] == '!':\n for k in kw:\n if f[i][1:] == k:\n i += 1\n if k in indices:\n k = k + str(count)\n count += 1\n\n indices[k] = f[i].split(',')\n\n i += 1\n break\n\n return indices\ndef filetoarray(f, commentchar='#'):\n '''\n\n '''\n def isNewLine(c):\n return c == chr(10) or c == chr(13)\n\n r = []\n\n for line in f:\n cc = line[:1]\n if not cc == commentchar and not isNewLine(cc):\n # l = line[:-1] if line[-1:] == '\\n' else line\n # remove inline comments\n line = line.split('#')[0]\n r.append(line.strip())\n return r\n","sub_path":"src/helpers/filetools.py","file_name":"filetools.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"469881422","text":"#!/usr/bin/env python\n__author__ = \"Dmitry Timofeev\"\n__version__ = \"0.2.1\"\n__email__ = \"dimkat@gmail.com\"\n\n\"\"\"\nSimplelogs is a logging system as a service (RESTful API).\nIt was made using MongoDB and Flask Framework.\nNow I'm working on extending methods-list and client libraries.\nNow it available for Python and Java. 
In future I think I will add lib for Objective-C.\nSimplelogs is a very tiny layer between MongoDB and HTTP requests.\n\nFell free to contact with me by e-mail or IM.\n\n\"\"\"\n\nfrom app import app\napp.run(debug = False) # If you are working on improvements I recommending you change debugging mo to True.","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"9738917","text":"from typology.ontology import infinity\n\ndef test_infinity_properties():\n '''\n Test if we can define custom properties to them as dictionaries.\n After all, we want to index the real items of the world.\n '''\n\n KeyEvent = infinity.Concept('GH:mindey/ooio/keylogger-event')\n Date = infinity.Concept('https://github.com/infamily/terms/wiki/date')\n\n # We want a keyevent with 4 legs, owned by Alice.\n key = KeyEvent({'legs': 4, 'owner': 'Alice'})\n\n # We want a date with 4 legs, owned by Bob.\n date = Date({'legs': 4, 'owner': 'Bob'})\n\n\n assert date.details == {'legs': 4, 'owner': 'Bob'}\n assert key.details == {'legs': 4, 'owner': 'Alice'}\n","sub_path":"typology/ontology/test_infinity.py","file_name":"test_infinity.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"507388280","text":"import tensorflow as tf\nimport tensorflow.contrib.keras as keras\n\nclass ThreesNN:\n def __init__(self, in_dim, hidden_dims):\n self.input_dim = in_dim\n self.hidden_dims = hidden_dims\n\n def build_model(self):\n self.nn = keras.models.Sequential()\n in_dim = self.input_dim\n for h in self.hidden_dims:\n self.nn.add(tf.keras.layers.Dense(h, input_shape=(in_dim,), activation=tf.nn.relu,\n kernel_initializer=tf.keras.initializers.glorot_uniform()))\n in_dim = h","sub_path":"rl/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"385016548","text":"# pylint: disable=non-parent-init-called\n# -*- coding: utf-8 -*-\n#\n# ramstk.views.gtk3.assistants.options.py is part of The RAMSTK\n# Project\n#\n# All rights reserved.\n# Copyright 2007 - 2020 Doyle Rowland doyle.rowland reliaqual com\n\"\"\"RAMSTK Configuration Options Module.\"\"\"\n\n# Standard Library Imports\nfrom typing import Dict\n\n# Third Party Imports\nfrom pubsub import pub\nfrom treelib import Tree\n\n# RAMSTK Package Imports\nfrom ramstk import integer_to_boolean\nfrom ramstk.views.gtk3 import Gtk, _\nfrom ramstk.views.gtk3.widgets import (\n RAMSTKCheckButton, RAMSTKDialog, RAMSTKLabel\n)\n\n\nclass EditOptions(RAMSTKDialog):\n \"\"\"Provide a GUI to set various RAMSTK configuration options.\n\n RAMSTK options are stored in the RAMSTK Common database and the RAMSTK\n Program database. RAMSTK options are site-specific or program-specific and\n apply to all users. Options should not be confused with user-specific\n configurations preferences which are stored in RAMSTK.conf in each user's\n $HOME/.config/RAMSTK directory and are applicable only to that specific\n user. 
Configuration preferences are edited with the Preferences assistant.\n\n Attributes of the EditOptions are:\n \"\"\"\n # Define private dict class attributes.\n _dic_keys: Dict[int, str] = {\n 0: 'function_active',\n 1: 'requirement_active',\n 2: 'hardware_active',\n 3: 'vandv_active',\n 4: 'fmea_active',\n 5: 'pof_active'\n }\n\n def __init__(self, parent: object = None) -> None:\n \"\"\"Initialize an instance of the Options assistant.\n\n :param parent: the parent window for this assistant.\n \"\"\"\n super().__init__(_(\"RAMSTK Program Options Assistant\"),\n dlgparent=parent)\n\n # Initialize private dictionary attributes.\n\n # Initialize private list attributes.\n\n # Initialize private scalar attributes.\n\n # Initialize public dictionary attributes.\n\n # Initialize public list attributes.\n\n # Initialize public scalar attributes.\n self.chkFunctions: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"Function Module Active\"))\n self.chkRequirements: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"Requirements Module Active\"))\n self.chkHardware: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"Hardware Module Active\"))\n self.chkValidation: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"Validation Module Active\"))\n self.chkFMEA: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"(D)FME(C)A Module Active\"))\n self.chkPoF: RAMSTKCheckButton = RAMSTKCheckButton(\n label=_(\"Physics of Failure (PoF) Module Active\"))\n\n self.lst_widgets = [\n self.chkFunctions, self.chkRequirements, self.chkHardware,\n self.chkValidation, self.chkFMEA, self.chkPoF\n ]\n\n self.__make_ui()\n self.__set_callbacks()\n\n # Subscribe to PyPubSub messages.\n pub.subscribe(self._do_load_page, 'succeed_get_options_tree')\n\n def __make_ui(self) -> None:\n \"\"\"Build the user interface.\n\n :return: None\n :rtype: None\n \"\"\"\n self.set_default_size(250, -1)\n\n _fixed = Gtk.Fixed()\n\n _label = RAMSTKLabel(\n _(\"This is the RAMSTK Program options editor. This assistant \"\n \"will allow you to select the work stream modules to use in \"\n \"the open RAMSTK Program. 
Keep in mind that the active work \"\n \"stream modules are Revision dependent.\"))\n _label.do_set_properties(width=600, height=-1, wrap=True)\n _fixed.put(_label, 5, 10)\n\n _y_pos: int = _label.get_preferred_size()[0].height + 50\n\n _fixed.put(self.chkFunctions, 10, _y_pos)\n _fixed.put(self.chkRequirements, 10, _y_pos + 35)\n _fixed.put(self.chkHardware, 10, _y_pos + 65)\n _fixed.put(self.chkValidation, 10, _y_pos + 95)\n _fixed.put(self.chkFMEA, 10, _y_pos + 125)\n\n self.vbox.pack_start(_fixed, True, True, 0)\n\n self.show_all()\n\n def __set_callbacks(self) -> None:\n \"\"\"Set EditOption widgets callback methods.\n\n :return: None\n :rtype: None\n \"\"\"\n self.chkFunctions.dic_handler_id[\n 'toggled'] = self.chkFunctions.connect('toggled', self._on_toggled,\n 0)\n self.chkRequirements.dic_handler_id[\n 'toggled'] = self.chkRequirements.connect('toggled',\n self._on_toggled, 1)\n self.chkHardware.dic_handler_id['toggled'] = self.chkHardware.connect(\n 'toggled', self._on_toggled, 2)\n self.chkValidation.dic_handler_id[\n 'toggled'] = self.chkValidation.connect('toggled',\n self._on_toggled, 3)\n self.chkFMEA.dic_handler_id['toggled'] = self.chkFMEA.connect(\n 'toggled', self._on_toggled, 4)\n self.chkPoF.dic_handler_id['toggled'] = self.chkPoF.connect(\n 'toggled', self._on_toggled, 5)\n\n def _cancel(self, __button: Gtk.Button):\n \"\"\"Destroy the assistant when the 'Cancel' button is pressed.\n\n :param __button: the Gtk.Button() that called this method.\n :type __button: :class:`Gtk.Button`\n \"\"\"\n self.do_destroy()\n\n def _do_load_page(self, tree: Tree) -> None:\n \"\"\"Load the current options.\n\n :return: None\n :rtype: None\n \"\"\"\n try:\n _program_options = tree.get_node(\n 'programinfo').data['programinfo'].get_attributes()\n except AttributeError:\n _program_options = dict(function_active=0,\n requirement_active=0,\n hardware_active=0,\n vandv_active=0,\n fmea_active=0,\n pof_active=0)\n\n self.chkFunctions.set_active(\n integer_to_boolean(_program_options['function_active']))\n self.chkRequirements.set_active(\n integer_to_boolean(_program_options['requirement_active']))\n self.chkHardware.set_active(\n integer_to_boolean(_program_options['hardware_active']))\n self.chkValidation.set_active(\n integer_to_boolean(_program_options['vandv_active']))\n self.chkFMEA.set_active(\n integer_to_boolean(_program_options['fmea_active']))\n\n def _on_toggled(self, checkbutton: RAMSTKCheckButton, index: int) -> None:\n \"\"\"Handle RAMSTKCheckButton() 'toggle' signals.\n\n :param checkbutton: the RAMSTKCheckButton() that called this method.\n :type: :class:`gui.gtk.ramstk.Button.RAMSTKToggleButton`\n :param index: the index of the Gtk.CheckButton() in the list\n handler list.\n :return: None\n :rtype: None\n \"\"\"\n # ISSUE: Add code to record user creating program database.\n # //\n # // There should be code to record in the RAMSTKProgramInfo table\n # // the logged in user who creates the database. This will require\n # // a function/method to identify the logged in user as well as a\n # // function/method to write this to the new database.\n # //\n # // labels: status:globalbacklog, severity:normal, type:enhancement\n\n # ISSUE: Add code to record user updating program database.\n # //\n # // There should be code to record in the RAMSTKProgramInfo table\n # // the logged in user who is updating the database. 
This will\n # // require a function/method to identify the logged in user as well\n # // as a function/method to write this to the database everytime a\n # // datamanager successfully updates a database table.\n # //\n # // labels: status:globalbacklog, severity:normal, type:enhancement\n try:\n _key = self._dic_keys[index]\n except KeyError as _error:\n _key = ''\n self.RAMSTK_LOGGER.do_log_exception(__name__, _error)\n\n _new_text = int(checkbutton.get_active())\n\n checkbutton.do_update(_new_text, signal='toggled')\n\n pub.sendMessage('request_set_option_attributes',\n node_id=['programinfo', -1],\n package={_key: _new_text})\n","sub_path":"src/ramstk/views/gtk3/assistants/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"102520380","text":"# encoding=utf-8\n\n# Test__str__ , __repr__\nclass Student(object):\n\tdef __init__(self, name):\n\t\tself.name = name\n\t'''这样print打印的时候就会打印该函数的内容'''\t\n\tdef __str__(self):\t\t\t\n\t\treturn 'Student object (name: %s)' % self.name\n\t''' 如果不用打印函数print,直接敲变量结果和没有__str__函数一样的效果,所以加上 '''\n\t__repr__ = __str__\n\n#print(Student('Jack'))\t\n\n# Test __iter__\nclass Fib(object):\n\t# 初试a,b的值\n\tdef __init__(self):\n\t\tself.a, self.b = 0, 1\n\t# 返回可迭代的对象 \n\tdef __iter__(self):\n\t\treturn self\n\t# for循环时用next返回下一次的值 \n\tdef __next__(self):\t\t# 2.7版 next() 3.0版本 __next__()\n\t\tself.a, self.b = self.b, self.a + self.b\n\t\tif self.a > 30:\n\t\t\traise StopIteration();\n\t\treturn self.a\n\t\t\n# for循环的时候只运行next()方法 \nfor n in Fib():\n\tprint(n)\n\n# Test __getitem__\nclass Fib(object): \n\tdef __getitem__(self, n):\n\t\tif isinstance(n, int): # 一个元素\n\t\t\ta, b = 0, 1\n\t\t\tfor n in range(n):\n\t\t\t\ta, b = b , a + b \n\t\t\treturn a\n\t\tif isinstance(n, slice): # 一个切片\n\t\t\tstart = n.start\n\t\t\tstop = n.stop\n\t\t\tif start is None:\t\t#开始为空\n\t\t\t\tstart = 0\n\t\t\ta, b = 0, 1\n\t\t\tL = []\n\t\t\tfor n in range(stop): \n\t\t\t\ta, b = b, a + b \n\t\t\t\tif n >= start: # 正序 \n\t\t\t\t\tL.append(a)\t\t\n\t\t\treturn L\nf = Fib()\nf[1:10]\n\n# Test __getattr__\nclass Student(object):\n\n\tdef __init__(self):\n\t\tself.name = 'jack'\n\t# 当调用不存在的属性时,会查找__getattr__中有属性否\n\tdef __getattr__(self, sttr): \n\t# 属性\n\t\tif sttr == 'score': \n\t\t\treturn 30\n\t# 方法\n\t\telif sttr == 'age':\n\t\t\treturn lambda : 30\n\t# 抛出错误\n\t\traise AttributeError('\\'stdent\\' object has not attribute \\'%s\\'' % sttr)\n\n\t\t\ns = Student()\nprint(s.name) \nprint(s.score)\nprint(s.age())\nprint(s.sex) # 抛出错误\n","sub_path":"Python_code/__str__,__repr__,__iter__.py","file_name":"__str__,__repr__,__iter__.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"525870575","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\nimport polyaxon_lib as plx\n\nfrom polyaxon_schemas.losses import MeanSquaredErrorConfig\nfrom polyaxon_schemas.optimizers import AdadeltaConfig\nfrom polyaxon_schemas.processing.feature_processors import FeatureProcessorsConfig\nfrom polyaxon_schemas.processing.pipelines import TFRecordImagePipelineConfig\n\n\ndef encoder_fn(mode, features):\n x = plx.layers.Dense(units=128)(features)\n x = plx.layers.Dense(units=256)(x)\n return x\n\n\ndef decoder_fn(mode, features):\n x = plx.layers.Dense(units=256)(features)\n return 
plx.layers.Dense(units=784)(x)\n\n\ndef bridge_fn(mode, features, labels, loss, encoder_fn, decoder_fn):\n return plx.bridges.NoOpBridge(mode)(features, labels, loss, encoder_fn, decoder_fn)\n\n\ndef model_fn(features, labels, params, mode, config):\n model = plx.models.Generator(\n mode=mode,\n encoder_fn=encoder_fn,\n decoder_fn=decoder_fn,\n bridge_fn=bridge_fn,\n loss=MeanSquaredErrorConfig(),\n optimizer=AdadeltaConfig(learning_rate=0.9),\n summaries=['loss'])\n return model(features=features, labels=labels, params=params, config=config)\n\n\ndef get_input_fn(mode, data_files, meta_data_file):\n return plx.processing.create_input_data_fn(\n mode=mode,\n pipeline_config=TFRecordImagePipelineConfig(\n shuffle=plx.Modes.is_train(mode),\n dynamic_pad=False,\n batch_size=64 if plx.Modes.is_train(mode) else 32,\n data_files=data_files,\n meta_data_file=meta_data_file,\n feature_processors=FeatureProcessorsConfig.from_dict(\n {'image': {\n 'input_layers': [['image', 0, 0]],\n 'output_layers': [['reshape', 0, 0]],\n 'layers': [\n {'Cast': {\n 'name': 'cast',\n 'dtype': 'float32',\n 'inbound_nodes': [['image', 0, 0]]\n }},\n {'Standardization': {\n 'name': 'std',\n 'inbound_nodes': [['cast', 0, 0]]\n }},\n {'Flatten': {\n 'name': 'flatten',\n 'inbound_nodes': [['std', 0, 0]]\n }},\n {'Reshape': {\n 'name': 'reshape',\n 'target_shape': [784],\n 'inbound_nodes': [['flatten', 0, 0]]\n }}\n ]\n }})\n )\n )\n\n\ndef experiment_fn(output_dir):\n \"\"\"Creates an auto encoder on MNIST handwritten digits.\n\n inks:\n * [MNIST Dataset] http://yann.lecun.com/exdb/mnist/\n \"\"\"\n dataset_dir = '../data/mnist'\n plx.datasets.mnist.prepare(dataset_dir)\n train_data_file = plx.datasets.mnist.RECORD_FILE_NAME_FORMAT.format(\n dataset_dir, plx.Modes.TRAIN)\n eval_data_file = plx.datasets.mnist.RECORD_FILE_NAME_FORMAT.format(dataset_dir, plx.Modes.EVAL)\n meta_data_file = plx.datasets.mnist.META_DATA_FILENAME_FORMAT.format(dataset_dir)\n\n run_config = plx.estimators.RunConfig()\n experiment = plx.experiments.Experiment(\n estimator=plx.estimators.Estimator(\n model_fn=model_fn, model_dir=output_dir, config=run_config),\n train_input_fn=get_input_fn(plx.Modes.TRAIN, train_data_file, meta_data_file),\n eval_input_fn=get_input_fn(plx.Modes.EVAL, eval_data_file, meta_data_file),\n train_steps=1000,\n eval_steps=10)\n\n return experiment\n\n\ndef main(*args):\n plx.experiments.run_experiment(experiment_fn=experiment_fn,\n output_dir=\"/tmp/polyaxon_logs/autoencoder\",\n schedule='continuous_train_and_eval')\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n","sub_path":"examples/programatic_examples/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"594976403","text":"# -*- coding: utf-8 -*-\n# !python3\nfrom __future__ import absolute_import\nimport re\nimport glob\nimport os\nfrom normalizr import Normalizr\n\n\nnormalizr = Normalizr(language='es')\n\n\ndef main():\n print(os.path.basename(__file__), \"running...\")\n\n renamer(\"*.pdf\", r\"\\s+\", \"_\")\n renamer(\"*.pdf\", r\"\\.\", \"_\")\n renamer(\"*_pdf\", r\"_pdf\", \".pdf\")\n renamer(\"*.pdf\", r\"__\", \"_\")\n # renamer(\"*.jpg-large\", r\"^(.*)\\.jpg-large$\", r\"\\1.jpg\")\n # renamer(\"*.unsafe\", r\"^(.*)\\.unsafe$\", r\"\\1\")\n # renamer(\"*.doc\", r\"^(.*)\\.doc$\", r\"new(\\1).doc\") #forward\n # renamer(\"*.doc\", r\"^new\\((.*)\\)\\.doc\", r\"\\1.doc\") 
#reverse\n print(os.path.basename(__file__), \"complete...\")\n\n\ndef renamer(files, pattern, replacement):\n isRemove = True\n\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n normalizations = ['remove_accent_marks']\n basename = normalizr.normalize(basename, normalizations)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n try:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n print(\"success: {} changed to -> {}\".format(pathname,\n new_filename))\n except OSError:\n print(\"error: {} already exists\".format(new_filename))\n\n if isRemove:\n try:\n os.remove(pathname)\n print(\"removed: {} because it's a dupe\".format(pathname))\n except OSError:\n print(\"error: cannot remove {}\".format(pathname))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
{"seq_id":"503297102","text":"from Color import Color\nfrom PyQt6.QtWidgets import QWidget, QSizePolicy\nfrom random import randint\nfrom PyQt6 import QtGui, QtCore\nimport copy\n\n\ndef swap(numb, numb2):\n temp = numb\n numb = numb2\n numb2 = temp\n return numb, numb2\n\n\nclass Game(QWidget):\n def __init__(self, window):\n QWidget.__init__(self)\n self.window = window\n self.background = None\n self.grid = None\n self.painter = None\n sizePolicy = QSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred)\n sizePolicy.setHeightForWidth(True)\n self.setSizePolicy(sizePolicy)\n self.setMinimumWidth(250)\n self.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)\n\n def draw(self):\n self.painter.setBrush(Color.emptyTile)\n self.painter.end()\n self.painter.begin(self)\n self.painter.drawPixmap(0, 0, self.background.data)\n self.painter.setPen(0)\n self.updateGrid()\n self.painter.end()\n\n def paintEvent(self, event):\n self.painter = QtGui.QPainter()\n self.painter.begin(self.background.data)\n pen = QtGui.QPen()\n pen.setStyle(QtCore.Qt.PenStyle.SolidLine)\n self.painter.setPen(pen)\n self.draw()\n\n def resizeEvent(self, *args, **kwargs):\n self.background.generateBackground(self.window.width(), self.window.height())\n if self.grid is None:\n self.grid = self.background.getGrid()\n self.newTile(2)\n\n def keyPressEvent(self, event):\n if event.key() == QtCore.Qt.Key.Key_Up:\n gridBefore = copy.deepcopy(self.grid)\n self.moveGridUp()\n if not self.gridsAreEqual(gridBefore, self.grid):\n self.newTile(1)\n self.repaint()\n elif event.key() == QtCore.Qt.Key.Key_Down:\n gridBefore = copy.deepcopy(self.grid)\n self.moveGridDown()\n if not self.gridsAreEqual(gridBefore, self.grid):\n self.newTile(1)\n self.repaint()\n elif event.key() == QtCore.Qt.Key.Key_Left:\n gridBefore = copy.deepcopy(self.grid)\n self.moveGridLeft()\n if not self.gridsAreEqual(gridBefore, self.grid):\n self.newTile(1)\n self.repaint()\n elif event.key() == QtCore.Qt.Key.Key_Right:\n gridBefore = copy.deepcopy(self.grid)\n self.moveGridRight()\n if not self.gridsAreEqual(gridBefore, self.grid):\n self.newTile(1)\n self.repaint()\n elif event.key() == QtCore.Qt.Key.Key_Space:\n row = [16, 8, 8, 4]\n self.combineRow(row)\n row = [4, 4, 8, 4]\n self.combineRow(row)\n\n def gridsAreEqual(self, grid, grid2):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileX in range(xMax):\n for tileY in range(yMax):\n if grid[tileX][tileY].value != grid2[tileX][tileY].value:\n return
False\n return True\n\n def moveGridUp(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileX in range(xMax):\n row = []\n for tileY in range(yMax):\n row.append(self.grid[tileX][tileY].value)\n row = self.combineRow(row)\n for i in range(len(row)):\n self.grid[tileX][i].value = row[i]\n\n def moveGridDown(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileX in range(xMax):\n row = []\n for tileY in range(yMax - 1, -1, -1):\n row.append(self.grid[tileX][tileY].value)\n row = self.combineRow(row)\n for i in range(len(row)):\n self.grid[tileX][len(row) - 1 - i].value = row[i]\n\n def moveGridLeft(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileY in range(yMax):\n row = []\n for tileX in range(xMax):\n row.append(self.grid[tileX][tileY].value)\n row = self.combineRow(row)\n for i in range(len(row)):\n self.grid[i][tileY].value = row[i]\n\n def moveGridRight(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileY in range(yMax):\n row = []\n for tileX in range(xMax - 1, -1, -1):\n row.append(self.grid[tileX][tileY].value)\n row = self.combineRow(row)\n for i in range(len(row)):\n self.grid[len(row) - 1 - i][tileY].value = row[i]\n\n def combineRow(self, row):\n print(\"Start row: \" + str(row))\n for i in range(len(row)):\n if row[i] is not None:\n for j in range(i):\n if row[j] is None:\n row[j] = row[i]\n row[i] = None\n elif row[j] is not None:\n if row[j] == row[i] and self.selectionIsEmpty(row, j, i):\n row[j] = row[j] + row[i]\n for k in range(i, len(row) - 1):\n row[k] = row[k + 1]\n row[len(row) - 1] = None\n print(\"End row: \" + str(row) + \"\\n\")\n return row\n\n def selectionIsEmpty(self, row, begin, end):\n if begin > end:\n begin, end = swap(begin, end)\n for i in range(begin + 1, end):\n if row[i] is not None:\n return False\n return True\n\n def heightForWidth(self, width):\n ratio = 5.0 / 7.0\n self.window.setMaximumHeight(width / ratio)\n return width / ratio\n\n def setBackground(self, bg):\n self.background = bg\n\n def updateGrid(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n\n for tileX in range(xMax):\n for tileY in range(yMax):\n if self.grid[tileX][tileY].value is not None:\n self.painter.setBrush(Color().getTileColor(self.grid[tileX][tileY].value))\n self.painter.drawRoundedRect(self.grid[tileX][tileY].x,\n self.grid[tileX][tileY].y,\n self.grid[tileX][tileY].length,\n self.grid[tileX][tileY].length,\n self.grid[tileX][tileY].rectRounding,\n self.grid[tileX][tileY].rectRounding)\n self.painter.setPen(QtGui.QColor(0, 0, 0))\n self.painter.setFont(QtGui.QFont('clear-sans', 30))\n self.painter.drawText(self.grid[tileX][tileY].x,\n self.grid[tileX][tileY].y,\n self.grid[tileX][tileY].length,\n self.grid[tileX][tileY].length,\n QtCore.Qt.AlignmentFlag.AlignCenter,\n str(self.grid[tileX][tileY].value))\n self.painter.setPen(0)\n\n def newTile(self, tileCount):\n for count in range(tileCount):\n x, y = self.getNewTilePosition()\n if x is not None or y is not None:\n self.grid[x][y].value = self.grid[x][y].generateNewValue()\n\n def getNewTilePosition(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n emptyTileCount = self.getEmptyTileCount()\n if emptyTileCount <= 0:\n return None, None\n count = 0\n random = randint(0, emptyTileCount - 1)\n\n for tileX in range(xMax):\n for tileY in range(yMax):\n if count == random and self.grid[tileX][tileY].value is None:\n return tileX, 
tileY\n if self.grid[tileX][tileY].value is None:\n count += 1\n\n def getEmptyTileCount(self):\n xMax = len(self.grid)\n yMax = len(self.grid[len(self.grid) - 1])\n emptyTileCount = 0\n\n for tileX in range(0, xMax):\n for tileY in range(0, yMax):\n if self.grid[tileX][tileY].value is None:\n emptyTileCount += 1\n return emptyTileCount\n","sub_path":"2048/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"559223200","text":"def solution(X, A):\n # write your code in Python 3.6\n if X == 0:\n return 0\n check = [0]*(X)\n filled = 0\n for i in range(0,len(A)):\n if check[A[i]-1]==0:\n check[A[i]-1] = 1\n filled+=1\n if filled == X:\n return i\n \n return -1\n\n","sub_path":"Side Projects/Codility Solutions/FrogRiverOne.py","file_name":"FrogRiverOne.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"205226860","text":"from PyQt5.QtWidgets import QDialog\n\nfrom app.resources.resources import RESOURCES\n\nfrom app.extensions.custom_gui import ResourceListView\nfrom app.editor.data_editor import SingleResourceEditor\nfrom app.editor.base_database_gui import DatabaseTab\n\nfrom app.editor.portrait_editor import portrait_model, portrait_properties\n\nfrom app.editor import timer\n\nclass PortraitDatabase(DatabaseTab):\n @classmethod\n def create(cls, parent=None):\n data = RESOURCES.portraits\n title = \"Unit Portrait\"\n right_frame = portrait_properties.PortraitProperties\n collection_model = portrait_model.PortraitModel\n deletion_criteria = None\n\n dialog = cls(data, title, right_frame, deletion_criteria,\n collection_model, parent, button_text=\"Add New %s...\",\n view_type=ResourceListView)\n return dialog\n\n @classmethod\n def edit(cls, parent=None):\n window = SingleResourceEditor(PortraitDatabase, ['portraits'], parent)\n window.exec_()\n\ndef get():\n timer.get_timer().start_for_editor()\n window = SingleResourceEditor(PortraitDatabase, ['portraits'])\n result = window.exec_()\n timer.get_timer().stop_for_editor()\n if result == QDialog.Accepted:\n selected_portrait = window.tab.right_frame.current\n return selected_portrait, True\n else:\n return None, False\n\n# Testing\n# Run \"python -m app.editor.portrait_editor.portrait_tab\" from main directory\nif __name__ == '__main__':\n import sys\n from PyQt5.QtWidgets import QApplication\n app = QApplication(sys.argv)\n RESOURCES.load('default.ltproj')\n window = SingleResourceEditor(PortraitDatabase, ['portraits'])\n window.show()\n app.exec_()\n","sub_path":"app/editor/portrait_editor/portrait_tab.py","file_name":"portrait_tab.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"333261890","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport steamfront\nfrom natsort import natsorted\n\nfrom kellog import info, error, debug\n\n# ==================================================================================================\ndef main(args):\n\tclient = steamfront.Client()\n\n\tpaths = natsorted(args.dir.glob(\"*.png\"))\n\tinfo(f\"Found {len(paths)} screenshots to sort\")\n\n\tgameList = []\n\tfor path in paths:\n\t\tgameID = path.stem.split(\"_\")[0]\n\t\tif gameID not in gameList:\n\t\t\tgameList.append(gameID)\n\tinfo(f\"Identified {len(gameList)} different 
games\")\n\n\tbadChars = '<>:\"/\\|?*'\n\tfor i, gameID in enumerate(gameList):\n\t\ttry:\n\t\t\tgame = client.getApp(appid=gameID)\n\t\texcept TypeError as e:\n\t\t\terror(e)\n\t\t\terror(f\"Attempted gameID was `{gameID}`\")\n\t\t\tcontinue\n\t\texcept steamfront.app._AppNotFound as e:\n\t\t\terror(e)\n\t\t\terror(f\"Attempted gameID was `{gameID}`\")\n\t\t\tcontinue\n\t\texcept:\n\t\t\terror(\"Some other error\")\n\t\t\terror(f\"Attempted gameID was `{gameID}`\")\n\t\t\tcontinue\n\n\t\tname = game.name\n\t\tinfo(f\"{i + 1}/{len(gameList)}: {name}\")\n\t\tfor char in badChars:\n\t\t\tname = name.replace(char, \"_\")\n\n\t\t# Make directory, move files over while renaming\n\t\t(args.dir / name).mkdir(exist_ok=True)\n\t\tfor path in natsorted(args.dir.glob(f\"{gameID}_*.png\")):\n\t\t\tn = path.name.split(f\"{gameID}_\")[1]\n\t\t\tn = f\"{n[:4]}-{n[4:6]}-{n[6:8]}_{n[8:10]}-{n[10:12]}-{n[12:]}\"\n\t\t\tdest = (args.dir / name / n).with_suffix(path.suffix)\n\t\t\tif not dest.exists():\n\t\t\t\tpath.replace(dest)\n\t\t\telse:\n\t\t\t\terror(f\"Destination '{dest.relative_to(args.dir)}' exists when moving '{path.relative_to(args.dir)}'\")\n\n\n\n# ==================================================================================================\ndef parse_args():\n\tparser = ArgumentParser()\n\tparser.add_argument(\"--dir\", \"-d\", type=str, metavar=\"PATH\", default=Path.home() / \"Pictures\" / \"Screenshots\" / \"Steam\", help=\"Directory where the screenshots are stored\")\n\n\targs = parser.parse_args()\n\targs.dir = Path(args.dir)\n\n\treturn args\n\n\n# ==================================================================================================\nif __name__ == \"__main__\":\n\tmain(parse_args())\n","sub_path":"steam_screenshot_manager.py","file_name":"steam_screenshot_manager.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"390131950","text":"\"\"\"Unit tests go here\"\"\"\r\nfrom django.test import TestCase\r\nfrom notifications.models import Notification\r\n# Create your tests here.\r\nNOTIFICATION1 = Notification(text='Hello',\r\n new=True, )\r\nNOTIFICATION2 = Notification(text='Hi',\r\n new=False, )\r\nNOTIFICATION3 = Notification(text='Hello',\r\n new=False, )\r\nNOTIFICATION4 = Notification(text='Bonjour',\r\n new=False, )\r\n\r\n\r\ndef compare_notifications(self=Notification, other=Notification):\r\n \"\"\"\r\n If 2 notifications have the same text, they are equal (return TRUE)\r\n Otherwise, they are different (return FALSE)\r\n \"\"\"\r\n if self.text != other.text:\r\n return False\r\n if self.new != other.new:\r\n return False\r\n return True\r\n\r\n\r\nclass TestNotifications(TestCase):\r\n \"\"\"Notification testing\"\"\"\r\n def test_notification_is_valid(self):\r\n \"\"\"Check whether notification is valid\"\"\"\r\n self.assertTrue(isinstance(NOTIFICATION1, Notification))\r\n self.assertTrue(isinstance(NOTIFICATION2, Notification))\r\n self.assertTrue(isinstance(NOTIFICATION3, Notification))\r\n self.assertTrue(isinstance(NOTIFICATION4, Notification))\r\n\r\n def test_same_notification_should_be_equal(self):\r\n \"\"\"Whether the same notification equal to itself\"\"\"\r\n self.assertTrue(compare_notifications(NOTIFICATION1, NOTIFICATION1))\r\n\r\n def test_different_notification_should_not_be_equal(self):\r\n \"\"\"Notification with different text will not be equal\"\"\"\r\n self.assertFalse(compare_notifications(NOTIFICATION1, NOTIFICATION2))\r\n\r\n def 
test_different_text_should_not_be_equal(self):\r\n \"\"\"Old and new notifications will not be equal \"\"\"\r\n self.assertFalse(compare_notifications(NOTIFICATION1, NOTIFICATION3))\r\n\r\n def test_different_new_should_not_be_equal(self):\r\n \"\"\"Notification with different text and age will not be equal\"\"\"\r\n self.assertFalse(compare_notifications(NOTIFICATION2, NOTIFICATION4))\r\n\r\n","sub_path":"notifications/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"26081159","text":"#external imports\nimport argparse\nimport json\nimport os\nimport platform\nfrom unityagents import UnityEnvironment\n\n#local imports\nfrom agent_trainer import AgentTrainer\nfrom time_analysis import TimeAnalysis\n\ndef create_results_folders(filepath):\n path_parts = filepath.split('/')\n if len(path_parts) > 1:\n for p in range(len(path_parts)-1):\n full_path = './Results/' + '/'.join(path_parts[:-(p+1)])\n print(full_path)\n if not os.path.exists(full_path):\n os.mkdir(full_path)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Parameters for continuous control project')\n parser.add_argument('mode')\n \n parser.add_argument('--test_model', dest='test_model', default='random')\n parser.add_argument('--test_params', dest='test_params', default='default_params.json')\n parser.add_argument('--test_no_display', dest='test_no_display', action='store_true')\n parser.add_argument('--test_results_path', dest='test_results_path', default='test_results.csv')\n parser.add_argument('--test_episodes', dest='test_episodes', default=1)\n \n parser.add_argument('--train_params', dest='train_params', default='default_params.json')\n parser.add_argument('--train_start_id', dest='train_start_id', default=0)\n parser.add_argument('--train_results_path', dest='train_results_path', default='train_results.csv')\n parser.add_argument('--train_debug', dest='train_debug', default=False)\n parser.add_argument('--train_worker_id', dest='train_worker_id', default=0)\n args = parser.parse_args()\n \n env = None\n try:\n if platform.system() == 'Linux':\n file_name = './Reacher_Linux_1/Reacher.x86_64'\n else:\n file_name = './Reacher_Windows_x86_64/Reacher.exe'\n\n if args.mode == 'test':\n params_file = open('./Params/' + args.test_params, 'r')\n test_params = json.loads(params_file.read())\n params_file.close()\n\n env = UnityEnvironment(\n file_name=file_name,\n no_graphics=args.test_no_display\n )\n for testing_params in test_params['params']:\n for i in range(int(args.test_episodes)):\n agent_trainer = AgentTrainer(\n env=env, \n params=testing_params,\n results_path='./Results/' + args.test_results_path if args.test_results_path != '' else ''\n )\n test_model = args.test_model\n if test_model == 'auto':\n test_model = testing_params['agent']['model_tag']\n agent_trainer.test(model_weights=test_model, test_id=i+1)\n elif args.mode == 'train':\n params_file = open('./Params/' + args.train_params, 'r')\n train_params = json.loads(params_file.read())\n params_file.close()\n\n create_results_folders(args.train_results_path)\n\n env = UnityEnvironment(\n file_name=file_name,\n no_graphics=True,\n worker_id=int(args.train_worker_id)\n )\n for training_params in train_params['params']:\n if(training_params['id'] < int(args.train_start_id)):\n continue\n \n print(args)\n print('Train id', training_params['id'])\n time_analysis = TimeAnalysis()\n agent_trainer = AgentTrainer(\n 
env=env,\n params=training_params,\n results_path='./Results/' + args.train_results_path,\n debug_mode=args.train_debug,\n time_analysis=time_analysis\n )\n agent_trainer.train()\n print(time_analysis.to_str())\n print('Training ended')\n else:\n print('Unknown mode', args.mode)\n except Exception as e:\n print('Unexpected error:', str(e))\n raise(e)\n finally:\n if env is not None:\n env.close()\n","sub_path":"ContinuousControl/continuous_control.py","file_name":"continuous_control.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"383744274","text":"import hashlib\nimport sys\n\nmd5 = open(\"md5_rainbow.txt\", \"w\")\nsha2 = open(\"s2_rainbow.txt\", \"w\")\nsha5 = open(\"s5_rainbow.txt\", \"w\")\nntlm = open(\"ntlm_rainbow.txt\", \"w\")\nf = open(\"rockyou.txt\", encoding=\"ISO-8859-1\")\n\nquestion = input(\"Are you sure you want to write hashes into the rainbow tables again? \").lower()\n\nif question == 'yes':\n for i in f:\n pw = i.strip('\\n')\n encoded = pw.encode()\n encoded_utf16le = pw.encode('utf-16le')\n\n m = hashlib.md5(encoded).hexdigest()\n s2 = hashlib.sha256(encoded).hexdigest()\n s5 = hashlib.sha512(encoded).hexdigest()\n n = hashlib.new('md4', encoded_utf16le).hexdigest()\n\n md5.write(\"%s:%s\" % (m,i))\n sha2.write(\"%s:%s\" % (s2, i))\n sha5.write(\"%s:%s\" % (s5, i))\n ntlm.write(\"%s:%s\" % (n,i))\n\nelif question == 'no':\n print(\"That's what I thought.\")\n sys.exit()\n","sub_path":"append_hash.py","file_name":"append_hash.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"148975874","text":"'''\nCreated on Mar 2, 2021\n\n@author: Jishan Desai and Gazal Arora\n'''\nimport random\nclass Board(object):\n '''\n classdocs\n '''\n def __init__(self, dim, num_mines):\n self.dim = dim\n self.total_mines = num_mines\n self.board = [['_' for r in range(dim)] for c in range(dim)]\n self.initialize_board()\n self.print_board()\n def initialize_board(self):\n mines_placed = 0\n while mines_placed != self.total_mines:\n loc = random.randint(0,self.dim**2 - 1)\n r = loc // (self.dim)\n c = loc % (self.dim)\n if self.board[r][c] != \"x\":\n self.board[r][c] = \"x\"\n mines_placed = mines_placed+1\n self.fill_mine_distances()\n def print_board(self):\n print(\" \", end = \"\\t\")\n for i in range(self.dim):\n print(i, end = \" \")\n print()\n row_num = 0\n for row in self.board:\n print(row_num,end=\"\\t\")\n row_num=row_num+1\n for col in row:\n print(col,end = \" \")\n print()\n def fill_mine_distances(self):\n for i in range(self.dim):\n for j in range(self.dim):\n counter = 0\n if self.board[i][j] == \"x\":\n continue\n if (i-1) > -1 and self.board[i-1][j] == \"x\":\n counter = counter + 1\n if (i+1) < self.dim and self.board[i+1][j] == \"x\":\n counter = counter + 1 \n if (j-1) > -1 and self.board[i][j-1] == \"x\":\n counter = counter + 1\n if (j+1) < self.dim and self.board[i][j+1] == \"x\": \n counter = counter + 1\n if (i+1) < self.dim and (j+1) < self.dim and self.board[i+1][j+1] == \"x\":\n counter = counter + 1\n if (i+1) < self.dim and (j-1) > -1 and self.board[i+1][j-1] == \"x\":\n counter = counter + 1\n if (i-1) > -1 and (j-1) > -1 and self.board[i-1][j-1] == \"x\":\n counter = counter + 1\n if (i-1) > -1 and (j+1) < self.dim and self.board[i-1][j+1] == \"x\": \n counter = counter + 1 \n self.board[i][j] = str(counter)\n def get_loc(self,row,col):\n return 
self.board[row][col] \nif __name__ == '__main__':\n dimension = int(input(\"Enter Dimension: \"))\n mine_num = int(input(\"Enter Mine number: \"))\n b = Board(dimension,mine_num)","sub_path":"Environment.py","file_name":"Environment.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"574704994","text":"## Author Name: Flynn Gaur\n## Description: This program prints a circle, a triangle and a rectangle\n## with random y-coordinates and increasing x-coordinates\n## with the frame rate of 30. The shapes seem to warp around \n## the canvas.\n\nimport sys\nimport os \nimport random\ncwd = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, cwd)\nfrom graphics import graphics\n\ndef main():\n '''\n The fuction creates the canvas and prints a circle, rectangle and a triangle. \n Nested while loop is used to move the shapes for an infinite amount of time,\n to change the x-coorinates.\n '''\n gui=graphics(500, 500, 'three')\n while True: #infinite loop\n i=0 \n rectangle_y=random.randint(50,450) #randint used for getting random y-coordinates\n ellipse_y=random.randint(50,450) \n tri_y=random.randint(50,450) \n while i<500: #to move till the edge of the canvas\n offset=i-50 #to achieve a warp like effect\n gui.rectangle(offset,rectangle_y,50,50,'green')\n gui.ellipse(offset+25,ellipse_y+25,50,50,'orange')\n gui.triangle(offset,tri_y,offset+25, \\\n tri_y-50,offset+50,tri_y,'blue')\n gui.update_frame(30)\n gui.clear() #to clear the canvas\n i+=10 #for changing the x-coordinate\n\nmain()","sub_path":"three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"107226827","text":"# -*- coding: utf-8 -*-\n# __author__ = 'XingHuan'\n# 6/30/2018\n\n# Copyright 2018 XingHuan\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom sins.module.sqt import *\nfrom sins.utils.time_utils import current_time\nfrom sins.utils.res import resource\nfrom sins.db.models import *\nfrom sins.ui.widgets.label.tip import InnerTip\n\n\nlogger = get_logger(__name__)\n\nEDITLABEL_SIZE = 20\n\n\n# base\nclass EditLabel(QLabel):\n clicked = Signal()\n\n def __init__(self, editable=False, parent=None):\n super(EditLabel, self).__init__(parent)\n self.editable = editable\n if self.editable:\n self.setPixmap(resource.get_pixmap(\"icon\", \"edit_white.png\", scale=EDITLABEL_SIZE))\n else:\n self.setPixmap(resource.get_pixmap(\"icon\", \"edit_gray.png\", scale=EDITLABEL_SIZE))\n\n self.setFixedSize(EDITLABEL_SIZE, EDITLABEL_SIZE)\n\n def mouseReleaseEvent(self, *args, **kwargs):\n if self.editable:\n self.clicked.emit()\n\n\nclass CellWidget(QWidget):\n def __init__(self,\n editable=False,\n treeitem=None,\n column=0,\n db_instance=None,\n model_attr='',\n column_label='',\n parent=None):\n super(CellWidget, self).__init__(parent)\n self.editable = editable\n self.treeitem = treeitem\n self.column = 
column\n self.column_label = column_label\n self.db_instance = db_instance\n self.model_attr = model_attr\n self.autoHeight = False\n self.targetHeight = 20\n\n self.data_value = None\n self.update_db_time = 0\n self.has_edit_label = False\n\n # self.setStyleSheet(\"border:none;background:transparent\")\n self.back = QLabel(self)\n # self.back = QPushButton('AAA', self)\n # self.back.setStyleSheet(\"background:rgb(100, 140, 100, 250)\")\n self.setMinimumHeight(22)\n\n def add_front(self, editlabel=True):\n self.has_edit_label = editlabel\n\n def set_value(self, value):\n pass\n\n def set_db_instance(self, db_instance):\n self.db_instance = db_instance\n\n def set_read_only(self, editable=False):\n self.editable = editable\n\n def set_editable(self):\n pass\n\n def set_no_editable(self):\n pass\n\n def edit_finished(self):\n self.set_no_editable()\n if current_time() - self.update_db_time > 0.01:\n # print self.db_instance, self.model_attr\n if self.db_instance is not None and (hasattr(self.db_instance, self.model_attr)):\n self.update_data()\n self.update_db_time = current_time()\n self.show_tip()\n\n def update_data(self):\n logger.debug(u'setattr({}, {}, {})'.format(self.db_instance, self.model_attr, self.data_value))\n self.db_instance.update_field(**{self.model_attr: self.data_value})\n\n def show_tip(self):\n parent = self.parent()\n if self.treeitem is not None:\n parent = self.treeitem.tree.parent()\n tip = InnerTip(parent=parent)\n tip.showText('Change {} {} to {}'.format(self.db_instance, self.model_attr, self.data_value))\n\n def resizeEvent(self, *args, **kwargs):\n super(CellWidget, self).resizeEvent(*args, **kwargs)\n if hasattr(self, \"editlabel\"):\n self.editlabel.move(self.width() - EDITLABEL_SIZE, 0)\n self.back.resize(self.size())\n\n def enterEvent(self, QEvent):\n super(CellWidget, self).enterEvent(QEvent)\n if self.has_edit_label and not hasattr(self, \"editlabel\"):\n self.editlabel = EditLabel(self.editable, self)\n self.editlabel.clicked.connect(self.set_editable)\n self.editlabel.move(self.width() - EDITLABEL_SIZE, 0)\n if hasattr(self, \"editlabel\"):\n self.editlabel.setHidden(False)\n\n def leaveEvent(self, QEvent):\n super(CellWidget, self).leaveEvent(QEvent)\n if hasattr(self, \"editlabel\"):\n self.editlabel.setHidden(True)\n","sub_path":"sins/ui/widgets/data_view/cell_edit/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"122886866","text":"\"\"\" helper functions \"\"\"\nfrom math import sqrt\nimport csv\n\n\ndef get_value_as_int(value):\n \"\"\" takes an index of a csv row, and returns it as an int, replacing with 0 of empty\n :param value: an index of a row from a csv.reader object\"\"\"\n if not value or value == 0:\n return 0\n else:\n return int(value)\n\n\ndef calculate_active(confirmed, recovered, deaths):\n \"\"\" creates a list of active cases, by subtracting recoveries and deaths from confirmed\n \"\"\"\n active = []\n for i in range(len(confirmed)):\n active.append(confirmed[i] - recovered[i] - deaths[i])\n return active\n\n\ndef calculate_death_rate(confirmed, deaths):\n \"\"\" creates a list of active cases, by subtracting recoveries and deaths from confirmed\"\"\"\n death_rate = []\n for i in range(len(confirmed)):\n try:\n death_rate.append(deaths[i] / confirmed[i])\n except ZeroDivisionError:\n death_rate.append(0)\n return death_rate\n\n\ndef make_hover_txt(confirmed, recovered, deaths, active, death_rate, 
regions):\n hover_txt = []\n for i in range(len(confirmed)):\n death_perc = f\"{(float('{0:.3f}'.format(death_rate[i]*100)))}%\"\n txt = f\"{regions[i]}<br>\" \\\n f\"{active[i]} active cases<br>\" \\\n f\"Until now:<br>\" \\\n f\"{confirmed[i]} confirmed cases,<br>\" \\\n f\"{recovered[i]} recovered patients,<br>\" \\\n f\"{deaths[i]} deaths<br>
\" \\\n f\"Total death rate: {death_perc}.\"\n hover_txt.append(txt)\n return hover_txt\n\n\ndef marker_size(act):\n size = 2 * sqrt(act / 3.14)\n if size >= 6:\n return size\n else:\n return 6\n\n\ndef make_country_dict(filename):\n \"\"\" tales a csv file with country codes, and returns a dictionary of these codes \"\"\"\n print(\"Creating country code dictionary...\")\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n names, iso_3 = [], []\n for row in reader:\n names.append(row[0])\n iso_3.append(row[3])\n\n dic = {}\n for i in range(len(names)):\n dic[names[i].lower()] = iso_3[i]\n\n print(\"Country code dictionary created successfully.\")\n\n return dic\n\ndef make_iso_data(day):\n iso_data = {}\n\n for i in range(len(day.iso_3)): # per ogni regione di un tal giorno\n current_iso = day.iso_3[i]\n if not current_iso in iso_data.keys():\n iso_data[current_iso] = {\n \"time\": day.time,\n \"country\": day.countries[i],\n \"confirmed\": day.confirmed[i],\n \"deaths\": day.deaths[i],\n \"recovered\": day.recovered[i],\n }\n else:\n iso_data[current_iso][\"confirmed\"] += iso_data[current_iso][\"confirmed\"]\n iso_data[current_iso][\"deaths\"] += iso_data[current_iso][\"deaths\"]\n iso_data[current_iso][\"recovered\"] += iso_data[current_iso][\"recovered\"]\n\n iso_iso, iso_conf, iso_rec, iso_dea, iso_act, iso_dr, iso_reg = [], [], [], [], [], [], []\n\n for k in iso_data:\n iso_data[k][\"active\"] = (iso_data[k][\"confirmed\"] - iso_data[k][\"recovered\"] - iso_data[k][\"deaths\"])\n try:\n iso_data[k][\"death_rate\"] = iso_data[k][\"deaths\"] / iso_data[k][\"confirmed\"]\n except ZeroDivisionError:\n iso_data[k][\"death_rate\"] = 0\n\n iso_conf.append(iso_data[k][\"confirmed\"])\n iso_rec.append(iso_data[k][\"recovered\"])\n iso_dea.append(iso_data[k][\"deaths\"])\n iso_act.append(iso_data[k][\"active\"])\n iso_dr.append(iso_data[k][\"death_rate\"])\n iso_reg.append(iso_data[k][\"country\"])\n iso_iso.append(k)\n\n return iso_iso, iso_conf, iso_rec, iso_dea, iso_act, iso_dr, iso_reg\n\n","sub_path":"visualisation/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"300184728","text":"# Funções\n\n#As funções são rotinas no Python. 
Exemplos são print, len, input, int, float\n#Cria comandos personalizados\n\ndef lin(): #função sem parâmetro, com parênteses vazio ()\n print('-' * 30)\n#Entre o def e o programa principal deve ter 2 linhas para organizar o código\n\nlin()\nprint(' CURSO EM VÍDEO ')\nlin()\nprint(' APRENDA PYTHON ')\nlin()\nprint(' GUSTAVO GUANABARA ')\nlin()\n\n#----------------------------------------------------------------------\ndef mensagem(msg): #Ao incluir a 'msg' dentro do parênteses da função cria-se um parâmetro\n print('-' * 30)\n print(msg) #e chamar 'msg' entre a função automatiza a rotina criando o link\n print('-'*30)\n\nmensagem('SISTEMA DE ALUNOS')\n#---------------------------------------------------------------------\ndef título(txt):\n print('-' * 30)\n print(txt)\n print('-' * 30)\n\n\ntítulo(' CURSO EM VÍDEO ')\ntítulo(' APRENDA PYTHON ')\ntítulo(' GUSTAVO GUANABARA ')\n\n#____________________________________________________________________\ndef soma(a, b):\n print(f'A = {a} e B = {b}')\n s = a + b\n print(f'A soma A + B = {s}')\n\n\n#Programa Principal\nsoma(4, 5)\nsoma(a=8, b=9) #Declarando explicitamente o parâmetro\nsoma(b=2, a=1) #Declarando explicitamente o parâmetro em qualquer ordem\n#Precisa explicitar sempre os dois parâmetros ou deixar implícito os dois, caso contrário irá gerar erro.\n\n#Empacotamento de Parâmetros (Tuplas)\ndef contador(* núm):\n for valor in núm:\n print(f'{valor} ', end='')\n print('FIM!')\n\n\ncontador(2, 1, 7)\ncontador(8, 0)\ncontador(4, 4, 7, 6, 2)\n\ndef contador(* núm):\n tam = len(núm)\n print(f'Recebi os valores {núm} e são ao todo {tam} números')\n\n\ncontador(2, 1, 7)\ncontador(8, 0)\ncontador(4, 4, 7, 6, 2)\n\n#Empacotamento de Parâmetros (Lista)\ndef dobra(lst):\n for pos in range(len(lst)):\n lst[pos] *= 2\n\n\nvalores = [6, 3, 9, 1, 0, 2]\ndobra(valores)\nprint(valores)\n\ndef soma(* valores):\n s = sum(valores)\n print(f'Somando os valores {valores} temos {s}')\n\n\nsoma(5, 2)\nsoma(2, 9, 4)\n","sub_path":"python_aulas/aula20.py","file_name":"aula20.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"551269186","text":"from azure.keyvault import KeyVaultClient, KeyVaultAuthentication\nfrom azure.common.credentials import ServicePrincipalCredentials\n\nclass KeyVault(object):\n \"\"\"\n Abstraction for the Azure Key Vaults\n \"\"\"\n _config = None\n _client_id = ''\n _tenant = ''\n _secret = ''\n _resource = 'https://vault.azure.net'\n _client = None\n _vault = None\n\n def __init__(self, config):\n \"\"\"\n :param config: config: pyhocon configuration object\n \"\"\"\n _config = config\n self._tenant = config.get('azure.tenant')\n self._vault = config.get('azure.vaultid')\n self._client_id = config.get('azure.app')\n self._secret = config.get('azure.client-secret')\n self._client = KeyVaultClient(KeyVaultAuthentication(authorization_callback=self._auth_callback))\n\n\n def _auth_callback(self, server, resource, scope,x):\n credentials = ServicePrincipalCredentials(\n client_id= self._client_id,\n secret=self._secret,\n tenant=self._tenant,\n resource=self._resource\n )\n token = credentials.token\n return token['token_type'], token['access_token']\n\n def fetch_secret(self, secret_id, secret_version):\n \"\"\"\n Fetch the known client secret from the KeyVault.\n :param secret_id: the secret's name\n :param secret_version: the secrets's version hex\n :return:\n \"\"\"\n secret_bundle = self._client.get_secret(\n 
'https://{0}.vault.azure.net/'.format(self._vault),\n secret_id,\n secret_version)\n return secret_bundle","sub_path":"src/db_conversion/keyvault.py","file_name":"keyvault.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"549796876","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport requests as rq\nimport io\nimport pytz\nimport sys\nimport importlib\nimport hashlib\n\nsys.path.insert(0,'..')\nimport jhu_utils\nfrom script_utils import *\n\n\ndef get_hash(df, **kwargs):\n r = kwargs.get(\"response\")\n if not r:\n return None\n return hashlib.md5(r.headers[\"Last-Modified\"].encode()).hexdigest()\n\n\ndef process_data(df, **kwargs):\n clean = df[[\"Race\", \"Latino\", \"Date of Death\"]]\n clean.rename(columns={\"Date of Death\": \"Date\"}, inplace=True)\n clean[\"Date\"] = pd.to_datetime(clean[\"Date\"]).apply(lambda v: v.strftime(\"%Y-%m-%d\"))\n return clean.set_index(\"Date\")\n\n\ndef get_updated_data(df, di, **kwargs):\n last_row = df.tail(1).iloc[0]\n prev_row = df.tail(2).iloc[0]\n # d_str = dt.datetime.strptime(last_row.name, \"%Y-%m-%d\").strftime('%-m/%-d')\n d_today_str = dt.datetime.now(pytz.timezone('US/Central')).strftime('%-m/%-d')\n d_prev = dt.datetime.now(pytz.timezone('US/Central')) - dt.timedelta(days=1)\n d_prev_prev = d_prev - dt.timedelta(days=1)\n try:\n new_prev = len(df.loc[d_prev.strftime(\"%Y-%m-%d\")])\n except KeyError:\n new_prev = 0\n try:\n new_prev_prev = len(df.loc[d_prev_prev.strftime(\"%Y-%m-%d\")])\n except:\n new_prev_prev = 0\n d_prev_str = d_prev.strftime('%-m/%-d')\n return {\n \"smart_tiles\": [\n {\n \"figure\": short_format(new_prev),\n \"subheader\": \"On {}\".format(d_prev_str),\n \"value_change\": round(percent_change(new_prev_prev, new_prev), 1)\n },\n {},\n {\n \"figure\": short_format(len(df)),\n \"subheader\": \"As of {}\".format(d_today_str),\n \"unit\": '+' + short_format(new_prev),\n \"value_change\": round(percent_change(len(df) - new_prev, len(df)), 1)\n },\n {}\n ]\n }","sub_path":"COVID/Lansing/lansing.py","file_name":"lansing.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"206503455","text":"from model.hanlpUnit import HanlpUnit\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom controller.baseController import BaseController\nfrom model.neo4j import Neo4j\nfrom model.models import *\nfrom model.mongodb import Mongodb\nfrom django.forms.models import model_to_dict\nfrom model.some_data_deal_func import some_data_deal_func\nfrom Time_NLP.time_deal import Time_deal\nfrom bson import ObjectId\n\n\nclass ExtractUnit:\n def __init__(self):\n self.hanlp_tool = HanlpUnit()\n\n @staticmethod\n def extract_relationship_from_structured_data(request, category_id, json_data):\n \"\"\"\n 从结构化数据中抽取关系\n :param request:\n :param category_id:类目id\n :param json_data:新增节点数据\n :return:\n \"\"\"\n if category_id == -1:\n return\n neo4j = Neo4j()\n # 片段1 找出所有由该条数据指出的关系\n relationship_attribute_list = []\n new_node_category_name = TCategory.objects.get(id=category_id).category_name\n new_node_category_name = BaseController.get_category_name(request=request, category_name=new_node_category_name)\n all_attribute = TAttribute.objects.filter(category_id=category_id)\n # 获取所有该数据所属类目指向的类目\n for item in all_attribute:\n cur_data_type = TDataType.objects.get(id=item.data_type_id)\n if cur_data_type.category_id == -1 or 
item.attribute_name not in json_data:\n continue\n else:\n if item.is_single_value == 1 or type(json_data[item.attribute_name]) == \"str\":\n attribute_content = [json_data[item.attribute_name]]\n else:\n attribute_content = json_data[item.attribute_name]\n relationship_attribute_list.append(\n {\"attribute_name\": attribute_content, \"object_category_id\": cur_data_type.category_id,\n \"relationship_name\": item.attribute_name})\n\n # 遍历类目数据匹配名字\n for item in relationship_attribute_list:\n for attribute_item in item[\"attribute_name\"]:\n category_name = TCategory.objects.get(id=item[\"object_category_id\"]).category_name\n category_name_to = BaseController.get_category_name(request=request, category_name=category_name)\n # category_name_to = category_name + \"_1_2\"\n\n object_from = {\"label_name\": category_name_to, \"content\": {\"名字\": attribute_item}}\n match_result = neo4j.match(object_from=object_from)\n if match_result is not None and len(match_result) == 1:\n node = match_result[0][\"n1\"]\n relationship_match = neo4j.match(\n object_from={\"label_name\": new_node_category_name, \"content\": {\"_id\": json_data[\"_id\"]}},\n object_to={\"label_name\": category_name_to, \"content\": {\"_id\": node[\"_id\"]}},\n relationship={\"label_name\": item[\"relationship_name\"], \"content\": {}}, return_info=\"2\")\n print(relationship_match)\n if relationship_match is not None and len(relationship_match) == 0:\n neo4j.createRelationship(\n labelOne=new_node_category_name,\n labelTwo=category_name_to, relationShipName=item[\"relationship_name\"],\n propertyOne={\"_id\": json_data[\"_id\"]}, propertyTwo={\"_id\": node[\"_id\"]})\n\n # 片段2 找出所有指向该数据的参数\n if \"名字\" not in json_data or json_data[\"名字\"] == \"\":\n return\n data_type_to = TDataType.objects.get(category_id=category_id)\n relationship_attribute_from = TAttribute.objects.filter(data_type_id=data_type_to.id)\n for item in relationship_attribute_from:\n try:\n category_from = TCategory.objects.get(id=item.category_id, repo_id=request.session[\"repo_id\"], create_id=request.session[\"user_id\"])\n category_name_from = BaseController.get_category_name(request=request, category_name=category_from.category_name)\n object_from = {\"label_name\": category_name_from, \"content\": {item.attribute_name: json_data[\"名字\"]}}\n match_result = neo4j.match(object_from=object_from)\n if match_result is not None and len(match_result) == 1:\n node = match_result[0][\"n1\"]\n relationship_match = neo4j.match(\n object_from={\"label_name\": category_name_from, \"content\": {\"_id\": node[\"_id\"]}},\n object_to={\"label_name\": new_node_category_name, \"content\": {\"_id\": json_data[\"_id\"]}},\n relationship={\"label_name\": item.attribute_name, \"content\": {}}, return_info=\"2\")\n print(relationship_match)\n if relationship_match is not None and len(relationship_match) == 0:\n neo4j.createRelationship(\n labelOne=category_name_from,\n labelTwo=new_node_category_name, relationShipName=item.attribute_name,\n propertyOne={\"_id\": node[\"_id\"]}, propertyTwo={\"_id\": json_data[\"_id\"]})\n except ObjectDoesNotExist:\n continue\n\n @staticmethod\n def merge_list(list1, list2):\n for item in list2:\n if item not in list1:\n list1.append(item)\n return list1\n\n def extract_relationship_from_unstructured_data(self, request, file_id, relationship_attribute_list=None):\n \"\"\"\n 从非结构化数据中抽取关系\n :param file_id:文件id,获取mongodb中对应要分析的数据\n :param relationship_attribute_list:关系属性列表,所有使用该算法的关系属性id集合\n :param request:\n :return:\n \"\"\"\n 
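# Pipeline overview for this method (descriptive comments, summarizing the
# calls below): (1) load this file's text documents from the MongoDB
# 'knowledge.text' collection and keep those whose value carries a '内容'
# (content) field; (2) build relationship templates from the given attribute
# ids and register entity names and attribute aliases in the HanLP user
# dictionary; (3) split each document into sentences and template-match them
# against the relationship templates; (4) mirror each matched entity pair
# into Neo4j and write the accumulated matches back onto the MongoDB document.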
print(\"------------------------非结构关系抽取\")\n tmp_info = {'file_id': file_id, 'user_id': request.session[\"user_id\"], 'repo_id': request.session[\"repo_id\"]}\n collection = Mongodb(db='knowledge', collection='text').get_collection()\n ret_entity = collection.find(tmp_info)\n ret_entity_map = list()\n for item in ret_entity:\n if \"内容\" in item[\"value\"]:\n ret_entity_map.append(item)\n\n if len(ret_entity_map) == 0 or relationship_attribute_list is None:\n print(\"无可抽取内容\")\n return\n relationship_list = []\n # all_category = TCategory.objects.filter(repo_id=request.session[\"repo_id\"], create_id=request.session[\"user_id\"], category_type=1)\n added_category_id = set()\n for attribute_id in relationship_attribute_list:\n cur_attribute = TAttribute.objects.get(id=attribute_id)\n category_from = TCategory.objects.get(id=cur_attribute.category_id)\n data_type = TDataType.objects.get(id=cur_attribute.data_type_id)\n category_to = TCategory.objects.get(id=data_type.category_id)\n\n category_from_name = BaseController.get_category_name(request, category_from.category_name)\n category_to_name = BaseController.get_category_name(request, category_to.category_name)\n\n one_relationship = list()\n one_relationship.append(cur_attribute.attribute_name)\n one_relationship.append(category_from_name)\n one_relationship.append(BaseController.get_category_name(request, cur_attribute.attribute_name))\n one_relationship.append(category_to_name)\n relationship_list.append(one_relationship)\n self.hanlp_tool.add_word_list([{\"word\": alia_item.attribute_alias,\n \"mask\": BaseController.get_category_name(request,\n cur_attribute.attribute_name)}\n for alia_item in\n TAttrbuteAlias.objects.filter(attribute_id=cur_attribute.id)])\n if category_from.id not in added_category_id:\n ret_list_id, ret_list_val = some_data_deal_func().inputCategoryIdReturnName(categoryId=category_from.id,\n repoId=request.session[\n \"repo_id\"],\n createId=request.session[\n \"user_id\"])\n self.hanlp_tool.add_word_list(\n [{\"word\": val_item, \"mask\": category_from_name} for val_item in ret_list_val])\n added_category_id.add(category_from.id)\n if category_to.id not in added_category_id:\n ret_list_id, ret_list_val = some_data_deal_func().inputCategoryIdReturnName(\n categoryId=category_to.id, repoId=request.session[\"repo_id\"],\n createId=request.session[\"user_id\"])\n self.hanlp_tool.add_word_list(\n [{\"word\": val_item, \"mask\": category_to_name} for val_item in ret_list_val])\n added_category_id.add(category_to.id)\n\n # for category_item in all_category:\n # try:\n # one_data_type = TDataType.objects.get(category_id=category_item.id, repo_id=request.session[\"repo_id\"], create_id=request.session[\"user_id\"])\n # attribute_list = TAttribute.objects.filter(data_type_id=one_data_type.id)\n # category_to_name = BaseController.get_category_name(request, category_item.category_name)\n # for attribute_item in attribute_list:\n # category_from = TCategory.objects.get(id=attribute_item.category_id)\n # category_from_name = BaseController.get_category_name(request, category_from.category_name)\n # one_relationship = list()\n # one_relationship.append(attribute_item.attribute_name)\n # one_relationship.append(category_from_name)\n # one_relationship.append(BaseController.get_category_name(request, attribute_item.attribute_name))\n # one_relationship.append(category_to_name)\n # relationship_list.append(one_relationship)\n # self.hanlp_tool.add_word_list([{\"word\": alia_item.attribute_alias,\n # \"mask\": 
BaseController.get_category_name(request,\n # attribute_item.attribute_name)}\n # for alia_item in\n # TAttrbuteAlias.objects.filter(attribute_id=attribute_item.id)])\n # print([{\"word\": alia_item.attribute_alias,\n # \"mask\": BaseController.get_category_name(request,\n # attribute_item.attribute_name)}\n # for alia_item in\n # TAttrbuteAlias.objects.filter(attribute_id=attribute_item.id)])\n # if category_from.id not in added_category_id:\n # ret_list_id, ret_list_val = some_data_deal_func().inputCategoryIdReturnName(categoryId=category_from.id, repoId=request.session[\"repo_id\"], createId=request.session[\"user_id\"])\n # self.hanlp_tool.add_word_list([{\"word\": val_item, \"mask\": category_from_name} for val_item in ret_list_val])\n # added_category_id.add(category_from.id)\n # if category_item.id not in added_category_id:\n # ret_list_id, ret_list_val = some_data_deal_func().inputCategoryIdReturnName(\n # categoryId=category_item.id, repoId=request.session[\"repo_id\"],\n # createId=request.session[\"user_id\"])\n # self.hanlp_tool.add_word_list(\n # [{\"word\": val_item, \"mask\": category_to_name} for val_item in ret_list_val])\n # added_category_id.add(category_item.id)\n # except ObjectDoesNotExist:\n # continue\n neo4j = Neo4j()\n cout = 0\n for i in ret_entity_map:\n _id = i['_id']\n value = i['value']\n content = value['内容']\n text = HanlpUnit().get_text_from_html(content)\n\n sentenceList = self.hanlp_tool.split_paragraph(text)\n extract_relationship = []\n for sent in sentenceList:\n sent = sent.strip()\n\n relationships = self.eventExtractionByTemplateMatching(sent, relationship_list)\n # relationships = self.eventExtractionByTemplateMatching(text.strip(), relationship_list)\n for item in relationships:\n relation_id = item[0]\n cur_relationship = relationship_list[relation_id]\n\n extract_relationship.append(\n {\"object_from_category\": cur_relationship[1], \"object_to_category\": cur_relationship[3],\n \"object_from_name\": item[1], \"object_relationship_name\": item[2], \"object_to_name\": item[3]})\n object1 = neo4j.match(object_from={\"label_name\": cur_relationship[1], \"content\": {\"名字\": item[1]}})\n object2 = neo4j.match(object_from={\"label_name\": cur_relationship[3], \"content\": {\"名字\": item[3]}})\n if object1 is not None and len(object1) == 1 and object2 is not None and len(object2) == 1:\n neo4j.createRelationship(labelOne=cur_relationship[1], labelTwo=cur_relationship[3],\n relationShipName=item[2], propertyOne={\"名字\": item[1]},\n propertyTwo={\"名字\": item[3]})\n if \"relationship_extract_result\" in i:\n extract_relationship = self.merge_list(extract_relationship, i[\"relationship_extract_result\"])\n cout += 1\n print(str(cout) + \"个文章\" + \",抽取数量:\" + str(len(extract_relationship)))\n collection.update_one({\"_id\": ObjectId(_id)}, {\"$set\": {\"relationship_extract_result\": extract_relationship}})\n\n def eventExtractionByTemplateMatching(self, sentence, eventLabelList):\n \"\"\"\n 把术语单词假如到分词库里面 然后进行分词 得出想要的结果\n :param sentence:\n :param eventLabelList:\n :return:\n \"\"\"\n reteventList = []\n\n # print(sentence)\n sentenceDealResult = self.hanlp_tool.cut(sentence)\n # print(sentenceDealResult)\n wordList = []\n natureList = []\n for i in sentenceDealResult:\n splitRes = str(i).split(r\"/\")\n wordList.append(splitRes[0])\n natureList.append(splitRes[1])\n wordSize = len(wordList)\n # print(wordSize,natureList)\n cnt = 0\n for tmpList in eventLabelList:\n ruleId = 1\n if (len(tmpList) == 3):\n ruleId = 2\n\n triggerWordList = []\n 
triggerWordIdList = []\n subjectCategoryList = []\n subjectCategoryIdList = []\n objectCategoryList = []\n objectCategoryIdList = []\n\n if (ruleId == 1):\n for i in range(wordSize):\n if (natureList[i] == tmpList[2]):\n triggerWordList.append(wordList[i])\n triggerWordIdList.append(i)\n elif (natureList[i] == tmpList[1]):\n subjectCategoryList.append(wordList[i])\n subjectCategoryIdList.append(i)\n elif (natureList[i] == tmpList[3]):\n objectCategoryList.append(wordList[i])\n objectCategoryIdList.append(i)\n # 枚举三元组\n for subjectId in subjectCategoryIdList:\n for triggerId in triggerWordIdList:\n if (subjectId > triggerId):\n continue\n for objectId in objectCategoryIdList:\n if (triggerId > objectId):\n continue\n oneEventList = []\n oneEventList.append(cnt)\n oneEventList.append(wordList[subjectId])\n oneEventList.append(wordList[triggerId])\n oneEventList.append(wordList[objectId])\n reteventList.append(oneEventList)\n else:\n for i in range(wordSize):\n if (natureList[i] == tmpList[2]):\n triggerWordList.append(wordList[i])\n triggerWordIdList.append(i)\n elif (natureList[i] == tmpList[1]):\n subjectCategoryList.append(wordList[i])\n subjectCategoryIdList.append(i)\n # 枚举两元组\n for subjectId in subjectCategoryIdList:\n for triggerId in triggerWordIdList:\n if (subjectId > triggerId):\n continue\n oneEventList = []\n oneEventList.append(cnt)\n oneEventList.append(wordList[subjectId])\n oneEventList.append(wordList[triggerId])\n reteventList.append(oneEventList)\n cnt += 1\n return reteventList\n\n def eventExtraction(self, request, file_id,lEventCategoryId):\n \"\"\"\n 功能 进行模板匹配的事件抽取\n :param request: request参数\n :param file_id: 数据类型str 文件id\n :param lEventCategoryId: 数据类型list 事件类目id\n :return: True\n \"\"\"\n #加入ruleId 1或者2\n #1的事件是三元组主谓宾 2的话变事件是主谓\n #only for debug\n #request.session['user_id'] = 1\n #request.session['repo_id'] = 1\n #fileId = 13\n #only for debug\n\n #fileId = request.POST['fileId']\n #request.session['repo_id']=1\n #request.session['user_id']=1\n repoId = request.session['repo_id']\n createId = request.session['user_id']\n\n #存到这个file_id 里面\n tmp_info = {'file_id': file_id,'user_id':createId,'repo_id':repoId}\n news_col = Mongodb(db='knowledge', collection='text').get_collection()\n cnt = 1\n ret_entity = news_col.find(tmp_info)\n ret_entity_map = list()\n for item in ret_entity:\n if \"内容\" in item[\"value\"]:\n ret_entity_map.append(item)\n\n if len(ret_entity_map) == 0:\n return\n\n print(\"--------------------事件抽取\")\n #在这个之前把所有的词语都加进去\n #整个循环都是为了把这个repoId的所有的触发词以及他们的事件主题客体都加入进去\n retTriggerWordList = TTriggerWord.objects.filter(repo_id=repoId)\n eventLabelList=[]\n # hanlpUnit=HanlpUnit()\n #这边要修 我们要从事类目开始查询\n for i in retTriggerWordList:\n tmpLableList=[]\n ruleId=1\n retTriggerWordDict = model_to_dict(i)\n triggerId = retTriggerWordDict['id']\n eventId = retTriggerWordDict['event_rule_id']\n #print(111,eventId)\n #触发词名字和触发词标注\n retEventRule=TEventRule.objects.get(id=eventId)\n #print(333,retEventRule.category_id)\n retCategoryName=TCategory.objects.get(id=retEventRule.category_id).category_name\n #print(444,retCategoryName)\n #这里的时候触发词的label要变成事件的label\n #到时候改一下\n triggerWord = retTriggerWordDict['trigger_word']\n triggerWordId =BaseController.get_category_name(request, retCategoryName)\n #print(222,eventId)\n\n eventRule = TEventRule.objects.get(id=eventId, repo_id=repoId)\n eventRuleDict = model_to_dict(eventRule)\n eventCategoryId = eventRuleDict['category_id']\n if(eventCategoryId not in lEventCategoryId):\n continue\n eventCategory = 
TCategory.objects.get(id=eventCategoryId, repo_id=repoId, create_id=createId)\n eventCategoryDict = model_to_dict(eventCategory)\n eventCategoryName = eventCategoryDict['category_name']\n tmpLableList.append(eventCategoryName)\n #事件类目\n\n subjectCategoryId = eventRuleDict['event_subject_id']\n subjectCategory = TCategory.objects.get(id=subjectCategoryId, repo_id=repoId, create_id=createId)\n subjectCategoryDict = model_to_dict(subjectCategory)\n subjectCategoryName = subjectCategoryDict['category_name']\n subjectId=BaseController.get_category_name(request,subjectCategoryName)\n tmpLableList.append(subjectId)\n retListId,retListVal = some_data_deal_func().inputCategoryIdReturnName(subjectCategoryId,repoId,createId)\n #对于retListVal里面的所有的值都把他们加入到分词器中然后进行分词\n #构造wordList word 和mask 对应\n constructWordList=[]\n tmpSet = self.hanlp_tool.added_word_list\n #print(len(retListVal ))\n for word in retListVal:\n if (word == None):\n continue\n tmpDict={}\n tmpDict['word'] =word\n #print(word)\n #item[\"word\"], item[\"mask\"]\n tmpDict['mask'] = subjectId\n constructWordList.append(tmpDict)\n\n #这边这个要加入list[{'word':123,mask:13}]\n self.hanlp_tool.add_word_list(constructWordList)\n #print(constructWordList)\n objectCategoryId = eventRuleDict['event_object_id']\n negativeOne = -1\n if (objectCategoryId == negativeOne):\n ruleId = 2\n\n constructWordList = []\n tmpDict={}\n tmpDict['word']=triggerWord\n tmpDict['mask']=str(triggerWordId)\n tmpSet = self.hanlp_tool.added_word_list\n constructWordList.append(tmpDict)\n self.hanlp_tool.add_word_list(constructWordList)\n tmpLableList.append(str(triggerWordId))\n print(ruleId)\n if( ruleId== 1):\n objectCategoryId = eventRuleDict['event_object_id']\n objectCategory = TCategory.objects.get(id=objectCategoryId, repo_id=repoId, create_id=createId)\n objectCategoryDict = model_to_dict(objectCategory)\n objectCategoryName = objectCategoryDict['category_name']\n objectId = BaseController.get_category_name(request, objectCategoryName)\n retListId, retListVal = some_data_deal_func().inputCategoryIdReturnName(objectCategoryId, repoId,\n createId)\n tmpLableList.append(objectId)\n constructWordList = []\n tmpSet = self.hanlp_tool.added_word_list\n #这个代码有变动需要改一下\n for word in retListVal:\n if(word==None):\n continue\n tmpDict = {}\n tmpDict['word'] = word\n # item[\"word\"], item[\"mask\"]\n tmpDict['mask'] = str(objectId)\n constructWordList.append(tmpDict)\n # 这边这个要加入list[{'word':123,mask:13}]\n #print(constructWordList)\n self.hanlp_tool.add_word_list(constructWordList)\n\n eventLabelList.append(tmpLableList)\n\n #eventLabelList\n #事件类目 事件主题 事件触发词 事件客体\n #print(eventLabelList)\n # print(\"list里面内容\")\n # tmpS=self.hanlp_tool.added_word_list\n # for name in tmpS:\n # print(name)\n #print(\"list里面内容结束\")\n #return True\n #name\n attribute = TAttribute.objects.get(category_id=1)\n attributeDict = model_to_dict(attribute)\n attributeName = attributeDict['attribute_name']\n #print(self.hanlp_tool.added_word_list)\n cnt =1\n for i in ret_entity_map:\n _id = i['_id']\n #根据这个id放回去就好了\n value = i['value']\n basetime = str(value['时间'])\n content = value['内容']\n text = HanlpUnit().get_text_from_html(content)\n sentenceList = self.hanlp_tool.split_paragraph(text)\n #print(sentenceList)\n #这边把所有的东西都拿出来\n event_extract_result=[]\n count = 0\n countIndex = 0 ;\n #时间 地点 事件主体 事件客体 主体的类目 和客体的类目\n tmpEventSet = set()\n for sent in sentenceList:\n sent = sent.strip()\n #print(sent)\n #对每一个sent进行分词获取他们的事件\n #11111\n #sent=\"浙江杭州明天林更新出演动作喜剧《快手枪手快枪手》\"\n sentenceDealResult = 
self.hanlp_tool.cut(sent)\n event = self.eventExtractionByTemplateMatching(sent, eventLabelList)\n #事件抽取完成\n #dateTime还要调整一下basetime会出问题\n #print(basetime)\n\n dateTime=basetime\n timeIndex = -1\n #print(123,timeIndex)\n timeIndex,timeWord,dateTime = Time_deal().dealTime(sent, basetime)\n if(timeIndex!=-1):\n timeIndex = timeIndex+countIndex\n #print(46, timeIndex)\n #print(11111111,dateTime)\n\n locationList = Time_deal().dealArea(sent)\n location = ''\n locationindex = -1\n for val in locationList:\n if(len(val['place']) > len(location)):\n location = val['place']\n locationindex=val['index']+countIndex\n #print(location,locationindex)\n countIndex+= len(sentenceDealResult)\n\n #这三个的名字需要和事件一起返回\n #print(event)\n for eve in event:\n ruleId = 1\n if(len(eve) == 3):\n ruleId =2\n eveId = eve[0]\n subjectLabel = eventLabelList[eveId][1]\n #triggerLabel = BaseController.get_category_name()eventLabelList[eveId][0]\n\n attribute = {}\n attribute['发生时间'] = dateTime\n attribute['地点'] = location\n eveString = ''\n\n for j in range(1,len(eve),1):\n eveString = eveString + str(eve[j])\n attribute['名字'] = eveString\n #eventlabel要通过查询结果得到\n eventLabel = BaseController.get_category_name(request, eventLabelList[eveId][0])\n #print(eventLabel)\n #print(eventLabelList[eveId])\n #print(event)\n subjectLabel = eventLabelList[eveId][1]\n\n Neo4j().create_node_mjy_edition(eventLabel, attribute)\n subjectNameVal = eve[1]\n # print(subjectCategoryName,attributeName,subjectNameVal)\n neo4jSubjectId = Neo4j().quesIdByLabelAttribute(subjectLabel, attributeName, '\\''+subjectNameVal+'\\'')\n neo4jEventId = Neo4j().quesIdByLabelAttribute(eventLabel, '名字', '\\''+eveString+'\\'')\n Neo4j().createRelationship(subjectLabel,eventLabel,\"主谓关系\",{'id':neo4jSubjectId},{'id':neo4jEventId})\n if (ruleId == 1):\n objectNameVal = eve[3]\n objectLabel = eventLabelList[eveId][3]\n neo4jObjectId = Neo4j().quesIdByLabelAttribute(objectLabel, attributeName, '\\''+objectNameVal+'\\'')\n Neo4j().createRelationship(eventLabel, objectLabel, \"动宾关系\", {'id':neo4jEventId}, {'id':neo4jObjectId})\n #print(neo4jSubjectId, neo4jEventId, neo4jObjectId)\n tmpEventDict = {}\n tmpEventDict['actual_event_time']=dateTime\n #事件抽取内容拿出来\n tmpEventDict['time'] = timeWord\n tmpEventDict['timeIndex']=timeIndex\n tmpEventDict['location'] = location\n tmpEventDict['locationIndex']=locationindex\n #print(111,dateTime,location)\n tmpEventDict['eventSubject'] =eve[1]\n tmpEventDict['eventSubjectLabel']=subjectLabel\n tmpEventDict['triggerLabel']=eventLabel\n tmpEventDict['triggerWord'] = eve[2]\n tmpEventDict['eventName'] = eveString\n if (ruleId == 1):\n tmpEventDict['eventObject'] = eve[3]\n objectLabel = eventLabelList[eveId][3]\n tmpEventDict['eventObjectLabel'] = objectLabel\n if(eveString not in tmpEventSet):\n tmpEventSet.add(eveString)\n event_extract_result.append(tmpEventDict)\n print(tmpEventDict)\n count +=1\n #插入到mongodb\n #print(count,event_extract_result)\n news_col.update_one({'_id': _id}, {\"$set\": {'event_extract_result':event_extract_result }})\n #news_col.insert_one()\n cnt+=1\n #if(cnt>=2):\n # break\n return True","sub_path":"knowledge_map_system/model/extractUnit.py","file_name":"extractUnit.py","file_ext":"py","file_size_in_byte":30720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"558324302","text":"from collections import deque\n\ndef solution(bridge_length, weight, truck_weights):\n answer = 0\n bridge_weight = 0\n trucks_on_bridge = deque([0 for _ in 
range(bridge_length)])\n while trucks_on_bridge:\n answer += 1\n removed_truck = trucks_on_bridge.popleft()\n bridge_weight -= removed_truck\n\n if truck_weights:\n if bridge_weight + truck_weights[0] <= weight:\n new_truck = truck_weights.pop(0)\n bridge_weight += new_truck\n trucks_on_bridge.append(new_truck)\n else:\n trucks_on_bridge.append(0)\n return answer\n\nprint(solution(2, 10, [7,4,5,6]))\nprint(solution(100, 100, [10]))\nprint(solution(100, 100, [10,10,10,10,10,10,10,10,10,10]))","sub_path":"코딩테스트 고득점 Kit/스택큐/다리를 지나는 트럭.py","file_name":"다리를 지나는 트럭.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"557453836","text":"from __future__ import annotations\n\nimport numpy as np\nfrom numba import guvectorize\n\nimport pygama.lgdo.lh5_store as lh5\nfrom pygama.dsp.errors import DSPFatal\nfrom pygama.dsp.utils import numba_defaults_kwargs as nb_kwargs\n\n\ndef wiener_filter(file_name_array: list[str]) -> np.ndarray:\n \"\"\"Apply a Wiener filter to the waveform.\n\n Note\n ----\n The convolution is performed in the frequency domain. This processor is\n composed of a factory function that is called using the `init_args`\n argument. The input and output waveforms are passed using `args`. The input\n must be the Fourier transform of the waveform. The output is the filtered\n waveform in the frequency domain.\n\n Parameters\n ----------\n file_name_array\n Array with path to an HDF5 file containing the time domain version of\n the superpulse in one column and noise waveform in another, the\n superpulse HDF5 group must be titled ``spms/processed/superpulse`` and\n the noise waveform must be called ``spms/processed/noise_wf``.\n\n JSON Configuration Example\n --------------------------\n\n .. 
code-block :: json\n\n \"wf_wiener\": {\n \"function\": \"wiener_filter\",\n \"module\": \"pygama.dsp.processors\",\n \"args\": [\"wf_bl_fft\", \"wf_wiener(2000,f)\"],\n \"unit\": \"dB\",\n \"init_args\": [\"/path/to/file/wiener.lh5\"]\n }\n \"\"\"\n\n sto = lh5.LH5Store()\n\n # Check that the file is valid and the data is in the correct format\n\n try:\n file_name_array[0]\n except Exception:\n raise DSPFatal(\"init_args must be an array with the filename\")\n\n file_name = file_name_array[0]\n\n try:\n f = sto.gimme_file(file_name, \"r\")\n except Exception:\n raise DSPFatal(\"File must be a valid lh5 file\")\n\n if \"spms/processed/superpulse\" not in f:\n raise DSPFatal(\"lh5 file must have 'spms/processed/superpulse' as a group\")\n\n if \"spms/processed/noise_wf\" not in f:\n raise DSPFatal(\"lh5 file must have 'spms/processed/noise_wf' as a group\")\n\n # Read in the data\n\n superpulse, _ = sto.read_object(\"spms/processed/superpulse\", file_name)\n superpulse = superpulse.nda\n\n noise_wf, _ = sto.read_object(\"spms/processed/noise_wf\", file_name)\n noise_wf = noise_wf.nda\n\n # Now check that the data are valid\n\n if len(superpulse) <= 0:\n raise DSPFatal(\"The length of the filter must be positive\")\n\n if len(superpulse) != len(noise_wf):\n raise DSPFatal(\n \"The length of the superpulse must be equal to the length of the noise waveform\"\n )\n\n if np.argmax(superpulse) <= 0 or np.argmax(superpulse) > len(superpulse):\n raise DSPFatal(\n \"The index of the maximum of the superpulse must occur within the waveform\"\n )\n\n # Transform these to the frequency domain to eventually create the wiener filter\n\n fft_superpulse = np.fft.fft(superpulse)\n fft_noise_wf = np.fft.fft(noise_wf)\n\n # Create the point spread function for the detector's response\n\n def psf(superpulse, fft_superpulse):\n\n delta = np.zeros_like(superpulse)\n arg_max = np.argmax(superpulse)\n delta[arg_max] = np.amax(superpulse)\n\n return fft_superpulse / np.fft.fft(delta)\n\n # Now create the wiener filter in the frequency domain\n\n fft_psf = psf(superpulse, fft_superpulse)\n psd_noise_wf = fft_noise_wf * np.conj(fft_noise_wf)\n psd_superpulse = fft_superpulse * np.conj(fft_superpulse)\n\n w_filter = (np.conj(fft_psf)) / (\n (fft_psf * np.conj(fft_psf)) + (psd_noise_wf / psd_superpulse)\n )\n\n # Create a factory function that performs the convolution with the wiener filter, the output is still in the frequency domain\n\n @guvectorize(\n [\"void(complex64[:], complex64[:])\", \"void(complex128[:], complex128[:])\"],\n \"(n)->(n)\",\n **nb_kwargs(\n cache=False,\n forceobj=True,\n ),\n )\n def wiener_out(fft_w_in: np.ndarray, fft_w_out: np.ndarray) -> None:\n \"\"\"\n Parameters\n ----------\n fft_w_in\n the Fourier transformed input waveform.\n fft_w_out\n the filtered waveform, in the frequency domain.\n \"\"\"\n fft_w_out[:] = np.nan\n\n if np.isnan(fft_w_in).any():\n return\n\n if len(w_filter) != len(fft_w_in):\n raise DSPFatal(\"The filter is not the same length of the input waveform\")\n\n fft_w_out[:] = fft_w_in * w_filter\n\n return wiener_out\n","sub_path":"src/pygama/dsp/processors/wiener_filter.py","file_name":"wiener_filter.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"442370007","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom argparse import ArgumentParser\n\ndef get_correct_idx(idx, max_digits):\n idx_string = '0'*max_digits\n i = len(str(idx))\n return idx_string[:-i] 
+ str(idx)\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('-p', help='Path to directory with images', default='./dataset3d/images')\n parser.add_argument('-d', help='Destination for save', default='./dataset3d')\n\n args = parser.parse_args()\n names = sorted(os.listdir(args.p))\n\n names = list(map(lambda x: x.split('.')[0]+'.', names))\n names = np.array(names).astype(str).reshape(-1, 1)\n\n folds = np.zeros((len(names), 1)).astype(int)\n val_idx = np.random.choice(len(names), replace=False, size=int(0.1*len(names)))\n\n folds[val_idx] = 1\n\n df = pd.DataFrame(np.hstack((names, folds)), columns=['ImageId', 'fold'])\n df['fold'] = pd.to_numeric(df['fold'])\n df.to_csv(args.d + '/folds.csv', sep='\\t')\n\nif __name__ == '__main__':\n main()","sub_path":"make_split.py","file_name":"make_split.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"638109715","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: create_instances.py\n\n Description: Unit testing of create_instances in mysql_rep_change.py.\n\n Usage:\n test/unit/mysql_rep_change/create_instances.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport mysql_rep_change\nimport version\n\n__version__ = version.__version__\n\n\nclass MasterRep(object):\n\n \"\"\"Class: MasterRep\n\n Description: Class stub holder for mysql_class.MasterRep class.\n\n Methods:\n __init__\n connect\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n \"\"\"\n\n self.name = \"Server_Name\"\n self.read_only = \"OFF\"\n self.server_id = 10\n self.sql_user = \"User\"\n self.sql_pass = None\n self.machine = \"Linux\"\n self.host = \"HostName\"\n self.port = 3306\n self.defaults_file = None\n\n def connect(self, silent=False):\n\n \"\"\"Method: connect\n\n Description: connect method.\n\n Arguments:\n\n \"\"\"\n\n status = True\n\n if silent:\n status = True\n\n return status\n\n\nclass Cfg(object):\n\n \"\"\"Class: Cfg\n\n Description: Stub holder for configuration file.\n\n Methods:\n __init__\n\n \"\"\"\n\n def __init__(self):\n\n \"\"\"Method: __init__\n\n Description: Class initialization.\n\n Arguments:\n\n \"\"\"\n\n self.name = \"name\"\n self.sid = 10\n self.user = \"user\"\n self.japd = None\n self.serv_os = \"Linux\"\n self.host = \"hostname\"\n self.port = 3306\n self.cfg_file = \"cfg_file\"\n self.rep_user = \"repuser\"\n self.rep_japd = None\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_create_instances\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.cfg = Cfg()\n self.master = MasterRep()\n self.args_array = {\"-c\": \"mysql_cfg\", \"-d\": \"config\", \"-s\": \"slave\"}\n self.name = \"Server_Name\"\n\n @mock.patch(\"mysql_rep_change.mysql_libs.create_slv_array\",\n mock.Mock(return_value=\"SlaveArray\"))\n @mock.patch(\"mysql_rep_change.gen_libs.create_cfg_array\",\n mock.Mock(return_value=[]))\n @mock.patch(\"mysql_rep_change.gen_libs.transpose_dict\")\n @mock.patch(\"mysql_rep_change.gen_libs.load_module\")\n 
@mock.patch(\"mysql_rep_change.mysql_class.MasterRep\")\n def test_create_instances(self, mock_inst, mock_cfg, mock_trans):\n\n \"\"\"Function: test_create_instances\n\n Description: Test create_instances function.\n\n Arguments:\n\n \"\"\"\n\n mock_inst.return_value = self.master\n mock_cfg.return_value = self.cfg\n mock_trans.return_value = []\n\n master, slaves = mysql_rep_change.create_instances(self.args_array)\n\n self.assertEqual((master.name, slaves), (self.name, \"SlaveArray\"))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/unit/mysql_rep_change/create_instances.py","file_name":"create_instances.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"269197909","text":"import os, sys\nsys.path.append(\"./src\")\n\nimport matplotlib.pyplot as plt\nfrom ThreeChannel import MitosisClassifierZProj\nfrom model_analysis import plot_confusion_matrix\nimport numpy as np\n\nn_of_it = 10\nmito_runs = list()\npath = '/root/projects/three_channel/ZProj'\n\nfor m_it in range(n_of_it):\n mito_runs.append( MitosisClassifierZProj(path, m_it))\n mito_runs[m_it].run_me()\n print(\"itteration_{0} complete.\".format(str(m_it).zfill(2)))\n\n\nmaster_table = {k: {'true_labels': [], 'pred_labels': [], 'probability': []} for k in mito_runs[0].mito_labels.keys()}\np_master_table = {k: {'pred_labels': [], 'pred_entropy': [], 'pred_uid': [], 'probability':[]} for k in mito_runs[0].pred_mito_labels.keys()}\n\nfor k in range(n_of_it):\n for lname in mito_runs[0].mito_labels.keys():\n print(\"lname: {0}\".format(lname))\n master_table[lname]['true_labels'] += mito_runs[k].mito_labels[lname]['true_label']\n master_table[lname]['pred_labels'] += mito_runs[k].mito_labels[lname]['pred_label']\n master_table[lname]['probability'] += mito_runs[k].mito_labels[lname]['probability']\n\nfor k in range(n_of_it):\n for lname in mito_runs[0].pred_mito_labels.keys():\n p_master_table[lname]['pred_labels'] += mito_runs[k].pred_mito_labels[lname]['pred_label']\n p_master_table[lname]['pred_entropy'] += mito_runs[k].pred_mito_labels[lname]['pred_entropy']\n p_master_table[lname]['pred_uid'] += mito_runs[k].pred_mito_labels[lname]['pred_uid']\n p_master_table[lname]['probability'] += mito_runs[k].pred_mito_labels[lname]['probability']\n\nfig, ax = plot_confusion_matrix(master_table['train']['true_labels'], master_table['train']['pred_labels'])\ncmtrain = os.path.join(path, 'CM_master_train.png')\nfig.savefig(cmtrain, bbox_extra_artists=(ax,), bbox_inches='tight')\nplt.close(fig)\n\nfig, ax = plot_confusion_matrix(master_table['test']['true_labels'], master_table['test']['pred_labels'])\ncmtest = os.path.join(path, 'CM_master_test.png')\nfig.savefig(cmtest, bbox_extra_artists=(ax,), bbox_inches='tight')\nplt.close(fig)\n\nall_preds_labels = np.array(p_master_table['all']['pred_labels'])\nall_preds_entropy = np.array(p_master_table['all']['pred_entropy'])\nnz_preds_entropy = all_preds_entropy[all_preds_labels != 0]\n\nn, bins, patches = plt.hist([all_preds_entropy, nz_preds_entropy], 25, density=True, alpha=0.75)\ncmhist = os.path.join(path, 'Master_hist.png')\nplt.savefig(cmhist)\nplt.close()\n\nprecision, recall = mito_runs[0].precision_recall_vec(master_table['test']['true_labels'],\n master_table['test']['probability'])\nprplot = os.path.join(path, \"CM_precision_recall.png\")\nmito_runs[0].plot_prec_recall(precision, recall, prplot)\n\n\nprint(\"all 
Done.\")\n","sub_path":"RunnerZproj.py","file_name":"RunnerZproj.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"434930789","text":"\r\n\r\ndef settings(place):\r\n threshold_dict = None\r\n target_list = None\r\n time_to_kill = None\r\n move_another_area = None\r\n target_img_path = None\r\n area_dict = None\r\n x_max, x_min, y_max, y_min = (0, 29, 0, 14)\r\n transition_time = list()\r\n transition = list()\r\n\r\n if place == \"sea\":\r\n threshold_dict = {\"Berger\": 0.9, \"Preta\": 0.8, \"Mermaid\": 0.9}\r\n target_list = [\"Berger\", \"Preta\", \"Mermaid\"] # ordered by priority, left is higher than right\r\n time_to_kill = 3\r\n move_another_area = True\r\n area_dict = {\"area1\": (11, 0), \"area2\": (9, 13), \"next_area\": \"2\"}\r\n target_img_path = \"data/target/sea\"\r\n elif place == \"magma\":\r\n target_list = [\"Item\", \"Berger\", \"Zombie\", \"YellowPreta\"] # ordered by priority, left is higher than right\r\n threshold_dict = {\"Berger\": 0.9, \"YellowPreta\": 0.85, \"Zombie\": 0.82, \"Item\": 0.86}\r\n time_to_kill = 5\r\n move_another_area = True\r\n area_dict = {\"area1\": (15, 0), \"area2\": (15, 13), \"next_area\": \"1\"}\r\n target_img_path = \"data/target/magma\"\r\n x_min, x_max = (0, 29)\r\n y_min, y_max = (0, 14)\r\n transition_time = [3, 4, 4, 3, 6, 7, 7, 7, 3, 4, 4, 6]\r\n transition = [(21, 13), (23, 13), (25, 13), (29, 8), (23, 13), (0, 13), (6, 13), (29, 10), (7, 13), (9, 11),\r\n (5, 13), (27, 11)]\r\n elif place == \"forest\":\r\n threshold_dict = {\"Berger\": 0.9, \"Wolf\": 0.90, \"Me\": 0.9, \"MyHead\": 0.80}\r\n target_list = [\"Wolf\", \"Berger\", \"Me\", \"MyHead\"] # ordered by priority, left is higher than right\r\n time_to_kill = 8\r\n move_another_area = False\r\n target_img_path = \"data/target/forest\"\r\n elif place == \"iceCastle\":\r\n threshold_dict = {\"Berger\": 0.9, \"Iceg\": 0.90, \"Me\": 0.9, \"Item\": 0.87}\r\n target_list = [\"Item\", \"Iceg\", \"Berger\", \"Me\"] # ordered by priority, left is higher than right\r\n time_to_kill = 11\r\n move_another_area = False\r\n target_img_path = \"data/target/iceCastle\"\r\n x_min, x_max = (6, 23)\r\n y_min, y_max = (1, 13)\r\n transition = [(15, 0), (14, 5), (14, 0), (14, 4), (14, 0), (14, 2), (14, 0), (14, 0), (14, 0)]\r\n transition_time = [3, 3, 4, 2, 3, 3, 3, 3, 5]\r\n elif place == \"basement\":\r\n threshold_dict = {\"Berger\": 0.9, \"Flaredeathknight\": 0.90, \"Me\": 0.9, \"Kaonashi\": 0.80, \"Item\": 0.88}\r\n target_list = [\"Kaonashi\", \"Flaredeathknight\", \"Item\", \"Berger\", \"Me\"] # ordered by priority, left is higher than right\r\n time_to_kill = 18\r\n move_another_area = False\r\n target_img_path = \"data/target/basement\"\r\n x_min, x_max = (4, 24)\r\n y_min, y_max = (4, 13)\r\n\r\n info_dict = {\"threshold_dict\": threshold_dict, \"target_list\": target_list, \"time_to_kill\": time_to_kill,\r\n \"move_another_area\": move_another_area, \"target_img_path\": target_img_path, \"area_dict\": area_dict,\r\n \"range\": (x_min, x_max, y_min, y_max), \"transition\": transition, \"transition_time\": transition_time}\r\n\r\n return info_dict","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"176052353","text":"# -*- coding: utf8 -*\r\n\r\n# system lib\r\nimport argparse\r\nimport sys\r\nimport time\r\nimport os\r\n\r\n# third part 
lib\r\nimport torch\r\nimport torch.nn as nn\r\nimport pandas as pd\r\n\r\n# my libs\r\nsys.path.append(os.getcwd())\r\nsys.path.append(os.path.abspath(\"..\"))\r\n\r\nimport getData\r\nimport Predicte.myModules\r\nimport Predicte.myUtils.myTrainTest\r\nimport Predicte.myUtils.myData\r\n\r\n\r\ndef getFCNPams(rowNum, colNum, device, lr):\r\n fcn = Predicte.myModules.FCN(rowNum=rowNum, colNum=colNum)\r\n fcn = fcn.to(device)\r\n optimizer = torch.optim.Adam(fcn.parameters(), lr=lr)\r\n lossFunc = nn.CrossEntropyLoss()\r\n return (fcn, optimizer, lossFunc)\r\n\r\n\r\ndef getCNNPams(zn,xn,yn, device, lr):\r\n cnnXout = Predicte.myUtils.myData.getCNNOutSize(xn, 3, 2)\r\n cnnYout = Predicte.myUtils.myData.getCNNOutSize(yn, 3, 2)\r\n cnn = Predicte.myModules.CNN(inChannels=zn,\r\n kernels=6,\r\n kernelSize=2,\r\n outSize=16 * cnnXout * cnnYout)\r\n cnn = cnn.to(device)\r\n optimizer = torch.optim.Adam(cnn.parameters(), lr=lr)\r\n lossFunc = nn.CrossEntropyLoss()\r\n return (cnn, optimizer, lossFunc)\r\n\r\n\r\n# end\r\n\r\n\r\ndef main(runPams):\r\n timeStam = str(int(time.time()))\r\n #saveExcelPath = \"C:\\\\Users\\\\pdang\\\\Desktop\\\\\" + timeStam + \".xlsx\"\r\n saveExcelPath = \"/N/project/zhangclab/pengtao/myProjectsDataRes/20200113Predicte/results/l1NormCRNumCNN/block1/excelRes/\" + timeStam + \".xlsx\"\r\n #st = time.time()\r\n # get samples, featureMap, optFeatureMap\r\n olabel, samples, featureMap, optFeatureMap = getData.main(\r\n runPams)\r\n '''\r\n [print(olabel[i], samples[i]) for i in range(len(samples))]\r\n print(\"------------------------------------------------------\")\r\n [print(olabel[i], featureMap[i]) for i in range(len(featureMap))]\r\n print(\"--------------------------------------------------\")\r\n [print(olabel[i], optFeatureMap[i]) for i in range(len(optFeatureMap))]\r\n print(\"---------------------------------------------------------------\")\r\n print(olabel.size())\r\n print(samples.size())\r\n print(featureMap.size())\r\n print(optFeatureMap.size())\r\n sys.exit()\r\n '''\r\n\r\n # choose spu or gpu automatically\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n # samples data\r\n net, optimizer, lossFunc = getCNNPams(\r\n samples.size()[1],\r\n samples.size()[2],\r\n samples.size()[3],\r\n device,\r\n runPams.lr)\r\n sres, sytrue_ypred = Predicte.myUtils.myTrainTest.train_test(\r\n olabel, samples, net, device, optimizer, lossFunc, runPams)\r\n\r\n # featureMap data\r\n net, optimizer, lossFunc = getCNNPams(\r\n featureMap.size()[1],\r\n featureMap.size()[2],\r\n featureMap.size()[3],\r\n device,\r\n runPams.lr)\r\n fres, fytrue_ypred = Predicte.myUtils.myTrainTest.train_test(\r\n olabel, featureMap, net, device, optimizer, lossFunc, runPams)\r\n # optFeatureMap data\r\n net, optimizer, lossFunc = getCNNPams(\r\n optFeatureMap.size()[1],\r\n optFeatureMap.size()[2],\r\n optFeatureMap.size()[3],\r\n device,\r\n runPams.lr)\r\n ores, oytrue_ypred = Predicte.myUtils.myTrainTest.train_test(\r\n olabel, optFeatureMap, net, device, optimizer, lossFunc, runPams)\r\n # prepare results\r\n\r\n res = list()\r\n if runPams.minusMean == 1:\r\n res.append(\"c*r-E\")\r\n else:\r\n res.append(\"c*r\")\r\n res.append(runPams.xn)\r\n res.append(\"N(0-\" + str(runPams.stdBias / 10) + \")\")\r\n res.append(\"10*\"+str(runPams.sampleNum))\r\n res.append(runPams.numThreshold)\r\n res.append(\"7*\" + str(samples.size()[2]))\r\n res.append(sres)\r\n res.append(\"7*\" + str(featureMap.size()[3]))\r\n res.append(fres)\r\n res.append(\"7*\" + 
str(optFeatureMap.size()[3]))\r\n res.append(ores)\r\n # save data to excel\r\n resDF = pd.DataFrame(res)\r\n resDF.columns = [\"res\"]\r\n sytrue_ypred = pd.DataFrame(sytrue_ypred)\r\n sytrue_ypred.columns = [\"true\", \"pred\"]\r\n cytrue_ypred = pd.DataFrame(fytrue_ypred)\r\n cytrue_ypred.columns = [\"true\", \"pred\"]\r\n oytrue_ypred = pd.DataFrame(oytrue_ypred)\r\n oytrue_ypred.columns = [\"true\", \"pred\"]\r\n\r\n writer = pd.ExcelWriter(saveExcelPath) # 写入Excel文件\r\n resDF.to_excel(writer, index=False)\r\n sytrue_ypred.to_excel(writer, startcol=2, index=False)\r\n cytrue_ypred.to_excel(writer, startcol=5, index=False)\r\n oytrue_ypred.to_excel(writer, startcol=8, index=False)\r\n writer.save()\r\n writer.close()\r\n # output data\r\n res = ','.join(str(i) for i in res)\r\n print(res)\r\n return ()\r\n\r\n\r\n# run main\r\nif __name__ == \"__main__\":\r\n # parameters\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--n_epochs\", type=int, default=10)\r\n parser.add_argument(\r\n \"--batch_size\",\r\n type=int,\r\n default=os.cpu_count(),\r\n )\r\n parser.add_argument(\"--lr\", type=float, default=0.0002)\r\n parser.add_argument(\"--n_cpu\", type=int, default=os.cpu_count())\r\n parser.add_argument(\"--minusMean\", type=int, default=0)\r\n parser.add_argument(\"--stdBias\", type=int, default=0)\r\n parser.add_argument(\"--numThreshold\", type=int, default=7)\r\n parser.add_argument(\"--xn\", type=int, default=20)\r\n parser.add_argument(\"--crType\", type=str, default=\"norm\")\r\n parser.add_argument(\"--sampleNum\", type=int, default=5)\r\n runPams = parser.parse_args()\r\n main(runPams)\r\n","sub_path":"20200113Predicte/l1NormCRNumCNN/testMain.py","file_name":"testMain.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"420696149","text":"from rex.core import StrVal, FloatVal, Error\n\nfrom rex.instrument.interface import Assessment\nfrom rex.db import get_db\n\n\n__all__ = ('DemoAssessment',)\n\nclass DemoAssessment(Assessment):\n @classmethod\n def get_by_uid(cls, uid, user=None):\n db = get_db()\n with db:\n data = db.produce('/assessment?id()=$uid', uid=uid)\n if not data:\n return None\n return cls(\n data[0].uid,\n DemoSubject.get_by_uid(data[0].subject),\n DemoInstrumentVersion.get_by_uid(data[0].instrumentversion),\n data[0].data,\n evaluation_date=data[0].evaluation_date,\n status=data[0].status,\n )\n\n @classmethod\n def find(cls, offset=0, limit=None, user=None, **search_criteria):\n db = get_db()\n with db:\n data = db.produce('/assessment.sort(uid)')\n return [\n cls(\n d.uid,\n DemoSubject.get_by_uid(d.subject),\n DemoInstrumentVersion.get_by_uid(d.instrumentversion),\n d.data,\n evaluation_date=d.evaluation_date,\n status=d.status,\n )\n for d in data\n ]\n\n @classmethod\n def bulk_retrieve(cls, uids):\n db = get_db()\n with db:\n data = db.produce(\n \"/assessment{uid, instrumentversion.uid :as iv, data}.filter(uid=$uids).filter(status='completed').sort(uid)\",\n uids=uids,\n )\n return [\n cls.BulkAssessment(\n uid=str(d.uid),\n data=AnyVal().parse(d.data),\n instrument_version_uid=str(d.iv),\n )\n for d in data\n ]\n\n @classmethod\n def create(cls, subject, instrument_version, data=None, evaluation_date=None, implementation_context=None):\n return cls(\n 'fake_assessment_1',\n subject,\n instrument_version,\n data,\n evaluation_date=evaluation_date,\n )\n\n def save(self, implementation_context=None):\n print('### SAVED ASSESSMENT ' + 
self.uid)\n\n @classmethod\n def bulk_create(cls, assessments, validate=True):\n for assessment in assessments:\n if assessment.context['study1'] < 0:\n raise Error('Bulk create failed with unexpected study1.')\n print('### CREATED %s ASSESSMENTS' % len(assessments))\n\n @classmethod\n def get_implementation_context(cls, action):\n if action == cls.CONTEXT_ACTION_CREATE:\n return {\n 'study': {\n 'required': False,\n 'validator': StrVal(),\n },\n 'study1': {\n 'required': True,\n 'validator': FloatVal(),\n }\n }\n return Assessment.get_implementation_context(action)\n","sub_path":"src/rex.assessment_import/demo/src/rex/assessment_import_demo.py","file_name":"assessment_import_demo.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"230600010","text":"##################################################\n# Libraries\n##################################################\n\nimport os\nimport math\nimport numpy as np\nimport pandas as pd\nimport shutil\n\n\n##################################################\n# Check FreeSurfer files\n##################################################\n# Check FreeSurfer folder for subject list, error log and folder size.\n\nclass CheckFreesurfer():\n def __init__(self,\n #path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/10_recon',\n #path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/10.1_recon_t1qcout/output',\n #path_exp='/media/atiroms/MORITA_HDD4/MRI/pnTTC/pnTTC1_T1_C/FS/12_recon_t1exist/output',\n path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/17_recon/output',\n string_log_ok='finished without error at',\n #file_output='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/check_freesurfer.csv'\n file_output='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/17_recon/log/18_checkfreesurfer.csv'\n ):\n\n list_dir_all = os.listdir(path_exp)\n list_sub = [d for d in list_dir_all if (os.path.isdir(os.path.join(path_exp,d)) and not d.startswith('fsaverage'))]\n list_sub.sort()\n df_out=pd.DataFrame(columns=['sub','log','size'])\n list_log_error=[]\n for sub in list_sub:\n print('Checking subject '+ str(sub)+ ' ...')\n with open(os.path.join(path_exp,sub,'scripts/recon-all.log'),'r') as file_log:\n text_log=file_log.read()\n if string_log_ok in text_log:\n log_ok=1\n else:\n log_ok=0\n list_log_error.append(sub)\n df_out=df_out.append(pd.Series([sub,log_ok,self.get_dir_size(path=os.path.join(path_exp,sub))],index=df_out.columns),ignore_index=True)\n df_out.to_csv(file_output,index=False)\n print('Total FreeSurfer subject folders: ' + str(len(list_sub)))\n print('FreeSurfer Log Error in:')\n print(list_log_error)\n print('All done.')\n\n def get_dir_size(self, path='.'):\n total = 0\n with os.scandir(path) as it:\n for entry in it:\n if entry.is_file():\n total += entry.stat().st_size\n elif entry.is_dir():\n total += self.get_dir_size(entry.path)\n return total\n\n\n##################################################\n# Zero-pad and concatenate id file\n##################################################\n# read id file, zero-pad and concatenate into a string\n# for general use\n\nclass ZeropadConcat():\n def __init__(self,\n path_file_id='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/18_meas/log/list_id.txt',\n n_zfill=5,\n path_file_output='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/18_meas/log/str_id.txt'\n ):\n with open(path_file_id, 'r') as list_id:\n list_id=list_id.readlines()\n list_id=[int(x.strip('\\n')) for x in list_id]\n list_id.sort()\n text_id=''\n for index in 
list_id:\n text_id=text_id + ' ' + str(index).zfill(n_zfill)\n \n with open(path_file_output,'w') as file_output:\n file_output.write(text_id)\n print('All done.')\n\n\n##################################################\n# read ID file into list\n##################################################\n\nclass ReadID():\n def __init__(self,\n #path_file_id='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/script/id_11_recon.txt'\n path_file_id='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/10.1_recon_t1qcout/input/id.txt'\n ):\n\n file=open(path_file_id, 'r')\n file=file.readlines()\n self.output=[int(x.strip('\\n')) for x in file]\n\n\n##################################################\n# Scan FreeSurfer folder into ID list\n##################################################\n\nclass ScanFSFolder():\n def __init__(self,\n #path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC1_T1_C/FS/10_recon',\n #path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/15_recon',\n path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/17_recon/output',\n list_exceptions=['fsaverage', 'id.txt','script.txt']\n ):\n\n list_dir = os.listdir(path_exp)\n list_dir.sort()\n #self.output = [int(i) for i in list_dir if i != 'fsaverage' and i != 'id.txt' and i != 'script.txt']\n self.output = [int(i) for i in list_dir if i not in list_exceptions]\n\n\n##################################################\n# Scan .nii.gz folder into ID list\n##################################################\n\nclass ScanNiiFolder():\n def __init__(self,\n path_exp='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/13_nii.gz_unite',\n list_exceptions=['fsaverage', 'id.txt','script.txt']\n ):\n\n list_dir = os.listdir(path_exp)\n list_dir.sort()\n list_id= [int(i.replace('CSUB-','').replace('C-02.nii.gz','')) for i in list_dir if i not in list_exceptions]\n list_id.sort()\n self.output=list_id\n\n\n##################################################\n# save list id into text file\n##################################################\n\nclass SaveListID():\n def __init__(self,\n list_id,\n #path_file_output='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/16_recon_t1qcout/log/list_id.txt'\n path_file_output='/media/veracrypt1/MRI/pnTTC/pnTTC2_T1_C/FS/18_meas/log/list_id.txt'\n ):\n\n self.output=''\n for item in list_id:\n self.output=self.output + str(item) + '\\n'\n file=open(path_file_output,'w')\n file.write(self.output)\n file.close()\n\n\n##################################################\n# generate script from ID list\n##################################################\n\nclass GenerateScript():\n def __init__(self,\n list_id,\n path_src='',\n path_dst='',\n id_ses=0,\n head_script='SUBJECTS_DIR={path_dst}/output\\ncd $SUBJECTS_DIR\\n',\n script='recon-all -i {path_src}/output/sub-{id_sub}_ses-{id_ses}_T1w.nii -subject {id_sub} -all -qcache',\n connector=' ; '\n ):\n self.output=head_script.replace('{path_dst}',path_dst)\n for id_subj in list_id:\n id_sub=str(id_subj).zfill(5)\n id_ses=str(id_ses).zfill(2)\n script_subj=script\n script_subj=script_subj.replace('{path_src}',path_src).replace('{path_dst}',path_dst)\n script_subj=script_subj.replace('{id_sub}',id_sub).replace('{id_ses}',id_ses)\n self.output=self.output+script_subj\n if id_subj != list_id[-1]:\n self.output=self.output + connector\n\n\n##################################################\n# Generate FreeSurfer scripts for multiprocessing\n##################################################\n\nclass PrepFS():\n def __init__(self,\n file_id='20_id_c2_fs.csv',\n 
path_src='/media/veracrypt1/MRI/pnTTC/c2_struc/freesurfer/20_nii',\n path_dst='/media/veracrypt1/MRI/pnTTC/c2_struc/freesurfer/21_recon',\n id_ses=2,\n file_script='20_fs_mp.sh',\n n_proc=28\n ):\n\n print('Starting PrepFS()')\n\n # Create output folder\n print('Starting to create output folder.')\n list_path_mkdir=[]\n list_path_mkdir.append(path_dst)\n list_path_mkdir.append(os.path.join(path_dst,'output'))\n for p in list_path_mkdir:\n if not os.path.exists(p):\n os.makedirs(p)\n print('Finished creating output folder.')\n\n # Copy log folder\n print('Starting to copy log folder.')\n path_log_src=os.path.join(path_src,'log')\n path_log_dst=os.path.join(path_dst,'log')\n shutil.copytree(path_log_src,path_log_dst)\n print('Finished copying log folder.')\n\n # Create ID list\n with open(os.path.join(path_dst,'log',file_id), 'r') as list_id:\n list_id=list_id.readlines()\n list_id=[int(x.strip('\\n')) for x in list_id]\n list_id.sort()\n print('Number of subjects: '+str(len(list_id)))\n\n # Create list of ID lists\n n_subj=len(list_id)\n n_subj_per_proc=int(np.ceil(n_subj/n_proc))\n n_proc_floor=n_proc*n_subj_per_proc-n_subj\n n_proc_ceil=n_proc-n_proc_floor\n print('Multiprocessing: '+str(n_subj)+' total subs, '+str(n_proc)+' total procs: '+str(n_proc_ceil)+' procs x '\n +str(n_subj_per_proc)+' subs, '+str(n_proc_floor)+' procs x '+ str(n_subj_per_proc-1)+' subs.')\n list_list_id=[]\n for id_proc in range(n_proc):\n if id_proc= self.screen_width - 1:\n return False\n else:\n if self.maze[row][col + 2] == 0:\n return True\n else:\n return False\n elif direction == 2:\n if row + 2 >= self.screen_height - 1:\n return False\n else:\n if self.maze[row + 2][col] == 0:\n return True\n else:\n return False\n elif direction == 3:\n if row - 2 <= 0:\n return False\n else:\n if self.maze[row - 2][col] == 0:\n return True\n else:\n return False\n else:\n return False\n\n def __write_data(self):\n redis_object = redis.Redis(host='localhost', port=6379,db=0)\n redis_object.flushall()\n redis_object.set('maze',self.maze)\n redis_object.set('is_goal',self.is_goal)\n redis_object.set('player_x',self.player_x)\n redis_object.set('player_y',self.player_y)\n redis_object.set('start_x',self.start_x)\n redis_object.set('start_y',self.start_y)\n redis_object.set('goal_x',self.goal_x)\n redis_object.set('goal_y',self.goal_y)\n redis_object.set('width',self.screen_width)\n redis_object.set('height',self.screen_height)\n\n def move_player(self,direction):\n #up\n if direction == 0:\n if self.maze[self.player_y - 1][self.player_x] == 1:\n self.player_y -= 1\n #down\n elif direction == 1:\n if self.maze[self.player_y + 1][self.player_x] == 1:\n self.player_y += 1\n #right\n elif direction == 2:\n if self.maze[self.player_y][self.player_x + 1] == 1:\n self.player_x += 1\n #left\n elif direction == 3:\n if self.maze[self.player_y][self.player_x - 1] == 1:\n self.player_x -= 1\n\n if self.player_x == self.goal_x and self.player_y == self.goal_y:\n self.is_goal = 1\n else:\n self.is_goal = 0\n\n self.__write_data()\n\n def display_maze(self):\n for y in range(self.screen_height):\n for x in range(self.screen_width):\n if y == self.player_y and x == self.player_x:\n print(\"o \",end=\"\")\n elif self.maze[y][x] == 0:\n print(\"x \",end=\"\")\n else:\n print(\" \",end=\"\")\n print(\"\")\n print(\"\")\n","sub_path":"Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"244279393","text":"import 
os\nfrom typing import Tuple\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom pathlib import Path\n\nfrom common import patient_slice_generator\nfrom config import reu2018\nfrom patient import Patient\nfrom utils import get_dir_list, read_file, save_slice_pair\nfrom utils import resample, get_dimension\nfrom skimage import exposure\nfrom skimage.io import imread, imsave\nfrom PIL import ImageEnhance\n\n\ndef slice_mri_data(config):\n #dirs = get_dir_list(config['mri_dir'])\n dirhgg = Path(r'/Users/xiaoxiaozhou/Desktop/Research/REU UNet 2D/preprocessed/HGG')\n #dirhgg = Path(r'/Users/xiaoxiaozhou/Desktop/Research/REU UNet 2D/preprocessed/LGG')\n hggfilelist = get_dir_list(dirhgg)\n\n for subject_dir in tqdm(hggfilelist, total=len(hggfilelist), desc=\"Patients\"):\n #slice_id = 64\n #if os.path.exists(config['sliced_mri_data_dir'] / '{}'.format(subject_dir) / '{}_{}.png'.format(subject_dir, slice_id)) and os.path.exists(config['sliced_mri_mask_dir'] / '{}'.format(subject_dir) / '{}_{}.png'.format(subject_dir, slice_id)):\n # continue\n\n print(subject_dir)\n direction = dirhgg / subject_dir\n subject_folder = subject_dir\n patient = process_directory(direction, config)\n\n image, mask = patient.unpack()\n image = resample(image, None)\n mask = resample(mask, None)\n #dim = get_dimension(config['slice_orientation'])\n\n #idx = int(128 / 2)\n masktemplate = np.zeros(mask[..., 0, 0].shape)\n maxid = 0\n\n for i in range(128):\n #idd = idx-20+i\n image_slice = image[..., i, 0]\n mask_slice = mask[..., i, 0]\n #mask_slice = mask[..., i, 0]\n\n if np.max(mask_slice) > 0:\n if np.sum(mask_slice) >= np.sum(masktemplate):\n masktemplate = mask_slice\n maxid = i\n\n for j in range(40):\n maxidd = maxid - 20 + j\n image_slice = image[..., maxidd, 0]\n mask_slice = mask[..., maxidd, 0]\n\n mask_slice[mask_slice == 2] = 0\n mask_slice[mask_slice != 0] = 1\n\n if np.sum(image_slice) != 0 and config['equalize_histogram']:\n image_slice = exposure.equalize_adapthist(image_slice.astype(np.int))\n # if np.sum(image_slice) != 0 and config['contrast_enhance']:\n # image_slice = ImageEnhance.Contrast(image_slice)\n\n save_slice_pair(subject_dir, maxidd, image_slice, mask_slice, config)\n\ndef load_mri_file(self, file_path: Path) -> None:\n id = file_path.parts[-1].split('.')[0]\n data, affine = read_file(file_path)\n data = data.astype(np.float32)\n\n self.patient = Patient(id, data, None, affine)\n self.original_size = data.shape\n\n return data\n\n\ndef process_directory(_dir, config):\n my_patient = Patient(_dir.parts[-1])\n\n extension = config['mri_file_extensions']\n if extension == '*.nii.gz':\n for mri_file in _dir.glob(extension):\n name = config['data_filename_contains'] + '.nii.gz'\n if name == mri_file.parts[-1]:\n data, _ = read_file(mri_file)\n my_patient.add_data(data)\n if config['mask_filename_contains'] in mri_file.parts[-1]:\n mask, _ = read_file(mri_file)\n my_patient.add_mask(mask)\n\n return my_patient\n\ndef preprocessing():\n direction = Path(r'/Users/xiaoxiaozhou/Desktop/Research/REU UNet 2D/mediumslices_images')\n hggfiles = get_dir_list(direction)\n for patientfile in tqdm(hggfiles, total=len(hggfiles), desc=\"onepatient\"):\n patfilepath = direction / patientfile\n image = imread(patfilepath)\n\n\n\n\n\nif __name__ == \"__main__\":\n slice_mri_data(reu2018)\n 
preprocessing()\n","sub_path":"Desktop/Research/unet2dforgithub/mediumslice.py","file_name":"mediumslice.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"137568943","text":"from flask import Flask\nfrom twilio import twiml\n\napp = Flask(__name__)\n\n@app.route('/conference', methods=['POST'])\ndef voice():\n response = twiml.Response()\n\n with response.dial() as dial:\n dial.conference(\"Josh's Party\")\n\n return str(response)\n\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True, use_reloader=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"229211630","text":"#!/usr/bin/python\n# coding=UTF-8\n\n\"\"\"\n多进程游戏检测结果下载\n图片结果汇总:\n 文件夹名称:\n gindex_游戏名称\n 文件名称:\n 结果_类别_分数_gindex_游戏名称_图片名称\n\n ps:\n 结果:\n 嫌疑、不通过确定\n 类别:\n 色情、性感、广告、二维码、暴恐、违禁\n 分数:\n 0~1区间内的小数,数字越大,确定性越高\n\n文本结果汇总:\n 文件名称:\n gindex_游戏名称\n 每行内容:\n 结果_类别_级别_【关键词】_类别序号_类别名称_事件序号_事件名称_文本(关键词用【】标出)\n 说明:\n 结果:\n 嫌疑、不通过\n 类别:\n 色情、广告、违禁、谩骂、灌水\n 级别:\n 不确定、确定\n\"\"\"\n\n# noinspection PyUnresolvedReferences\nimport shutil\n# noinspection PyUnresolvedReferences\nfrom multiprocessing import Pool\n# noinspection PyUnresolvedReferences\nfrom multiprocessing.pool import ThreadPool\n\nfrom python_cgtools.utils_bin_map import *\nfrom python_cgtools.utils_dirty_project_yidun import DirtyProjectYiDun\n\n# 避免SettingWithCopyWarning警告\npd.set_option('mode.chained_assignment', None)\n\ninitial_checked_material_sql = \"\"\"\n SELECT\n result.category_name,\n result.material_id,\n result.material_name,\n yidun.md5,\n yidun.level,\n yidun.label,\n yidun.rate\n FROM\n `bi_analysis`.`dirty_material_result` AS result\n INNER JOIN `bi_analysis`.`dirty_material_yidun` AS yidun ON yidun.id = result.yidun_id\n WHERE\n yidun.level <> 0 AND yidun.status = 1 AND MOD(result.category_id, {total}) = {sub}\n\"\"\"\n\n\ndef download_checked_material(_total, _sub):\n conn_online, cur_online = get_conn_cur_by_class(BiUserOnline)\n\n # 图片处理\n sql = initial_checked_material_sql.format(total=_total, sub=_sub)\n (conn_online, cur_online), _, res = fetch_all(conn_online, cur_online, sql)\n df = pd.DataFrame(list(res), columns=[\"category_name\", \"material_id\", \"material_name\", \"md5\",\n \"level\", \"label\", \"rate\"])\n\n for category_name, frame in df.groupby([\"category_name\"]):\n print(str(_sub) + \"/\" + str(_total) + \":\" + str(category_name))\n # 创建文件夹\n category_name = clean_gname(category_name)\n file_dir = \"G:\\\\PycharmProjects\\\\python_llrpg\\\\dirty_project\\\\bigdata\\\\dirty_material\\\\\" + category_name\n if os.path.exists(file_dir):\n shutil.rmtree(file_dir)\n os.mkdir(file_dir)\n\n # 下载图片\n for _, row in frame.iterrows():\n graphic_url = get_resource_url_by_md5(row[\"md5\"])\n material_name = clean_gname(row[\"material_name\"])\n file_name = DirtyProjectYiDun.graphic_level_dict[row[\"level\"]] + \"_\" + \\\n DirtyProjectYiDun.graphic_label_dict[row[\"label\"]] + \"_\" + \\\n \"%.4f\" % row[\"rate\"] + \"_\" + material_name\n # 文件名不能太长\n file_name, file_ext_name = os.path.splitext(file_name)\n file_name = file_name[:120].strip() + file_ext_name\n # print(file_name)\n urlretrieve(graphic_url, os.path.join(file_dir, file_name))\n\n close_all(cur_online, conn_online)\n\n\nif __name__ == \"__main__\":\n\n # 测试多进程\n t0 = time.time()\n p = Pool(8)\n total = 32\n for sub in range(total):\n 
p.apply_async(download_checked_material, args=(total, sub))\n time.sleep(10)\n p.close()\n p.join()\n t1 = time.time()\n print(('spent {:.4f}s.'.format(t1 - t0))) # 392.4074s\n","sub_path":"dirty_project/material_dirty_show_multiprocessing.py","file_name":"material_dirty_show_multiprocessing.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"260458581","text":"#!/usr/bin/python3\nimport sys\nimport curses\nimport os\nimport datetime\nimport traceback\n\ndef getUser(line):\n\twords = line.split(\" \")\n\tuser = \"\";\n\tif (len(words)==0):\n\t\treturn \"\"\n\tfor i in range (0,len(words)-1):\n\t\tuser+=words[i]+\" \"\n\tuser = user[:-1]\n\treturn user\n\t\t\ndef signin(filename, user):\n\tif (len(user) == 0):\n\t\treturn -1\n\tfile = open(\"messages/\"+filename, \"r\")\n\tfor line in file:\n\t\tif (user == getUser(line)):#user == line[0:len(user)]):\n\t\t\treturn 1\n\treturn -1\n\ndef getRead(filename, user):\n\tif (len(user) == 0):\n\t\treturn -1\n\tfile = open(\"messages/\"+filename+\"StreamUsers\", \"r\")\n\tfor line in file:\n\t\tif (user == getUser(line)):#user == line[0:len(user)]):\n\t\t\tarr = line.split(\" \")\n\t\t\treturn int(arr[-1])\n\treturn 0\n\ndef setRead(filename, user, postNum):\n\tif (len(user) == 0):\n\t\treturn -1\n\t#curses.endwin()\n\tfile = open(\"messages/\"+filename+\"StreamUsers\", \"r\")\n\tnewFile = []\n\tfor line in file:\n\t\tif (user == getUser(line)):#user == line[0:len(user)]):\n\t\t\tarr = line.split(\" \")\n\t\t\tnewline = \"\"\n\t\t\tfor a in range(0, len(arr)-1):\n\t\t\t\tnewline += arr[a]+\" \"\n\t\t\tnewline += str(postNum)\n\t\t\tnewFile.append(newline)\n\t\telse:\n\t\t\tnewFile.append(line.rstrip(\"\\n\"))\n\tfile = open(\"messages/\"+filename+\"StreamUsers\", \"w\")\n\tfor line in newFile:\n\t\tfile.write(line+\"\\n\")\n\t#print(newFile)\n\t#exit(0)\n\treturn 0\n\ndef loadStream(streamName, user):\n\tif (not os.path.isfile(\"messages/\"+streamName+\"Stream\")):\n\t\treturn []\n\tstream = open(\"messages/\"+streamName+\"Stream\", \"r\")\n\tstreamData = open(\"messages/\"+streamName+\"StreamData\", \"r\")\n\t\n\t#curses.endwin()\n\tnumBytes = 0\n\tposts = []\n\tnumposts = 0\n\tfor line in streamData:\n\t\tline = line.rstrip(\"\\n\")\n\t\tpostBytes = 0\n\t\tpostlines = [streamName]\n\t\t#print(\"*********\")\n\t\tfor line2 in stream:\n\t\t\tline2 = line2.rstrip(\"\\n\")\n\t\t\tpostBytes += len(line2)\n\t\t\tpostlines.append(line2)\n\t\t\t#print(line2)\n\t\t\t#print(postBytes+numBytes)\n\t\t\tif (postBytes+numBytes >= int(line)):\n\t\t\t\tbreak\n\t\t#print(postlines)\n\t\tnumBytes += postBytes\n\t\tposts.append([])\n\t\tposts[numposts] = postlines\n\t\tnumposts+=1\n\t#exit(0)\n\treturn sortPosts(posts)\n\ndef indexOfXStream(stream, postNum, posts):\n\tindex = 0\n\tstreamPos = 0\n\tfor a in range(0, len(posts)):\n\t\tif (posts[a][0] == stream):\n\t\t\tif (streamPos == postNum):\n\t\t\t\treturn index\n\t\t\tstreamPos += 1\n\t\tindex += 1\n\treturn index\n\ndef indexInStream(stream, indexMaster, posts):\n\tstreamPos = 0\n\tfor a in range(0, len(posts)):\n\t\tif (posts[a][0] == stream):\n\t\t\tstreamPos += 1\n\t\tif (a == indexMaster):\n\t\t\treturn streamPos\n\treturn streamPos\n\ndef changeStream (csr, user):\n\tcsr.clear()\n\tc = 1\n\tfor f in os.listdir(\"messages\"):\n\t\tif (f.endswith(\"StreamUsers\") and signin(f, user) == 1):\n\t\t\tcsr.addstr(c, 0, f[:-11])\n\t\t\tc+=1\n\tcsr.addstr(0,0,\"Enter Stream: \")\n\tstream = 
csr.getstr(0,14);\n\tcsr.clear()\n\tposts = []\n\tindex = 0\n\tnameIndex = \"\"\n\tdateIndex = \"\"\n\tif (stream.decode() == \"all\"):\n\t\tprint(\"all\")\n\t\tfor f in os.listdir(\"messages\"):\n\t\t\tif (f.endswith(\"Stream\") and signin(f+\"Users\", user) == 1):\n\t\t\t\tstreamPosts = (list(loadStream(f[:-6], name)))\n\t\t\t\t\n\t\t\t\tif (len(dateIndex) != 0):\n\t\t\t\t\ttmpIndex = getRead(f[:-6], name)\n\n\t\t\t\t\t#Tmp is older than index\n\t\t\t\t\tif (tmpIndex < len(streamPosts)):\n\t\t\t\t\t\tif (cmp(dateIndex, streamPosts[tmpIndex][2]) == 0):\n\t\t\t\t\t\t\tindex = tmpIndex\n\t\t\t\t\t\t\tnameIndex = f[:-6]\n\t\t\t\t\t\t\tdateIndex = streamPosts[tmpIndex][2]\n\t\t\t\telse:\n\t\t\t\t\tindex = getRead(f[:-6], name)\n\t\t\t\t\tnameIndex = f[:-6]\n\t\t\t\t\tif (index >= len(streamPosts) and index > 0):\n\t\t\t\t\t\tindex = -1\n\t\t\t\t\telse: \n\t\t\t\t\t\tdateIndex = streamPosts[index][2]\n\t\t\t\tposts += streamPosts\n\telse:\n\t\tif (not os.path.isfile(\"messages/\"+stream.decode()+\"Stream\")):\n\t\t\treturn 0,[], \"\"\n\t\tposts = list(loadStream(stream.decode(), name))\n\t\tindex = getRead(stream.decode(), name)\n\t\tnameIndex = stream.decode()\n\tposts = sortPosts(posts)\n\t#get index of first unread post in stream\n\tif (index == -1):\n\t\tindex = len(posts)-1\n\telse:\n\t\tindex = indexOfXStream(nameIndex, index, posts)\n\treturn index, posts, stream.decode()\n\ndef refreshStream (csr, user, stream):\n\tposts = []\n\tindex = 0\n\tnameIndex = \"\"\n\tdateIndex = \"\"\n\tif (stream == \"all\"):\n\t\tprint(\"all\")\n\t\tfor f in os.listdir(\"messages\"):\n\t\t\tif (f.endswith(\"Stream\") and signin(f+\"Users\", user) == 1):\n\t\t\t\tstreamPosts = (list(loadStream(f[:-6], name)))\n\t\t\t\t\n\t\t\t\tif (len(dateIndex) != 0):\n\t\t\t\t\ttmpIndex = getRead(f[:-6], name)\n\n\t\t\t\t\t#Tmp is older than index\n\t\t\t\t\tif (tmpIndex < len(streamPosts)):\n\t\t\t\t\t\tif (cmp(dateIndex, streamPosts[tmpIndex][2]) == 0):\n\t\t\t\t\t\t\tindex = tmpIndex\n\t\t\t\t\t\t\tnameIndex = f[:-6]\n\t\t\t\t\t\t\tdateIndex = streamPosts[tmpIndex][2]\n\t\t\t\telse:\n\t\t\t\t\tindex = getRead(f[:-6], name)\n\t\t\t\t\tnameIndex = f[:-6]\n\t\t\t\t\tif (index >= len(streamPosts)):\n\t\t\t\t\t\tindex = -1\n\t\t\t\t\telse: \n\t\t\t\t\t\tdateIndex = streamPosts[index][2]\n\t\t\t\tposts += streamPosts\n\telse:\n\t\tif (not os.path.isfile(\"messages/\"+stream+\"Stream\")):\n\t\t\treturn 0,[]\n\t\tposts = list(loadStream(stream, name))\n\t\tindex = getRead(stream, name)\n\t\tnameIndex = stream\n\tposts = sortPosts(posts)\n\t#get index of first unread post in stream\n\tif (index == -1):\n\t\tindex = len(posts)-1\n\telse:\n\t\tindex = indexOfXStream(nameIndex, index, posts)\n\treturn index, posts\n\ndef displayBar(csr, yMax):\n\tcsr.addstr(yMax-1, 0, \"↑ ↓ O-order toggle M-mark all S-stream C-check for new\")\n\ndef getTime(line):\n\tline = line.split(\":\")\n\tdate = line[1].split(\"/\")\n\ttime = line[0].split(\"/\")\n\tdateTime = datetime.datetime(int(date[2]), int(date[1]), int(date[0]))\n\tstrTime = dateTime.strftime(\"%b %d, %y - \")\n\tstrTime += time[2]+\":\"+time[1]\n\treturn strTime\n\ndef displayScreen(csr, posts, size, index, user, flag, lineStart):\n\tcsr.addstr(0,0,\"ID: \"+name)\n\tdisplayBar(csr, size[0])\n\tc = 1\n\tnumPosts = 0\n\t#curses.endwin()\n\tif (index >= len(posts) and index > 0):\n\t\tindex -= 1\n\tlinesIn = 0\n\tfor i in range(index,len(posts)):\n\t\tnumPosts+=1\n\t\tlinesIn = 0\n\t\tif (c > size[0]-2):\n\t\t\treturn numPosts-1, 
linesIn\n\t\tcsr.addstr(c,0,\"************\")\n\t\tc+=1\n\t\tif (c > size[0]-2):\n\t\t\treturn numPosts-1, linesIn\n\t\tcsr.addstr(c,0,\"Stream: \"+posts[i][0])\n\t\tc+=1\n\t\tif (c > size[0]-2):\n\t\t\treturn numPosts-1, linesIn\n\t\tcsr.addstr(c,0,\"User: \"+posts[i][1])\n\t\tc+=1\n\t\t#date\n\t\tif (c > size[0]-2):\n\t\t\treturn numPosts-1, linesIn\n\n\t\tdate = getTime(posts[i][2])\n\t\tcsr.addstr(c,0,\"Date: \"+date)\n\t\tc+=1\n\t\t#text section\n\t\tlinesIn += lineStart\n\t\tfor j in range(3+lineStart,len(posts[i])):\n\t\t\tif (len(posts[i][j]) > 80):\n\t\t\t\tlines = posts[i][j]\n\t\t\t\tlines = [lines[i:i+80] for i in range(0, len(lines), 80)]\n\t\t\t\tfor a in range(0,len(lines)):\n\t\t\t\t\tif (c > size[0]-2):\n\t\t\t\t\t\treturn numPosts-1, linesIn\n\t\t\t\t\tcsr.addstr(c,0,lines[a])\n\t\t\t\t\tc+=1\n\t\t\t\t\tlinesIn+=1\n\t\t\telse:\n\t\t\t\tif (c > size[0]-2):\n\t\t\t\t\treturn numPosts-1, linesIn\n\t\t\t\tcsr.addstr(c,0,posts[i][j])\n\t\t\t\tc+=1\n\t\t\t\tlinesIn+=1\n\t\tlineStart = 0\n\t\tlinesIn = 0\n\t\t#mark post as read\n\t\tindexIn = indexInStream(posts[i][0], i, posts)\n\t\tif (flag == 1 and getRead(posts[i][0], user) < indexIn):\n\t\t\tsetRead(posts[i][0], user, indexIn)\n\t\t#for line in posts[i]:\n\t\t#\tcsr.addstr(c,0,line)\n\t\t#\t#print(line)\n\t\t#\tc+=1\n\t#exit(0)\n\t\n\treturn numPosts, linesIn\n\ndef cmpIntList(a, b):\n\tfor i in range(len(a)-1, -1, -1):\n\t\tif (int(a[i]) > int(b[i])):\n\t\t\treturn 0\n\t\telif (int(a[i]) < int(b[i])):\n\t\t\treturn 1\n\treturn -1\n\n#will return 1 if 2 is newer than 1\ndef cmp(date1, date2):\n\ttime1 = date1.split(\":\")\n\ttime2 = date2.split(\":\")\n\tr = -1\n\ttmp = cmpIntList(time1[1].split(\"/\"), time2[1].split(\"/\"))\n\ttmp2 = cmpIntList(time1[0].split(\"/\"), time2[0].split(\"/\"))\n\tif (tmp!=-1):\n\t\tr = tmp\n\telif (tmp2!=-1):\n\t\tr = tmp2\n\t#if (r == 1):\n\t#\tprint(date1+ \" > \" + date2)\n\t#else:\n\t#\tprint(date1+ \" < \" + date2)\n\treturn r\n\t\ndef sortPosts(posts):\n\tfor a in range(0,len(posts)-1):\n\t\tfor b in range(0, len(posts)-1):\n\t\t\tif (cmp(posts[b][2], posts[b+1][2]) == 0):\n\t\t\t\ttmp = posts[b]\n\t\t\t\tposts[b] = posts[b+1]\n\t\t\t\tposts[b+1] = tmp\n\treturn posts\n\ndef sortPostsAuthor(posts):\n\tfor a in range(0,len(posts)-1):\n\t\tfor b in range(0, len(posts)-1):\n\t\t\tif (posts[b][1] > posts[b+1][1]):\n\t\t\t\ttmp = posts[b]\n\t\t\t\tposts[b] = posts[b+1]\n\t\t\t\tposts[b+1] = tmp\n\treturn posts\n\ndef pageUpShift(posts, index, csr, lineStart):\n\tif (index == 0):\n\t\treturn 0, 0\n\tscreenFill = 0\n\toffset = 1\n\twhile screenFill < 24 and index-offset >= 0:\n\t\t#csr.addstr(28+offset, 0, \"index up: \"+str(index)+\" lines: \"+str(len(posts[index-offset])+1))\n\t\tscreenFill += len(posts[index-offset])+1\n\t\toffset += 1\n\toffset-=1\n\tif (screenFill > 24):\n\t\toffset -= 1\n\t\tif (offset == 0):\n\t\t\toffset = 1\n\t\t\tlenOfPost = len(posts[index])-3\n\t\t\tlineStart = 0\n\t\telse:\n\t\t\tlineStart = 0\n\telse:\n\t\tlineStart = 0\n\n\treturn offset, lineStart\n\ndef markAllRead(posts, user):\n\tstream = {}\n\tfor a in range(0,len(posts)):\n\t\tstream[posts[a][0]] = stream.get(posts[a][0], 0)+1\n\tfor key, value in stream.items():\n\t\tsetRead(key, user, value)\n\t\t#print(key +\" : \"+ str(value))\n\t#print(stream)\n\nif __name__ == \"__main__\":\n\tif (len(sys.argv) <= 1):\n\t\tprint(\"No User specified\")\n\t\texit(0)\n\n\tname = \"\"\n\tfor x in sys.argv[1:]:\n\t\tname+=x+\" \"\n\tname = name[:-1]\n\n\tcsr = curses.initscr()\n\n\ttry:\n\t\tcurses.start_color()\n\t\tsize 
= list(csr.getmaxyx())\n\t\tif (size[0] > 24):\n\t\t\tsize[0] = 24\n\n\t\t#stream = changeStream(csr, name)\n\t\tflag = 1\n\t\tindex = 0\n\t\tindex, posts, curStream = list(changeStream(csr,name))\n\t\tnumPostsOnScreen, lineStart = displayScreen(csr, posts, size, index, name, flag, 0)\n\t\twhile (1==1):\n\t\t\tc = csr.getch(0,0)\n\t\t\tcsr.clear()\n\t\t\tif (c == ord('q')):\n\t\t\t\tbreak\n\t\t\telif (c == ord('o')):\n\t\t\t\tflag = flag*-1\n\t\t\t\tif (flag == 1):\n\t\t\t\t\tposts = sortPosts(posts)\n\t\t\t\telse:\n\t\t\t\t\tposts = sortPostsAuthor(posts)\n\n\t\t\telif (c == ord('m')):\n\t\t\t\tmarkAllRead(posts, name)\n\t\t\telif (c == ord('c')):\n\t\t\t\tindex, posts = refreshStream(csr, name, curStream)\n\t\t\t\tif (flag == -1):\n\t\t\t\t\tposts = sortPostsAuthor(posts)\n\t\t\telif (c == ord('s')):\n\t\t\t\t#stream = changeStream(csr, name)\n\t\t\t\t#posts = list(loadStream(stream, name))\n\t\t\t\tindex, posts, curStream = (list(changeStream(csr,name)))\n\t\t\t\tif (flag == -1):\n\t\t\t\t\tposts = sortPostsAuthor(posts)\n\n\t\t\telif (c == 65): #up\n\t\t\t\tif (index > 0):\n\t\t\t\t\tif (index >= len(posts)):\n\t\t\t\t\t\tindex -= 1\n\t\t\t\t\ttmp, lineStart = pageUpShift(posts, index, csr, lineStart)\n\t\t\t\t\tindex-=tmp\n\t\t\t\telse:\n\t\t\t\t\tlineStart = 0\n\t\t\telif (c == 66): #down\n\t\t\t\tif (index+numPostsOnScreen < len(posts)):\n\t\t\t\t\tindex += numPostsOnScreen\n\t\t\t#refresh page\n\t\t\tnumPostsOnScreen, lineStart = displayScreen(csr, posts, size, index, name, flag, lineStart)\n\texcept Exception:\n\t\tcurses.endwin()\n\t\ttraceback.print_exc()\n\t\texit(0)\n\tcurses.endwin()\n\n","sub_path":"a2/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"291703422","text":"from pain_detector import PainDetector\nimport cv2\nimport time\n\n\npain_detector = PainDetector(image_size=160, checkpoint_path='checkpoints/50342566/50343918_3/model_epoch4.pt')\nprint(pain_detector.device)\nref_frame1 = cv2.imread('example_frames/example-reference-frame.png')\nref_frame2 = cv2.imread('example_frames/example-reference-frame.png')\nref_frame3 = cv2.imread('example_frames/example-reference-frame.png')\n# In this example the reference frames are identical, but in a real scenario, the idea is to use different\n# reference frames from the same person. 
Ideally, the reference frames should have a neutral expression and should\n# exhibit slight lighting and camera angle variations.\npain_detector.add_references([ref_frame1, ref_frame2, ref_frame3])\ntarget_frame = cv2.imread('example_frames/example-target-frame.png')\npain_estimate = pain_detector.predict_pain(target_frame)\nprint(pain_estimate)\n\n\nnum_of_frames = 30\nprint('Testing frame rate with {} frames'.format(num_of_frames))\nstart_time = time.time()\nfor _ in range(num_of_frames):\n pain_detector.predict_pain(target_frame)\nprint('FPS: {}'.format(num_of_frames / (time.time() - start_time)))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"520029415","text":"import pickle\n\n\nour_dict = pickle.load(open(\"found_slang_words\",\"rb\"))\nfor key in our_dict:\n print(\"%s : %s\"%(key,our_dict[key][0]))\n print(\"Example : %s\"%(our_dict[key][1]))\n\nour_failed_searches = pickle.load(open(\"failed_searches\",\"rb\"))\n\nfor element in our_failed_searches:\n print(element)\n\nour_impropers = pickle.load(open(\"probably_improper_word_forms\",\"rb\"))\n\nfor element in our_impropers:\n print(element)\n\n\nif \"byygeen\" in our_impropers:\n print(\"yayeuh\")","sub_path":"core_files/Finnish Slang/put_away/my_inspector.py","file_name":"my_inspector.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"600863697","text":"# 3rd party python modules\r\nimport pygame\r\npygame.init()\r\n\r\n# Own modules\r\nfrom game.classes.Preferences import *\r\nfrom game.classes.Button import Button\r\n\r\nclass Option_Button( Button ):\r\n\t\"\"\"\r\n\t\"\"\"\r\n\tdef __init__( self, button_texts, ret_value, text_inactive, centerxy, mouse_over_sound, mouse_click_sound, unique_name=None, text_active=None, size=20, font=\"SysFont\", color_inactive=BLACK, color_active=YELLOW, antialias=True, bold_inactive=False, bold_active=True, italic_inactive=False, italic_active=True, background_inactive=None, background_active=None, is_option=False, unique_id = None ):\r\n\t\tsuper( Option_Button, self ).__init__( ret_value, text_inactive, centerxy, mouse_over_sound, mouse_click_sound, unique_name, text_active, size, font, color_inactive, color_active, antialias, bold_inactive, bold_active, italic_inactive, italic_active, background_inactive, background_active, unique_id )\r\n\r\n\t\tself._button_texts = button_texts\r\n\t\tself._max = len( self._button_texts )\r\n\t\tself._counter = 0\r\n\r\n\t\tself._text_inactive = self._button_texts[0]\r\n\t\tself._text_active = self._button_texts[0]\r\n\r\n\t\tself._is_option = is_option\r\n\r\n\tdef next_value( self ):\r\n\t\tif self._counter >= self._max - 1:\r\n\t\t\tself._counter = 0\r\n\t\telse:\r\n\t\t\tself._counter += 1\r\n\t\t\t\r\n\t\tself._text_inactive = self._button_texts[self._counter]\r\n\t\tself._text_active = self._button_texts[self._counter]\r\n\r\n\t\t# Draw the new text onto the surfaces.\r\n\t\tself.draw_button_states()\r\n\r\n\t\t# Show the inactive state first.\r\n\t\tself.image = self._button_inactive.image\r\n\t\tself.rect = self._button_inactive.rect\r\n\r\n\r\n\tdef update( self, events ):\r\n\t\tif self.selected:\r\n\t\t\tself.image = self._button_active.image\r\n\t\t\tself.rect = self._button_active.rect\r\n\t\t\tif not self._mouse_over_sound_played:\r\n\t\t\t\tsound_channel.play( self._mouse_over_sound 
)\r\n\t\t\t\tself._mouse_over_sound_played = True\r\n\t\t\t\t\r\n\t\t\tfor e in events:\r\n\t\t\t\tif e.type == pygame.KEYDOWN:\r\n\t\t\t\t\tif e.key == pygame.K_RETURN:\r\n\t\t\t\t\t\tif self._is_option:\r\n\t\t\t\t\t\t\tself.next_value()\r\n\t\t\t\t\t\tsound_channel.play( self._mouse_click_sound )\r\n\t\t\t\t\t\treturn self._ret_value\t\t\t\r\n\t\telse:\r\n\t\t\tself.image = self._button_inactive.image\r\n\t\t\tself.rect = self._button_inactive.rect\r\n\t\t\tself._mouse_over_sound_played = False\t\r\n\r\n\r\n\t\treturn\r\n\t\t\r\n\t\tif self.rect.collidepoint( pygame.mouse.get_pos() ):\r\n\t\t\r\n\t\t\tif not self._mouse_over_sound_played:\r\n\t\t\t\t# Play the mouse over button sound.\r\n\t\t\t\tsound_channel.play( self._mouse_over_sound )\r\n\t\t\t\tself._mouse_over_sound_played = True\r\n\t\t\t\t\t\t\r\n\t\t\tself.image = self._button_active.image\r\n\t\t\tself.rect = self._button_active.rect\r\n\t\t\tif pygame.mouse.get_pressed()[0] and not self._mouse_already_down:\r\n\t\t\t\t# Change the value of the button.\r\n\t\t\t\tself._mouse_already_down = True\r\n\t\t\t\tsound_channel.play( self._mouse_click_sound )\t\t\t\t\t\t\r\n\t\t\tif not pygame.mouse.get_pressed()[0] and self._mouse_already_down:\r\n\t\t\t\tself._mouse_already_down = False\r\n\t\t\t\tself.next_value()\r\n\t\t\t\treturn self._ret_value\t\t\t\t\r\n\t\telse:\r\n\t\t\tself.image = self._button_inactive.image\r\n\t\t\tself.rect = self._button_inactive.rect\r\n\t\t\tself._mouse_over_sound_played = False\r\n\t\t\t\r\n\r\n\tdef set_text( self, new_text ):\r\n\t\tsuper( Option_Button, self ).set_text( new_text )\r\n\r\n\t\t# Find the position of \"new_text\" in self._button_texts and set \"self._counter\" to the appropriate value.\r\n\t\tself._counter = 0\r\n\t\tfor item in self._button_texts:\r\n\t\t\tif item == new_text:\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tself._counter += 1\r\n","sub_path":"game/classes/Option_Button.py","file_name":"Option_Button.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"386596182","text":"import numpy as np\r\nimport argparse\r\n\r\n\r\ndef get_args():\r\n\t\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument(\"start\", type=int, help=\"Lower bound\")\r\n\tparser.add_argument(\"finish\", type=int, help=\"Upper bound\")\r\n\t\r\n\treturn parser.parse_args()\r\n\r\n\r\ndef is_nondecr(val):\r\n\r\n\ti = 0\r\n\twhile i<5:\r\n\t\tif val[i]>val[i+1]:\r\n\t\t\treturn False\r\n\t\ti += 1\r\n\r\n\treturn True\r\n\r\ndef has_two_adj_same(val):\r\n\r\n\ti = 0\r\n\twhile i<5:\r\n\t\tif val[i]==val[i+1]:\r\n\t\t\tif i==4:\r\n\t\t\t\tif val[i-1] != val[i]:\r\n\t\t\t\t\treturn True\r\n\t\t\telif i==0:\r\n\t\t\t\tif val[i+1] != val[i+2]:\r\n\t\t\t\t\treturn True\r\n\t\t\telse:\r\n\t\t\t\tif all([val[i-1] != val[i], val[i+1] != val[i+2]]):\r\n\t\t\t\t\treturn True\r\n\t\ti += 1\r\n\r\n\treturn False\r\n\r\ndef increment_place(val, place):\r\n\r\n\tif val[5-place]==9:\r\n\t\tval[5-place] = 0\r\n\t\tincrement_place(val, place+1)\r\n\telse:\r\n\t\tval[5-place] += 1\r\n\r\n\treturn True\r\n\r\n\r\ndef increment(val):\r\n\tincrement_place(val, 0)\r\n\r\n\r\ndef getvalue(val):\r\n\treturn sum([val[i]*(10**(5-i)) for i in range(6)])\r\n\r\n\r\ndef run_main():\r\n\r\n\targs = get_args()\r\n\r\n\tval = [int(v) for v in list(str(args.start))]\r\n\r\n\tc=0\r\n\twhile getvalue(val)<=args.finish:\r\n\t\tif is_nondecr(val) and 
has_two_adj_same(val):\r\n\t\t\tc+=1\r\n\t\tincrement(val)\r\n\r\n\tprint(c)\r\n\treturn(c)\r\n\r\n\r\nif __name__=='__main__':\r\n\r\n\trun_main()\r\n","sub_path":"04/04b_pw.py","file_name":"04b_pw.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"66607712","text":"from django.db import models\nfrom VeterinariaPatagonica.Apps.GestionDeInsumos.models import Insumo\nfrom django.views.generic.list import ListView\n\n# Create your models here.\n#class CantidadList(ListView):\n#    cantidadInsumos = models.PositiveSmallIntegerField()\n\n\nclass InsumosList(ListView):\n    insumo = models.ForeignKey(Insumo, null=False, blank=False, on_delete=models.CASCADE)\n    cantidadInsumo = models.PositiveSmallIntegerField()\n\nclass Servicio(models.Model):\n    t_tipo = (('C','Consulta'),('Q','Quirurgica'))\n    tipo = models.CharField(max_length=25, choices=t_tipo, default='Tipo de Práctica aqui')\n    nombre = models.CharField(max_length=50, null=False, blank=False, help_text='Ingresa el nombre del Servicio')\n    descripcion = models.CharField(max_length=200)\n    insumos = InsumosList\n    #cantidadInsumos = CantidadList\n    tiempoEstimado = models.TimeField(auto_now=False, help_text='Tiempo de Duración del Servicio')\n    precioManoDeObra = models.PositiveSmallIntegerField()\n\n\n    def __str__(self):\n        cadena = 'Nombre de Servicio: {0}, Duración Estimada: {1} Precio: {2}.'\n        return cadena.format(self.nombre, self.tiempoEstimado, self.precioManoDeObra)\n","sub_path":"VeterinariaPatagonica/VeterinariaPatagonica/Apps/GestionDeServicios/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"189846194","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/2/8 15:17\n# @Author : Warren.wang\n# @File : test_alert.py\n# @Software: PyCharm\n'''\nCommon methods:\nPage interactions can run into alert, confirm and prompt popups generated by JavaScript.\nThey can be located with switch_to.alert and then handled with the text/accept/dismiss/send_keys methods.\nCommon methods for handling alerts:\nswitch_to.alert: get the alert box on the current page\ntext: return the text of an alert, confirm or prompt\naccept(): accept the current alert box\ndismiss(): dismiss the current alert box\nsend_keys(keysToSend): send text to the alert box\nkeysToSend: the text to be sent to the alert box\n'''\nfrom time import sleep\n\nfrom selenium.webdriver import ActionChains\n\nfrom test_selenium.selenium_frame_window.base import Base\n\n\nclass TestAlert(Base):\n    def test_alert(self):\n        '''\n        Open the page https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\n        In the right-hand pane of the window, drag element 1 onto element 2\n        An alert box then pops up; click \"OK\" in the box\n        Then press \"Run\"\n        Close the page\n        :return:\n        '''\n        self.driver.get(\"https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable\")\n        #1 switch into the frame\n        self.driver.switch_to.frame(\"iframeResult\")\n        drag = self.driver.find_element_by_id(\"draggable\")\n        drop = self.driver.find_element_by_id(\"droppable\")\n        #2 drag the element\n        action = ActionChains(self.driver)\n        action.drag_and_drop(drag, drop)\n        action.perform()\n\n        print(\"accepting the alert\")\n        self.driver.switch_to.alert.accept() # accept the current alert box\n\n        #3 switch back to the default frame\n        self.driver.switch_to.default_content()\n\n        self.driver.find_element_by_id(\"submitBTN\").click()\n        sleep(3)","sub_path":"test_selenium/selenium_file_alert/test_alert.py","file_name":"test_alert.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"44575591","text":"#!/usr/bin/env python3\n'''\nSolution for \"Longest Valid Parentheses\"\n'''\n\nclass Solution(object):\n    def 
longestValidParentheses(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        buffer = []\n        max_length = 0\n        start = 0\n        for index, char in enumerate(s):\n            if char == '(':\n                buffer.append(index)\n            else:\n                if len(buffer) == 0:\n                    start = index + 1\n                else:\n                    matched = buffer.pop()\n                    if len(buffer) == 0:\n                        current_len = index - start + 1\n                    else:\n                        current_len = index - buffer[-1]\n                    if current_len > max_length:\n                        max_length = current_len\n        return max_length\nif __name__ == \"__main__\":\n    print(Solution().longestValidParentheses(\"(()()()()(((()()()()(\"))","sub_path":"32/32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"377488141","text":"from .strategy import Strategy\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.utils.data import DataLoader\n\nimport math\n\nclass GLISTER(Strategy):\n    \"\"\"\n    This is an implementation of GLISTER-ACTIVE from the paper GLISTER: Generalization based Data \n    Subset Selection for Efficient and Robust Learning :footcite:`killamsetty2020glister`. The GLISTER \n    method tries to solve a bi-level optimisation problem.\n\n    .. math::\n        \\\\overbrace{\\\\underset{{S \\\\subseteq {\\\\mathcal U}, |S| \\\\leq k}}{\\\\operatorname{argmin\\\\hspace{0.7mm}}} L_V(\\\\underbrace{\\\\underset{\\\\theta}{\\\\operatorname{argmin\\\\hspace{0.7mm}}} L_T( \\\\theta, S)}_{inner-level}, {\\\\mathcal V})}^{outer-level}\n    \n    In the above equation, :math:`\\\\mathcal{U}` denotes the data without labels i.e. `unlabeled_x`, \n    :math:`\\\\mathcal{V}` denotes the validation set that guides the subset selection process, :math:`L_T` denotes the\n    training loss, :math:`L_V` denotes the validation loss, :math:`S` denotes the data subset selected at each round, and :math:`k` is the `budget`.\n    Since solving the complete inner optimisation is expensive, GLISTER-ONLINE adopts an online one-step meta approximation where we approximate the solution to the inner problem\n    by taking a single gradient step.\n    The optimisation problem after the approximation is as follows:\n    \n    .. math::\n        \\\\overbrace{\\\\underset{{S \\\\subseteq {\\\\mathcal U}, |S| \\\\leq k}}{\\\\operatorname{argmin\\\\hspace{0.7mm}}} L_V(\\\\underbrace{\\\\theta - \\\\eta \\\\nabla_{\\\\theta}L_T(\\\\theta, S)}_{inner-level}, {\\\\mathcal V})}^{outer-level}\n    \n    In the above equation, :math:`\\\\eta` denotes the step size used for the one-step gradient update.\n\n    \n    Parameters\n    ----------\n    X: Numpy array \n        Features of the labelled set of points \n    Y: Numpy array\n        Labels of the labelled set of points \n    unlabeled_x: Numpy array\n        Features of the unlabelled set of points \n    net: class object\n        Model architecture used for training. Could be an instance of models defined in `distil.utils.models` or something similar.\n    handler: class object\n        It should be a subclass of torch.utils.data.Dataset, i.e., have __getitem__ and __len__ methods implemented, so that it can be passed to a PyTorch DataLoader. Could be an instance of handlers defined in `distil.utils.DataHandler` or something similar.\n    nclasses: int \n        No. of classes in the dataset\n    args: dictionary\n        This dictionary should have keys 'batch_size' and 'lr'. \n        'lr' should be the learning rate used for training. 'batch_size' should be such \n        that one can exploit the benefits of tensorization while honouring the resource constraints.\n    valid: boolean\n        Whether a validation set is passed or not\n    X_val: Numpy array, optional\n        Features of the points in the validation set. Mandatory if `valid=True`.\n    Y_val: Numpy array, optional\n        Labels of the points in the validation set. Mandatory if `valid=True`.\n    loss_criterion: class object, optional\n        The type of loss criterion. Default is **torch.nn.CrossEntropyLoss()**\n    typeOf: str, optional\n        Determines the type of regulariser to be used. Default is **'none'**.\n        For the random regulariser use **'Rand'**.\n        To use the Facility Location set function as a regulariser use **'FacLoc'**.\n        To use the Diversity set function as a regulariser use **'Diversity'**.\n    lam: float, optional\n        Determines the amount of regularisation to be applied. Mandatory if `typeOf` is not `'none'`; set to `None` by default.\n        For the random regulariser, values should be between 0 and 1, as the value determines the fraction of points replaced by random points.\n        For both 'Diversity' and 'FacLoc', `lam` determines the weightage given to them while computing the gain.\n    kernel_batch_size: int, optional\n        For the 'Diversity' and 'FacLoc' regulariser versions, a similarity kernel is to be computed, which \n        entails creating a 3d torch tensor of dimensions kernel_batch_size*kernel_batch_size*\n        feature dimension. Again, kernel_batch_size should be such that one can exploit the benefits of \n        tensorization while honouring the resource constraints. \n    \"\"\"\n    \n\n    def __init__(self,X, Y,unlabeled_x, net, handler, nclasses, args,valid,X_val=None,Y_val=None,\\\n                 loss_criterion=nn.CrossEntropyLoss(),typeOf='none',lam=None,kernel_batch_size = 200): # \n        super(GLISTER, self).__init__(X, Y, unlabeled_x, net, handler,nclasses, args)\n\n        if valid:\n            self.X_Val = X_val\n            self.Y_Val = Y_val\n        self.loss = loss_criterion\n        self.valid = valid\n        self.typeOf = typeOf\n        self.lam = lam\n        self.kernel_batch_size = kernel_batch_size\n        #self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n    def distance(self,x, y, exp = 2):\n\n        n = x.size(0)\n        m = y.size(0)\n        d = x.size(1)\n\n        x = x.unsqueeze(1).expand(n, m, d)\n        y = y.unsqueeze(0).expand(n, m, d)\n\n        if self.typeOf == \"FacLoc\":\n            dist = torch.pow(x - y, exp).sum(2) \n        elif self.typeOf == \"Diversity\":\n            dist = torch.exp((-1 * torch.pow(x - y, exp).sum(2))/2)\n        \n        return dist \n\n    def _compute_similarity_kernel(self):\n        \n        g_is = []\n        for item in range(math.ceil(len(self.grads_per_elem) / self.kernel_batch_size)):\n            inputs = self.grads_per_elem[item *self.kernel_batch_size:(item + 1) *self.kernel_batch_size]\n            g_is.append(inputs)\n\n        with torch.no_grad():\n            \n            new_N = len(self.grads_per_elem)\n            self.sim_mat = torch.zeros([new_N, new_N], dtype=torch.float32).to(self.device)\n            first_i = True\n            for i, g_i in enumerate(g_is, 0):\n                if first_i:\n                    size_b = g_i.size(0)\n                    first_i = False\n                for j, g_j in enumerate(g_is, 0):\n                    self.sim_mat[i * size_b: i * size_b + g_i.size(0),\n                                 j * size_b: j * size_b + g_j.size(0)] = self.distance(g_i, g_j)\n\n            if self.typeOf == \"FacLoc\":\n                const = torch.max(self.sim_mat).item()\n                #self.sim_mat = const - self.sim_mat\n\n                self.min_dist = (torch.ones(new_N, dtype=torch.float32)*const).to(self.device)\n\n    def _compute_per_element_grads(self):\n        \n        self.grads_per_elem = self.get_grad_embedding(self.unlabeled_x)\n        self.prev_grads_sum = torch.sum(self.get_grad_embedding(self.X,self.Y),dim=0).view(1, -1)\n\n    def 
_update_grads_val(self,grads_currX=None, first_init=False):\n\n embDim = self.model.get_embedding_dim()\n \n if first_init:\n if self.valid:\n if self.X_Val is not None:\n loader = DataLoader(self.handler(self.X_Val,self.Y_Val,select=False),shuffle=False,\\\n batch_size=self.args['batch_size'])\n self.out = torch.zeros(self.Y_Val.shape[0], self.target_classes).to(self.device)\n self.emb = torch.zeros(self.Y_Val.shape[0], embDim).to(self.device)\n else:\n raise ValueError(\"Since Valid is set True, please pass a appropriate Validation set\")\n \n else:\n predicted_y = self.predict(self.unlabeled_x)\n self.X_new = np.concatenate((self.unlabeled_x,self.X), axis = 0)\n self.Y_new = np.concatenate((predicted_y,self.Y), axis = 0)\n\n loader = DataLoader(self.handler(self.X_new,self.Y_new,select=False),shuffle=False,\\\n batch_size=self.args['batch_size'])\n self.out = torch.zeros(self.Y_new.shape[0], self.target_classes).to(self.device)\n self.emb = torch.zeros(self.Y_new.shape[0], embDim).to(self.device)\n\n self.grads_val_curr = torch.zeros(self.target_classes*(1+embDim), 1).to(self.device)\n \n with torch.no_grad():\n\n for x, y, idxs in loader:\n x = x.to(self.device)\n y = y.to(self.device)\n init_out, init_l1 = self.model(x,last=True)\n self.emb[idxs] = init_l1 \n for j in range(self.target_classes):\n try:\n self.out[idxs, j] = init_out[:, j] - (1 * self.args['lr'] * (torch.matmul(init_l1, self.prev_grads_sum[0][(j * embDim) +\n self.target_classes:((j + 1) * embDim) + self.target_classes].view(-1, 1)) + self.prev_grads_sum[0][j])).view(-1)\n except KeyError:\n print(\"Please pass learning rate used during the training\")\n \n scores = F.softmax(self.out[idxs], dim=1)\n one_hot_label = torch.zeros(len(y), self.target_classes).to(self.device)\n one_hot_label.scatter_(1, y.view(-1, 1), 1)\n l0_grads = scores - one_hot_label\n l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)\n l1_grads = l0_expand * init_l1.repeat(1, self.target_classes)\n\n self.grads_val_curr += torch.cat((l0_grads, l1_grads), dim=1).sum(dim=0).view(-1, 1)\n \n if self.valid:\n self.grads_val_curr /= self.Y_Val.shape[0]\n else:\n self.grads_val_curr /= predicted_y.shape[0]\n\n elif grads_currX is not None:\n # update params:\n with torch.no_grad():\n\n for j in range(self.target_classes):\n try:\n self.out[:, j] = self.out[:, j] - (1 * self.args['lr'] * (torch.matmul(self.emb, grads_currX[0][(j * embDim) +\n self.target_classes:((j + 1) * embDim) + self.target_classes].view(-1, 1)) + grads_currX[0][j])).view(-1)\n except KeyError:\n print(\"Please pass learning rate used during the training\")\n\n \n scores = F.softmax(self.out, dim=1)\n if self.valid:\n Y_Val = torch.tensor(self.Y_Val,device=self.device)\n one_hot_label = torch.zeros(Y_Val.shape[0], self.target_classes).to(self.device)\n one_hot_label.scatter_(1,Y_Val.view(-1, 1), 1) \n else:\n \n one_hot_label = torch.zeros(self.Y_new.shape[0], self.target_classes).to(self.device)\n one_hot_label.scatter_(1, torch.tensor(self.Y_new,device=self.device).view(-1, 1), 1)\n l0_grads = scores - one_hot_label\n l0_expand = torch.repeat_interleave(l0_grads, embDim, dim=1)\n l1_grads = l0_expand * self.emb.repeat(1, self.target_classes)\n\n self.grads_val_curr = torch.cat((l0_grads, l1_grads), dim=1).mean(dim=0).view(-1, 1)\n\n def eval_taylor_modular(self, grads,greedySet=None,remset=None):\n\n with torch.no_grad():\n if self.typeOf == \"FacLoc\":\n gains = torch.matmul(grads, self.grads_val_curr) + self.lam*((self.min_dist - \\\n 
torch.min(self.min_dist,self.sim_mat[remset])).sum(1)).view(-1, 1).to(self.device)\n \n elif self.typeOf == \"Diversity\" and len(greedySet) > 0:\n gains = torch.matmul(grads, self.grads_val_curr) - \\\n self.lam*self.sim_mat[remset][:, greedySet].sum(1).view(-1, 1).to(self.device)\n else:\n gains = torch.matmul(grads, self.grads_val_curr)\n return gains\n \n def select(self, budget):\n\n \"\"\"\n Select next set of points\n \n Parameters\n ----------\n budget: int\n Number of indexes to be returned for next set\n \n Returns\n ----------\n chosen: list\n List of selected data point indexes with respect to unlabeled_x\n \"\"\" \n\n self._compute_per_element_grads()\n self._update_grads_val(first_init=True)\n \n numSelected = 0\n greedySet = list()\n remainSet = list(range(self.unlabeled_x.shape[0]))\n\n if self.typeOf == 'Rand':\n if self.lam is not None:\n if self.lam >0 and self.lam < 1:\n curr_bud = (1-self.lam)*budget\n else:\n raise ValueError(\"Lambda value should be between 0 and 1\")\n else:\n raise ValueError(\"Please pass a appropriate lambda value for random regularisation\")\n else:\n curr_bud = budget\n\n if self.typeOf == \"FacLoc\" or self.typeOf == \"Diversity\":\n if self.lam is not None:\n self._compute_similarity_kernel()\n else:\n if self.typeOf == \"FacLoc\":\n raise ValueError(\"Please pass a appropriate lambda value for Facility Location based regularisation\")\n elif self.typeOf == \"Diversity\":\n raise ValueError(\"Please pass a appropriate lambda value for Diversity based regularisation\")\n \n while (numSelected < curr_bud):\n\n if self.typeOf == \"Diversity\":\n gains = self.eval_taylor_modular(self.grads_per_elem[remainSet],greedySet,remainSet)\n elif self.typeOf == \"FacLoc\":\n gains = self.eval_taylor_modular(self.grads_per_elem[remainSet],remset=remainSet)\n else:\n gains = self.eval_taylor_modular(self.grads_per_elem[remainSet])#rem_grads)\n \n bestId = remainSet[torch.argmax(gains).item()]\n greedySet.append(bestId)\n remainSet.remove(bestId)\n numSelected += 1\n \n self._update_grads_val(self.grads_per_elem[bestId].view(1, -1))\n\n if self.typeOf == \"FacLoc\":\n self.min_dist = torch.min(self.min_dist,self.sim_mat[bestId])\n \n if self.typeOf == 'Rand':\n greedySet.extend(list(np.random.choice(remainSet, size=budget - int(curr_bud),replace=False)))\n \n return greedySet","sub_path":"distil/active_learning_strategies/glister.py","file_name":"glister.py","file_ext":"py","file_size_in_byte":14243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"27590473","text":"from vehicles.logic.vehiclesLogic import vehiclesLogic\nfrom vehicles.utils import vehicleConstants\n\ndef test_registerVehicle() :\n try :\n # Ensure \"TEST_VEH_1\" is not already registered\n testVehId1 = \"TEST_VEH_1\"\n deregisterStatus = vehiclesLogic.deregisterVehicle(testVehId1)\n\n # Register new vehicle \"TEST_VEH_1\"\n registerStatus = vehiclesLogic.registerVehicle(testVehId1)\n assert(registerStatus == vehicleConstants.SUCCESS)\n\n # Register already registered vehicle\n registerStatus = vehiclesLogic.registerVehicle(testVehId1)\n assert(registerStatus == vehicleConstants.VEHICLE_ALREADY_REGISTERED)\n\n deregisterStatus = vehiclesLogic.deregisterVehicle(testVehId1)\n\n # TODO : Add assertions for exceptions\n except :\n assert (True == False)\n\ndef test_deregisterVehicle() :\n try :\n # Ensure \"TEST_VEH_1\" is already registered\n testVehId2 = \"TEST_VEH_2\"\n registerStatus = vehiclesLogic.registerVehicle(testVehId2)\n\n # 
Deregister vehicle \"TEST_VEH_1\"\n deregisterStatus = vehiclesLogic.deregisterVehicle(testVehId2)\n assert(deregisterStatus == vehicleConstants.SUCCESS)\n\n # Degister unegistered vehicle\n deregisterStatus = vehiclesLogic.deregisterVehicle(testVehId2)\n assert(deregisterStatus == vehicleConstants.VEHICLE_NOT_REGISTERED)\n except :\n assert (True == False) \n\ndef test_updateLocation() :\n try :\n # Ensure \"TEST_VEH_1\" is not already registered\n # call updateLocation on \"TEST_VEH_1\"\n # return value should be vehicleConstants.VEHICLE_NOT_REGISTERED\n testVehId3 = \"TEST_VEH_3\"\n deregisterStatus = vehiclesLogic.deregisterVehicle(testVehId3)\n\n # valid Location - within 3.5 km radius from door2door\n locDict = {\n \"lat\" : \"52.531\",\n \"lng\" : \"13.403\",\n \"at\" : \"2019-09-01T12:59:13.5123Z\"\n }\n updateStatus = vehiclesLogic.updateLocation(testVehId3, locDict)\n assert(updateStatus == vehicleConstants.VEHICLE_NOT_REGISTERED)\n\n # Register \"TEST_VEH_1\"\n # call updateLocation on \"TEST_VEH_1\" with valid location\n # return value should be vehicleConstants.SUCCESS\n vehiclesLogic.registerVehicle(testVehId3)\n updateStatus = vehiclesLogic.updateLocation(testVehId3, locDict)\n assert(updateStatus == vehicleConstants.SUCCESS)\n\n # call updateLocation on \"TEST_VEH_1\" again with the same at value\n # Return value should be vehicleConstants.VEHICLE_UPDATE_WITHIN_3_SECONDS\n updateStatus = vehiclesLogic.updateLocation(testVehId3, locDict)\n assert(updateStatus == vehicleConstants.VEHICLE_UPDATE_WITHIN_3_SECONDS)\n\n # call updateLocation on \"TEST_VEH_1\" again with an older timestamp\n # Return value should be vehicleConstants.VEHICLE_UPDATE_FOR_OLDER_TIMESTAMP\n locDict[\"at\"] = \"2019-09-01T12:58:13.5123Z\"\n updateStatus = vehiclesLogic.updateLocation(testVehId3, locDict)\n assert(updateStatus == vehicleConstants.VEHICLE_UPDATE_FOR_OLDER_TIMESTAMP)\n\n # call updateLocation on \"TEST_VEH_1\" again with coordinates outside city boundary\n # Return value should be vehicleConstants.VEHICLE_OUTSIDE_CITY_BOUNDARY\n locDict[\"at\"] = \"2019-09-01T13:00:13.5123Z\"\n locDict[\"lat\"] = \"2.531\"\n updateStatus = vehiclesLogic.updateLocation(testVehId3, locDict)\n assert(updateStatus == vehicleConstants.VEHICLE_OUTSIDE_CITY_BOUNDARY)\n except :\n assert (True == False)\n\ndef test_getVehicles() :\n try :\n # Call getVehicles\n vehDict = vehiclesLogic.getVehicles()\n\n # Register 1 new vehicle\n testVehId4 = \"TEST_VEH_4\"\n vehiclesLogic.registerVehicle(testVehId4)\n\n # Call getVehicles and check if count of vehicles returned by\n # getVehicles increased by 1\n vehDict1 = vehiclesLogic.getVehicles()\n assert (len(vehDict1) == len(vehDict) + 1)\n\n # Deregister vehicle\n vehiclesLogic.deregisterVehicle(testVehId4)\n except :\n assert (True == False)\n\n\n","sub_path":"vehicles_docker_compose/server/vehicles/logic/test_vehiclesLogic.py","file_name":"test_vehiclesLogic.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"55990742","text":"# -*- coding: utf-8 -*-\nimport hashlib\n\n\nclass Examinee:\n examinee_id = \"\"\n examinee_name = \"\"\n company = \"\"\n ic_no = \"\"\n cert_no = \"\"\n telephone = \"\"\n md5 = \"\"\n\n def __init__(self,\n examinee_id,\n examinee_name,\n company,\n ic_no,\n cert_no,\n telephone):\n self.examinee_name = examinee_name\n self.examinee_id = examinee_id\n self.company = company\n self.ic_no = ic_no\n self.cert_no = cert_no\n self.telephone = 
telephone\n m = hashlib.md5()\n m.update(examinee_id)\n self.md5 = m.hexdigest()\n","sub_path":"ExamClientPy/Model/Examinee.py","file_name":"Examinee.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"495377880","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 21 09:47:22 2019\n\n@author: dekockmi\n\"\"\"\nimport h5py\nimport numpy as np\nimport torch as th\nimport matplotlib.pyplot as plt\nfrom ptycho_modules import FarfieldPtychoNet\nfrom ptycho_functions import poisson_likelihood\nfrom scipy import optimize\nfrom ptycho_utilities import zplot, focused_probe\n\ndef disc_probe(r,n):\n yy, xx = np.ogrid[-n>>1:n>>1, -n>>1:n>>1]\n mask = yy*yy + xx*xx <= r*r\n return mask\n\ndef error_metric(img_approx, img_target):\n \"\"\"\n Returns the relative error between a approx and target image.\n \"\"\"\n diff_fun = lambda phi : np.abs(img_approx*np.exp(1j*phi) - img_target).flatten()\n out = optimize.least_squares(diff_fun, [1])\n return out.cost, out.x\n\ndef power_iter(img, data, num):\n for _ in range(num):\n img_fft = np.fft.fftshift(np.fft.fft2(img))\n img_fft = data*np.conjugate(img_fft)\n img = np.fft.ifft2(np.fft.fftshift(img_fft))\n # print(np.sum(np.abs(img)**2.0))\n img = img/np.sqrt(np.sum(np.abs(img)**2.0))\n return img\n \n\"\"\"Read in simulation data\"\"\"\nfile = h5py.File('./sim/simulation.h5','r+')\nprint('Keys in h5 file:')\nfor keys in file:\n print(' {}'.format(keys))\nprobe_sol = np.array(file['probe'])\nobj_sol = np.array(file['object'])\npositions = np.array(file['positions'])\ndata = np.array(file['intensities'])\n\"\"\"Compute and print norms\"\"\"\nprobe_sol_z = (th.from_numpy(probe_sol.real),th.from_numpy(probe_sol.imag))\nprobe_sol_z = th.stack(probe_sol_z, dim=-1).double()\nprobe_sol_norm = th.sum(probe_sol_z**2.0).numpy()\nobj_sol_norm = np.sum(np.abs(obj_sol)**2.0)\nprint(f'Object solution norm: {obj_sol_norm}')\nprint(f'Probe solution norm: {probe_sol_norm}')\nplt.figure('Average Data')\nplt.clf()\nplt.imshow(np.mean(data,axis=0))\n\"\"\"Initialise probe\"\"\"\nprobe = np.mean(np.sqrt(data),axis=0)\nprobe = probe/np.sqrt(np.sum(np.abs(probe)**2.0))*np.sqrt(probe_sol_norm)\n#probe = probe_sol\nprobe_error, probe_phase = error_metric(probe, probe_sol)\nprint(f'Initial probe error {np.log(probe_error):.4f}') \n\"\"\"Initialise Model \"\"\"\nmodel = FarfieldPtychoNet(640, 128)\nmodel.set_probe(probe)\n\"\"\"Object error\"\"\"\nobj_z = model.obj_z.detach().numpy()\nobj = obj_z[...,0] + 1j*obj_z[...,1]\nobj_error, obj_phase = error_metric(obj, obj_sol)\nprint(f'Intial object error {np.log(obj_error):.4f}') \n\"\"\"Setup Iterations\"\"\"\nn_batches = 512\nK, _ = positions.shape\nMx, My, _ = model.probe_z.shape\nNx, Ny, _ = model.obj_z.shape\nprobe_grad = th.zeros((Mx,My,2), dtype=th.double)\nprobe_velo = th.zeros((Mx,My,2), dtype=th.double)\nobj_grad = th.zeros((Nx,Ny,2), dtype=th.double)\nobj_step = th.zeros((Nx,Ny), dtype=th.double)\nfor x,y in positions:\n obj_step[x:x+Mx,y:y+My] += 1\nobj_velo = th.zeros((Nx,Ny,2), dtype=th.double)\n\"\"\"Iterate over epoch over chunk\"\"\"\nfor epoch in np.arange(10):\n \"\"\" normalise object and probe\"\"\"\n obj_norm = th.sum(model.obj_z.data**2.0)\n probe_norm = th.sum(model.probe_z.data**2.0)\n model.obj_z.data = model.obj_z.data/np.sqrt(obj_norm)\n model.probe_z.data = model.probe_z.data*np.sqrt(probe_sol_norm)/th.sqrt(probe_norm)\n #print(f'object norm {obj_norm} probe norm {probe_norm}')\n 
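# Accumulate gradients of the Poisson likelihood over shuffled mini-batches,
    # then apply the averaged updates to the object and probe estimates.
    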
\"\"\" iterate over sample \"\"\"\n probe_grad.zero_()\n obj_grad.zero_()\n index = np.random.permutation(K)\n chunks = np.array_split(index, n_batches)\n total_loss = 0\n #index = np.arange(K)\n for sample in chunks:\n \"\"\" Reset gradients \"\"\"\n if model.obj_z.grad is not None:\n model.obj_z.grad.detach_()\n model.obj_z.grad.zero_()\n if model.probe_z.grad is not None:\n model.probe_z.grad.detach_()\n model.probe_z.grad.zero_()\n \"\"\" Loss of model and data\"\"\"\n target = th.from_numpy(data[sample])\n output = model(positions[sample], subpixel=False, normed = False)\n loss = poisson_likelihood.apply(output, target, False)\n loss.backward()\n total_loss += loss.detach()\n obj_grad += model.obj_z.grad\n probe_grad += model.probe_z.grad\n #obj_velocity = 0.1*obj_velocity + model.obj_z.grad\n \"\"\"update object\"\"\"\n obj_grad[...,0].div_(obj_step)\n obj_grad[...,1].div_(obj_step)\n model.obj_z.data.add_(-0.5*obj_grad)\n #\"\"\"update probe\"\"\"\n probe_grad.div_(K)\n model.probe_z.data.add_(-0.5*probe_grad)\n \"\"\" set margins to zero\"\"\"\n model.obj_z.data[0:64,:] = 0\n model.obj_z.data[576:640,:] = 0\n model.obj_z.data[:,0:64] = 0\n model.obj_z.data[:,576:640] = 0\n \"\"\"compute error\"\"\"\n obj_z = model.obj_z.detach().numpy()\n obj = obj_z[...,0] + 1j*obj_z[...,1]\n obj_error, obj_phase = error_metric(obj, obj_sol)\n probe_z = model.probe_z.detach().numpy()\n probe = probe_z[...,0] + 1j*probe_z[...,1]\n probe_error, probe_phase = error_metric(probe, probe_sol)\n print(f'epoch {epoch}: loss {total_loss.data:.8f}')\n print(f'object error {np.log(obj_error):.4f} and probe error {np.log(probe_error):.4f}') \n \"\"\"Plot Reconstruction\"\"\"\n zplot(obj,f'object {epoch}')\n zplot(probe,f'probe {epoch}')","sub_path":"ptychography/code/ptycho_run.py","file_name":"ptycho_run.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"2300819","text":"from flask import (\n Flask, request, redirect, render_template, url_for,\n flash)\nfrom wtforms import Form, TextAreaField\n\n\napp = Flask(__name__)\n\n\nclass RecipeInput(Form):\n recipe = TextAreaField('Recipe')\n\n\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n form = RecipeInput(request.form)\n recipe = None\n if request.method == 'POST':\n recipe = prep_recipe(request.form['recipe'].splitlines())\n return render_template('index.html', recipe=recipe)\n return render_template('index.html', form=form)\n\n\ndef prep_recipe(recipe):\n [recipe.remove(n) for n in recipe if n == '']\n return recipe\n\n\napp.debug = True\napp.secret_key = 'BLAH'\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"203872537","text":"# Name: Joshua Fogus\n# Last Modified: February 11, 2021\n# Description: A function which writes headers and data to a csv.\n\nimport csv\n\n\ndef csv_writer(headers, data, file_path):\n \"\"\" Accepts an array of data, which should have length 3 and an optional\n file path to which the data should be written. \"\"\"\n\n with open(file_path, 'w') as csv_file:\n writer = csv.writer(csv_file)\n\n writer.writerow(headers)\n # All data must be strings to be written\n for toy in data:\n writer.writerow([str(attr) for attr in toy])\n\n\nif __name__ == \"__main__\":\n print(\"This is not meant to be run as a script. 
Please import this module.\")\n","sub_path":"life_generator/modules/csv_writer.py","file_name":"csv_writer.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"213439317","text":"\"\"\" Pathfinding methods of the board \"\"\"\n\nfrom typing import Tuple, List, Set\n\n\nclass Node:\n \"\"\"A node class for A* Pathfinding\"\"\"\n\n def __init__(self, parent=None, position=None):\n self.parent = parent\n self.position = position\n\n self.g = 0\n self.h = 0\n self.f = 0\n\n def __eq__(self, other):\n return self.position == other.position\n\n def __str__(self):\n return str(self.position)\n\n def __repr__(self):\n return str(self.position)\n\n\nclass Mixin:\n\n def bfs_nearest(\n self, start: Tuple[int, int], end_points: List[Tuple[int, int]]\n ) -> Tuple[int, int]:\n \"\"\" return the nearest point to the start \"\"\"\n\n end_points = set(end_points)\n visited = set()\n queue = [start]\n\n while queue:\n node = queue.pop()\n visited.add(node)\n\n neighbors = self._get_neighbors(node)\n for neighbor in neighbors:\n if neighbor not in visited:\n if neighbor in end_points:\n return neighbor\n queue.append(neighbor)\n return ()\n\n def astar(\n self, start: Tuple[int, int], end: Tuple[int, int]\n ) -> List[Tuple[int, int]]:\n \"\"\"\n Returns a list of tuples as a path from the given start\n to the given end in the given maze\n \"\"\"\n\n # Create start and end node\n start_node = Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n # print()\n # print(\"open_list: \", open_list)\n # print(\"closed_list: \", closed_list)\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n # print(\"current_node: \", current_node.position)\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n neighbors = self._get_neighbors(current_node.position)\n # Generate children\n children = []\n for neighbor in neighbors:\n new_node = Node(current_node, neighbor) # Create new node\n children.append(new_node) # Append\n\n # Loop through children\n for child in children:\n\n stop_itaration = False\n # Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n stop_itaration = True\n break\n if stop_itaration:\n continue\n\n # Create the f, g, and h values\n child.g = current_node.g + 1\n if (abs((current_node.position[0] - child.position[0])\n + (current_node.position[1] - child.position[1])) > 1):\n child.g += 1\n\n child.h = self._mht_dist(child.position, end_node.position)\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n if child == open_node and child.g > open_node.g:\n stop_itaration = True\n break\n if stop_itaration:\n continue\n\n # Add the child to the open list\n open_list.append(child)\n\n def _get_neighbors(self, start: 
Tuple[int, int]) -> List[Tuple[int, int]]:\n \"\"\" return 4 Adjacent squares \"\"\"\n\n non_barrier = self._non_barrier\n jump_over = self._jump_over\n directions = self.directions[:-1]\n\n neighbors = []\n for new_position in directions: # Adjacent squares\n\n # Get node position\n node_position = (start[0] + new_position[0],\n start[1] + new_position[1])\n\n # where to jump if it is box or hole\n if node_position in jump_over:\n node_position = (\n start[0] + new_position[0] * 2,\n start[1] + new_position[1] * 2,\n )\n\n # Make sure walkable terrain\n if node_position not in non_barrier:\n continue\n\n # Make sure no object on second layer\n if node_position in jump_over:\n continue\n\n neighbors.append(node_position)\n return neighbors\n\n def _mht_dist(self, start: Tuple[int, int], end: Tuple[int, int]):\n return ((end[0] - start[0]) ** 2) + ((end[1] - start[1]) ** 2)\n","sub_path":"board/_pathfinding.py","file_name":"_pathfinding.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"578413093","text":"#!/usr/bin/env python3\nfrom distance import *\nfrom collections import defaultdict\nfrom GA import GA\nfrom py_common_subseq import *\nimport random, math\n\n# Native speaker counts from Nationalencyklopedin\n# Total speaker counts from Ethnologue\n# Family speaker counts from Ethnologue\n\nar = {\n\t'#native': 295,\n\t'#total': 452,\n\t'#family': 381, # Afro-Asiatic\n\t'human': {'insan': 428+392, 'basyar': 2032+400, 'nas': 19014+1048},\n\t'child': {'tipr': 4361+4252, 'ibn': 3181+252, 'sabiy': 745+474, 'syab': 1825+1353, 'adat': 19873+522},\n\t'woman': {'imraa': 2544, 'maraa': 3140, 'nisa': 3517+762},\n\t'man': {'radyur': 33463+21381, 'mar': 0, 'imru': 0},\n\t'big': {'kabir': 1},\n\t'small': {'sagir': 1},\n\t'old': {'kadim': 0, 'musin': 87+68, 'adyus': 2388+1084},\n\t'young': {'sagir': 957+503, 'adat': 19873+522, 'syab': 1825+1353},\n\t'fire': {'arik': 0, 'nar': 7991+919},\n\t'water': {'ma': 1},\n\t'1s': {'ana': 1},\n\t'2s': {'anta': 149124/2, 'anti': 149124/2},\n\t'3s': {'uwa': 111754, 'iya': 58429+41118},\n\t'-PL': {'un': 1, 'in': 1},\n\t'-GEN': {'i': 1, 'in': 1},\n\t'NEG': {'ra': 493566, 'ma': 218700, 'ram': 136193, 'ran': 61345, 'raisa': 0, 'rastu': 23413/3, 'rasta': 23413/3, 'rasti': 23413/3, 'misy': 787},\n\t'and': {'wa': 1},\n\t'or': {'au': 1},\n\t'zero': {'sepr': 1},\n\t'one': {'waid': 1},\n\t'two': {'itnan': 1},\n\t'three': {'tarata': 1},\n\t'four': {'arbaa': 1},\n\t'five': {'kamsa': 1},\n\t'six': {'sita': 1},\n\t'seven': {'saba': 1},\n\t'eight': {'tamaniya': 1},\n\t'nine': {'ˈtisa': 1},\n\t'ten': {'asyara': 1},\n\t'hundred': {'mia': 1},\n\t'thousand': {'arp': 1},\n\t'-ORD': {},\n}\n\nbn = {\n\t'#native': 205,\n\t'#total': 208,\n\t'#family': 2914, # Indo-European\n\t'human': {'manusa': 1},\n\t'child': {'syisyu': 1},\n\t'big': {'boro': 1},\n\t'small': {'tyoto': 1},\n\t# 'old': {'purono': 1, 'probin': 1},\n\t# 'young': {},\n\t# 'fire': {},\n\t'water': {'dyor': 1, 'pani': 1},\n\t'1s': {'ami': 1, 'amake': 1},\n\t'2s': {'tui': 1, 'tumi': 1, 'apni': 1, 'toke': 1, 'tomake': 1, 'apnake': 1},\n\t'3s': {'e': 1, 'ini': 1, 'o': 1, 'uni': 1, 'sye': 1, 'tini': 1, 'eke': 1, 'oke': 1, 'take': 1},\n\t'-PL': {},\n\t'-GEN': {'r': 1},\n\t'NEG': {'na': 1},\n\t'and': {'ebon': 1, 'ar': 1},\n\t'or': {'ba': 1},\n\t'zero': {'syunya': 1},\n\t'one': {'ak': 1},\n\t'two': {'dui': 1},\n\t'three': {'tin': 1},\n\t'four': {'tyar': 1},\n\t'five': {'paty': 1},\n\t'six': {'tyoe': 1},\n\t'seven': {'syat': 
1},\n\t'eight': {'at': 1},\n\t'nine': {'noe': 1},\n\t'ten': {'dosy': 1},\n\t'hundred': {'syata': 1, 'ekasy': 1},\n\t'thousand': {'adyar': 1},\n\t'-ORD': {'om': 1},\n\t}\n\ncmn = {\n\t'#native': 955,\n\t'#total': 1026,\n\t'#family': 1268, # Sino-Tibetan\n\t'human': {'san': 31382},\n\t'child': {'kaidyi': 4857, 'artun': 0, 'tun': 335, 'dyi': 8976, 'syaukai': 908, 'syaukaidyi': 148, 'syauponyou': 141},\n\t'woman': {'nwi': 1782, 'nwisyin': 191, 'nwida': 199, 'nwisan': 2718, 'gunyan': 364, 'nyan': 355, 'nwisi': 1364, 'punwi': 0, 'pu': 360, 'nwidyi': 148, 'nwison': 158, 'saunwi': 48, 'nwikai': 1913, 'nwikaidyi': 216},\n\t'man': {'nan': 523, 'nansyin': 110, 'nansan': 2214, 'nandyi': 148, 'nandyikan': 0, 'nanda': 156},\n\t'big': {'da': 9780, 'dasyin': 57},\n\t'small': {'syau': 1},\n\t'old': {'dyou': 437, 'rau': 3203, 'gurau': 99},\n\t'young': {'nyentyin': 10},\n\t'fire': {'kwo': 1852, 'kwodyai': 0},\n\t'water': {'swei': 1},\n\t'1s': {'wo': 1},\n\t'2s': {'ni': 1},\n\t'3s': {'ta': 1},\n\t'-PL': {'man': 1},\n\t'-GEN': {'da': 1},\n\t'NEG': {'bu': 46776, 'mei': 29175, 'meiyou': 0},\n\t'and': {'ko': 20164, 'wi': 2608, 'yidyi': 488},\n\t'or': {'kwo': 2265, 'kwodyo': 1771},\n\t'zero': {'rin': 1},\n\t'one': {'yi': 1},\n\t'two': {'ar': 999, 'ryan': 2670},\n\t'three': {'san': 1},\n\t'four': {'si': 1},\n\t'five': {'wu': 1},\n\t'six': {'ryou': 1},\n\t'seven': {'tyi': 1},\n\t'eight': {'ba': 1},\n\t'nine': {'dyou': 1},\n\t'ten': {'si': 1}, # ????\n\t'hundred': {'bai': 1},\n\t'thousand': {'tyen': 1},\n\t'-ORD': {},\n}\n\nen = {\n\t'#native': 360,\n\t'#total': 841,\n\t'#family': 2914, # Indo-European\n\t'human': {'yuman': 21440, 'parsan': 31433},\n\t'child': {'tyaird': 34125, 'kid': 42100},\n\t'big': {'big': 92052, 'rardy': 7502, 'greit': 112540, 'yudy': 8016, 'dyaiant': 3753},\n\t'small': {'smar': 23673, 'ritar': 187619, 'taini': 5386},\n\t'old': {'ourd': 1},\n\t'young': {'yon': 1},\n\t'fire': {'pair': 1},\n\t'water': {'watar': 1},\n\t'1s': {'ai': 5685306, 'mi': 1444168},\n\t'2s': {'yu': 1},\n\t'3s': {'i': 1159154, 'syi': 537022, 'it': 2879962, 'dei': 664611, 'im': 533812, 'ar': 417920, 'dem': 297126},\n\t'-PL': {'s': 1, 'is': 1},\n\t'-GEN': {'s': 1, 'is': 1},\n\t'NEG': {'nat': 864776, 'dount': 928842},\n\t'and': {'and': 1},\n\t'or': {'or': 1},\n\t'zero': {'sirou': 4053},\n\t'one': {'won': 1},\n\t'two': {'tu': 1},\n\t'three': {'tri': 1},\n\t'four': {'por': 1},\n\t'five': {'paib': 1},\n\t'six': {'siks': 1},\n\t'seven': {'seban': 1},\n\t'eight': {'eit': 1},\n\t'nine': {'nain': 1},\n\t'ten': {'ten': 1},\n\t'hundred': {'ondred': 1},\n\t'thousand': {'tausand': 1},\n\t'-ORD': {'t': 1},\n\t}\n\nes = {\n\t'#native': 405,\n\t'#total': 489,\n\t'#family': 2914, # Indo-European\n\t'human': {'umano': 1, 'umana': 1},\n\t'child': {'ninyo': 1, 'ninya': 1, 'tyiko': 1, 'tyika': 1},\n\t'big': {'grande': 1},\n\t'small': {'pekenyo': 1, 'pekenya': 1, 'tyiko': 1, 'tyika': 1},\n\t# 'old': {'kadim': 1, 'musin': 1},\n\t# 'young': {},\n\t# 'fire': {},\n\t'water': {'agwa': 1},\n\t'1s': {'dyo': 1, 'me': 1, 'mi': 1},\n\t'2s': {'tu': 1, 'te': 1, 'ti': 1, 'bos': 1, 'usted': 1},\n\t'3s': {'er': 1, 'erya': 1, 'eryo': 1, 're': 1, 'ro': 1, 'ra': 1},\n\t'-PL': {'s': 1, 'es': 1},\n\t'-GEN': {},\n\t'NEG': {'no': 1},\n\t'and': {'i': 1},\n\t'or': {'o': 1, 'u': 1},\n\t'zero': {'sero': 1},\n\t'one': {'uno': 1},\n\t'two': {'dos': 1},\n\t'three': {'tres': 1},\n\t'four': {'kwatro': 1},\n\t'five': {'sinko': 1},\n\t'six': {'ses': 1},\n\t'seven': {'syete': 1},\n\t'eight': {'otyo': 1},\n\t'nine': {'nwebe': 1},\n\t'ten': {'dyes': 1},\n\t'hundred': {'syen': 1, 
'syento': 1},\n\t'thousand': {'mir': 1},\n\t'-ORD': {'to': 1, 'ta': 1, 'imo': 1, 'ima': 1, 'abo': 1, 'aba': 1, 'ero': 1, 'era': 1},\n\t}\n\nfr = {\n\t'#native': 74,\n\t'#total': 163,\n\t'#family': 2914, # Indo-European\n\t'human': {'yumen': 1},\n\t'child': {'anpan': 1},\n\t'big': {'gran': 1, 'grand': 1},\n\t'small': {'pati': 1, 'patit': 1},\n\t'water': {'o': 1},\n\t'1s': {'sya': 1, 'ma': 1, 'mwa': 1},\n\t'2s': {'tyu': 1, 'ta': 1, 'twa': 1, 'bu': 1},\n\t'3s': {'ir': 1, 'er': 1, 'ra': 1, 'rwi': 1},\n\t'-PL': {'s': 1},\n\t'-GEN': {},\n\t'NEG': {'na': 1, 'pa': 1},\n\t'and': {'e': 1},\n\t'or': {'u': 1},\n\t'zero': {'sero': 1},\n\t'one': {'en': 1},\n\t'two': {'dyo': 1},\n\t'three': {'trwa': 1},\n\t'four': {'katr': 1},\n\t'five': {'senk': 1},\n\t'six': {'sis': 1},\n\t'seven': {'set': 1},\n\t'eight': {'wit': 1},\n\t'nine': {'nyop': 1},\n\t'ten': {'dis': 1},\n\t'hundred': {'san': 1},\n\t'thousand': {'mir': 1},\n\t'-ORD': {'yem': 1},\n\t}\n\nhi = {\n\t'#native': 376, # Hindi + Urdu\n\t'#total': 538, # Hindi + Urdu\n\t'#family': 2914, # Indo-European\n\t'human': {'manusya': 1, 'insan': 1, 'admi': 1},\n\t'child': {'batya': 1, 'syisyu': 1, 'bar': 1},\n\t'big': {'bara': 1, 'bari': 1},\n\t'small': {'tyota': 1, 'tyoti': 1},\n\t'water': {'pani': 1, 'dyar': 1, 'ab': 1},\n\t'1s': {'main': 1, 'muty': 1, 'ham': 1},\n\t'2s': {'tu': 1, 'tuty': 1, 'tum': 1, 'ap': 1},\n\t'3s': {'ye': 1, 'is': 1, 'wo': 1, 'hai': 1},\n\t'-PL': {'on': 1},\n\t'-GEN': {'ka': 1, 'ke': 1, 'ki': 1},\n\t'NEG': {'nain': 1, 'na': 1, 'mat': 1},\n\t'and': {'aur': 1},\n\t'or': {'wa': 1},\n\t'zero': {'syunya': 1, 'sipar': 1},\n\t'one': {'ek': 1},\n\t'two': {'do': 1},\n\t'three': {'tin': 1},\n\t'four': {'tyar': 1},\n\t'five': {'panty': 1},\n\t'six': {'tye': 1},\n\t'seven': {'sat': 1},\n\t'eight': {'at': 1},\n\t'nine': {'nau': 1},\n\t'ten': {'das': 1},\n\t'hundred': {'sau': 1, 'saikra': 1},\n\t'thousand': {'asar': 1, 'saasr': 1},\n\t'-ORD': {'wan': 1},\n\t}\n\nja = {\n\t'#native': 125,\n\t'#total': 128,\n\t'#family': 129, # Japonic\n\t'human': {'ningen': 1, 'kyito': 1},\n\t'child': {'kodomo': 1, 'ko': 1},\n\t'big': {'oki': 1, 'okina': 1},\n\t'small': {'tyisai': 1},\n\t'water': {'misu': 1},\n\t'1s': {'watasi': 1, 'watakusi': 1, 'ware': 1, 'ore': 1, 'boku': 1, 'atasi': 1, 'atakusi': 1},\n\t'2s': {'anata': 1, 'anta': 1, 'otaku': 1, 'omae': 1, 'kimi': 1},\n\t'3s': {'kore': 1,'sore': 1, 'are': 1}, #??\n\t'-PL': {'tati': 1, 'ra': 1, 'tomo': 1},\n\t'-GEN': {'no': 1},\n\t'NEG': {'nai': 1, 'masen': 1},\n\t'and': {'to': 1, 'katu': 1, 'ya': 1},\n\t'or': {'matawa': 1, 'aruiwa': 1, 'ka': 1, 'soretomo': 1},\n\t'zero': {'rei': 1, 'sero': 1},\n\t'one': {'iti': 1, 'kyito': 1},\n\t'two': {'ni': 1, 'puta': 1},\n\t'three': {'san': 1, 'mi': 1},\n\t'four': {'yo': 1, 'yon': 1, 'si': 1},\n\t'five': {'go': 1, 'itu': 1},\n\t'six': {'mu': 1, 'roku': 1},\n\t'seven': {'siti': 1, 'nana': 1},\n\t'eight': {'hati': 1, 'ya': 1},\n\t'nine': {'ku': 1, 'kyu': 1, 'kono': 1},\n\t'ten': {'so': 1, 'to': 1},\n\t'hundred': {'yaku': 1},\n\t'thousand': {'sen': 1},\n\t'-ORD': {'me': 1},\n\t}\n\njv = {\n\t'#native': 82,\n\t'#total': 84,\n\t'#family': 323, # Austronesian\n\t'human': {'manunsa': 1},\n\t'child': {'anak': 1}, #??\n\t'big': {'gede': 1},\n\t'small': {'tyilik': 1, 'arit': 1},\n\t'water': {'banyu': 1},\n\t'1s': {'aku': 1, 'kura': 1, 'darem': 1},\n\t'2s': {'kowe': 1, 'sampeyan': 1, 'pandyenenan': 1}, #????\n\t'3s': {'deweke': 1, 'pandyenenanipun': 1}, #????\n\t'-PL': {},\n\t'-GEN': {},\n\t'NEG': {'dudu': 1, 'ora': 1},\n\t'and': {'ran': 1, 'sarta': 1},\n\t'or': {}, #????\n\t'zero': 
{'nor': 1},\n\t'one': {'sidyi': 1, 'setungar': 1},\n\t'two': {'roro': 1, 'kali': 1},\n\t'three': {'telu': 1, 'tiga': 1},\n\t'four': {'papat': 1, 'sekawan': 1},\n\t'five': {'rima': 1, 'gansai': 1},\n\t'six': {'enem': 1},\n\t'seven': {'pitu': 1},\n\t'eight': {'woru': 1},\n\t'nine': {'sana': 1},\n\t'ten': {'sepulu': 1, 'sedasa': 1},\n\t'hundred': {'atus': 1},\n\t'thousand': {'ewu': 1},\n\t'-ORD': {},\n\t}\n\nms = {\n\t'#native': 77,\n\t'#total': 163,\n\t'#family': 323, # Austronesian\n\t'human': {'manusya': 1},\n\t'child': {'ana': 1},\n\t'big': {'basar': 10321, 'raya': 213, 'gadan': 0},\n\t'small': {'ketyir': 1},\n\t'water': {'air': 1},\n\t'1s': {'aku': 267879, 'saya': 91183},\n\t'2s': {'kamu': 46443, 'anda': 114366},\n\t'3s': {'ya': 13291, 'dya': 98959},\n\t'-PL': {'an': 1}, #??\n\t'-GEN': {},\n\t'NEG': {'tida': 1},\n\t'and': {'dan': 1},\n\t'or': {'atau': 1},\n\t'zero': {'koson': 2245, 'sipar': 3, 'nor': 238, 'nier': 24},\n\t'one': {'satu': 22619, 'asa': 10, 'tungar': 0, 'eka': 3, 'aat': 8},\n\t'two': {'dua': 8804, 'dwi': 6},\n\t'three': {'tiga': 4482, 'teru': 0, 'tari': 2},\n\t'four': {'ampat': 1961, 'pat': 64, 'tyator': 134},\n\t'five': {'rima': 2414, 'pantya': 2},\n\t'six': {'anam': 1},\n\t'seven': {'tudyo': 968, 'pitu': 0, 'sapta': 1},\n\t'eight': {'rapan': 42, 'sarapan': 0, 'asta': 2, 'darapan': 730},\n\t'nine': {'sambiran': 536, 'sarapan': 0, 'nawa': 1},\n\t'ten': {'sapuro': 939, 'ekadasa': 0},\n\t'hundred': {'ratus': 1},\n\t'thousand': {'ribu': 1},\n\t'-ORD': {},\n\t}\n\npa = {\n\t'#native': 102,\n\t'#total': 92,\n\t'#family': 2914, # Indo-European\n\t'human': {'manuk': 1},\n\t'child': {'batya': 1},\n\t'big': {'bara': 1},\n\t'small': {'tyota': 1},\n\t'water': {'pani': 1},\n\t'1s': {'me': 1},\n\t'2s': {'tun': 1, 'tusin': 1},\n\t'3s': {'e': 1, 'is': 1, 'enan': 1, 'o': 1, 'us': 1, 'onan': 1},\n\t'-PL': {'an': 1, 'e': 1, 'ian': 1},\n\t'-GEN': {'da': 1, 'di': 1},\n\t'NEG': {'nain': 1},\n\t'and': {'ate': 1},\n\t'or': {'dya': 1}, #???\n\t'zero': {'sipar': 1},\n\t'one': {'ik': 1},\n\t'two': {'do': 1},\n\t'three': {'tin': 1},\n\t'four': {'tyar': 1},\n\t'five': {'pandy': 1},\n\t'six': {'tye': 1},\n\t'seven': {'sat': 1},\n\t'eight': {'at': 1},\n\t'nine': {'naun': 1},\n\t'ten': {'das': 1},\n\t'hundred': {'sau': 1},\n\t'thousand': {'asar': 1},\n\t'-ORD': {'wa': 1}, #????\n\t}\n\npt = {\n\t'#native': 215,\n\t'#total': 231,\n\t'#family': 2914, # Indo-European\n\t'human': {'umanu': 1, 'umana': 1},\n\t'child': {'piryu': 1, 'pirya': 1, 'kriansa': 1, 'mininu': 1, 'minina': 1},\n\t'big': {'grandyi': 1},\n\t'small': {'pikenu': 1, 'pikena': 1},\n\t'water': {'agwa': 1},\n\t'1s': {'eu': 1, 'me': 1, 'mi': 1, 'min': 1},\n\t'2s': {'tu': 1, 'te': 1, 'tyi': 1, 'bose': 1},\n\t'3s': {'eri': 1, 'era': 1, 'u': 1, 'ru': 1, 'nu': 1, 'a': 1, 'ra': 1, 'na': 1},\n\t'-PL': {'s': 1, 'is': 1},\n\t'-GEN': {},\n\t'NEG': {'naun': 1, 'nun': 1},\n\t'and': {'i': 1},\n\t'or': {'ou': 1},\n\t'zero': {'seru': 1},\n\t'one': {'un': 1},\n\t'two': {'dois': 1},\n\t'three': {'tres': 1},\n\t'four': {'kwatru': 1},\n\t'five': {'sinku': 1},\n\t'six': {'seis': 1},\n\t'seven': {'setyi': 1},\n\t'eight': {'oitu': 1},\n\t'nine': {'nobi': 1},\n\t'ten': {'des': 1},\n\t'hundred': {'sein': 1, 'sentu': 1},\n\t'thousand': {'mir': 1},\n\t'-ORD': {'eiru': 1, 'eira': 1, 'tu': 1, 'ta': 1, 'abu': 1, 'aba': 1, 'imu': 1, 'ima': 1},\n\t}\n\nru = {\n\t'#native': 155,\n\t'#total': 276,\n\t'#family': 2914, # Indo-European\n\t'human': {'tyirawek': 1, 'tyek': 1},\n\t'child': {'ribyonak': 1, 'dyitya': 1},\n\t'big': {'barsoi': 1, 'barsoye': 1, 'barsaya': 1, 'wirikii': 
1, 'wirikaya': 1, 'wirikoye': 1, 'krupnii': 1, 'krupnaya': 1, 'krupnoye': 1},\n\t'small': {'marinkii': 1, 'marinkaya': 1, 'marinkoye': 1, 'marii': 1, 'maraya': 1, 'maroye': 1},\n\t'water': {'wada': 1},\n\t'1s': {'ya': 1, 'minya': 1},\n\t'2s': {'ti': 1, 'tibya': 1, 'wi': 1, 'was': 1},\n\t'3s': {'on': 1, 'ano': 1, 'ana': 1, 'yiwo': 1, 'niwo': 1},\n\t'-PL': {'i': 1, 'op': 1, 'a': 1, 'ya': 1, 'ei': 1},\n\t'-GEN': {'i': 1, 'a': 1, 'ya': 1},\n\t'NEG': {'ne': 1, 'ni': 1},\n\t'and': {'i': 1, 'da': 1},\n\t'or': {'iri': 1, 'ribo': 1},\n\t'zero': {'nur': 1, 'nor': 1, 'siro': 1},\n\t'one': {'adin': 1},\n\t'two': {'dwa': 1},\n\t'three': {'tri': 1},\n\t'four': {'tyitirya': 1},\n\t'five': {'pyat': 1},\n\t'six': {'sest': 1},\n\t'seven': {'sem': 1},\n\t'eight': {'wosim': 1},\n\t'nine': {'dewit': 1},\n\t'ten': {'desit': 1},\n\t'hundred': {'sto': 1, 'sotnya': 1},\n\t'thousand': {'tisyatya': 1},\n\t'-ORD': {'ii': 1, 'oi': 1},\n\t}\n\nsw = {\n\t'#native': 26,\n\t'#total': 150, # ????\n\t'#family': 437, # Niger-Congo\n\t'human': {'binadamu': 1480+87+8+1, 'mwanadamu': 102+4},\n\t'child': {'mtoto': 1019+75+4+1, 'mwana': 259+165+2+1},\n\t'big': {'kubwa': 389+325+175+98+39+38+15+14+4+3+1+1},\n\t'small': {'dogo': 1138+1035+936+587+378+308+294+188+98+78+45+9+6+5+2+2+1+1+1},\n\t'water': {'madyi': 144+1+1+1},\n\t'1s': {'mimi': 1, 'ni': 1},\n\t'2s': {'wewe': 1, 'u': 1, 'ku': 1},\n\t'3s': {'yeye': 1, 'a': 1, 'yu': 1, 'm': 1},\n\t'-PL': {},\n\t'-GEN': {},\n\t'NEG': {'i': 1},\n\t'and': {'na': 1},\n\t'or': {'au': 1},\n\t'zero': {'sipuri': 1}, #????\n\t'one': {'modya': 2147+487+335+142+47+5+2+1+1+1+1+1, 'mosi': 8+3},\n\t'two': {'wiri': 44+542+2+284+2+16+2406+9+1, 'piri': 1957+266+1},\n\t'three': {'tatu': 1},\n\t'four': {'nne': 1},\n\t'five': {'tano': 1},\n\t'six': {'sita': 1},\n\t'seven': {'saba': 1},\n\t'eight': {'nane': 1},\n\t'nine': {'tisa': 1},\n\t'ten': {'kumi': 1},\n\t'hundred': {'mia': 1},\n\t'thousand': {'erpu': 1},\n\t'-ORD': {},\n\t}\n\nyo = {\n\t'#native': 28,\n\t'#total': 19,\n\t'#family': 437, # Niger-Congo\n\t'human': {'omoniyan': 1, 'eniyan': 1},\n\t'child': {'omo': 1, 'ewe': 1},\n\t'big': {'nra': 1, 'tobi': 1, 'gborin': 1},\n\t'small': {'kere': 1, 'die': 1},\n\t'water': {'omi': 1},\n\t'1s': {'emi': 1, 'mi': 1},\n\t'2s': {'iwo': 1},\n\t'3s': {'oun': 1, 'un': 1, 'o': 1},\n\t'-PL': {},\n\t'-GEN': {},\n\t'NEG': {'ko': 1},\n\t'and': {'ati': 1},\n\t'or': {'tabi': 1},\n\t'zero': {'odo': 1}, #????\n\t'one': {'eni': 1, 'okan': 1},\n\t'two': {'edyi': 1},\n\t'three': {'eta': 1},\n\t'four': {'erin': 1},\n\t'five': {'arun': 1},\n\t'six': {'epa': 1},\n\t'seven': {'edye': 1},\n\t'eight': {'edyo': 1},\n\t'nine': {'esan': 1},\n\t'ten': {'ewa': 1},\n\t'hundred': {'ogorun': 1},\n\t'thousand': {'egberun': 1},\n\t'-ORD': {},\n\t}\n\ndef distance(w1, w2):\n\treturn edit_distance(w1, w2)\n\ndef getFitnessFunction(langs, word, weightFunc, avg=True):\n\tif not avg:\n\t\treturn gff2(langs, word, weightFunc)\n\twords = defaultdict(int)\n\tfor l in langs:\n\t\tweightSum = sum([v for k,v in l[word].items()])\n\t\tfor wd,wt in l[word].items():\n\t\t\twords[wd] += weightFunc(l) * wt/weightSum\n\treturn lambda w: -1 * sum([wt * distance(w, wd) for wd, wt in words.items()])\n\ndef gff2(langs, word, weightFunc):\n\tdef ff(w):\n\t\tcost = 0\n\t\tfor l in langs:\n\t\t\tif len(l[word]) > 0:\n\t\t\t\tcost += weightFunc(l) * min(map(lambda x: distance(w, x), l[word]))\n\t\treturn -1 * cost\n\treturn ff\n\ndef rs(pop, fitness):\n\tpopFitnesses = list(map(fitness, pop))\n\tminFitness = min(popFitnesses)\n\tif minFitness < 0:\n\t\tposFitnesses = 
list(map((lambda x: x-minFitness), popFitnesses))\n\telse:\n\t\tposFitnesses = list(popFitnesses)\n\tfitsum = sum(posFitnesses)\n\tif fitsum == 0: # all equally fit\n\t\tindex = random.randrange(len(pop))\n\t\treturn pop[index]\n\tnormalizedFits = list(map((lambda x: x/fitsum), posFitnesses))\n\trfits = list(normalizedFits)\n\t#print(len(rfits))\n\tfor i in range(1,len(pop)):\n\t\trfits[i] += rfits[i-1]\n\t#assert rfits[-1] == 1.0 # hmm floats might make this problematic\n\trand = random.random()\n\tfor i in range(len(pop)):\n\t\tif rand <= rfits[i]:\n\t\t\treturn pop[i]\n\treturn pop[-1] # shouldn't have to do this, probably? but floats\n\n# single-point crossover of two parent strings\ndef rep(x, y):\n\tminlen = min(len(x), len(y))\n\tif minlen <= 1:\n\t\treturn x + y\n\tsplit = random.randrange(minlen)\n\tsx = x[:split]\n\tsy = y[split:]\n\treturn sx + sy\n\n# mutate a string by inserting, deleting, or overwriting one random character\ndef mut(s):\n\tchars = list(s)\n\tif s == '':\n\t\tchoice = random.choice(['insert', 'overwrite'])\n\telse:\n\t\tchoice = random.choice(['insert','delete','overwrite'])\n\tif choice == 'insert':\n\t\tindex = random.randint(0, len(chars))\n\t\tchar = random.choice('mpbntdsrkgywieaou')\n\t\tchars.insert(index, char)\n\telif choice == 'delete':\n\t\tindex = random.randrange(len(chars))\n\t\tdel chars[index]\n\telif choice == 'overwrite':\n\t\tindex = random.randrange(len(chars))\n\t\tchar = random.choice('mpbntdsrkgywieaou')\n\t\tchars[index] = char\n\treturn str.join('', chars)\n\nclass weight(object):\n\tnative = lambda x: x['#native']\n\ttotal = lambda x: x['#total']\n\tfamily = lambda x: x['#family']\n\tconst = lambda x: 1\n\ndef gen_words(langs, words, weightFunc, popsize, generations, mutationProb, avg=True):\n\tout = {}\n\tfor word in words:\n\t\tfitness = getFitnessFunction(langs, word, weightFunc, avg)\n\t\tpopulation = []\n\t\tfor l in langs:\n\t\t\tfor wd, wt in l[word].items():\n\t\t\t\tpopulation.append(wd)\n\t\tpopfactor = math.ceil(popsize/len(population))\n\t\tga = GA(population * popfactor, fitness, rs, rep, mutationProb, mut)\n\t\tga.run(generations)\n\t\tsortedPop = sorted(ga.population, key=fitness, reverse=True)\n\t\tout[word] = list(zip(sortedPop, map(fitness, sortedPop)))\n\t\tprint('best for %s: %s (fitness %f)' % (word, out[word][0][0], out[word][0][1]))\n\treturn out\n\ndef pick_words(langs, words, weightFunc, avg=True):\n\tout = {}\n\tfor word in words:\n\t\tcandidates = []\n\t\tfitness = getFitnessFunction(langs, word, weightFunc, avg)\n\t\tfor l in langs:\n\t\t\tfor wd, wt in l[word].items():\n\t\t\t\tcandidates.append(wd)\n\t\tout[word] = sorted(candidates, key=fitness, reverse=True)[0]\n\treturn out\n\ndef generate_syllables():\n\tinitials = ['','m','p','b','n','t','d','s','r','k','g']\n\tonglides = [''] # ['','y','w']\n\tvowels = ['a','e','i','o','u']\n\tfinals = [''] # ['', 'n']\n\tsylls = set()\n\tfor i in initials:\n\t\tfor g in onglides:\n\t\t\tfor v in vowels:\n\t\t\t\tfor f in finals:\n\t\t\t\t\tsylls.add(i+g+v+f)\n\treturn sylls\n\ndef generate_candidates():\n\tsylls = generate_syllables()\n\tcands = sylls.copy()\n\tprev = cands\n\tnew = set()\n\t# for s1 in sylls:\n\t# \tfor s2 in sylls:\n\t# \t\tcands.add(s1+s2)\n\tfor i in range(2):\n\t\tfor c in prev:\n\t\t\tfor s in sylls:\n\t\t\t\tnew.add(c+s)\n\t\tcands.update(new)\n\t\tprev = new\n\t\tnew = set()\n\treturn cands\n\ndef utility(lang, word, cand):\n\tdef limit_seqlen(l):\n\t\treturn l\n\t\t# return l if l < 3 else 3\n\tu = 0\n\tnum = len(lang[word])\n\ttotal = sum(lang[word].values())\n\tfor wd, wt in lang[word].items():\n\t\tu += (wt / total) * limit_seqlen(max(map(lambda s: (len(s), s),
find_common_subsequences(wd, cand)))[0]) / len(wd)\n\treturn u\n\ndef remove_glides_from_langs(langs):\n\tfor l in langs:\n\t\tfor w in l:\n\t\t\tif w[0] != '#':\n\t\t\t\tnew = {}\n\t\t\t\tfor k in l[w]:\n\t\t\t\t\tnew[k.replace('y','').replace('w','')] = l[w][k]\n\t\t\t\tl[w] = new\n\ndef choose_words(langs, words):\n\tcands = list(generate_candidates())\n\tresults = {}\n\tdef mapfunc(c, w):\n\t\tutil = 0\n\t\tfor l in langs:\n\t\t\tutil += math.log(l['#family']) * utility(l, w, c)\n\t\treturn (util, c)\n\n\tfor word in words:\n\t\tresults[word] = sorted(map(lambda c: mapfunc(c, word), cands), reverse=True)[0][1]\n\treturn results\n\n# def gismu_utility(langs, word, cand):\n# \tu = 0\n# \tfor l in langs:\n# \t\tif \n","sub_path":"yaial2.py","file_name":"yaial2.py","file_ext":"py","file_size_in_byte":19482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"482432443","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport time\nimport os\n\nos.environ['ORABLE_BASE'] = '/u01/app/oracle'\nos.environ['ORACLE_HOME'] = '/u01/app/oracle/product/11.2.0/dbhome_1'\nos.environ['LD_LIBRARY_PATH'] = '/u01/app/oracle/product/11.2.0/dbhome_1/core:/usr/core'\nos.environ['PATH'] = '/usr/local/jdk1.6.0_45/bin:/usr/lib64/qt-3.3/bin:/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/home/oracle/bin:/home/oracle/bin:/u01/app/oracle/product/11.2.0/dbhome_1/bin'\nos.environ['ORACLE_SID'] = 'znyj'\n\ndump = '/home/oracle/dmp/znts'\nExpDate = time.strftime(\"%Y%m%d\", time.localtime())\n\nos.chdir(dump)\n\nfor i in os.listdir(os.getcwd()):\n if i.endswith('.zip') or i.endswith('.log'):\n os.remove(i)\n\nDumpFile = 'znts_all_' + ExpDate + '.dmp'\nLogFile = 'znts_all_' + ExpDate + '.log'\nExpZnts = 'exp system/XXXX@znts file=/home/oracle/dmp/znts/' + DumpFile+' log=/home/oracle/dmp/znts/' + LogFile + ' owner=us_sys,us_app,us_bdxj,us_file,scyw,us_bdjx'\nos.system(ExpZnts)\n\n\nZipDmp = 'zip -r znts_all_' + ExpDate + '.dmp.zip znts_all_' + ExpDate + '.dmp'\nos.system(ZipDmp)\n\nfor i in os.listdir(os.getcwd()):\n if i.endswith('.dmp'):\n os.remove(i)\n\n","sub_path":"oldboy/实战/数据库备份及上传/oracle_znts.py","file_name":"oracle_znts.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"240165081","text":"from django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom .models import Post, UserProfile\nfrom django.utils import timezone\nfrom .forms import PostForm, AddressForm, UserProfileForm\nfrom django.contrib.auth.models import User\n\ndef hello(request):\n return HttpResponse(\n 'HI from me'\n )\n\ndef post_list(request):\n return render(request, 'blog/post_list.html', {})\n\ndef post_list_dynamic(request):\n posts = Post.objects.all().order_by('published_date')\n return render(request, 'blog/post_list_dynamic.html', {'posts' : posts})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n\n# At starting use this \n# def post_new(request):\n# form = PostForm()\n# return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n 
else:\n form = PostForm()\n form_type = 'New'\n return render(request, 'blog/post_edit.html', {'form': form,'form_type':form_type})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form_type = 'Edit'\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form, 'form_type':form_type})\n\ndef signup(request):\n form = AddressForm()\n return render(request,'sign_up.html',{'form':form})\n\ndef registration(request):\n if request.method=='POST':\n email = request.POST['email']\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n password = request.POST['password']\n print(email,first_name,last_name,password)\n # Create user\n user, is_created = User.objects.get_or_create(username=email, email=email)\n if is_created:\n print(user.id)\n user.set_password(password)\n user.save()\n # Create User Profile now\n if user:\n up, is_created = UserProfile.objects.get_or_create(\n user=user,\n email=email,\n first_name=first_name,\n last_name = last_name\n )\n print(up.id,'================upid')\n return redirect('/')\n\n\n else:\n form = UserProfileForm()\n return render(request,'registration_page.html',{'form':form})\n ","sub_path":"mydjango/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"448859120","text":"from RandomInputGeneration.DataGenerator import DataGenerator\nfrom Optimizer import Optimizer\nimport random as r\nfrom Heuristics.MaxKHeuristic import MaxKHeuristic\nfrom Heuristics.RABMCP_MIP import RABMCP_MIP\nfrom Heuristics.SmallKHeuristic import SmallKHeuristic\nfrom Heuristics.ColumnGeneration import ColumnGeneration\nfrom typing import List\nimport math\n\n\nif __name__ == '__main__':\n num_difficulties = 4\n num_instances = 3\n num_runs = 3\n timelimit = [300, 600, 3600, 4 * 3600]\n\n output_log = \"\"\n\n # Parameters\n num_abilities = [3, 5, 15, 25]\n num_jobs = [2, 5, 10, 20]\n max_num_operations = [5, 5, 25, 50]\n cost_min = [1, 1, 1, 1]\n cost_max = [10, 10, 10, 25]\n JSA_reps = [1000, 500, 2000, 750]\n\n high_multiplicity = [3, 10, 20, 50]\n\n for i in range(num_difficulties):\n for seed in range(num_instances):\n num_ab = num_abilities[i]\n num_j = num_jobs[i]\n max_num_o = max_num_operations[i]\n c_min = cost_min[i]\n c_max = cost_max[i]\n reps = JSA_reps[i]\n\n t_limit = timelimit[i]\n\n hm = high_multiplicity[i]\n\n dg = DataGenerator(seed, num_ab, num_j, max_num_o, c_min, c_max)\n dg.high_multiplicity = hm\n jobs, abilities = dg.generate()\n\n operations_hm = [o for j in jobs for o in j.iter_operations()]\n\n total_proc_time = sum(o.get_proc_time() * hm for o in operations_hm)\n longest_proc_time = max(o.get_proc_time() for o in operations_hm)\n\n makespan = longest_proc_time*(hm)*1.5\n\n for run in range(num_runs):\n # Fix and Spread\n opt = Optimizer(jobs, abilities, makespan)\n\n opt.set_seed(seed)\n opt.set_solution_method(Optimizer.COLUMN_GENERATION)\n opt.high_multiplicty = True\n\n opt.set_timelimit_subproblem(t_limit)\n opt._timelimit_scheduling = t_limit\n opt.set_timelimit(t_limit * 6)\n opt.choose_paths_randomly = True\n opt.opt_choose_paths = i <= 1\n opt.JSA_repetitions = reps\n 
opt.JSA_method = Optimizer.JSA_APPROX\n opt.verbose = True\n\n output_log += \"F-and-S: Lvl_%s_Seed_%s_Run_%s\\r\\n\" % (i, seed, run)\n try:\n opt.optimize()\n ub = sum(m.get_cost() for m in opt.machines)\n output_log += \"\\tUB: \" + str(ub) + \"\\r\\n\"\n except Exception as e:\n output_log += \"\\tERROR: \" + str(e) + \"\\r\\n\"\n print(output_log)\n\n for o in operations_hm:\n o.clear_solution()\n\n opt.set_solution_method(Optimizer.FULL_MIP_MODEL)\n try:\n opt.optimize()\n output_log += \"Model:\\r\\n\"\n ub = sum(m.get_cost() for m in opt.machines)\n output_log += \"\\tUB: \" + str(ub) + \"\\r\\n\"\n except Exception as e:\n output_log += \"\\tERROR: \" + str(e) + \"\\r\\n\"\n\n h = ColumnGeneration(jobs, abilities, makespan, t_limit * 5)\n h.verbose = True\n h.high_multiplicty = True\n h.multiplier = hm\n try:\n h._only_relaxed = True\n h.solve()\n lb = h._lower_bound\n output_log += \"\\tLB: \" + str(lb) + \"\\r\\n\"\n except AssertionError as e:\n output_log += \"ASSERTION ERROR: \" + str(e) + \"\\r\\n\"\n except Exception as e:\n output_log += \"ERROR: \" + str(e) + \"\\r\\n\"\n\n output_log += \"---------------\" + \"\\r\\n\"\n print(output_log)\n","sub_path":"Benchmarking_ABMCP.py","file_name":"Benchmarking_ABMCP.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"137181444","text":"\nclass Student:\n grade = \"\"\n def __init__(self,roll_no,name,score):\n self.roll_no = roll_no\n self.name = name\n self.score = score\n def grde(self):\n x = \"\"\n if sum(self.score) >= 80.00:\n x = \"A\"\n elif sum(self.score) >= 60.00:\n x = \"B\"\n elif sum(self.score) >= 50.00:\n x = \"C\"\n else:\n x =\"F\"\n self.grade = x\n\n\ndef inx():\n name = input(\"Enter the name: \")\n roll_no = input(\"Enter the roll_call: \")\n score = list(map(int,input(\"Enter the marks: \").split()))\n stu = Student(roll_no,name,score)\n stu.grde()\n print(f\"Student: {stu.name} Roll_no: {stu.roll_no} Grade: {stu.grade}\")\n\nprint(\"Code started\")\n\nwhile(True):\n inx()\n x = input(\"wanna continue? 
Y/N: \")\n    if x == 'n' or x == 'N':\n        break\n","sub_path":"experiments/exp5.py","file_name":"exp5.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"540092497","text":"import math\nfrom base_object import BaseObject\n\nclass Bin(BaseObject):\n    bin_id = 0\n    \"\"\" an imaged bin b \"\"\"\n\n    def __init__(self, corner1, corner2, corner3, corner4):\n        BaseObject.__init__(self)\n        self.type = \"bin\"\n        self.corner1 = corner1\n        self.corner2 = corner2\n        self.corner3 = corner3\n        self.corner4 = corner4\n        self.corners = [corner1, corner2, corner3, corner4]\n        self.theta = 0\n        self.midx = rect_midpointx(corner1, corner2, corner3, corner4)\n        self.midy = rect_midpointy(corner1, corner2, corner3, corner4)\n        self.area = 0\n        self.id = 0 # id identifies which bin you're looking at\n        self.lastseen = 2 # how recently you have seen this bin\n        # how many times you have seen this bin (if you see it enough it\n        # becomes confirmed)\n        self.seencount = 1\n\n\ndef line_distance(corner_a, corner_b):\n    distance = math.sqrt((corner_b[0] - corner_a[0]) ** 2 +\n                         (corner_b[1] - corner_a[1]) ** 2)\n    return distance\n\n\ndef rect_midpointx(corner_a, corner_b, corner_c, corner_d):\n    midpoint_x = (corner_a[0] + corner_b[0] + corner_c[0] + corner_d[0]) / 4\n    return midpoint_x\n\n\ndef rect_midpointy(corner_a, corner_b, corner_c, corner_d):\n    midpoint_y = (corner_a[1] + corner_b[1] + corner_c[1] + corner_d[1]) / 4\n    return midpoint_y\n","sub_path":"vision/entities/entity_types/bin.py","file_name":"bin.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"128504233","text":"import numpy as np\nimport galsim\n\nfrom .galaxymaker import GalaxyMaker\nfrom .great3_cosmos_gals.galaxies import COSMOSGalaxyBuilder\nfrom .great3_cosmos_gals.noise import PlaceholderNoiseBuilder\n\nclass COSMOSGalaxyMaker(GalaxyMaker):\n    \"\"\"\n    Returns COSMOS galaxies as if viewed from the ground. \n    \n    See the GREAT3 challenge docs for details.
Code was pulled from the \n GREAT3 simulations code base.\n \n You will need the COSMOS data, which can be downloaded from \n \n http://great3.jb.man.ac.uk/leaderboard/data/public/COSMOS_23.5_training_sample.tar.gz\n http://great3.jb.man.ac.uk/leaderboard/data/public/great3_galaxy_selection_files.tar.gz\n \n Once the data is unpacked, place all files in a single directory and feed this path to \n the code as `cosmos_dir` below.\n \n Examples:\n \n atmos_seeing = 0.55\n cosmos_dir = '/path/to/data'\n seed = 12345\n cgm = COSMOSGalaxyMaker(seed,cosmos_dir)\n \n #build a catalog\n cgm.build_catalog_for_seeing(seeing)\n \n # now draw from it\n for i in xrange(10):\n galaxy,galinfo = cgm.get_galaxy(seeing,n_epochs,max_xsize,max_ysize,pixel_scale)\n \n # you can also just draw galaxies at random, but then the catalog is rebuilt each time\n # which is slow.\n galaxy,galinfo = cgm.get_galaxy(seeing,n_epochs,max_xsize,max_ysize,pixel_scale)\n \n #if you specify save_catalog=True, then you can skip the building step (the code\n # will do it internally).\n galaxy,galinfo = cgm.get_galaxy(seeing,n_epochs,max_xsize,max_ysize,pixel_scale,save_catalog=True) \n \"\"\"\n def __init__(self,seed,cosmos_data,real_galaxy=True,preload=False,**kw):\n self.noise_mult = 1.0\n self.rng = galsim.UniformDeviate(seed)\n self.rng_np = np.random.RandomState(int(self.rng() * 1000000))\n self.cosmos_data = cosmos_data\n self.preload = preload\n self.cosmosgb = COSMOSGalaxyBuilder(real_galaxy,cosmos_data,preload=preload)\n self.real_galaxy = real_galaxy\n self.catalog_dtype = self.cosmosgb.generateSubfieldParameters()['schema']\n self.catalog_dtype.append(('n_epochs','i4'))\n self.catalogs = {}\n\n def get_galaxy_from_info(self,record_in,seeing,n_epochs,max_xsize,max_ysize,pixel_scale):\n \"\"\"\n Get a COSMOS galaxy from a specific row in table.\n \"\"\"\n record = record_in.copy()\n if record['n_epochs'] != n_epochs:\n rat = float(record['n_epochs']/n_epochs)\n record['n_epochs'] = n_epochs\n for tag in [\"bulge_flux\",\"disk_flux\",\"flux_rescale\"]:\n if tag in record.dtype.names:\n record[tag] *= rat\n nb = PlaceholderNoiseBuilder()\n nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)\n galaxy = self.cosmosgb.makeGalSimObject(record, max_xsize, max_ysize, pixel_scale, self.rng)\n galinfo = {}\n galinfo['noise_builder'] = nb\n galinfo['noise_builder_params'] = nb_params\n galinfo['info'] = record\n galinfo['seeing'] = seeing\n galinfo['noise'] = np.sqrt(galinfo['noise_builder_params']['variance'])\n return galaxy,galinfo\n \n def build_catalog_for_seeing(self,seeing,verbose=False,randomly_rotate=True):\n \"\"\"\n Build a galaxy catalog a specific seeing value.\n \n If you build a catalog and then get galaxies with the same seeing value,\n the code will skip subsequent building steps.\n \"\"\"\n nb = PlaceholderNoiseBuilder()\n nb_params = nb.generateEpochParameters(self.rng,1,seeing,self.noise_mult)\n # NOTE\n # typical_variance is for SE by definition, so just make a typical gal for the seeing and one epoch\n # will handle increased variance for multiple epochs below\n # also will rescale flux comps below\n self.catalogs[seeing] = (self.cosmosgb.generateCatalog(self.rng,None,None,nb.typical_variance, \\\n self.noise_mult,seeing=seeing,verbose=verbose, \\\n randomly_rotate=randomly_rotate),\n nb.typical_variance)\n \n def get_catalog_for_seeing(self,seeing,verbose=False,randomly_rotate=True):\n \"\"\"\n Get a catalog for a specific seeing value.\n \"\"\"\n if seeing not in 
self.catalogs:\n self.build_catalog_for_seeing(seeing,verbose=verbose,randomly_rotate=randomly_rotate)\n return self.catalogs[seeing][0].copy()\n \n def get_galaxy(self,seeing,n_epochs,max_xsize,max_ysize,pixel_scale,verbose=False, \\\n randomly_rotate=True,save_catalog=False):\n \"\"\"\n Get a galaxy from COSMOS postage stamp a la GREAT3.\n \n In GREAT3, seeing was set to atmospheric PSF FWHM. \n \"\"\"\n if save_catalog or seeing in self.catalogs:\n if seeing not in self.catalogs:\n self.build_catalog_for_seeing(seeing,verbose=verbose,randomly_rotate=randomly_rotate)\n \n #now get catalog\n catalog = self.catalogs[seeing][0]\n Ncosmos = len(catalog)\n \n #now draw at random with weights\n # seed numpy.random to get predictable behavior\n while True: \n randind = self.rng_np.choice(Ncosmos,replace=True)\n if self.rng_np.uniform() < self.catalogs[seeing][0]['weight'][randind]:\n break \n record = catalog[randind].copy()\n record['n_epochs'] = n_epochs\n for tag in [\"bulge_flux\",\"disk_flux\",\"flux_rescale\"]:\n if tag in record.dtype.names:\n record[tag] /= n_epochs\n nb = PlaceholderNoiseBuilder()\n nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)\n assert nb.typical_variance == self.catalogs[seeing][-1]\n else:\n record = np.zeros(1,dtype=self.catalog_dtype)[0]\n record['n_epochs'] = n_epochs\n nb = PlaceholderNoiseBuilder()\n nb_params = nb.generateEpochParameters(self.rng,record['n_epochs'],seeing,self.noise_mult)\n self.cosmosgb.generateCatalog(self.rng,[record],None,nb.typical_variance,self.noise_mult,seeing=seeing, \\\n verbose=verbose,randomly_rotate=randomly_rotate)\n \n galaxy = self.cosmosgb.makeGalSimObject(record, max_xsize, max_ysize, pixel_scale, self.rng)\n galinfo = {}\n galinfo['noise_builder'] = nb\n galinfo['noise_builder_params'] = nb_params\n galinfo['info'] = record.copy()\n galinfo['seeing'] = seeing\n galinfo['noise'] = np.sqrt(galinfo['noise_builder_params']['variance'])\n \n return galaxy,galinfo\n\n def finish_galaxy_image(self,galim,final_galaxy,galinfo):\n \"\"\"\n This routine finishes the galaxies after they have been PSF convolved, etc.\n It adds the requested amount of noise in the galinfo dict to the image \n taking into account the noise already in the HST image.\n \n You might want to do things like this first:\n \n galaxy.applyLensing(g1=g1, g2=g2, mu=mu)\n final = galsim.Convolve([psf, pixel, galaxy]) \n galim = final.draw(scale=pixel_scale)\n \n Doing the stuff above first matches how the GREAT3 sims were done.\n \"\"\"\n if hasattr(final_galaxy,'noise'):\n current_var = final_galaxy.noise.applyWhiteningTo(galim)\n else:\n current_var = 0.0\n \n galinfo['noise_builder'].addNoise(self.rng,galinfo['noise_builder_params'],galim,current_var)\n\n return galim\n","sub_path":"egret/makers/cosmosgalaxymaker.py","file_name":"cosmosgalaxymaker.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"240082755","text":"\"\"\"\nThere are many beautiful libraries to deal with HTTP headers:\n\n- `httpheader `\n- `Werkzeug `\n- `Paste `\n\nIf this __file__ will become to grow please consider to use one of whem. 
:-)\n\"\"\"\n__all__ = ['Mimer', 'MimerDataException', 'translate_request_data']\n\n\nfrom email.message import Message\n\n\nclass MimerDataException(Exception):\n    \"\"\"\n    Raised if the content_type and data don't match\n    \"\"\"\n    pass\n\n\nclass MimerLoaderRegistry(object):\n    \"\"\"This is the registry of deserializers.\"\"\"\n    \n    def __init__(self):\n        self._types = dict()\n\n    def get_loader_for_type(self, media_type):\n        \"\"\"\n        Gets a function ref to deserialize content\n        for a certain mimetype.\n        \"\"\"\n        loader = None\n        content_type, params = parse_content_type_header(media_type)\n        for loadee, mimes in self._types.iteritems():\n            if content_type in mimes:\n                if getattr(loadee, 'accepts_content_type_params', False):\n                    loader = lambda data: loadee(data, **params)\n                else:\n                    loader = loadee\n                break\n        return loader\n\n    def register(self, loadee, media_types):\n        \"\"\"Register this deserializer for given list of media types.\"\"\"\n        content_types = [parse_content_type_header(media_type)[0]\n                         for media_type in media_types]\n        self._types[loadee] = content_types\n\n    def unregister(self, loadee):\n        \"\"\"Remove this deserializer.\"\"\"\n        self._types.pop(loadee)\n\n\n# This is global registry of mimer loaders\nDEFAULT_REGISTRY = MimerLoaderRegistry()\n\n\nclass Mimer(object):\n\n    def __init__(self, media_type, registry=None):\n        self._media_type = media_type.strip()\n        self._registry = registry or DEFAULT_REGISTRY\n\n    @classmethod\n    def from_request(cls, request):\n        \"\"\"Create a `Mimer` from a django.http.HttpRequest.\"\"\"\n        media_type = request.META.get(\"CONTENT_TYPE\", \"\")\n        return cls(media_type)\n\n    @property\n    def media_type(self):\n        \"\"\"Return entry media type.\"\"\"\n        return self._media_type\n\n    @property\n    def content_type(self):\n        \"\"\"Return content type (media type without parameters).\n\n        For example when media_type is \"text/html; charset=ISO-8859-4\" this\n        returns \"text/html\"\n        \"\"\"\n        media_type = self.media_type\n        if ';' in media_type:\n            content_type = media_type.split(';', 1)[0].strip()\n        else:\n            content_type = media_type\n        return content_type\n\n    @property\n    def is_form_data(self):\n        \"\"\"Return True when the ``content_type`` is a form data type.\"\"\"\n        return self.content_type in (\n            \"application/x-www-form-urlencoded\", \n            \"multipart/form-data\")\n\n    @property\n    def loadee(self):\n        \"\"\"Return a loader for our media type or None.\"\"\"\n        return self.registry.get_loader_for_type(self.media_type)\n\n    @property\n    def registry(self):\n        \"\"\"Return a registry of deserializers.\"\"\"\n        return self._registry\n\n    def translate(self, raw_data):\n        \"\"\"Will try to deserialize the ``raw_data`` according to our\n        media_type.\n        \n        This will work for JSON, YAML, XML and Pickle.\n        \"\"\"\n        try:\n            return self.loadee(raw_data)\n        except (TypeError, ValueError):\n            # This also catches if loadee is None.\n            raise MimerDataException\n\n    @classmethod\n    def register(cls, loadee, types):\n        \"\"\"Register loadee in default global registry.\"\"\"\n        DEFAULT_REGISTRY.register(loadee, types)\n\n    @classmethod\n    def unregister(cls, loadee):\n        \"\"\"Remove loadee from default global registry.\"\"\"\n        return DEFAULT_REGISTRY.unregister(loadee)\n\n\ndef translate_request_data(request):\n    \"\"\"Translate nested datastructs into `request.data`.\n    \n    And set `request.content_type` with content type for request data\n    (excluding ``Content-type`` parameters).\n    \"\"\"\n    mimer = Mimer.from_request(request)\n    request.data = dict()\n    if has_body(request):\n        request.content_type =
mimer.content_type\n if not request.content_type:\n pass\n elif mimer.is_form_data:\n # Use Django to translate form data.\n django_coerse_to_post(request)\n request.data = request.POST\n # When the request contains a form data and a request.method is\n # a POST it is no bad to leave a request.POST as alias for\n # a request.data for compability with Django's world.\n if request.method != 'POST':\n request.POST = dict()\n else:\n request.data = mimer.translate(request.raw_post_data)\n # We override request.POST because of it contains\n # a QueryDict with garbage when data is not a form.\n request.POST = dict()\n\ndef has_body(request):\n \"\"\"Determinate a presence of a message body in a ``request``.\"\"\"\n # \"The presence of a message-body in a request is signaled by the\n # inclusion of a Content-Length or Transfer-Encoding header field in\n # the request's message-headers.\"\n # See http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3\n return request.META.get('CONTENT_LENGTH', None) or \\\n request.META.get('HTTP_TRANSFER_ENCODING', None)\n\ndef parse_content_type_header(header):\n \"\"\"Parse a ``Content-Type`` like header into a tuple with the content type\n and the options.\n\n >>> parse_content_type_header('text/plain; charset=utf-8')\n ('text/plain, {'charset': 'utf-8'})\n\n :param header: the header to parse.\n :return: (str, options)\n \"\"\"\n msg = Message()\n msg['Content-type'] = header\n ctype, params = msg.get_content_type(), dict(msg.get_params()[1:])\n return ctype, params\n\ndef django_coerse_to_post(request):\n \"\"\"\n Django doesn't particularly understand REST. In case we send data over\n PUT, Django won't actually look at the data and load it. We need to twist\n its arm here.\n\n The try/except abominiation here is due to a bug in mod_python. This should\n fix it.\n \"\"\"\n method = request.method\n if method == \"POST\":\n # Assume it's already coersed \n return\n\n # Bug fix: if _load_post_and_files has already been called, for\n # example by middleware accessing request.POST, the below code to\n # pretend the request is a POST instead of a PUT will be too late\n # to make a difference. Also calling _load_post_and_files will result\n # in the following exception:\n # AttributeError: You cannot set the upload handlers after the upload has been processed.\n # The fix is to check for the presence of the _post field which is set\n # the first time _load_post_and_files is called (both by wsgi.py and\n # modpython.py). 
If it's set, the request has to be 'reset' to redo\n # the query value parsing in POST mode.\n if hasattr(request, '_post'):\n del request._post\n del request._files\n\n try:\n request.method = \"POST\"\n request._load_post_and_files()\n request.method = method\n except AttributeError:\n request.META['REQUEST_METHOD'] = 'POST'\n request._load_post_and_files()\n request.META['REQUEST_METHOD'] = method\n\n","sub_path":"piston/mimer.py","file_name":"mimer.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"318761802","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 23 15:03:18 2018\r\n\r\n@author: Leon Koren\r\n\"\"\"\r\n\r\nimport urllib3\r\nimport json\r\n\r\ncode='300E5E1B8C71C14001B458D1'\r\n\r\nurl='http://jobfair.tagitsolutions.com/q934dr4/'\r\nheaders=urllib3.util.make_headers(basic_auth='secret:tU3kk!?xxx')\r\nhttp=urllib3.PoolManager()\r\n\r\ncode_bin=bin(int(code,16))[2:]\r\nif len(code_bin)<96:\r\n code_bin='0'*(96-len(code_bin))+code_bin\r\n \r\npartition=code_bin[11:14]\r\n\r\ncompany_prefix_bits=[40,37,34,30,27,24,20]\r\nitem_reference_bits=[4,7,10,14,17,20,24]\r\n\r\ncompany_prefix_bin=code_bin[14:(company_prefix_bits[int(partition,2)]+14)]\r\nitem_reference_bin=code_bin[company_prefix_bits[int(partition,2)]+14:(item_reference_bits[int(partition,2)]+company_prefix_bits[int(partition,2)]+14)]\r\nserial_bin=code_bin[(item_reference_bits[int(partition,2)]+company_prefix_bits[int(partition,2)]+14):]\r\n\r\ncompany_prefix=str(int(company_prefix_bin,2))\r\nitem_reference=str(int(item_reference_bin,2))\r\n\r\nrequest_url=url+company_prefix+'/'+item_reference\r\nresponse=http.request('GET',request_url,headers=headers)\r\n\r\ndata=json.loads(response.data.decode('utf-8'))\r\n\r\nprint('Item name: {0}\\n'.format(data['itemName']))\r\nprint('Item reference: {0}\\n'.format(data['itemReference']))\r\nprint('Item serial: {0}\\n'.format(int(serial_bin,2)))\r\nprint('Item manufacturer: {0}\\n'.format(data['company']['companyName']))\r\nprint('Manufacturer prefix: {0}\\n'.format(data['company']['companyPrefix']))\r\n","sub_path":"JobFair_Tagit_Leon_Koren.py","file_name":"JobFair_Tagit_Leon_Koren.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"422413835","text":"from django.core.mail import send_mail\nfrom django.conf import settings\n\ndef OTPEmailSender(email, verify_otp):\n\tmessage = f\"{email},\\n Your OTP is {verify_otp}\\nThanks\"\n\tsend_mail(\n\t\t\"Email Verify OTP Is Ready - BUKINOW\",\n\t\tmessage,\n \tsettings.EMAIL_HOST_USER,\n \t[email],\n \tfail_silently=False)","sub_path":"accounts/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"117916321","text":"### Simulator 정의\n'''\n Logic\n SELECT nextval('SEQ_EVENT_CODE') -> EVENT_CODE\n SELECT DISTINCT DATE TRADING_DATE FROM JONGMOK_DAILY_SISE ORDER BY 1 -> TRADING_DATE\n LOOP CURRENT_DATE IN TRADING_DATE\n BOTS[].DECIDE_SELL() RETURN {SELL_JONGMOK_CODE, SELL_QUANTITY}\n LOOP ACCOUNT_ID IN BOTS[]\n SELL(ACCOUNT_ID, SELL_JONGMOK_CODE, SELL_QUANTITY)\n BOTS[].DECIDE_BUY() RETURN {BUY_JONGMOK_CODE, BUY_QUANTITY}\n LOOP ACCOUNT_ID IN BOTS[]\n BUY(ACCOUNT_ID, BUY_JONGMOK_CODE, BUY_QUANTITY)\n EVALUATE() \n'''\n### Simulator 정의 끝\n\n##################\n# Import 
module\n##################\nimport sys\nimport psycopg2\nimport psycopg2.extras\nimport os\n\nsys.path.insert(0, '../bots')\n\n##################\n# Variable Define\n##################\nloop=True # 메인 Loop Flag\nstr_event_code = \"\"\nstr_current_date = \"\"\nselected_bot_name = \"\"\nbot = None\n\n\n\n##################\n# Functions Define\n##################\ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\n\ndef print_menu(): ## Your menu design here\n print(30 * \"-\" , \"MENU\" , 30 * \"-\")\n sys.stdout.write(\"1. Bot 선택 현재 : \"+str(selected_bot_name))\n if bot != None:\n sys.stdout.write(\" '\")\n bot.hello()\n sys.stdout.write(\"'\")\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()\n print(\" \")\n print(\"3. Buy Test\")\n print(\" \")\n print(\"10. Simulate\")\n print(\" \")\n print(\"99. Exit\")\n print(67 * \"-\")\n\ndef get_bot_list():\n print(\"\")\n path = '../bots'\n return [f for f in os.listdir(path) if f.endswith('.py')]\n \n\ndef select_bot():\n cls()\n bot_list = get_bot_list()\n i=0\n for _bot in bot_list :\n print(str(i)+\". \"+str(_bot))\n i+=1\n choice_bot = input(\"Select bot you want : \")\n\n #sys.stdout.write(bot_list[int(choice_bot)])\n #print(\" is selected\")s\n\n global selected_bot_name\n global bot\n selected_bot_name = str(bot_list[int(choice_bot)]).replace('.py','')\n bot = __import__(selected_bot_name)\n\n\n\ndef simulate() :\n cls()\n conn = psycopg2.connect(\"dbname=postgres user=postgres password=happy01\")\n with conn:\n with conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) as curs:\n # Event Code 채번\n curs.execute(\"SELECT nextval('SEQ_EVENT_CODE') as EVENT_CODE;\")\n rec = curs.fetchone()\n str_event_code = str(rec.event_code)\n print(\"Event Code : \" + str_event_code)\n\n curs.execute(\"INSERT INTO ACCOUNT(event_code, account_id, deposit, trading_unit_ratio) VALUES(%s, %s, %s, %s);\",(str_event_code, \"1000\", 100000000, 0.1,))\n \n\n # 거래 시작일, 종료일 설정\n curs.execute(\"SELECT DISTINCT DATE TRADING_DATE FROM JONGMOK_DAILY_SISE ORDER BY 1;\")\n for rec in curs:\n str_current_date = rec.trading_date\n\n # 사라 봇\n # def buy(event_code, account_id, jongmok_code, date, buy_quantity):\n #list = bot.decide_buy()\n buy_lists = [[\"1000\",\"086790\",5],[\"1000\",\"010120\",5]]\n for rec in buy_lists:\n buy(str_event_code, rec[0], rec[1], str_current_date, rec[2])\n\n\n # 팔아라 봇\n #bot.decide_sell()\n\n # 평가\n #account_evaluate(str_event_code, str_current_date)\n \n \n\n print(\"\\n\\nEnd\")\n input(\"Press any key...\")\n\n\n\n\n### Simulation 주식 매수, 매도 Function 정의\n### 실시간 거래처럼 매수,매도 주문 -> 체결이 아닌 종목의 해당 시점의 해당 금액으로 수량만 입력하여 매수, 매도하는 방식\n'''\n Logic\n 매수 혹은 매도 수행\n 성공, 실패(실패사유 포함) 리턴\n 1) 매수\n - 사고자 하는 종목코드, 일자, 수량을 Parameter로 함\n - 해당 종목의 현재가격 * 수량 <= 미수금 인지 체크\n - True\n - 해당 종목의 현재가격 * 수량 -> 매수 총금액\n - TRADING_STOCK TABLE에 보유하고 있는 해당 종목의 평균단가 * 보유수 -> 보유 총금액\n - (매수 총금액 + 보유 총금액) / (매수 수량 + 보유 수량) -> TRADING_STOCK.AVG_PRICE\n - 매수 수량 + 보유 수량 -> TRADING_STOCK.QUANTITY\n TRANSACTION{\n - 위 정보로 TRADING_STOCK MERGE\n - TRADING_HISTORY INSERT\n - ACCOUNT UPDATE\n }ERROR{\n - ERROR LOGGING\n }\n - False\n - 매수 불가, 사유 포함하여 False 리턴\n 2) 매도\n - 팔고자 하는 종목코드, 일자, 수량을 Parameter로 함\n - 팔고자 하는 수량 <= 보유수량 인지 체크\n - True\n - 해당 종목의 현재가격 * 수량 -> 매도 총금액\n - TRADING_STOCK TABLE에 보유하고 있는 해당 종목의 평균단가 * 보유수 -> 보유 총금액\n - (보유 총금액 - 매도 총금액) / (보유 수량 - 매도 수량) -> TRADING_STOCK.AVG_PRICE\n - 보유 수량 - 매도 수량 -> TRADING_STOCK.QUANTITY\n TRANSACTION{\n - 위 정보로 TRADING_STOCK MERGE\n - TRADING_HISTORY INSERT\n - ACCOUNT UPDATE\n }ERROR{\n - ERROR LOGGING\n 
}\n\n'''\n### Simulation 주식 매수, 매도 Function 정의 끝\ndef buy(event_code, account_id, jongmok_code, date, buy_quantity):\n # 해당 종목의 현재가격 * 수량 <= 미수금 인지 체크\n # 종목별 시세 Table : JONGMOK_DAILY_SISE\n if buy_quantity < 1:\n return False\n\n conn = psycopg2.connect(\"dbname=postgres user=postgres password=happy01\")\n with conn:\n with conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) as curs:\n curs.execute(\"SELECT final_price FROM JONGMOK_DAILY_SISE WHERE JM_CODE = %s AND DATE = %s;\",(jongmok_code, date,))\n rec = curs.fetchone()\n if rec == None :\n return False\n buy_total_price = rec.final_price * buy_quantity \n curs.execute(\"SELECT deposit FROM ACCOUNT WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s;\",(event_code, account_id,))\n rec = curs.fetchone()\n deposit = rec.deposit\n\n if buy_total_price <= deposit : \n curs.execute(\"SELECT coalesce(sum(avg_price),0) AVG_PRICE , coalesce(sum(quantity),0) QUANTITY FROM TRADING_STOCK WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s AND JM_CODE = %s;\",(event_code, account_id, jongmok_code,))\n rec = curs.fetchone()\n stock_total_price = rec.avg_price * rec.quantity\n stock_total_qty = rec.quantity \n result_total_qty = buy_quantity + stock_total_qty\n result_avg_price = (buy_total_price + stock_total_price) / result_total_qty \n if stock_total_qty > 0: \n # 보유주 Update\n curs.execute(\"UPDATE TRADING_STOCK SET AVG_PRICE = %s, QUANTITY = %s WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s AND JM_CODE = %s;\",(result_avg_price, result_total_qty, event_code, account_id, jongmok_code,)) \n else: \n # 보유주 Insert \n curs.execute(\"INSERT INTO TRADING_STOCK(EVENT_CODE, ACCOUNT_ID, JM_CODE, AVG_PRICE, QUANTITY) VALUES(%s, %s, %s, %s, %s) ;\",(event_code, account_id, jongmok_code, result_avg_price, result_total_qty,))\n # 계좌 예수금 차감\n curs.execute(\"UPDATE ACCOUNT SET DEPOSIT = DEPOSIT - %s WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s;\",(buy_total_price, event_code, account_id, ))\n return True\n \n else:\n return False\n\n\n\n### Simulation 주식 매수, 매도 Function 정의 끝\ndef sell(event_code, account_id, jongmok_code, date, sell_quantity):\n if sell_quantity < 1:\n return False\n\n conn = psycopg2.connect(\"dbname=postgres user=postgres password=happy01\")\n with conn:\n with conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) as curs:\n curs.execute(\"SELECT quantity, avg_price FROM TRADING_STOCK WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s AND JM_CODE = %s;\",(event_code, account_id, jongmok_code,))\n rec = curs.fetchone()\n stock_quantity = rec.quantity # 보유수량 \n curs.execute(\"SELECT final_price FROM JONGMOK_DAILY_SISE WHERE JM_CODE = %s AND DATE = %s;\",(jongmok_code, date,))\n rec = curs.fetchone()\n sell_total_price = rec.final_price * sell_quantity # 매도총금액\n result_total_qty = stock_quantity - sell_quantity # 매도후 잔량\n if result_total_qty > 0 :\n curs.execute(\"UPDATE TRADING_STOCK SET QUANTITY = %s WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s AND JM_CODE = %s;\",(result_total_qty, event_code, account_id, jongmok_code,))\n else:\n curs.execute(\"DELETE FROM TRADING_STOCK WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s AND JM_CODE = %s;\",(event_code, account_id, jongmok_code,))\n \n curs.execute(\"UPDATE ACCOUNT SET DEPOSIT = DEPOSIT + %s WHERE EVENT_CODE = %s AND ACCOUNT_ID = %s;\",(sell_total_price, event_code, account_id, ))\n return True\n\n\ndef account_evaluate(event_code, date):\n conn = psycopg2.connect(\"dbname=postgres user=postgres password=happy01\")\n with conn:\n with conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) as curs:\n 
curs.execute(\"SELECT ACCOUNT_ID, DEPOSIT FROM ACCOUNT WHERE EVENT_CODE = %s;\",(event_code,))\n for rec in curs:\n with conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor) as inner_curs:\n inner_curs.execute(\"SELECT coalesce(SUM(A.QUANTITY*B.FINAL_PRICE),0) STOCK_DEPOSIT FROM TRADING_STOCK A,JONGMOK_DAILY_SISE B WHERE A.JM_CODE = B.JM_CODE AND A.ACCOUNT_ID = %s AND B.DATE = %s AND A.EVENT_CODE = %s;\", (rec.account_id, date, event_code,))\n inner_rec = inner_curs.fetchone()\n evaluated_deposit = rec.deposit + inner_rec.stock_deposit\n inner_curs.execute(\"INSERT INTO ACCOUNT_EVALUATE (EVENT_CODE, DATE, ACCOUNT_ID, EVAL_DEPOSIT) VALUES (%s, %s, %s, %s);\",(event_code, date, rec.account_id, evaluated_deposit))\n\n\n\n\n\n\n##################\n# Start of program\n##################\n\nwhile loop: ## While loop which will keep going until loop = False\n cls()\n print_menu() ## Displays menu\n choice = input(\"Enter your choice [1-5]: \")\n \n if choice==\"1\":\n select_bot()\n\n elif choice==\"3\":\n \n buy(\"1\", \"1000\",\"086790\",\"20130807\",5)\n account_evaluate(\"1\", \"20130807\")\n buy(\"1\", \"1000\",\"086790\",\"20130808\",4)\n account_evaluate(\"1\", \"20130808\")\n buy(\"1\", \"1000\",\"086790\",\"20130809\",7)\n account_evaluate(\"1\", \"20130809\") \n sell(\"1\", \"1000\", \"086790\", \"20130812\", 10)\n account_evaluate(\"1\", \"20130812\")\n sell(\"1\", \"1000\", \"086790\", \"20130813\", 6)\n account_evaluate(\"1\", \"20130813\")\n\n elif choice==\"10\":\n simulate()\n\n elif choice==\"99\":\n ##print(\"Menu 1 has been selected\")\n ## You can add your code or functions here\n loop=False # This will make the while loop to end as not value of loop is set to False\n else:\n # Any integer inputs other than values 1-5 we print an error message\n print(choice)\n input(\"Wrong option selection. 
Enter any key to try again..\")\n\n\n","sub_path":"simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":12139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"446887068","text":"\r\n# Simulation Example Script\r\n\r\nJarnac.sim.loadSBML (JDesigner.basic.getSBML())\r\n\r\nJarnac.sim.setTimeStart (0.0)\r\nJarnac.sim.setTimeEnd (25.0)\r\nJarnac.sim.setNumPoints (500)\r\n\r\nmat = Jarnac.sim.simulate ()\r\n\r\nxyGraph.xaxis.setAxisMax (25)\r\nxyGraph.yaxis.setAxisMax (4)\r\n\r\nxyGraph.data.addArray ([mat])\r\nxyGraph.data.setLineColor (1, 0);\r\nxyGraph.data.setLineColor (2, 1);\r\n\r\n\r\n\r\ns = raw_input (\"Hit any key to continue\")\r\n\r\n# Plot the phase space\r\n\r\nxyGraph.data.selectCols([1,2])\r\nxyGraph.xaxis.setAxisMax (3.5);\r\nxyGraph.yaxis.setAxisMax (2.5);\r\nxyGraph.app.redraw()\r\n\r\n\r\ns = raw_input (\"Hit any key to continue\")\r\n\r\n# Change the symbol type\r\n\r\n# Data column 2 (x2), symbol number 16\r\nxyGraph.data.setSymbol(2, 16)\r\n\r\n\r\ns = raw_input (\"Hit any key to continue\")\r\n\r\n# Change the line type\r\n\r\n# Data column 2, 0 = no line; 1 = line\r\nxyGraph.data.setLineStyle (2, 0)","sub_path":"bindings/Python/_Tests/simPlot.py","file_name":"simPlot.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"559760125","text":"import argparse\nimport os\nimport time\nimport sys_path\nsys_path.insert_sys_path()\n\nfrom datetime import date\nfrom handlers.parameters_handler import ParameterHandler\nfrom manager.lib_distribution.filter_parser import *\nfrom configs.database import FilterInterfaceConfig\nfrom configs.commit_message import CommitMessage\nfrom configs.svn_resource import SVNResource, DataPathSVN\nfrom utils.svn_helper import SVNHelper\nfrom configs.jenkins import JenkinsHelper\nfrom configs.json_key import JobName\nfrom jenkins.lib_parsers.change_build_mapping_parser import ChangeBuildMappingParser\nfrom configs.test_result import FinalTestResult\n\nFILE_NAME_DEFAULT = \"ChangeLog\"\n\n\ndef parse_argument():\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--parameters', default=\"parameters.json\",\n required=True,\n help=\"All parameter needed for master to\"\n \" manage test nodes\")\n parser.add_argument('-g', '--gerrit-number',\n required=True,\n help='Change number to get parameter file')\n return parser\n\n\ndef replace_content(content, version):\n label = \"[D\" + str(version) + \"]\"\n import re\n pattern = \"\\n\\n.*\\[D\"\n paragraph = re.split(pattern, content)\n for num, para in enumerate(paragraph, 1):\n lines = re.split(\"\\n\", para)\n for line in lines:\n filter_value = line.find(label)\n if filter_value == 0:\n content = content.replace(para + \"\\n\\n\", \"\")\n return 0, content\n return 1, content\n\n\ndef main():\n\n # Parse argument\n parse = parse_argument()\n args = parse.parse_args()\n start_time = time.time()\n change_number = args.gerrit_number\n\n print(\" Start update change log\")\n # Get mapping file\n change_build_mapping_file = \\\n JenkinsHelper.get_file_mapping(job_name=JobName.IT,\n file_name=JenkinsHelper.CHANGE_BUILD_MAPPING_FILE)\n\n # Create data parser for change build mapping information\n data_parser = ChangeBuildMappingParser(mapping_file=change_build_mapping_file)\n latest_build = data_parser.get_latest_success_build(change_number=change_number)\n\n # Path to archive folder on Jenkins server\n 
archive_folder = JenkinsHelper.get_archive_path(job_name=JobName.IT,\n build_number=latest_build)\n parameter_file = os.path.join(archive_folder, FinalTestResult.INFO, args.parameters)\n\n parameter_handler = ParameterHandler(input_file=parameter_file)\n\n # Get delta version\n version = int(parameter_handler.get_delta_version()) + 1\n\n svn_resource = SVNResource()\n change_log_url = svn_resource.get_url(DataPathSVN.CHANGE_LOG)\n print(\"\\n+ Change log url: {0}\".format(change_log_url))\n change_log_local_path = os.path.join(os.getcwd(), DataPathSVN.CHANGE_LOG)\n svn_helper = SVNHelper(change_log_url, change_log_local_path)\n if not svn_helper.is_checkouted():\n svn_helper.checkout()\n svn_helper.update()\n\n # Parse commit message\n commit_message = parameter_handler.get_commit()\n filter_commit = parse_heading_message(commit_message)\n\n # Get main content of change log\n delta_summary = u\" *{0}:\".format(CommitMessage.SUMMARY) \\\n + unicode(filter_commit.encode('utf-8').strip(FilterInterfaceConfig.PHOCR_HEADER)) \\\n + u\"\\n\"\n\n # Get commit date\n commit_date = date.today()\n delta_and_date = u\"[D\" + unicode(version) + u\"]\" + u\" - \" + unicode(commit_date) + \"\\n\"\n delta_description = u\" *{0}: \\n\".format(CommitMessage.DESCRIPTION) \\\n + parse_commit_message_contents_by_topic(CommitMessage.DESCRIPTION,\n commit_message)\n\n # Write content to change log file\n change_log_file_path = os.path.join(os.path.abspath(change_log_local_path),\n FILE_NAME_DEFAULT)\n all_contents = unicode(delta_and_date) \\\n + unicode(delta_summary) \\\n + unicode(delta_description)\n\n # Check if delta log is writen, delete old log\n f = open(change_log_file_path, \"r+\")\n content = f.read()\n num, content = replace_content(content, version)\n if num == 0:\n # Delete file content if it has current delta log\n f.truncate(0)\n f.close()\n # Write file content after delete current delta log\n f = open(change_log_file_path, \"r+\")\n f.write(content)\n f.close()\n else:\n f.close()\n\n f = open(change_log_file_path, \"r\")\n contents = list()\n for line in f.readlines():\n contents.append(unicode(line.decode('utf-8')))\n # Here, we prepend the string we want to on first line\n contents.insert(0, all_contents)\n f.close()\n\n # We again open the file in WRITE mode\n f = open(change_log_file_path, \"wb\")\n f.writelines([unicode(line).encode('utf-8') for line in contents])\n f.close()\n\n # Commit change log file to SVN\n cm_update_change_log = \"Update change log. 
Add log of D{0}!\".format(version)\n svn_helper.commit(cm_update_change_log, [change_log_file_path])\n\n # Calculate execution time\n print(\"\\n Finished in: {execution_time}s\".format(execution_time=time.time() - start_time))\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"utilities/svn_manager/update_change_log.py","file_name":"update_change_log.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"37160563","text":"import random\nfrom typing import Dict, List, Optional, Tuple, Union\n\n\ndef topological_sort(graph):\n # type: Dict[str, List[str]] -> Optional[List[Tuple[Union[str, int]]]]\n \"\"\"Return linear ordering of the vertices of a directed graph.\n\n https://leetcode.com/problems/course-schedule (analogous)\n \"\"\"\n result = []\n counter = len(graph)\n\n # Select random node\n node = random.choice(list(graph.keys()))\n visited = set([node])\n\n while True:\n # If node does not point to any other node, remove from graph\n if len(graph[node]) == 0:\n # Remove node from targets\n for k, v in graph.items():\n if node in v:\n v.remove(node)\n\n # Remove node from source\n del graph[node]\n\n # Append node with current counter and decrement counter\n result.append((node, counter))\n counter -= 1\n\n # If all nodes accounted for, break from loop\n if counter == 0:\n break\n # Otherwise select another random node and reset nodes visited\n else:\n node = random.choice(list(graph.keys()))\n visited = set()\n\n # If node does point to other nodes, check if there's a cycle\n else:\n targets = graph[node]\n node = random.choice(targets)\n\n # If going down target nodes ends up in a cycle, return early\n if node in visited:\n return None\n\n visited.add(node)\n\n return result\n\n\nif __name__ == \"__main__\":\n graph_1 = {\n 'A': ['B', 'C'],\n 'B': [],\n 'C': ['B'],\n 'D': ['C'],\n 'E': ['D'],\n 'F': ['E'],\n }\n\n graph_2 = {\n 'A': ['B'],\n 'B': ['C'],\n 'C': ['A', 'E'],\n 'D': ['A'],\n 'E': [],\n }\n\n print(topological_sort(graph_1))\n print(topological_sort(graph_2))\n","sub_path":"2020/b0207_topological_sort.py","file_name":"b0207_topological_sort.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"477024293","text":"# Standard Library\nimport dataclasses\nimport json\n\n# Django\nfrom django.core.cache import cache\n\n# Third Party Libraries\nimport requests\n\n# Local\nfrom .models import Category, Podcast\n\nITUNES_SEARCH_URL = \"https://itunes.apple.com/search\"\n\n\nclass Timeout(requests.exceptions.Timeout):\n pass\n\n\nclass Invalid(requests.RequestException):\n pass\n\n\n@dataclasses.dataclass\nclass SearchResult:\n rss: str\n itunes: str\n title: str\n image: str\n\n def as_dict(self):\n return {\n \"rss\": self.rss,\n \"title\": self.title,\n \"itunes\": self.itunes,\n \"image\": self.image,\n }\n\n def as_json(self):\n return json.dumps(self.as_dict())\n\n\ndef fetch_itunes_genre(genre_id, num_results=20):\n \"\"\"Fetch top rated results for genre\"\"\"\n return _get_search_results(\n {\"term\": \"podcast\", \"limit\": num_results, \"genreId\": genre_id,},\n cache_key=f\"itunes:genre:{genre_id}\",\n )\n\n\ndef search_itunes(search_term, num_results=12):\n \"\"\"Does a search query on the iTunes API.\"\"\"\n\n return _get_search_results(\n {\"media\": \"podcast\", \"limit\": num_results, \"term\": search_term,},\n cache_key=f\"itunes:search:{search_term}\",\n 
)\n\n\ndef _get_search_results(params, cache_key, cache_timeout=86400, requests_timeout=3):\n\n results = cache.get(cache_key)\n if results is None:\n try:\n response = requests.get(\n ITUNES_SEARCH_URL, params, timeout=requests_timeout, verify=True,\n )\n response.raise_for_status()\n results = response.json()[\"results\"]\n cache.set(cache_key, results, timeout=cache_timeout)\n except KeyError as e:\n raise Invalid from e\n except requests.exceptions.Timeout as e:\n raise Timeout from e\n except requests.RequestException as e:\n raise Invalid from e\n\n return [\n SearchResult(\n item[\"feedUrl\"],\n item[\"trackViewUrl\"],\n item[\"collectionName\"],\n item[\"artworkUrl600\"],\n )\n for item in results\n if \"feedUrl\" in item\n ]\n\n\ndef crawl_itunes(limit=100):\n categories = (\n Category.objects.filter(itunes_genre_id__isnull=False)\n .prefetch_related(\"podcast_set\")\n .order_by(\"name\")\n )\n new_podcasts = 0\n\n for category in categories:\n current = category.podcast_set.values_list(\"itunes\", flat=True)\n podcasts = []\n\n try:\n results = fetch_itunes_genre(category.itunes_genre_id, num_results=limit)\n except (Invalid, Timeout):\n continue\n\n podcasts = [\n Podcast(title=result.title, rss=result.rss, itunes=result.itunes)\n for result in [r for r in results if r.itunes not in current]\n ]\n Podcast.objects.bulk_create(podcasts, ignore_conflicts=True)\n new_podcasts += len(podcasts)\n return new_podcasts\n","sub_path":"radiofeed/podcasts/itunes.py","file_name":"itunes.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"384709435","text":"#!/usr/bin/python\n\nimport subprocess\nimport globalVars\n\n\ndef firewallVPNOn(sendFile):\n command = globalVars.pathBaseTgScripts + \"vpn.sh\"\n subprocess.Popen(command, shell=True)\n if sendFile:\n globalVars.toFile(globalVars.sendFile, \"FWL abierto para la conexion VP_N. 
Recuerda cerrar el puerto al acabar enviando vpnoff\")\n\nif __name__ == \"__main__\":\n firewallVPNOn(True)\n","sub_path":"raspiWeb/backoffice/tgScripts/vpn.py","file_name":"vpn.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"383235962","text":"# Create file and read file\n# r : read file\n# w : write file\n# w+ : read and write\n\nimport os\n\n#os.path.join(\"dev\",\"python\",\"file.txt\")\n\nst = open(\"file1.txt\",\"w\")\nst.write(\"Today is Monday.\")\nst.close()\n\n# new exercise, open file and add file content into list.\n\nimport os\nmylist = list()\n\nwith open (\"file.txt\",\"r\") as f:\n mylist.append(f.read())\n\nwith open (\"file1.txt\",\"w\") as s:\n s.write(\"Today it is raining hard\")\n\nwith open (\"file1.txt\",\"r\") as s:\n mylist.append(s.read())\n\nprint(mylist)\n","sub_path":"Work_with_files.py","file_name":"Work_with_files.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"318986175","text":"import pandas as pd\nimport os\nimport time\nfrom sqlalchemy import create_engine\n\nfrom liwc_functions import add_liwc_counters, expand_json\nfrom sqlalchemy.types import DateTime\n\nconn_string = 'postgresql://' + os.environ['PGHOST'] + '/' + os.environ['PGDATABASE']\n\ndef add_liwc(args):\n\n file_names, output_schema, output_table = args[0], args[1], args[2]\n engine = create_engine(conn_string)\n\n for index, file_w_date in file_names.iterrows():\n\n file_name = file_w_date['file_name']\n last_update = file_w_date['last_update']\n\n # Get speaker data\n sql = \"\"\"\n SELECT file_name, last_update, speaker_name,\n context, section, speaker_number, speaker_text\n FROM streetevents.speaker_data\n WHERE speaker_name IS NOT NULL\n AND file_name ='%s' AND last_update = '%s'\n \"\"\" % (file_name, last_update)\n\n speaker_data = pd.read_sql(sql, engine)\n speaker_data['last_update'] = speaker_data['last_update']\n\n # Calculate LIWC, then drop speaker text\n speaker_data['add_liwc_counters'] = speaker_data['speaker_text'].apply(add_liwc_counters)\n speaker_data = speaker_data.drop(['speaker_text'], axis=1)\n\n # If no speaker_data returned, we still create a DataFrame to keep track\n # of files that have been processed.\n if len(speaker_data)==0:\n d = {'file_name': [file_name], 'last_update': [last_update]}\n speaker_liwc = pd.DataFrame(d)\n speaker_liwc['last_update'] = speaker_liwc['last_update']\n else:\n # Expand single JSON field to multiple columns\n speaker_liwc = expand_json(speaker_data, 'add_liwc_counters')\n\n # Export LIWC data from the call to Postgres\n conn = engine.connect()\n table_exists = engine.dialect.has_table(engine, output_table, schema=output_schema)\n speaker_liwc.to_sql(output_table,\n conn, schema=output_schema, if_exists='append', index=False,\n dtype = {'last_update': DateTime(timezone = True)})\n if not table_exists:\n sql = \"\"\"\n CREATE INDEX ON %s.%s (file_name);\n CREATE INDEX ON %s.%s (file_name, last_update);\n \"\"\" % (output_schema, output_table,\n output_schema, output_table)\n\n engine.execute(sql)\n conn.close()\n\n engine.dispose()\n","sub_path":"liwc_2015/liwc_add.py","file_name":"liwc_add.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"237463579","text":"\"\"\"\nLAB2 Task 1_2\n\nUse bilinear interpolation interpolation to interpolate a 
grey scale image\nEnlarge: [461, 461]\nShrink: [205, 205]\n\"\"\"\n\nimport numpy as np\nfrom skimage import io, data\nimport math\n\n\ndef linear(x, y1, y2):\n if y2 > y1:\n return y1 + x * (y2-y1)\n else:\n return y2 + (1-x)*(y1-y2)\n\n\ndef bilinear_11810818(input_file, dim):\n # Load image\n in_image = io.imread(input_file)\n print(in_image)\n out_width = dim[0]\n out_height = dim[1]\n in_width = in_image.shape[0]\n in_height = in_image.shape[1]\n out_image = np.zeros(dim, dtype=np.uint8)\n # out_image = np.zeros(dim)\n\n # Perform Exchange\n for col in range(out_width):\n for row in range(out_height):\n x = col*((in_width-1)/(out_width-1))\n y = row*((in_height-1)/(out_height-1))\n if x == round(x):\n if y == round(y): # 在点上\n out_image[col, row] = in_image[round(x), round(y)]\n else: # 在纵轴上\n out_image[col, row] = round(linear(y-math.floor(y), in_image[round(x), math.floor(y)], in_image[round(x), math.floor(y)+1]))\n elif y == round(y): # 在横轴上\n out_image[col, row] = round(linear(x-math.floor(x), in_image[math.floor(x), round(y)], in_image[math.floor(x)+1, round(y)]))\n else:\n left=linear(y-math.floor(y), in_image[math.floor(x), math.floor(y)], in_image[math.floor(x), math.floor(y)+1])\n right=linear(y-math.floor(y), in_image[math.floor(x)+1, math.floor(y)], in_image[math.floor(x)+1, math.floor(y)+1])\n out_image[col, row] = round(linear(x-math.floor(x), right, left))\n # out_image[col, row] = 1\n\n print(out_image)\n # Save Image\n io.imsave(\"bilinear_11810818.tif\", out_image)\n\n\nif __name__ == '__main__':\n bilinear_11810818(\"rice.tif\", [461, 461])","sub_path":"Lab2_Image_Interpolation/bilinear_11810818.py","file_name":"bilinear_11810818.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"440143698","text":"from stark.service.stark import site, ModelStark\nfrom .models import *\nfrom django.utils.safestring import mark_safe\nfrom django.shortcuts import render, redirect\nfrom django.urls import re_path\n\n\nclass UserConfig(ModelStark):\n list_display = [\"name\", \"email\", \"depart\"]\n\n\nsite.register(UserInfo, UserConfig)\nsite.register(School)\n\n\nclass ClassConfig(ModelStark):\n\n def display_classname(self, obj=None, header=False):\n if header:\n return \"班级名称\"\n return f\"{obj.course.name}({str(obj.semester)})\"\n\n list_display = [display_classname, \"tutor\", \"teachers\"]\n\n\nsite.register(ClassList, ClassConfig)\nsite.register(Department)\nsite.register(Course)\n\n\nclass CustomerConfig(ModelStark):\n\n def display_course(self, obj=None, header=False):\n if header:\n return \"课程\"\n temp = []\n for course in obj.course.all():\n s = f\"{course.name} \"\n temp.append(s)\n return mark_safe(\"\".join(temp))\n\n def cancel_course(self, request, customer_id, course_id):\n print(customer_id, course_id)\n obj = Customer.objects.filter(pk=customer_id).first()\n obj.course.remove(course_id)\n\n return redirect(self.get_change_url(\"list\"))\n\n def extra_url(self):\n temp = []\n temp.append(re_path(r\"cancel_course/(\\d+)/(\\d+)\", self.cancel_course))\n return temp\n\n list_display = [\"name\", \"gender\", display_course, \"consultant\"]\n\n\nsite.register(Customer, CustomerConfig)\n\n\nclass ConsultConfig(ModelStark):\n list_display = [\"customer\", \"consultant\", \"date\", \"note\"]\n\n\nsite.register(ConsultRecord, ConsultConfig)\n\n\nclass CourseRecordConfig(ModelStark):\n def score(self, request, course_record_id):\n if request.method == \"POST\":\n 
print(request.POST)\n data = {}\n for key, value in request.POST.items():\n if key == \"csrfmiddlewaretoken\": continue\n field, pk = key.rsplit(\"_\", 1)\n if pk in data:\n data[pk][field] = value\n else:\n data[pk] = {field: value} # data {4:{\"score\":90}}\n print(\"data\", data)\n\n for pk, update_data in data.items():\n StudyRecord.objects.filter(pk=pk).update(**update_data)\n return redirect(request.path)\n else:\n study_record_list = StudyRecord.objects.filter(course_record=course_record_id)\n score_choices = StudyRecord.score_choices\n return render(request, \"score.html\", locals())\n\n def extra_url(self):\n temp = []\n temp.append(re_path(r\"record_score/(\\d+)\", self.score))\n return temp\n\n def record(self, obj=None, header=False):\n if header:\n return \"考勤\"\n return mark_safe(\"记录\" % obj.pk)\n\n def record_score(self, obj=None, header=False):\n if header:\n return \"录入成绩\"\n return mark_safe(\"录入成绩\" % obj.pk)\n\n list_display = [\"class_obj\", \"day_num\", \"teacher\", record, record_score]\n\n\nsite.register(CourseRecord, CourseRecordConfig)\n\n\nclass StudyConfig(ModelStark):\n list_display = [\"student\", \"course_record\", \"record\", \"score\"]\n\n def patch_late(self, request, queryset):\n queryset.update(record=\"late\")\n\n patch_late.short_description = \"迟到\"\n actions = [patch_late]\n\n\nsite.register(StudyRecord, StudyConfig)\n\n\nclass StudentConfig(ModelStark):\n list_display = [\"customer\", \"class_list\"]\n list_display_links = [\"customer\"]\n\n\nsite.register(Student, StudentConfig)\n","sub_path":"web_server/CRM_SYSTEM/crm/stark.py","file_name":"stark.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"205422662","text":"# Copyright 2015 Cisco Systems, Inc.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n\"\"\"\r\nThis module performs the operation under LAN -> LAN Cloud.\r\n\"\"\"\r\nimport logging\r\nlog = logging.getLogger('ucs')\r\n\r\n\r\ndef vlan_create(handle, name, vlan_id, sharing=\"none\",\r\n mcast_policy_name=\"\", compression_type=\"included\",\r\n default_net=\"no\", pub_nw_name=\"\", parent_dn=\"fabric/lan\"):\r\n \"\"\"\r\n # LAN\r\n # - LAN Cloud\r\n\r\n Creates VLAN\r\n\r\n Args:\r\n handle (UcsHandle)\r\n sharing (String) : [\"community\", \"isolated\", \"none\", \"primary\"]\r\n name (String) : VLAN Name\r\n vlan_id (String): VLAN ID\r\n mcast_policy_name (String) : Multicast Policy Name\r\n compression_type (string) : [\"excluded\", \"included\"]\r\n default_net (String) : [\"false\", \"no\", \"true\", \"yes\"]\r\n pub_nw_name (String) :\r\n parent_dn (String) :\r\n\r\n Returns:\r\n None\r\n\r\n Example:\r\n vlan_create(handle, \"none\", \"vlan-lab\", \"123\", \"sample_mcast_policy\", \"included\")\r\n \"\"\"\r\n from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan\r\n\r\n obj = handle.query_dn(parent_dn)\r\n if obj:\r\n vlan = FabricVlan(parent_mo_or_dn=obj,\r\n sharing=sharing,\r\n name=name,\r\n 
id=vlan_id,\r\n mcast_policy_name=mcast_policy_name,\r\n policy_owner=\"local\",\r\n default_net=default_net,\r\n pub_nw_name=pub_nw_name,\r\n compression_type=compression_type)\r\n\r\n handle.add_mo(vlan, modify_present=True)\r\n handle.commit()\r\n else:\r\n log.info(parent_dn + \" MO is not available\")\r\n\r\n\r\ndef vlan_delete(handle, name, parent_dn=\"org-root\"):\r\n \"\"\"\r\n Deletes a VLAN\r\n Args:\r\n handle (UcsHandle)\r\n name (string)\r\n parent_dn (String) :\r\n Returns:\r\n None\r\n Example:\r\n vlan_delete(handle, \"lab-vlan\")\r\n \"\"\"\r\n\r\n dn = parent_dn + '/net-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"VLAN Mo is not present\")\r\n\r\n\r\ndef vlan_exists(handle, name, vlan_id=None, sharing=None,\r\n mcast_policy_name=None, compression_type=None,\r\n default_net=None, pub_nw_name=None, parent_dn=\"fabric/lan\"):\r\n \"\"\"\r\n Checks if the given VLAN already exists with the same params\r\n Args:\r\n handle (UcsHandle)\r\n sharing (String) : [\"community\", \"isolated\", \"none\", \"primary\"]\r\n name (String) : VLAN Name\r\n vlan_id (String): VLAN ID\r\n mcast_policy_name (String) : Multicast Policy Name\r\n compression_type (string) : [\"excluded\", \"included\"]\r\n default_net (String) : [\"false\", \"no\", \"true\", \"yes\"]\r\n pub_nw_name (String) :\r\n parent_dn (String) :\r\n Returns:\r\n True/False (Boolean)\r\n Example:\r\n bool_var = vlan_exists(handle, \"none\", \"vlan-lab\", \"123\", \"sample_mcast_policy\", \"included\")\r\n \"\"\"\r\n dn = parent_dn + '/net-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n if ((vlan_id and mo.vlan_id != vlan_id) and\r\n (sharing and mo.sharing != sharing) and\r\n (mcast_policy_name and mo.mcast_policy_name != mcast_policy_name) and\r\n (compression_type and mo.compression_type != compression_type) and\r\n (default_net and mo.default_net != default_net) and\r\n (pub_nw_name and mo.pub_nw_name != pub_nw_name)):\r\n return False\r\n return True\r\n return False","sub_path":"ucsmsdk_samples/network/vlan.py","file_name":"vlan.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"225239902","text":"from PyQt5 import (\n uic, \n QtCore\n)\n\nfrom PyQt5.QtCore import pyqtSignal\n\nfrom PyQt5.QtWidgets import (\n QApplication, \n QWidget,\n QMessageBox\n)\n\nfrom alarm_clock_item import AlarmClockItem\n\nclass AlarmClockItemWidget(QWidget):\n\n alarm_clock_remove = pyqtSignal()\n\n def __init__(self, alarm_clock, list_widget_item, parent=None):\n super(AlarmClockItemWidget, self).__init__(parent)\n\n uic.loadUi('ui/alarm_clock_item_widget.ui', self)\n\n self.alarm_clock = alarm_clock\n self.alarm_clock.setParent(self)\n\n self.list_widget_item = list_widget_item\n\n self.activeCheckBox.stateChanged.connect(self.change_active)\n self.removeButton.clicked.connect(self.remove)\n\n self.update_info_for_gui()\n\n def update_info_for_gui(self):\n self.titleLabel.setText(self.alarm_clock.title)\n self.timeLabel.setText(self.alarm_clock.time.toString('hh:mm'))\n self.activeCheckBox.setChecked(self.alarm_clock.is_active)\n self.titleLabel.setStyleSheet('QLabel { color : black; }' if self.alarm_clock.is_active else 'QLabel { color : gray; }')\n\n def change_active(self, state):\n self.alarm_clock.is_active = self.activeCheckBox.isChecked()\n self.update_info_for_gui()\n\n def remove(self):\n reply = QMessageBox.question(self, 'Выход',\n 'Вы уверены что 
хотите удалить будильник \"' + self.alarm_clock.title + '\"', \n QMessageBox.Yes | QMessageBox.No, \n QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n self.alarm_clock_remove.emit()\n\n ","sub_path":"alarm_clock_item_widget.py","file_name":"alarm_clock_item_widget.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"419991281","text":"import numpy as np\nimport env.rotations_c.rotations as rotations\nimport pybullet as p\n\n\ndef mat2quat_py(mat):\n if not isinstance(mat, np.ndarray):\n mat = np.array(mat)\n assert mat.shape == (3, 3)\n qr = 0.5 * np.sqrt(1 + mat[0, 0] + mat[1, 1] + mat[2, 2])\n if qr > 1e-6:\n qi = (mat[2, 1] - mat[1, 2]) / (4 * qr)\n qj = (mat[0, 2] - mat[2, 0]) / (4 * qr)\n qk = (mat[1, 0] - mat[0, 1]) / (4 * qr)\n else:\n qi_square = (mat[0, 0] + 1) / 2\n qj_square = (mat[1, 1] + 1) / 2\n qk_square = (mat[2, 2] + 1) / 2\n qi = np.sqrt(qi_square)\n if mat[0, 1] > 0:\n qj = np.sqrt(qj_square)\n else:\n qj = -np.sqrt(qj_square)\n if mat[0, 2] > 0:\n qk = np.sqrt(qk_square)\n else:\n qk = -np.sqrt(qk_square)\n return np.array([qi, qj, qk, qr])\n\n\ndef mat2quat(mat):\n if not isinstance(mat, np.ndarray):\n mat = np.array(mat)\n assert mat.shape == (3, 3)\n mat = np.reshape(mat, (9,))\n return np.array(rotations.mat2quat(mat))\n # qr = 0.5 * np.sqrt(1 + mat[0, 0] + mat[1, 1] + mat[2, 2])\n # if qr > 1e-6:\n # qi = (mat[2, 1] - mat[1, 2]) / (4 * qr)\n # qj = (mat[0, 2] - mat[2, 0]) / (4 * qr)\n # qk = (mat[1, 0] - mat[0, 1]) / (4 * qr)\n # else:\n # qi_square = (mat[0, 0] + 1) / 2\n # qj_square = (mat[1, 1] + 1) / 2\n # qk_square = (mat[2, 2] + 1) / 2\n # qi = np.sqrt(qi_square)\n # if mat[0, 1] > 0:\n # qj = np.sqrt(qj_square)\n # else:\n # qj = -np.sqrt(qj_square)\n # if mat[0, 2] > 0:\n # qk = np.sqrt(qk_square)\n # else:\n # qk = -np.sqrt(qk_square)\n # return np.array([qi, qj, qk, qr])\n\n\ndef quat_mul_py(q0, q1):\n if not isinstance(q0, np.ndarray):\n q0 = np.array(q0)\n if not isinstance(q1, np.ndarray):\n q1 = np.array(q1)\n assert q0.shape == q1.shape\n assert q0.shape[-1] == 4\n assert q1.shape[-1] == 4\n assert np.all(abs(np.linalg.norm(q0, axis=-1) - 1) < 1e-5)\n assert np.all(abs(np.linalg.norm(q1, axis=-1) - 1) < 1e-5)\n\n w0 = q0[..., 3]\n x0 = q0[..., 0]\n y0 = q0[..., 1]\n z0 = q0[..., 2]\n\n w1 = q1[..., 3]\n x1 = q1[..., 0]\n y1 = q1[..., 1]\n z1 = q1[..., 2]\n\n w = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1\n x = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1\n y = w0 * y1 + y0 * w1 + z0 * x1 - x0 * z1\n z = w0 * z1 + z0 * w1 + x0 * y1 - y0 * x1\n q = np.array([x, y, z, w])\n if q.ndim == 2:\n q = q.swapaxes(0, 1)\n assert q.shape == q0.shape\n return q\n\n\ndef quat_mul(q0, q1):\n quat = rotations.quat_mul(q0, q1)\n return np.asarray(quat)\n # if not isinstance(q0, np.ndarray):\n # q0 = np.array(q0)\n # if not isinstance(q1, np.ndarray):\n # q1 = np.array(q1)\n # assert q0.shape == q1.shape\n # assert q0.shape[-1] == 4\n # assert q1.shape[-1] == 4\n # assert np.all(abs(np.linalg.norm(q0, axis=-1) - 1) < 1e-5)\n # assert np.all(abs(np.linalg.norm(q1, axis=-1) - 1) < 1e-5)\n #\n # w0 = q0[..., 3]\n # x0 = q0[..., 0]\n # y0 = q0[..., 1]\n # z0 = q0[..., 2]\n #\n # w1 = q1[..., 3]\n # x1 = q1[..., 0]\n # y1 = q1[..., 1]\n # z1 = q1[..., 2]\n #\n # w = w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1\n # x = w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1\n # y = w0 * y1 + y0 * w1 + z0 * x1 - x0 * z1\n # z = w0 * z1 + z0 * w1 + x0 * y1 - y0 * x1\n # q = np.array([x, y, z, w])\n # if q.ndim == 
2:\n # q = q.swapaxes(0, 1)\n # assert q.shape == q0.shape\n # return q\n\n\ndef quat_rot_vec_py(q, v0):\n if not isinstance(q, np.ndarray):\n q = np.array(q)\n v0_norm = np.linalg.norm(v0)\n q_v0 = np.array([v0[0], v0[1], v0[2], 0]) / v0_norm\n q_v = quat_mul_py(q, quat_mul_py(q_v0, quat_conjugate(q)))\n v = q_v[:-1] * v0_norm\n return v\n\n\ndef quat_rot_vec(q, v0):\n if not isinstance(q, np.ndarray):\n q = np.array(q)\n v0_norm = np.linalg.norm(v0)\n q_v0 = np.array([v0[0], v0[1], v0[2], 0]) / v0_norm\n q_v = quat_mul(q, quat_mul(q_v0, quat_conjugate(q)))\n v = q_v[:-1] * v0_norm\n return v\n\n\ndef quat_conjugate(q):\n return np.asarray(rotations.quat_conjugate(q))\n # if not isinstance(q, np.ndarray):\n # q = np.array(q)\n # inv_q = -q\n # inv_q[..., -1] *= -1\n # return inv_q\n\n\ndef euler2quat(euler):\n return np.asarray(p.getQuaternionFromEuler(euler))\n # alpha, beta, gamma = euler[0], euler[1], euler[2]\n # qi = np.sin(alpha / 2) * np.cos(beta / 2) * np.cos(gamma / 2) - np.cos(alpha / 2) * np.sin(beta / 2) * np.sin(gamma / 2)\n # qj = np.cos(alpha / 2) * np.sin(beta / 2) * np.cos(gamma / 2) + np.sin(alpha / 2) * np.cos(beta / 2) * np.sin(gamma / 2)\n # qk = np.cos(alpha / 2) * np.cos(beta / 2) * np.sin(gamma / 2) - np.sin(alpha / 2) * np.sin(beta / 2) * np.cos(gamma / 2)\n # qr = np.cos(alpha / 2) * np.cos(beta / 2) * np.cos(gamma / 2) + np.sin(alpha / 2) * np.sin(beta / 2) * np.sin(gamma / 2)\n # assert abs(qi ** 2 + qj ** 2 + qk ** 2 + qr ** 2 - 1) < 1e-5\n # return np.array([qi, qj, qk, qr])\n\n\ndef quat2euler(q):\n '''\n :param q: [qi, qj, qk, qr]\n :return: [alpha, beta, gamma] = [roll, pitch, yaw]\n '''\n return np.asarray(p.getEulerFromQuaternion(q))\n # # TODO: deal with singularities\n # qi, qj, qk, qr = q[0], q[1], q[2], q[3]\n # assert abs(qi ** 2 + qj ** 2 + qk ** 2 + qr ** 2 - 1) < 1e-5\n #\n # sqx = qi * qi\n # sqy = qj * qj\n # sqz = qk * qk\n # squ = qr * qr\n # sarg = -2 * (qi * qk - qr * qj)\n #\n # # If the pitch angle is PI / 2 or -PI / 2, there are infinite many solutions. 
We set roll = 0\n # if sarg <= -0.99999:\n # roll = 0\n # pitch = -0.5 * np.pi\n # yaw = 2 * np.arctan2(qi, -qj)\n # elif sarg >= 0.99999:\n # roll = 0\n # pitch = 0.5 * np.pi\n # yaw = 2 * np.arctan2(-qi, qj)\n # else:\n # roll = np.arctan2(2 * (qj * qk + qr * qi), squ - sqx - sqy + sqz)\n # pitch = np.arcsin(sarg)\n # yaw = np.arctan2(2 * (qi * qj + qr * qk), squ + sqx - sqy - sqz)\n # return np.array([roll, pitch, yaw])\n\n\ndef quat2mat(q):\n mat_array = np.array(rotations.quat2mat(q))\n return np.reshape(mat_array, (3, 3))\n # qi, qj, qk, qr = q[0], q[1], q[2], q[3]\n # assert abs(qi ** 2 + qj ** 2 + qk ** 2 + qr ** 2 - 1) < 1e-5\n # mat = np.zeros((3, 3))\n # mat[0][0] = 1 - 2 * (qj * qj + qk * qk)\n # mat[0][1] = 2 * (qi * qj - qk * qr)\n # mat[0][2] = 2 * (qi * qk + qj * qr)\n # mat[1][0] = 2 * (qi * qj + qk * qr)\n # mat[1][1] = 1 - 2 * (qi * qi + qk * qk)\n # mat[1][2] = 2 * (qj * qk - qi * qr)\n # mat[2][0] = 2 * (qi * qk - qj * qr)\n # mat[2][1] = 2 * (qj * qk + qi * qr)\n # mat[2][2] = 1 - 2 * (qi * qi + qj * qj)\n # return mat\n\n\ndef quat_diff(q1, q2):\n '''\n q1 - q2\n :param q1:\n :param q2:\n :return:\n '''\n assert np.all(abs(np.linalg.norm(q1) - 1) < 1e-5)\n assert np.all(abs(np.linalg.norm(q2) - 1) < 1e-5)\n q1 = np.array(q1)\n q2 = np.array(q2)\n if q2[3] < 0:\n q2 *= -1\n if q1[3] < 0:\n q1 *= -1\n inv_q2 = quat_conjugate(q2)\n q_diff = quat_mul(q1, inv_q2)\n if q_diff[3] < 0:\n q_diff *= -1\n return q_diff\n\n\ndef is_rotation_mat(mat):\n if np.all(np.abs(mat.transpose() @ mat - np.eye(3)) < 1e-4) and np.all(np.abs(mat @ mat.transpose() - np.eye(3)) < 1e-4):\n return True\n print(mat.transpose() @ mat, mat @ mat.transpose())\n return False\n\n\ndef gen_noisy_q(magnitude=0.03):\n axis_alpha, axis_beta = np.random.uniform(-np.pi, np.pi, size=2)\n axis = np.array([np.cos(axis_alpha) * np.cos(axis_beta), np.cos(axis_alpha) * np.sin(axis_beta), np.sin(axis_alpha)])\n noise_gamma = np.random.uniform(-magnitude, magnitude)\n q = np.concatenate([np.sin(noise_gamma / 2) * axis, [np.cos(noise_gamma / 2)]])\n return q\n\n\ndef euler2mat(euler):\n roll, pitch, yaw = euler\n Rx = np.array([[1., 0., 0.],\n [0., np.cos(roll), -np.sin(roll)],\n [0., np.sin(roll), np.cos(roll)]])\n Ry = np.array([[np.cos(pitch), 0., np.sin(pitch)],\n [0., 1., 0.],\n [-np.sin(pitch), 0., np.cos(pitch)]])\n Rz = np.array([[np.cos(yaw), -np.sin(yaw), 0.],\n [np.sin(yaw), np.cos(yaw), 0.],\n [0., 0., 1.]])\n return Rz @ Ry @ Rx\n\n\ndef rvec2mat(rvec):\n angle = np.linalg.norm(rvec)\n axis = np.array(rvec) / angle\n q = np.array([np.sin(angle / 2) * axis[0], np.sin(angle / 2) * axis[1], np.sin(angle / 2) * axis[2], np.cos(angle / 2)])\n mat = quat2mat(q)\n return mat\n\n\nif __name__ == \"__main__\":\n import time\n random_q = [gen_noisy_q() for _ in range(100)]\n vec = np.random.uniform(-1., 1., size=(3,))\n t1 = time.time()\n for i in range(len(random_q) - 1):\n py_res = quat_mul_py(random_q[i], random_q[i + 1])\n # c_res = quat_mul(random_q[i], random_q[i + 1])\n # assert np.linalg.norm(py_res - c_res) < 1e-5\n print(\"py quat mul time\", time.time() - t1)\n t1 = time.time()\n for i in range(len(random_q) - 1):\n quat_mul(random_q[i], random_q[i + 1])\n print(\"c quat mul time\", time.time() - t1)\n t1 = time.time()\n for i in range(len(random_q) - 1):\n quat_rot_vec_py(random_q[i], vec)\n print(\"py quat rot vec time\", time.time() - t1)\n t1 = time.time()\n for i in range(len(random_q) - 1):\n quat_rot_vec(random_q[i], vec)\n print(\"c quat rot vec time\", time.time() - 
t1)\n","sub_path":"env/bullet_rotations.py","file_name":"bullet_rotations.py","file_ext":"py","file_size_in_byte":9468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"589705677","text":"\"\"\"\n This is hard coded for an instance.\n\"\"\"\n\n# NEO CLONE\nCLONE1_NEO = ('[]:2051', '',)\netc_folder = ''\n\nnode_list = [CLONE1_NEO]\nca_file = '%s/ca.crt' %etc_folder\ncert_file = '%s/neo.crt' %etc_folder\nkey_file = '%s/neo.key' %etc_folder\n\nreturn [node_list, [ca_file, cert_file, key_file]]\n","sub_path":"bt5/erp5_neo_administration/SkinTemplateItem/portal_skins/erp5_neo_administration/ERP5Site_getNEONodeListAndSSLCertificateLocation.py","file_name":"ERP5Site_getNEONodeListAndSSLCertificateLocation.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"347225991","text":"import json\nimport random\nfrom abc import ABC, abstractmethod\n\n\n\"\"\"\n Objects represent chartjs instances\n\"\"\"\n\n\nclass ChartMixin(ABC):\n\n beginAtZero = True\n aspectRatio = True\n stepSize = 0.5\n title = None\n legend = False\n type_chart = None\n _colors = None\n tooltips = []\n\n @abstractmethod\n def generate_dataset(self, labels, data):\n \"\"\"\n `labels` (list) str -> list of string texts\n `data` (list) int or float values -> list values to render in chart\n \"\"\"\n pass\n\n def generate_options(self):\n return {\n \"responsive\": True,\n \"maintainAspectRatio\": self.aspectRatio,\n \"legend\": {\"display\": self.legend},\n \"title\": {\n \"fontSize\": 14,\n \"display\": True if self.title is not None else False,\n \"text\": self.title if self.title is not None else \"\",\n },\n }\n\n def _generate_colors(self, labels):\n return [\n \"#{:02x}{:02x}{:02x}\".format(\n *map(lambda x: random.randint(0, 255), range(3))\n )\n for entry in labels\n ]\n\n def set_colors(self, colors):\n self._colors = colors\n\n def _get_color(self):\n return \"#{:02x}{:02x}{:02x}\".format(\n *map(lambda x: random.randint(0, 255), range(3))\n )\n\n def _get_rgba_from_hex(self, color_hex):\n color = color_hex.lstrip(\"#\")\n rgb = [int(color[i : i + 2], 16) for i in [0, 2, 4]]\n\n return \"rgba({},{},{},0.6)\".format(*map(lambda x: x, rgb))\n\n\nclass BarChart(ChartMixin):\n\n type_chart = \"bar\"\n\n def generate_options(self):\n options = super().generate_options()\n options[\"scales\"] = {\n \"yAxes\": [\n {\n \"display\": True,\n \"ticks\": {\"beginAtZero\": self.beginAtZero, \"stepSize\": self.stepSize},\n }\n ]\n }\n return options\n\n def generate_dataset(self, labels, data, dataLabel=\"\"):\n dataset = {\n \"labels\": list(labels),\n \"datasets\": [\n {\n \"label\": self.tooltips if len(self.tooltips) > 0 else dataLabel,\n \"backgroundColor\": self._colors\n if self._colors is not None\n else self._generate_colors(labels),\n \"data\": list(data),\n }\n ],\n }\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n\n\nclass HorizontalBarChart(ChartMixin):\n\n type_chart = \"horizontalBar\"\n\n def generate_options(self):\n options = super().generate_options()\n options[\"scales\"] = {\n \"xAxes\": [\n {\n \"display\": True,\n \"ticks\": {\"beginAtZero\": self.beginAtZero, \"stepSize\": self.stepSize},\n }\n ]\n }\n return options\n\n def generate_dataset(self, labels, data, dataLabel=None):\n dataset = {\n \"labels\": list(labels),\n \"datasets\": [\n {\n 
\"label\": dataLabel if dataLabel is not None else \"\",\n \"backgroundColor\": self._colors\n if self._colors is not None\n else self._generate_colors(labels),\n \"data\": list(data),\n }\n ],\n }\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n\n\nclass PieChart(ChartMixin):\n type_chart = \"pie\"\n position = \"top\" # top,right, bottom, left\n\n def generate_options(self):\n context = super().generate_options()\n context[\"legend\"] = {\"position\": self.position}\n return context\n\n def generate_dataset(self, labels, data, dataLabel=None):\n dataset = {\n \"labels\": list(labels),\n \"datasets\": [\n {\n \"label\": dataLabel if dataLabel is not None else \"\",\n \"backgroundColor\": self._colors\n if self._colors is not None\n else self._generate_colors(labels),\n \"data\": list(data),\n }\n ],\n }\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n\n\nclass DoughnutChart(PieChart):\n type_chart = \"doughnut\"\n\n\nclass PolarAreaChart(PieChart):\n type_chart = \"polarArea\"\n\n\nclass LineChart(ChartMixin):\n\n type_chart = \"line\"\n\n def create_node(self, label, data, fill=False, color=None):\n \"\"\"\n this method create special line node, you must pass parameters\n `label` str -> an label individual node, `data` list -> data render on chart,\n `fill` bool -> default is False, use this to create area chart\n `color` str -> hex color representation (when fill is True)\n \"\"\"\n colorData = color if color is not None else self._get_color()\n return {\n \"data\": list(data),\n \"label\": label,\n \"backgroundColor\": self._get_rgba_from_hex(colorData),\n \"borderColor\": colorData,\n \"fill\": fill,\n }\n\n def generate_options(self):\n options = super().generate_options()\n options[\"scales\"] = {\n \"yAxes\": [\n {\n \"display\": True,\n \"ticks\": {\"beginAtZero\": self.beginAtZero, \"stepSize\": self.stepSize},\n }\n ]\n }\n return options\n\n def generate_dataset(self, labels, data):\n dataset = {\"labels\": labels, \"datasets\": data}\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n\n\nclass GroupChart(ChartMixin):\n type_chart = \"bar\"\n\n def create_node(self, label, data, color=None):\n \"\"\"\n This method create an special node to group chart:\n `label` -> (str) text to represent individual data\n `data` -> (list) data to render in chart\n `color` -> (str) hex string representation of color, default is None\n \"\"\"\n colorData = color if color is not None else self._get_color()\n return {\"label\": label, \"backgroundColor\": colorData, \"data\": list(data)}\n\n def generate_dataset(self, labels, data):\n dataset = {\"labels\": list(labels), \"datasets\": list(data)}\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n\n\nclass RadarChart(ChartMixin):\n\n type_chart = \"radar\"\n\n def create_node(self, label, data, color=None):\n colorData = color if color is not None else self._get_rgba_from_hex(color)\n return {\n \"label\": label,\n \"fill\": True,\n \"backgroundColor\": colorData,\n \"borderColor\": color,\n \"pointBorderColor\": \"#fff\",\n \"pointBackgroundColor\": color,\n 
\"data\": list(data),\n }\n\n def generate_dataset(self, labels, data):\n dataset = {\"labels\": list(labels), \"datasets\": list(data)}\n\n return {\n \"type\": self.type_chart,\n \"data\": json.dumps(dataset, ensure_ascii=False),\n \"options\": json.dumps(self.generate_options(), ensure_ascii=False),\n }\n","sub_path":"Cockpit/Lib/site-packages/dj_chartjs/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"43213480","text":"#!/usr/local/bin/python\n#--------------------------------------------------------------------------------------------------\n#-- president_sort.py\n#--------------------------------------------------------------------------------------------------\n# Program : president_sort.py\n# To Complie : n/a\n#\n# Purpose : print presidents fname,lname,state,birth sorted by \n#\n# Called By :\n# Calls :\n#\n# Author : Rusty Myers \n# Based Upon :\n#\n# Note : \n#\n# Revisions : \n# 2014-05-29 Initial Version\n#\n# Version : 1.0\n#--------------------------------------------------------------------------------------------------\n\nimport operator\n\npresidents = []\n\nwith open(\"presidents.txt\",\"r+\") as pres_txt:\n for line in pres_txt:\n presidents.append(line[:-1].split(\":\"))\n\n# Print Fname, lname, Bstate.\nfor president in sorted(presidents,key=operator.itemgetter(1,2)):\n print(\"{1:>30} {0:<12} - Home State: {2}\".format(president[1],president[2],president[10]))\n\n# Pres#:LastName:FirstMiddle:BY:BM:BD:DY:DM:DD:BirthTown:BirthState:SOY:SOM:SOD:EOY:EOM:EOD:Party\n# 40:Reagan:Ronald Wilson:1911:Feb:06:2004:Jun:05:Tampico:Illinois:1981:Jan:20:1989:Jan:20:Republican\n","sub_path":"rzm102/president_sort.py","file_name":"president_sort.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"229844758","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sqlite3\r\nimport os\r\n\r\nst.title(' AML DATA ENGINE ')\r\nuploadfile = st.file_uploader('Choose File', type = 'xlsx')\r\nif uploadfile:\r\n df = pd.read_excel(uploadfile)\r\n #st.dataframe(df)\r\n #st.table(df)\r\n #dfc = st.cache\r\n#if st.button(\"Data in the file\"):\r\n\t#st.write (df.shape)\r\n #st.write(df['ID Type'].describe())\r\n #st.write(df.groupby(by=['ID Type']))\r\n \r\n#if st.button('ID Types'):\r\n # st.write (df.groupby('ID Type')['Customer Number'].nunique('Transaction No'))\r\n\r\n#opto = st.sidebar.selectbox(\r\n # 'START ANALYSIS',\r\n #('Email','phone')\r\n#)\r\n\r\ncal = st.multiselect(\"Select an option\", ('ID TYPES','DATA IN THE FILE','NATIONALITY','REMITTER DETAILS','USER'))\r\n\r\nif 'ID TYPES' in cal:\r\n st.write (df.groupby('ID Type')['Customer Number'].nunique('Transaction No'))\r\n\r\nif 'DATA IN THE FILE' in cal:\r\n st.write (df.shape)\r\n\r\nif 'NATIONALITY' in cal:\r\n st.write (df.pivot_table(df,index=['Remitter Nationality']))\r\n \r\nif 'REMITTER DETAILS' in cal:\r\n st.write (df.pivot_table(df,index=['ID Type','Remitter Name']))\r\n \r\nif 'USER' in cal:\r\n st.write (df.pivot_table(df,index=['User Code','LC Amt'],aggfunc={'LC Amt':np.sum}))\r\n \r\n \r\n#complete untill now but needs work on 1. cache, 2. 
pivot tabel","sub_path":"moo.py","file_name":"moo.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"479282120","text":"#!/usr/bin/env python3\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\n\nif __name__ == \"__main__\":\n # Main code goes here\n\n #load the image from previous script\n f = h5py.File(\"filtered.h5\", \"r\")\n dset = f['/data/normalized']\n \n #sort the script\n sort = np.sort(dset)\n\n #get the mean and standard deviation\n mean = np.mean(sort)\n sd = np.std(sort)\n #the minimum 95% is within two standard deviations of the mean\n top_95 = mean + sd + sd\n\n #clip the data to only show the top 5%\n data = np.clip(dset, top_95, None)\n\n #plot the final image\n imgplot = plt.imshow(data, cmap='gray')\n plt.show()\n\n\n exit(0)\n","sub_path":"assignment1/solution-3.4.py","file_name":"solution-3.4.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"460485888","text":"#Name: Benjamin Yee\n#Teacher: Campbell\n#Section: 17/18\n\nfrom data import *\nfrom cast import *\nimport unittest\n\nclass TestCast(unittest.TestCase):\n def test_cast_ray_1(self):\n slist=[]\n p=Point(0,0,0)\n v=Vector(1,0,0)\n lig=Light(Point(0,0,100),Color(0.0,0.0,0.0))\n self.assertEqual(cast_ray(Ray(p,v),slist,Color(1.0,1.0,1.0),lig,p),\\\n Color(1.0,1.0,1.0))\n\n\n def test_cast_ray_6(self):\n fin=Finish(0.4,0.4,0.5,0.05)\n s2=Sphere(Point(1,0,0),1,Color(1.0,0,0),fin)\n s1=Sphere(Point(1,0,-3),1,Color(0,1.0,0),fin)\n slist=[s1,s2]\n p=Point(0,0,-14)\n v=Vector(1,0,14)\n lig=Light(Point(1,0,-100),Color(0.0,0.1,0.0))\n self.assertEqual(cast_ray(Ray(p,v),slist,Color(1.0,1.0,1.0),lig,p),\\\n Color(0.0,0.441796101,0.0))\n \n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"hw4/part5/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"344790261","text":"from multiprocessing import Process\nfrom core.common.plugin import AbstractPlugin\n\n\nclass Monitoring(AbstractPlugin):\n\n def __init__(self, monitoring_plugins, configuration_manager,\n inventory):\n self.monitoring_plugins = monitoring_plugins\n self.configuration_manager = configuration_manager\n self.inventory = inventory\n def start(self):\n self.monitoring_all()\n\n def monitoring_of(self, name):\n self.logger.info(u\"Starting monitoring of {0}\".format(name))\n try:\n self.monitoring_plugins[name].plugin.monitor(self.inventory)\n except Exception as error:\n self.logger.error(error)\n\n def monitoring_all(self):\n try:\n result_query = self.configuration_manager.get('monitoring')\n for elem in result_query:\n name = elem.type.name\n list_plugin = self.monitoring_plugins.enabled_plugins\n for plugin in list_plugin:\n if plugin == name:\n self.logger.info(\n u\"Starting monitoring of {0}\".format(name))\n pl = self.monitoring_plugins[plugin].plugin\n p = Process(\n target=pl.monitor, args=(self.inventory,))\n p.start()\n\n except Exception as error:\n self.logger.error(error)\n","sub_path":"plugins/monitor/monitor_manager.py","file_name":"monitor_manager.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"29824665","text":"import math\nfrom random import sample, shuffle\n\n\ndef 
__calculate_purity__(data, k, labels, clusters):\n\tsum = 0\n\tn = len(data)\n\n\tfor j in range(k):\n\t\tlst = {}\n\t\tfor i in clusters[j]:\n\t\t\tlst.update({labels[i]: 0})\n\t\tfor i in clusters[j]:\n\t\t\tlst[labels[i]] += 1\n\t\tmax_key = max(lst, key=lambda k: lst[k])\n\t\tsum += lst[max_key]\n\n\tret = float(sum)/n\n\n\treturn round(ret, 3)\n\n\n\ndef __hopkins_stats_(data):\n\tn = len(data)\n\tdata_points = [i for i in range(n)]\n\tm = n//3\n\n\tavg_hopkins = 0\n\titer_no = 1\n\n\tfor iter in range(iter_no):\n\t\tshuffle(data_points)\n\t\tX = sample(data_points, m)\n\t\tY = sample(data_points, m)\n\n\t\txsum = 0\n\t\tysum = 0\n\n\t\tfor i in X:\n\t\t\tmn = float(\"inf\")\n\t\t\tfor j in data_points:\n\t\t\t\tif i != j:\n\t\t\t\t\tmn = min(mn, euclidean_distance(data[i], data[j]))\n\t\t\txsum += mn\n\n\t\tnew_data_points = []\n\n\t\tfor i in data_points:\n\t\t\tif i not in Y:\n\t\t\t\tnew_data_points.append(i)\n\n\t\tfor i in Y:\n\t\t\tmn = float(\"inf\")\n\t\t\tfor j in data_points:\n\t\t\t\tif i!=j:\n\t\t\t\t\tmn = min(mn, euclidean_distance(data[i], data[j]))\n\t\t\tysum += mn\n\n\t\tavg_hopkins += ((ysum) / (xsum + ysum))\n\n\treturn round(avg_hopkins/iter_no, 3)\n\n\ndef __silhouette_coefficient__(data, k, clusters):\n\tavg_silhouette = 0\n\tA = {}\n\tB = {}\n\n\tn = len(data)\n\tfor c in range(k):\n\t\tfor i in clusters[c]:\n\t\t\tavg_dis = 0\n\t\t\tfor j in clusters[c]:\n\t\t\t\tavg_dis += euclidean_distance(data[i], data[j])\n\t\t\tA.update({i: avg_dis/(len(clusters[c])-1)})\n\n\tfor c in range(k):\n\t\tfor i in clusters[c]:\n\t\t\tmn = float(\"inf\")\n\t\t\tfor p in range(k):\n\t\t\t\tif p == c: continue\n\t\t\t\tavg_dis = 0\n\t\t\t\tfor j in clusters[p]:\n\t\t\t\t\tavg_dis += euclidean_distance(data[i], data[j])\n\t\t\t\tavg_dis /= len(clusters[p])\n\t\t\t\tmn = min(mn, avg_dis)\n\t\t\tB.update({i:mn})\n\n\tfor i in range(n):\n\t\ts = (B[i] - A[i])/max(B[i], A[i])\n\t\tavg_silhouette += s\n\n\tavg_silhouette /= n\n\n\treturn round(avg_silhouette, 3)\n\n\ndef manhattan_distance(a, b):\n\tsum = 0\n\tfor i in range(len(a)):\n\t\tsum += math.fabs(a[i]-b[i])\n\treturn sum\n\n\ndef euclidean_distance(a, b):\t\t\t\t\t# euclidean distance of two points\n\td = 0.0\n\tfor i in range(len(a)):\n\t\td = d + (a[i] - b[i]) ** 2\n\treturn math.sqrt(d)\n\n\ndef distance(a, b):\t\t\t\t\t\t\t\t# distance between object a and b\n\treturn euclidean_distance(a, b)\n\n\ndef find_means(list):\t\t\t\t\t\t\t# finding means from list of 2d points\n\tn = len(list)\n\tattr = len(list[0])\n\tlst = []\n\tfor i in range(attr):\n\t\tm = 0\n\t\tfor ele in list:\n\t\t\tm += ele[i]\n\t\tlst.append(m/n)\n\treturn lst\n\n\ndef plot_graph():\n\tprint()\n\nif __name__ == '__main__':\n\tar = [1, 2, 3, 4, 5, 6, 7, 8]\n\tpr = sample(ar, 3)\n\tprint(pr)\n\n","sub_path":"Clustering/Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"594715389","text":"# Problem [4047] : Yeongjun's card counting\n# Input : the suit and number of each card in hand\n# Output : for each suit, the number of cards missing to complete a set of 13\n\ndef my_card(cards):\n    my_card = [[] for _ in range(4)]\n    result = list()\n    for card in cards:\n        if card[0] == 'S':\n            num = int(''.join(map(str,card[1])))\n            if num not in my_card[0]:\n                my_card[0].append(num)\n            else :\n                return 'ERROR'\n        elif card[0] == 'D':\n            num = int(''.join(map(str,card[1])))\n            if num not in my_card[1]:\n                my_card[1].append(num) \n            else :\n                return 'ERROR' \n        elif card[0] == 'H':\n            num = int(''.join(map(str,card[1])))\n            if num not in my_card[2]:\n                my_card[2].append(num)\n            else :\n                return 'ERROR'\n        elif card[0] == 'C':\n            num = int(''.join(map(str,card[1])))\n            if num not in my_card[3]:\n                my_card[3].append(num)\n            else :\n                return 'ERROR'\n    for c in my_card:\n        result.append(13-len(c))\n    result = ' '.join(map(str,result))\n    return result\n    \n\nT = int(input())\nfor tc in range(1, T+1):\n    data = input()\n    data = data.replace('A','01')\n    data = data.replace('J','11')\n    data = data.replace('Q','12')\n    data = data.replace('K','13')\n    cards = [[data[i], data[i+1:i+3]] for i in range(0, len(data), 3)]\n    result = my_card(cards)\n    print('#{} {}'.format(tc,result))\n\n","sub_path":"SWEA/D3/SWEA_4047.py","file_name":"SWEA_4047.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"286256233","text":"# -*- encoding: utf-8 -*-\n#\n#Created on Mar 5, 2018\n#\n#@author: dogan\n#\n\nimport time\n\nfrom openerp.tools import DEFAULT_SERVER_DATE_FORMAT\nfrom openerp.tools import float_compare, float_is_zero\nfrom openerp import api, models, fields, tools, _\nfrom openerp.exceptions import Warning as UserError\n\n\nclass MepBoMTemplateLineRule(models.Model):\n    _name= 'mrp.bom.template_line.rule'\n    \n    template_line_id = fields.Many2one('mrp.bom.template_line','BoM Template Line')\n    attribute_id = fields.Many2one('product.attribute',string='Component Attribute', required=True)\n    bom_attribute_id = fields.Many2one('product.attribute',string='Product Attribute', required=True)\n    offset = fields.Float('Offset')\n    coeff = fields.Float('Coefficient')\n    precision = fields.Integer('Precision')\n    \n    \n    @api.multi\n    def compute_attribute_value(self, bom_attribute_value):\n        self.ensure_one()\n        \n        attr_val_obj = self.env['product.attribute.value']\n        attr_val = bom_attribute_value * self.coeff + self.offset\n        \n        # attr_val = '%*.f' % (self.precision, attr_val)\n        attr_val_id = attr_val_obj.search([('attribute_id', '=', self.attribute_id.id),('numeric_value', '=', attr_val)], limit=1)\n        \n        return attr_val_id\n    \n    \n\nclass MrpBoMTemplateLine(models.Model):\n    _name = 'mrp.bom.template_line'\n    \n    bom_id = fields.Many2one('mrp.bom',string='BoM',required=False, ondelete='cascade')\n    product_tmpl_id = fields.Many2one('product.template',string='Product Template', required=True)\n    product_id = fields.Many2one('product.product',string='Product', compute='_compute_product')\n    type=fields.Selection([('normal', 'Normal'), ('phantom', 'Phantom')], 'BoM Line Type', required=True,\n              help=\"Phantom: this product line will not appear in the raw materials of manufacturing orders,\"\n                   \"it will be directly replaced by the raw materials of its own BoM, without triggering\"\n                   \"an extra manufacturing order.\", default='normal')\n    \n    date_start = fields.Date('Valid From', help=\"Validity of component. Keep empty if it's always valid.\")\n    date_stop = fields.Date('Valid Until', help=\"Validity of component. Keep empty if it's always valid.\")\n    routing_id = fields.Many2one('mrp.routing', 'Routing', help=\"The list of operations (list of work centers) to produce the finished product. 
The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning.\")\n \n product_rounding = fields.Float('Product Rounding', help=\"Rounding applied on the product quantity.\")\n product_efficiency = fields.Float('Manufacturing Efficiency', required=True, help=\"A factor of 0.9 means a loss of 10% within the production process.\", default=1.0)\n property_ids = fields.Many2many('mrp.property', string='Properties') #Not used\n\n attribute_value_ids = fields.Many2many('product.attribute.value', string='Variants', help=\"BOM Product Variants needed form apply this line.\")\n #child_line_ids = fields.One2many(\"mrp.bom.line\", string=\"BOM lines of the referred bom\", compute='_get_child_bom_lines')\n \n \n bom_attribute_ids = fields.Many2many('product.attribute',string='BoM Product Attributes',compute='_compute_bom_attributes')\n attribute_ids = fields.Many2many('product.attribute',string='Attributes',compute='_compute_attributes')\n product_qty = fields.Float('Quantity')\n product_uom = fields.Many2one('product.uom',string='UoM')\n product_uos_qty = fields.Float('Product UOS Qty')\n product_uos = fields.Many2one('product.uom', 'Product UOS', help=\"Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock.\")\n \n variant_rule_ids = fields.One2many('mrp.bom.template_line.rule','template_line_id', string='Variant Rules')\n \n# @api.multi\n# def _get_child_bom_lines(self):\n# \"\"\"If the BOM line refers to a BOM, return the ids of the child BOM lines\"\"\"\n# bom_obj = self.env['mrp.bom']\n# res = {}\n# for bom_template_line in self:\n# bom_id = bom_obj._bom_find(\n# product_tmpl_id=bom_template_line.product_tmpl_id.id,\n# product_id=bom_template_line.product_id.id)\n# if bom_id:\n# child_bom = bom_obj.browse(cr, uid, bom_id, context=context)\n# res[bom_line.id] = [x.id for x in child_bom.bom_line_ids]\n# else:\n# res[bom_line.id] = False\n# return res\n\n \n @api.multi\n def _compute_variant_values(self):\n self.ensure_one()\n bom_id = self.bom_id\n product_to_produce = self.env.context.get('product_produce', False) or \\\n bom_id.product_id or \\\n bom_id.product_tmpl_id.product_variant_ids[0]\n \n product_produce_values = { att_val.attribute_id.id:att_val.numeric_value for att_val in product_to_produce.attribute_value_ids }\n product_consume_values = self.env['product.attribute.value']\n \n for rule in self.variant_rule_ids:\n val = product_produce_values[rule.bom_attribute_id.id]\n variant_val = rule.compute_attribute_value(val)\n product_consume_values |= variant_val\n \n return product_consume_values\n \n @api.multi\n @api.depends('product_tmpl_id')\n def _compute_product(self):\n for bom_tmpl_line in self:\n val_ids = bom_tmpl_line._compute_variant_values()\n if len(val_ids) != len(bom_tmpl_line.variant_rule_ids):\n # one or more values are not defined in the values\n bom_tmpl_line.product_id = False\n \n else:\n def has_all_values(product):\n return product.attribute_value_ids == val_ids\n\n variant_ids = bom_tmpl_line.product_tmpl_id.product_variant_ids.filtered(has_all_values)\n \n bom_tmpl_line.product_id = variant_ids and variant_ids[0] or False\n \n \n \n \n @api.multi\n @api.depends('product_tmpl_id')\n def _compute_attributes(self):\n for line in self:\n line.attribute_ids = line.product_tmpl_id.attribute_line_ids.mapped('attribute_id')\n \n @api.multi\n @api.depends('bom_id','bom_id.product_tmpl_id')\n def _compute_bom_attributes(self):\n for line in self:\n 
line.bom_attribute_ids = line.bom_id.product_tmpl_id.attribute_line_ids.mapped('attribute_id')\n \n @api.onchange('product_uom') \n def onchange_uom(self):\n self.ensure_one()\n res = {'value': {}}\n if not self.product_uom:\n return res\n \n if self.product_uom.category_id.id != self.product_tmpl_id.uom_id.category_id.id:\n res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}\n res['value'].update({'product_uom': self.product_tmpl_id.uom_id.id})\n \n return res\n \n\nclass MrpBoM(models.Model):\n _inherit = 'mrp.bom'\n \n template_line_ids = fields.One2many('mrp.bom.template_line','bom_id', string='Template Lines')\n \n @api.model\n def _skip_template_line(self, line, product):\n \"\"\" Control if a BoM template line should be produce, can be inherited for add\n custom control.\n @param line: BoM template line.\n @param product: Selected product produced.\n @return: True or False\n \"\"\"\n if line.date_start and line.date_start > time.strftime(DEFAULT_SERVER_DATE_FORMAT) or \\\n line.date_stop and line.date_stop < time.strftime(DEFAULT_SERVER_DATE_FORMAT):\n return True\n # all bom_line_id variant values must be in the product\n if line.attribute_value_ids:\n if not product or (set(map(int,line.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))):\n return True\n return False\n \n \n \n @api.model\n def _prepare_template_consume_line(self, tmpl_line, product_id, quantity, factor=1):\n uos_qty = (tmpl_line.product_uos and\n self._factor(\n tmpl_line.product_uos_qty * factor,\n tmpl_line.product_efficiency, tmpl_line.product_rounding))\n return {\n 'name': product_id.name,\n 'product_id': product_id.id,\n 'product_qty': quantity,\n 'product_uom': tmpl_line.product_uom.id,\n 'product_uos_qty': uos_qty or False,\n 'product_uos': tmpl_line.product_uos.id,\n }\n \n @api.v7\n def _bom_explode(self, cr, uid, bom, product, factor, properties=None,\n level=0, routing_id=False, previous_products=None,\n master_bom=None, context=None):\n \n return super(MrpBoM, self)._bom_explode(cr, uid, bom,\n product, factor, properties=properties, level=level,\n routing_id=routing_id, previous_products=previous_products,\n master_bom=master_bom, context=context)\n \n \n @api.v8\n def _bom_explode(self, product, factor, properties=None, level=0,\n routing_id=False, previous_products=None,\n master_bom=None):\n result, result2 = super(MrpBoM, self)._bom_explode(product, factor, properties=properties, level=level,\n routing_id=routing_id, previous_products=previous_products,\n master_bom=master_bom)\n master_bom = master_bom or self\n \n uom_obj = self.env['product.uom']\n \n for tmpl_line in self.template_line_ids:\n if self._skip_template_line(tmpl_line, product):\n continue\n if (set(map(int, tmpl_line.property_ids or [])) -\n set(properties or [])):\n continue\n product_tmpl_id = tmpl_line.product_tmpl_id.id\n if (previous_products and\n product_tmpl_id in previous_products):\n raise UserError(\n _('BoM \"%s\" contains a BoM line with a product recursion: '\n '\"%s\".') % (master_bom.name,\n tmpl_line.product_tmpl_id.name_get()[0][1]))\n \n \n #TODO: find the matching product and explode\n product_id = tmpl_line.with_context(product_produce=product).product_id\n if not product_id:\n raise UserError('no product found for template %s' % tmpl_line.product_tmpl_id.display_name)\n \n quantity = self._factor(\n tmpl_line.product_qty * factor,\n tmpl_line.product_efficiency, tmpl_line.product_rounding)\n 
\n bom2 = self._bom_find(product_id=product_id.id, properties=properties)\n \n \n # If BoM should not behave like PhantoM, just add the product,\n # otherwise explode further\n if (tmpl_line.type != \"phantom\" and\n (not bom2 or bom2.type != \"phantom\")):\n result.append(\n self._prepare_template_consume_line(tmpl_line, product_id, quantity, factor))\n elif bom2:\n all_prod = [self.product_tmpl_id.id] + (previous_products or [])\n #bom2 = self.browse(bom_id)\n # We need to convert to units/UoM of chosen BoM\n factor2 = uom_obj._compute_qty(\n tmpl_line.product_uom.id, quantity, bom2.product_uom.id)\n quantity2 = factor2 / bom2.product_qty\n res = bom2._bom_explode(\n product_id, quantity2, properties=properties,\n level=level + 10, previous_products=all_prod,\n master_bom=master_bom)\n result = result + res[0]\n result2 = result2 + res[1]\n else:\n raise UserError(\n _('BoM \"%s\" contains a phantom BoM line but the product '\n '\"%s\" does not have any BoM defined.') %\n (master_bom.name, product_id.name))\n \n \n return result, result2\n \n \n @api.multi\n def _find_matching_product(self):\n ''' Find the matching variant component for the rules\n '''\n self.ensure_one()\n return self.product_variant_ids[0]\n \n \n \nclass MrpBoMLine(models.Model):\n _inherit = 'mrp.bom.line'\n \n factor_attribute_id = fields.Many2one('product.attribute',string='Factor Attribute',\n help='End product attribute to use for raw material calculation')\n attribute_factor = fields.Float(string='Factor',help='Factor to multiply by the numeric value of attribute')\n \n ","sub_path":"mrp_dynamic_raw_materials/models/mrp_bom.py","file_name":"mrp_bom.py","file_ext":"py","file_size_in_byte":13109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"651252056","text":"\"\"\"\nGiven a tree, rearrange the tree in in-order so that the leftmost node in the tree is now\nthe root of the tree, and every node has no left child and only 1 right child.\nExample 1:\nInput: [5,3,6,2,4,null,8,1,null,null,null,7,9]\n\n 5\n / \\\n 3 6\n / \\ \\\n 2 4 8\n / / \\\n1 7 9\n\nOutput: [1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9]\n\n 1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n \\\n 7\n \\\n 8\n \\\n 9\nNote:\n\nThe number of nodes in the given tree will be between 1 and 100.\nEach node will have a unique integer value from 0 to 1000.\n\"\"\"\nclass Tree:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef increasingBST(root):\n \"\"\"Runtime: 96 ms, faster than 72.46% of Python online submissions for Increasing Order Search Tree.\n\"\"\"\n arr = foo(root, [])\n tree = Tree(999)\n for a in arr:\n tree.right = Tree(a)\n tree = tree.right\n return tree.right\n\ndef foo(root, arr):\n if root == None:\n return\n foo(root.left, arr)\n arr.append(root.val)\n foo(root.right, arr)\n\n return arr\n","sub_path":"8_training/increasing_order_BST.py","file_name":"increasing_order_BST.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"598785285","text":"import grbl\nimport time\nimport argparse\nimport websockets\nimport asyncio\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', '--verbose', help='Print every debug messages on the console', action='store_true')\nparser.add_argument('-t', '--test', help='do not output serial', action='store_true')\nargs = parser.parse_args()\nprint('SERVER :server 
arguments')\nprint('SERVER :--verbose: ' + str(args.verbose))\nprint('SERVER :--test: ' + str(args.test))\n\n# list port = python -m serial.tools.list_ports\nemotion_list = [\"Angry\", \"Disgusted\", \"Fearful\",\n \"Happy\", \"Neutral\", \"Sad\", \"Surprised\"]\nport_dict = {0: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_75830333938351103152-if00',\n 1: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_85734323331351204021-if00',\n 2: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_85632333136351C050B0-if00',\n 3: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_55838343633351410212-if00',\n 4: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_5583834363335161C161-if00',\n 5: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_55838343833351905281-if00',\n 6: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_75833353734351D08290-if00',\n 7: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_55838343633351318152-if00',\n 8: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_558383436333516132C0-if00',\n 9: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_558383436333513181D0-if00',\n 10: '/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_5583834363335161C140-if00',\n 11: '/dev/serial/by-id/usb-NicoHood_HoodLoader2_Uno-if00'}\nsetting_dict = {'homing_cycle': ['$22', 1],\n 'homing_feed': ['$24', 2000],\n 'homing_seek': ['$25', 6000],\n 'homing_debounce': ['$26', 25],\n 'x_step': ['$100', 80],\n 'y_step': ['$101', 80],\n 'z_step': ['$102', 80],\n 'x_max_rate': ['$110', 10000],\n 'y_max_rate': ['$111', 10000],\n 'z_max_rate': ['$112', 10000],\n 'x_accel': ['$120', 1000],\n 'y_accel': ['$121', 1000],\n 'z_accel': ['$122', 1000],\n 'x_travel': ['$130', 1000],\n 'y_travel': ['$131', 1000],\n 'z_travel': ['$132', 1000],\n }\nposition_data_dict = {\"Angry\": [[19, 50.65, 9.29], [24.85, 8.52, 1.81], [-5.55, 17.87, 1.28], [-9.51, 18.79, -18.11],\n [30.73, 29.96, 1.46], [9.42, -2.63, 26.25], [33.88, -2.51, -0.55],\n [-8.27, -20.74, -30.97], [17.03, 26.93, -14.53], [-41.05, -27.66, -3.65],\n [-48.46, -43.57, -31.81], [-43.75, -23.84, -41.16]\n ],\n \"Disgusted\": [[-41.2, -13.46, 7.43], [4.1, -12.98, -17.56], [2.96, 33.1, 45.52],\n [30.42, 5.86, -2.09], [12.2, 28.1, 21.67], [-7.51, -35.22, -38.34],\n [-19.45, -3.46, -9.97], [-31.68, -42.36, -25.77], [6.2, 25.89, 19.48],\n [1.32, -2.41, 17.11], [41.23, 43.98, 20.42], [-7.81, -15.71, -1.75]\n\n ],\n \"Fearful\": [[17.15, 19.92, 32.44], [42.73, 35.83, 7.82], [-27.86, -48.92, -41.18],\n [-9.91, 24.4, 42.16], [38.74, 25.3, 16.97], [20, 28.72, 33.45], [30.31, 23.84, 21.11],\n [24.32, 29.35, 30.79], [27.59, 23.61, 23.1], [26.22, 29.34, 29.17],\n [26.19, 23.79, 24.47], [27.26, 29.04, 27.97]\n ],\n \"Happy\": [[-21.3, -18.21, -15.64], [-15.88, -19.15, -23.03], [-24.2, -21.11, -15.67],\n [-12.22, -14.42, -21.92], [-29.61, -30.32, -19.61], [0.63, 22.25, 35.09],\n [32.77, 16.44, -5.84], [-23.92, -31.19, -27.78], [-19.42, -13.12, -12.75],\n [-17.26, -22.41, -24.29], [-22.05, -17.99, -15.46], [-16.17, -19.18, -21.9]\n ],\n \"Neutral\": [[50, 49.89, 49.55], [48.96, 48.1, 46.95], [45.47, 43.64, 41.43],\n [38.81, 35.75, 32.23], [28.24, 23.77, 18.85], [13.54, 7.93, 2.13],\n [-3.72, -9.46, -14.97], [-20.13, -24.89, -29.18], [-33.01, -36.38, -39.3],\n [-41.8, -43.91, -45.66], [-47.08, -48.18, -49.01], [-49.57, -49.9, -50]\n ],\n \"Sad\": [[50, 38.9, 28.45], [18.65, 9.51, 1.02], [-6.82, -14, -20.53], [-26.41, -31.63, -36.2],\n [-40.12, -43.39, -46], [-47.96, -49.27, -49.92], [-49.92, -49.27, -47.96],\n [-46, -43.39, -40.12], [-36.2, 
-31.63, -26.41], [-20.53, -14, -6.82], [1.02, 9.51, 18.65],\n [28.45, 38.9, 50]\n ],\n \"Surprised\": [[-50, -49.97, -49.88], [-49.72, -49.51, -49.22], [-48.87, -48.44, -47.94],\n [-47.37, -46.72, -45.98], [-45.16, -44.25, -43.25], [-42.14, -40.93, -39.61],\n [-38.17, -36.6, -34.89], [-33.04, -31.01, -28.81], [-26.41, -23.79, -20.9],\n [-17.73, -14.2, -10.26], [-5.8, -0.66, 5.42], [12.95, 23.24, 50]\n ],\n \"null\": [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],\n [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]\n ]\n }\n\n\ndef wait_response(ports):\n while True:\n if args.test:\n break\n counter = 0\n print('SERVER :Waiting for ', end='')\n for port in ports:\n if port.in_waiting() <= 0:\n print('#' + str(port.num) + ', ', end='')\n counter += 1\n if counter == 0:\n break\n print('', end='\\n')\n time.sleep(1)\n for port in ports:\n print('SERVER :#' + str(port.num) + ': ' + port.rx(), end='')\n print('\\nSERVER :Got responses from ' + str(len(ports)) + ' devices.')\n\n\ndef collect_position(ports):\n counter = 0\n position = []\n for new_device in ports:\n for i in range(3):\n position.append(new_device.position_data[i])\n # print(str(position[counter]) + ' ', end='')\n counter += 1\n # print('')\n step = -5\n for i in range(50, -50, step):\n for j in range(len(position)):\n if i >= position[j] > i+step:\n print('@', end='')\n else:\n print('|', end='')\n print(' ', end='')\n print('')\n\n\nclass TimerHandler:\n def __init__(self, time_mode_change, time_mode_same, threshold):\n self.time_mode_change = time_mode_change\n self.time_mode_same = time_mode_same\n self.time_next_move = time_mode_change\n self.time_last_triggered = 0\n self.input = 'null'\n self.mode = 'null'\n self.threshold = threshold\n self.counter = 0\n\n def count(self):\n self.counter += 1\n\n def reset_counter(self):\n self.counter = 0\n\n def reset_timer(self):\n self.time_last_triggered = time.time()\n\n def is_time_over(self):\n if time.time() - self.time_last_triggered > self.time_next_move:\n return True\n else:\n return False\n\n def should_mode_change(self):\n if self.counter >= self.threshold:\n return True\n else:\n return False\n\n def save_mode(self, mode):\n self.mode = mode\n\n def save_input(self, input_input):\n self.input = input_input\n\n def save_time_next_move(self, time_input):\n self.time_next_move = time_input\n if args.verbose:\n print(\"TIMER :Time until next move: \" + str(self.time_next_move))\n\n\nnum_of_grbl = 12\nmy_grbl = []\n\n# initialize\nfor i in range(num_of_grbl):\n my_grbl.append(grbl.GRBL(port_dict[i], timeout=5, num=i, pos_max=-10, pos_min=-420, iteration=5))\n # my_grbl[i].get_settings()\n my_grbl[i].set_settings(setting_dict)\nfor device in my_grbl:\n device.home(wait=False)\n# wait for response of homing ended\nwait_response(my_grbl)\n# initialize position to 'null'\nfor device in my_grbl:\n device.set_position(position_data_dict['null'][device.num], 10000, 'G1')\n device.move()\n\ntimer = TimerHandler(time_mode_change=3, time_mode_same=1, threshold=5)\n\n\nasync def receive_data(websocket, path):\n global timer\n global my_grbl\n received_data = await websocket.recv()\n print(f\"SERVER :Received data: {received_data}\")\n if args.verbose:\n print(f\"SERVER :Last Data: {timer.input}\")\n if received_data == timer.input:\n timer.count()\n else:\n timer.reset_counter()\n if timer.is_time_over():\n print('\\033[35m' + 'SERVER :' + str(timer.time_next_move) + 'seconds passed' + '\\033[0m')\n for moving_grbl in my_grbl:\n moving_grbl.move()\n if 
timer.mode != 'null':\n moving_grbl.iterate()\n timer.reset_timer()\n else:\n print('\\033[35m' + 'SERVER :' + str(timer.time_next_move) + 'seconds not passed' + '\\033[0m')\n\n if timer.should_mode_change() and timer.mode != received_data:\n # change mode\n timer.save_mode(received_data)\n print(f\"\\033[32mSERVER :Mode change to {timer.mode}\\033[0m\")\n timer.save_time_next_move(timer.time_mode_change)\n for changing_grbl in my_grbl:\n changing_grbl.reset()\n changing_grbl.set_position(position_data_dict[timer.mode][changing_grbl.num], 10000, 'G0')\n # collect_position(my_grbl)\n else:\n print(f\"\\033[32mSERVER :Mode is same to {timer.mode}\\033[0m\")\n timer.save_time_next_move(timer.time_mode_same)\n for setting_grbl in my_grbl:\n setting_grbl.set_position(position=setting_grbl.get_position(), feedrate=5000, mode='G1')\n timer.save_input(received_data)\n collect_position(my_grbl)\n\n\nstart_server = websockets.serve(receive_data, \"localhost\", 8765)\n\nasyncio.get_event_loop().run_until_complete(start_server)\nasyncio.get_event_loop().run_forever()\n\nfor device in my_grbl:\n device.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"15175885","text":"__author__=\"Federico Fancellu\"\n\nfrom argparse import ArgumentParser\nimport codecs\n\nclass TreeModifer(object):\n def __init__(self,args):\n self.inputTB = codecs.open(args.t,'rb','utf8')\n self.outputTB = codecs.open(args.o,'wb','utf8')\n if args.a=='toUniversalPOS':\n self.all_universal_POS()\n self.close_streams()\n\n def all_universal_POS(self):\n for line in self.inputTB:\n if line[0].isdigit():\n items = line.strip().split('\\t')\n items[4] = items[3]\n self.outputTB.write('\\t'.join(items) + '\\n')\n else:\n self.outputTB.write(line)\n\n def close_streams(self):\n self.inputTB.close()\n self.outputTB.close()\n\n\nif __name__==\"__main__\":\n parser = ArgumentParser()\n parser.add_argument('-t',help=\"Filepath to the UD treebank\",required=True)\n parser.add_argument('-o',help=\"Filepath to output UD treebank\",required=True)\n parser.add_argument('-a',choices=['toStandardPOS','toUniversalPOS'],required=True)\n args = parser.parse_args()\n TreeModifer(args)\n","sub_path":"scripts/utils/format_ud_tb.py","file_name":"format_ud_tb.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"191865405","text":"from django.shortcuts import render\nfrom learning_logs.models import Topic,Entry\nfrom django.http import HttpResponseRedirect\nfrom learning_logs.forms import TopicForm,EntryForm\n\n# Create your views here.\ndef index(request):\n return render(request,'learning_logs/index.html')\ndef topics(request):\n topics=Topic.objects.order_by('date_added')\n context={'topics':topics}\n return render(request,'learning_logs/topics.html',context)\ndef topic(request,topic_id):\n topic=Topic.objects.get(id=topic_id)\n entries=topic.entry_set.order_by('-date_added')#减号指的是降序\n context={'topic':topic,'entries':entries}\n return render(request,'learning_logs/topic.html',context)\n'''\n利用(?Pd+)捕获的值,传入topic()视图中并获取他,\n再将获取到进行降序排序,即显示最近的主题和条目并保存到context的字典中\n最后再传回topic.html文件中\n'''\ndef new_topic(request):\n #未提交数据:创建一个新表单\n if request.method!='POST':\n form=TopicForm()\n else:\n form=TopicForm(request.POST)\n if form.is_valid():\n form.save()\n return 
HttpResponseRedirect(reversed('learning_logs:topics.html'))\n context={'form':form}\n print(\"i am oklkld\")\n return render(request,'learning_logs/new_topic.html',context)\ndef new_entry(request,topic_id):\n '''在待定的主题中添加新条目'''\n topic=Topic.objects.get(id=topic_id)\n if request.method!='POST':\n print(\"i am okla\")\n #这里创建的新表单form有问题,导致加入的entry内容进不去,数据库也不行\n form=EntryForm()\n else:\n form=EntryForm(data=request.POST)\n if form.is_valid():\n new_entry=form.save(commit=False)\n new_entry.topic=topic\n new_entry.save()\n #这里的跳转跳不过去\n return HttpResponseRedirect(reversed('learning_logs:topic',args=[topic_id]))\n context={'topic':topic,'form':form}\n return render(request,'learning_logs/new_entry.html',context)\ndef edit_entry(request,entry_id):\n entry=Entry.objects.get(id=entry_id)\n topic=entry.topic\n if request.method!='POST':\n form=EntryForm(instance=entry)#创建一根EntryForm表单实例,并使用instance是他可以把当前的文本填充\n else: #用户将可以看到并编辑\n form=EntryForm(instance=entry,data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reversed('learning_logs:topics',args=[topic.id]))\n #return render(request,'learning_logs/topic.html')\n context={'entry':entry,'topic':topic,'form':form}\n return render(request,'learning_logs/edit_entry.html',context)\ndef deletes(request):\n sql_topic=Topic.objects.get(id=12)\n sql_topic.delete()\n return render(request,'learning_logs/index.html')\n #return HttpResponseRedirect(reversed('learning_logs:index'))","sub_path":"learning_logs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"6877710","text":"import pandas as pd\nimport numpy as np\nimport jsonschema\nimport json\nfrom string import Template\nfrom functools import reduce\n\nspec_filepath = \"doc/covidom_struc.csv\"\noutput_filepath = \"sql_script/covidom_alter_tbls_fk.sql\"\n\nalter_tmplt = Template(\"\"\"ALTER TABLE $table_dest\n ADD FOREIGN KEY ($dest_field) REFERENCES $source_table($source_field) ON DELETE CASCADE;\n\"\"\")\n\n# json schema\nfk_schema = {\n \"$id\": \"https://example.com/person.schema.json\",\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Foreign key\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\"\n }\n }\n\n#--- LIB ---#\n\n#TODO modularize\ndef fk_sql_from_tplt_ho(template):\n def list2upplet(inp):\n return ', '.join(map(lambda e: \"\\\"\" + e + \"\\\"\", inp)) if isinstance(inp, list) else inp\n def foreign_key_sql_from_tplt(jsons):\n return '\\n'.join(list(map(\n lambda jso: template.substitute(\n table_dest=jso[\"tbl_dest_name\"],\n dest_field=list2upplet(jso[\"col_dest_name\"]),\n source_field=list2upplet(jso[\"col_src_name\"]),\n source_table=jso[\"tbl_src_name\"]),\n jsons.values())))\n return foreign_key_sql_from_tplt\n\n\n\ndef transf_json_obj(obj, tbl_dest_name):\n col_dest_name = list(obj.keys())[0]\n col_src_name = list(obj[col_dest_name].keys())[0]\n tbl_src_name = obj[col_dest_name][col_src_name]\n return {'tbl_dest_name': tbl_dest_name, 'col_dest_name': col_dest_name,\n 'tbl_src_name': tbl_src_name, 'col_src_name': col_src_name}\n\n\ndef transf_json_objs(arr, tbl_dest_name):\n return list(map(lambda obj: transf_json_obj(obj, tbl_dest_name), arr))\n\ndef grp_by_src_name(list_fks_in_tbl):\n return reduce(lambda acc, cur: {**acc, cur['tbl_src_name']: [*acc[cur['tbl_src_name']], cur]\n if cur['tbl_src_name'] in acc.keys() else [cur] },list_fks_in_tbl, {})\n\ndef 
fks_merge_cols(fks_grpby_src_name):\n return {key : reduce(lambda acc, cur: { 'tbl_dest_name': cur['tbl_dest_name'],\n 'col_dest_name': [cur['col_dest_name'], *acc['col_dest_name']] if 'col_dest_name' in acc else [cur['col_dest_name']],\n 'tbl_src_name': cur['tbl_src_name'],\n 'col_src_name': [cur['col_src_name'], *acc['col_src_name']] if 'col_src_name' in acc else [cur['col_src_name']],\n }, value, {}) for key, value in fks_grpby_src_name.items()}\n\nlist_fks_in_tbl = [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_PatientUId',\n 'tbl_src_name': 'patient',\n 'col_src_name': '#Patient'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_ClinicId',\n 'tbl_src_name': 'clinic_followup_reason',\n 'col_src_name': '#Clinic'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_DoctorId',\n 'tbl_src_name': 'doctor_list',\n 'col_src_name': '#Doctor'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_FollowUpReasonId',\n 'tbl_src_name': 'clinic_followup_reason',\n 'col_src_name': '#Follow up reason'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_EndReasonId',\n 'tbl_src_name': 'end_reason',\n 'col_src_name': 'Id'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_MedicalDepartmentId',\n 'tbl_src_name': 'medical_department_list',\n 'col_src_name': '#Medical dept'}]\nfks_grpby_src_name = grp_by_src_name(list_fks_in_tbl)\nassert fks_grpby_src_name == {'patient': [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_PatientUId',\n 'tbl_src_name': 'patient',\n 'col_src_name': '#Patient'}],\n 'clinic_followup_reason': [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_ClinicId',\n 'tbl_src_name': 'clinic_followup_reason',\n 'col_src_name': '#Clinic'},\n {'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_FollowUpReasonId',\n 'tbl_src_name': 'clinic_followup_reason',\n 'col_src_name': '#Follow up reason'}],\n 'doctor_list': [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_DoctorId',\n 'tbl_src_name': 'doctor_list',\n 'col_src_name': '#Doctor'}],\n 'end_reason': [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_EndReasonId',\n 'tbl_src_name': 'end_reason',\n 'col_src_name': 'Id'}],\n 'medical_department_list': [{'tbl_dest_name': 'patient_stay',\n 'col_dest_name': 'PatientStay_MedicalDepartmentId',\n 'tbl_src_name': 'medical_department_list',\n 'col_src_name': '#Medical dept'}]}\n\nassert transf_json_obj(json.loads('{\"#PatientUId\": {\"patient\": \"patient\"}}'), \"xx\") ==\\\n{\"tbl_dest_name\": \"xx\", \"col_dest_name\": \"#PatientUId\", \"tbl_src_name\": \"patient\", \"col_src_name\": \"patient\"}\nassert transf_json_obj(json.loads('{\"x\": {\"z\": \"y\"}}'), \"00\") ==\\\n{\"tbl_dest_name\": \"00\", \"col_dest_name\": \"x\", \"tbl_src_name\": \"y\", \"col_src_name\": \"z\"}\n\nassert transf_json_objs(json.loads('[{\"x\": {\"z\": \"y\"}}]'), \"00\") ==\\\n [{\"tbl_dest_name\": \"00\", \"col_dest_name\": \"x\", \"tbl_src_name\": \"y\", \"col_src_name\": \"z\"}]\n\n#--- END LIB ---#\n\ndef main():\n\n covidom_struc = pd.read_csv(spec_filepath, sep=';')\n covidom_struc['json_fk'] = covidom_struc.foreign_keys.apply(json.loads)\n assert all(covidom_struc['json_fk'].apply(lambda fk: jsonschema.validate(fk, fk_schema) is None))\n\n covidom_struc['json_formatted'] = covidom_struc.apply(lambda row: transf_json_objs(row.json_fk, row.table), axis=1)\n fks_grpby_src_name = 
covidom_struc['json_formatted'].apply(grp_by_src_name)\n\n fks_merged_cols = fks_grpby_src_name.apply(fks_merge_cols)\n\n foreign_key_sql = fk_sql_from_tplt_ho(alter_tmplt)\n sql_scripts = fks_merged_cols.apply(foreign_key_sql)\n with open(output_filepath, \"w\") as text_file:\n text_file.write(sql_scripts.str.cat())\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n","sub_path":"covidom_fk_script.py","file_name":"covidom_fk_script.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"180630884","text":"# Licensed to the Software Freedom Conservancy (SFC) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The SFC licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import Optional\n\nfrom appium.options.common.supports_capabilities import SupportsCapabilities\n\nCHROMEDRIVER_PORT = 'chromedriverPort'\n\n\nclass ChromedriverPortOption(SupportsCapabilities):\n @property\n def chromedriver_port(self) -> Optional[int]:\n \"\"\"\n Local port number to use for Chromedriver communication.\n \"\"\"\n return self.get_capability(CHROMEDRIVER_PORT)\n\n @chromedriver_port.setter\n def chromedriver_port(self, value: int) -> None:\n \"\"\"\n The port number to use for Chromedriver communication.\n Any free port number is selected by default if unset.\n \"\"\"\n self.set_capability(CHROMEDRIVER_PORT, value)\n","sub_path":"appium/options/android/common/context/chromedriver_port_option.py","file_name":"chromedriver_port_option.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"105169062","text":"import json\nimport sqlite3\n\n# SQLite DB Name\nDB_Name = 'SmartHome.db'\n\n\n# ===============================================================\n# Database Manager Class\n\nclass DatabaseManager:\n def __init__(self):\n self.conn = sqlite3.connect(DB_Name)\n self.conn.execute('pragma foreign_keys = on')\n self.conn.commit()\n self.cur = self.conn.cursor()\n\n def add_del_update_db_record(self, sql_query, args=()):\n self.cur.execute(sql_query, args)\n self.conn.commit()\n return\n\n def __del__(self):\n self.cur.close()\n self.conn.close()\n\n\n# ===============================================================\n# Functions to push Sensor Data into Database\n\n# Function to save Temperature to DB Table\ndef conditioner_status_data_handler(jsonData):\n # Parse Data\n json_Dict = json.loads(jsonData)\n conditionerID = json_Dict['ConditionerID']\n date_and_time = json_Dict['Date']\n status = json_Dict['Status']\n\n # Push into DB Table\n dbObj = DatabaseManager()\n dbObj.add_del_update_db_record(\n \"insert into CONDITIONER_DATA (ConditionerID, Date_n_Time, Status) values (?,?,?)\",\n [conditionerID, date_and_time, status])\n del dbObj\n print(\"Inserted 
Temperature Data into Database.\")\n\n\n# ===============================================================\n# Master Function to Select DB Funtion based on MQTT Topic\n\ndef conditioner_status_handler(Topic, jsonData):\n if Topic == \"Home/Conditioner/Status\":\n conditioner_status_data_handler(jsonData)\n","sub_path":"src/sqlite/store_data_into_sqlite.py","file_name":"store_data_into_sqlite.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"62968937","text":"# import the packages we will need\nfrom tensorflow.keras.applications import ResNet50\nfrom tensorflow.keras.applications import InceptionV3\nfrom tensorflow.keras.applications import Xception\nfrom tensorflow.keras.applications import VGG16\nfrom tensorflow.keras.applications import VGG19\nfrom tensorflow.keras.applications import imagenet_utils\nfrom tensorflow.keras.applications.inception_v3 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.preprocessing.image import load_img\nimport numpy as np\nimport argparse\nimport cv2\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to the input image\")\nap.add_argument(\"-model\", \"--model\", type=str, default=\"vgg16\", help=\"name of pre-trained network to use\")\nargs = vars(ap.parse_args())\n\n# dictionary that maps input model names to their classes inside keras\nMODELS = {\n \"vgg16\": VGG16,\n \"vgg19\": VGG19,\n \"inception\": InceptionV3,\n \"xception\": Xception,\n \"resnet\": ResNet50\n}\n\n# ensure valid model name was supplied as input\nif args[\"model\"] not in MODELS.keys():\n raise AssertionError(\"The --model command line argument should be a key in the `MODELS` dictionary\")\n\n# initialize the input image shape and pre-processing function based on supplied model\ninputShape = (224, 224)\npreprocess = imagenet_utils.preprocess_input\n\n# because inception and xception use different size of images\nif args[\"model\"] in (\"inception\", \"xception\"):\n inputShape = (299, 299)\n preprocess = preprocess_input\n\n# loading the network weights from disk\nprint(\"[INFO] loading {}...\".format(args[\"model\"]))\nNetwork = MODELS[args[\"model\"]]\nmodel = Network(weights=\"imagenet\")\n\n# loading the input image and resizing it to the required input dimensions\nprint(\"[INFO] loading and pre-processing image...\")\nimage = load_img(args[\"image\"], target_size=inputShape)\nimage = img_to_array(image)\n\n# our input image is now represented as a NumPy array of shape\n# (inputShape[0], inputShape[1], 3) however we need to expand the\n# dimension by making the shape (1, inputShape[0], inputShape[1], 3)\n# so we can pass it through the network\nimage = np.expand_dims(image, axis=0)\n\n# pre-process the image using the appropriate function based on the model that will be using\nimage = preprocess(image)\n\nprint(\"[INFO] classifying image with '{}'...\".format(args[\"model\"]))\npreds = model.predict(image)\n# we use .decode_predictions to give us a list of \"human-readable\" labels and the probabilities associated with each class label\nP = imagenet_utils.decode_predictions(preds)\n\n# The top-5 predictions (labels with the largest probabilities) are printed\nfor (i, (imagenetID, label, prob)) in enumerate(P[0]):\n print(\"{}. 

{}: {:.2f}%\".format(i + 1, label, prob * 100))\n\n# load the image via OpenCV, draw the top prediction on the image\norig = cv2.imread(args[\"image\"])\n(imagenetID, label, prob) = P[0][0]\ncv2.putText(orig, \"Label: {}\".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)\ncv2.imshow(\"Classification\", orig)\ncv2.waitKey(0)","sub_path":"Chapter20/imagenet_pretrained.py","file_name":"imagenet_pretrained.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"12867356","text":"class Family:\n def __init__(self, last_name):\n self.members = {}\n self.last_name = last_name\n\n def add_member_details(self, name, age, sex, is_child):\n member = {\n \"name\": name,\n 'age': age,\n \"sex\": sex,\n \"is_child\": is_child\n }\n self.members[name] = member\n\n def print_members(self):\n print(self.members)\n\n# class Children(Family):\n# def __init__(self, last_name):\n# super().__init__(self, last_name)\n\n def born(self,**kwargs):\n self.members[kwargs[\"name\"]] = kwargs\n # self.add_member_details(name=, age=0, sex=, is_child=True, **kwargs)\n print(f'Mazel Tov on the birth of your new child {kwargs[\"name\"]}!')\n\n def is_18(self, name):\n for child in self.members:\n if self.members[name]['age'] > 18:\n return True\n # print(f'{name} is older than 18')\n else:\n # print(f'{name} is younger than 18') how to stop it from printing 4 times.\n return False\n\n def Smith_Family(self):\n paragraph = f\"The Smith family has {len(self.members)} members, and their names are: \"\n for k, v in self.members.items():\n paragraph += \", \" + v[\"name\"]\n # for child, children in v.items():\n # paragraph += f\" {child} : {children}; \"\n # paragraph += \"\\n\"\n print(paragraph)\n\nfamily1 = Family(\"Smith\")\nmember1 = family1.add_member_details(\"Michael\", 35, 'Male', False)\nmember2= family1.add_member_details(\"Sarah\", 32, 'Female', False)\nmember3 = family1.add_member_details(\"Kevin\", 16, 'Male', True)\nfamily1.print_members()\n\nfamily1.born(name=\"Moses\", age=0,sex=\"Male\",is_child=True,prophet=True)\nprint(family1.last_name)\nfamily1.print_members()\n\nprint(family1.is_18(\"Kevin\"))\nfamily1.Smith_Family()\n\n","sub_path":"week_5/day_3/exerciseFeb11.py","file_name":"exerciseFeb11.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"325751955","text":"\"\"\"\nVarious utility functions and data types\n\"\"\"\n\nimport os\nfrom platform import system as platform_system\nimport psutil\nimport traceback\nfrom pydoc import locate\n\n#from . 
import s3_utils\n#from .basic_utils import bytes2human\nfrom .extensions import DATAMODEL_EXTENSIONS\n\nimport logging\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\ndef bytes2human(n):\n \"\"\"Convert bytes to human-readable format\n\n Taken from the `psutil` library which references\n http://code.activestate.com/recipes/578019\n\n Parameters\n ----------\n n : int\n Number to convert\n\n Returns\n -------\n readable : str\n A string with units attached.\n\n Examples\n --------\n >>> bytes2human(10000)\n '9.8K'\n\n >>> bytes2human(100001221)\n '95.4M'\n \"\"\"\n symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols):\n prefix[s] = 1 << (i + 1) * 10\n for s in reversed(symbols):\n if n >= prefix[s]:\n value = float(n) / prefix[s]\n return '%.1f%s' % (value, s)\n return \"%sB\" % n\n\n\nclass NoTypeWarning(Warning):\n pass\n\n\ndef get_schema_uri_from_converter(converter_class):\n \"\"\"\n Given a converter class, return the schema_uri corresponding to the tag.\n \"\"\"\n # Obtain one of the possible objects the converter returns\n classname = converter_class.types[0]\n # Presume these are from the same directory tree\n rclass = locate(classname)\n tag = rclass._tag\n schema_uri = next(\n t for t in DATAMODEL_EXTENSIONS[0].tags if t._tag_uri == tag)._schema_uri\n return schema_uri\n\n# def open(init=None, memmap=False, **kwargs):\n# \"\"\"\n# Creates a DataModel from a number of different types\n\n# Parameters\n# ----------\n# init : shape tuple, file path, file object, astropy.io.fits.HDUList,\n# numpy array, dict, None\n\n# - None: A default data model with no shape\n\n# - shape tuple: Initialize with empty data of the given shape\n\n# - file path: Initialize from the given file (FITS , JSON or ASDF)\n\n# - readable file object: Initialize from the given file object\n\n# - astropy.io.fits.HDUList: Initialize from the given\n# `~astropy.io.fits.HDUList`\n\n# - A numpy array: A new model with the data array initialized\n# to what was passed in.\n\n# - dict: The object model tree for the data model\n\n# memmap : bool\n# Turn memmap of FITS file on or off. (default: False). Ignored for\n# ASDF files.\n\n# kwargs : dict\n# Additional keyword arguments passed to lower level functions. These arguments\n# are generally file format-specific. Arguments of note are:\n\n# - FITS\n\n# skip_fits_update - bool or None\n# `True` to skip updating the ASDF tree from the FITS headers, if possible.\n# If `None`, value will be taken from the environmental SKIP_FITS_UPDATE.\n# Otherwise, the default value is `True`.\n\n# Returns\n# -------\n# model : DataModel instance\n# \"\"\"\n\n# from . import datamodels as dm\n# from . 
import filetype\n\n# # Initialize variables used to select model class\n\n# shape = ()\n# file_name = None\n# file_to_close = None\n\n# # Get special cases for opening a model out of the way\n# # all special cases return a model if they match\n\n# if init is None:\n# return dm.DataModel(None)\n\n# elif isinstance(init, dm.DataModel):\n# # Copy the object so it knows not to close here\n# return init.__class__(init)\n\n# elif isinstance(init, (str, bytes)) or hasattr(init, \"read\"):\n# # If given a string, presume its a file path.\n# # if it has a read method, assume a file descriptor\n\n# if isinstance(init, bytes):\n# init = init.decode(sys.getfilesystemencoding())\n\n# file_name = basename(init)\n# file_type = filetype.check(init)\n\n# elif file_type == \"asn\":\n# raise NotImplementedError(\"roman_datamodels does not yet support associations\")\n# # Read the file as an association / model container\n# # from . import container\n# # return container.ModelContainer(init, **kwargs)\n\n# elif file_type == \"asdf\":\n# # Read the file as asdf, no need for a special class\n# return dm.DataModel(init, **kwargs)\n\n# elif isinstance(init, tuple):\n# for item in init:\n# if not isinstance(item, int):\n# raise ValueError(\"shape must be a tuple of ints\")\n# shape = init\n\n# elif isinstance(init, np.ndarray):\n# shape = init.shape\n\n# elif is_association(init) or isinstance(init, list):\n# raise NotImplementedError(\"stdatamodels does not yet support associations\")\n# # from . import container\n# # return container.ModelContainer(init, **kwargs)\n\n# # Log a message about how the model was opened\n# if file_name:\n# log.debug(f'Opening {file_name} as {new_class}')\n# else:\n# log.debug(f'Opening as {new_class}')\n\n# # Actually open the model\n# model = new_class(init, **kwargs)\n\n# return model\n\n\ndef _class_from_model_type(hdulist):\n \"\"\"\n Get the model type from the primary header, lookup to get class\n \"\"\"\n raise NotImplementedError(\n \"stdatamodels does not yet support automatic model class selection\")\n # from . import _defined_models as defined_models\n\n # if hdulist:\n # primary = hdulist[0]\n # model_type = primary.header.get('DATAMODL')\n\n # if model_type is None:\n # new_class = None\n # else:\n # new_class = defined_models.get(model_type)\n # else:\n # new_class = None\n\n # return new_class\n\n\ndef _class_from_ramp_type(hdulist, shape):\n \"\"\"\n Special check to see if file is ramp file\n \"\"\"\n raise NotImplementedError(\n \"stdatamodels does not yet support automatic model class selection\")\n # if not hdulist:\n # new_class = None\n # else:\n # if len(shape) == 4:\n # try:\n # hdulist['DQ']\n # except KeyError:\n # from . import ramp\n # new_class = ramp.RampModel\n # else:\n # new_class = None\n # else:\n # new_class = None\n\n # return new_class\n\n\ndef _class_from_reftype(hdulist, shape):\n \"\"\"\n Get the class name from the reftype and other header keywords\n \"\"\"\n raise NotImplementedError(\n \"stdatamodels does not yet support automatic model class selection\")\n # if not hdulist:\n # new_class = None\n\n # else:\n # primary = hdulist[0]\n # reftype = primary.header.get('REFTYPE')\n # if reftype is None:\n # new_class = None\n\n # else:\n # from . 
import reference\n # if len(shape) == 0:\n # new_class = reference.ReferenceFileModel\n # elif len(shape) == 2:\n # new_class = reference.ReferenceImageModel\n # elif len(shape) == 3:\n # new_class = reference.ReferenceCubeModel\n # elif len(shape) == 4:\n # new_class = reference.ReferenceQuadModel\n # else:\n # new_class = None\n\n # return new_class\n\n\ndef _class_from_shape(hdulist, shape):\n \"\"\"\n Get the class name from the shape\n \"\"\"\n raise NotImplementedError(\n \"stdatamodels does not yet support automatic model class selection\")\n # if len(shape) == 0:\n # from . import model_base\n # new_class = model_base.DataModel\n # elif len(shape) == 4:\n # from . import quad\n # new_class = quad.QuadModel\n # elif len(shape) == 3:\n # from . import cube\n # new_class = cube.CubeModel\n # elif len(shape) == 2:\n # try:\n # hdulist[('SCI', 2)]\n # except (KeyError, NameError):\n # # It's an ImageModel\n # from . import image\n # new_class = image.ImageModel\n # else:\n # # It's a MultiSlitModel\n # from . import multislit\n # new_class = multislit.MultiSlitModel\n # else:\n # new_class = None\n\n # return new_class\n\n\ndef can_broadcast(a, b):\n \"\"\"\n Given two shapes, returns True if they are broadcastable.\n \"\"\"\n for i in range(1, min(len(a), len(b)) + 1):\n adim = a[-i]\n bdim = b[-i]\n\n if not (adim == 1 or bdim == 1 or adim == bdim):\n return False\n\n return True\n\n\ndef to_camelcase(token):\n return ''.join(x.capitalize() for x in token.split('_-'))\n\n\ndef is_association(asn_data):\n \"\"\"\n Test if an object is an association by checking for required fields\n \"\"\"\n if isinstance(asn_data, dict):\n if 'asn_id' in asn_data and 'asn_pool' in asn_data:\n return True\n return False\n\n\ndef get_short_doc(schema):\n title = schema.get('title', None)\n description = schema.get('description', None)\n if description is None:\n description = title or ''\n else:\n if title is not None:\n description = title + '\\n\\n' + description\n return description.partition('\\n')[0]\n\n\ndef ensure_ascii(s):\n if isinstance(s, bytes):\n s = s.decode('ascii')\n return s\n\n\ndef create_history_entry(description, software=None):\n \"\"\"\n Create a HistoryEntry object.\n\n Parameters\n ----------\n description : str\n Description of the change.\n software : dict or list of dict\n A description of the software used. 
It should not include\n asdf itself, as that is automatically notated in the\n `asdf_library` entry.\n\n Each dict must have the following keys:\n\n ``name``: The name of the software\n ``author``: The author or institution that produced the software\n ``homepage``: A URI to the homepage of the software\n ``version``: The version of the software\n\n Examples\n --------\n >>> soft = {'name': 'jwreftools', 'author': 'STSCI', \\\n 'homepage': 'https://github.com/spacetelescope/jwreftools', 'version': \"0.7\"}\n >>> entry = create_history_entry(description=\"HISTORY of this file\", software=soft)\n\n \"\"\"\n from asdf.tags.core import Software, HistoryEntry\n import datetime\n\n if isinstance(software, list):\n software = [Software(x) for x in software]\n elif software is not None:\n software = Software(software)\n\n entry = HistoryEntry({\n 'description': description,\n 'time': datetime.datetime.utcnow()\n })\n\n if software is not None:\n entry['software'] = software\n return entry\n\n\ndef get_envar_as_boolean(name, default=False):\n \"\"\"Interpret an environmental as a boolean flag\n\n Truth is any numeric value that is not 0 or\n any of the following case-insensitive strings:\n\n ('true', 't', 'yes', 'y')\n\n Parameters\n ----------\n name : str\n The name of the environmental variable to retrieve\n\n default : bool\n If the environmental variable cannot be accessed, use as the default.\n \"\"\"\n truths = ('true', 't', 'yes', 'y')\n falses = ('false', 'f', 'no', 'n')\n if name in os.environ:\n value = os.environ[name]\n try:\n value = bool(int(value))\n except ValueError:\n value_lowcase = value.lower()\n if value_lowcase not in truths + falses:\n raise ValueError(\n f'Cannot convert value \"{value}\" to boolean unambiguously.')\n return value_lowcase in truths\n return value\n\n log.debug(\n f'Environmental \"{name}\" cannot be found. Using default value of \"{default}\".')\n return default\n\n\ndef check_memory_allocation(shape, allowed=None, model_type=None, include_swap=True):\n \"\"\"Check if a DataModel can be instantiated\n\n Parameters\n ----------\n shape : tuple\n The desired shape of the model.\n\n allowed : number or None\n Fraction of memory allowed to be allocated.\n If None, the environmental variable `DMODEL_ALLOWED_MEMORY`\n is retrieved. If undefined, then no check is performed.\n `1.0` would be all available memory. 

`0.5` would be half available memory.\n\n model_type : DataModel or None\n The desired model to instantiate.\n If None, `open` will be used to guess at a model type depending on shape.\n\n include_swap : bool\n Include available swap in the calculation.\n\n Returns\n -------\n can_instantiate, required_memory : bool, number\n True if the model can be instantiated and the predicted memory footprint.\n \"\"\"\n # Determine desired allowed amount.\n if allowed is None:\n allowed = os.environ.get('DMODEL_ALLOWED_MEMORY', None)\n if allowed is not None:\n allowed = float(allowed)\n\n # Create the unit shape\n unit_shape = (1,) * len(shape)\n\n # Create the unit model.\n if model_type:\n unit_model = model_type(unit_shape)\n else:\n unit_model = open(unit_shape)\n\n # Size of the primary array.\n primary_array_name = unit_model.get_primary_array_name()\n primary_array = getattr(unit_model, primary_array_name)\n size = primary_array.nbytes\n for dimension in shape:\n size *= dimension\n\n # Get available memory\n available = get_available_memory(include_swap=include_swap)\n log.debug(\n f'Model size {bytes2human(size)} available system memory {bytes2human(available)}')\n\n if size > available:\n log.warning(\n f'Model {model_type} shape {shape} requires {bytes2human(size)} which is more than'\n f' system available {bytes2human(available)}'\n )\n\n if allowed and size > (allowed * available):\n log.debug(\n f'Model size greater than allowed memory {bytes2human(allowed * available)}'\n )\n return False, size\n\n return True, size\n\n\ndef get_available_memory(include_swap=True):\n \"\"\"Retrieve available memory\n\n Parameters\n ----------\n include_swap : bool\n Include available swap in the calculation.\n\n Returns\n -------\n available : number\n The amount available.\n \"\"\"\n system = platform_system()\n\n # Apple MacOS\n log.debug(f'Running OS is \"{system}\"')\n if system in ['Darwin']:\n return get_available_memory_darwin(include_swap=include_swap)\n\n # Default to Linux-like:\n return get_available_memory_linux(include_swap=include_swap)\n\n\ndef get_available_memory_linux(include_swap=True):\n \"\"\"Get memory for a Linux system\n\n Presume that the swap space as reported is accurate at the time of\n the query and that any subsequent allocation will be held the value.\n\n Parameters\n ----------\n include_swap : bool\n Include available swap in the calculation.\n\n Returns\n -------\n available : number\n The amount available.\n \"\"\"\n vm_stats = psutil.virtual_memory()\n available = vm_stats.available\n if include_swap:\n swap = psutil.swap_memory()\n available += swap.total\n return available\n\n\ndef get_available_memory_darwin(include_swap=True):\n \"\"\"Get the available memory on an Apple MacOS-like system\n\n For Darwin, swap space is dynamic and will attempt to use the whole of the\n boot partition.\n\n If the system has been configured to use swap from other sources besides\n the boot partition, that available space will not be included.\n\n Parameters\n ----------\n include_swap : bool\n Include available swap in the calculation.\n\n Returns\n -------\n available : number\n The amount available.\n \"\"\"\n vm_stats = psutil.virtual_memory()\n available = vm_stats.available\n if include_swap:\n\n # Attempt to determine amount of free disk space on the boot partition.\n try:\n swap = psutil.disk_usage('/private/var/vm').free\n except FileNotFoundError as exception:\n log.warn('Cannot determine available swap space.'\n f'Reason:\\n'\n 
f'{\"\".join(traceback.format_exception(exception))}')\n swap = 0\n available += swap\n\n return available\n","sub_path":"src/roman_datamodels/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":16232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"98204957","text":"# -*- coding: UTF-8 -*-\nimport glob\nimport json\nimport random\nimport math\nimport numpy as np\nimport cv2\nimport os\nfrom tianchi_extractor.lstm_extractor.tf_config import Config,path_format,label_dict\nfrom tianchi_extractor.util.groundtruth_map import gen_substitution_dict\nfrom tianchi_extractor.config import TASKS\nclass Data_loader():\n def __init__(self,input,word_len = 400):\n self.word_len = word_len\n self.input_file = json.load(open(input,'r'))\n # self.input_file =[f for f in self.input_file if '/56/' in f]\n # self.label_list = json.load(open(label_list,'r'))\n # self.char_list = json.load(open(word_list,'r'))\n self.data = self._build_data_lsit(self.input_file)\n\n # self.batch_size =batch_size\n pass\n\n def _build_data_lsit(self,input_file):\n detail_list = []\n sent_list = []\n label_list = []\n mask_list = []\n max_len = 0\n # length = 0\n for i,input in enumerate(input_file):\n if not i%1000:\n print('{} start'.format(i))\n path = path_format.format(input[0],input[1])\n task = TASKS.index(input[0])\n # if task==1:\n # continue\n tmp = [0]*3\n tmp [task]=1\n task = tmp\n if not os.path.exists(path):\n continue\n data = json.load(open(path,'r'))\n split_pos = filter(lambda x :x[1][0]==u'。',enumerate(data))\n # max_len = max([max_len,len(data)])\n # print(max_len)\n data_split =[]\n start = 0\n for idx,pos in enumerate(split_pos):\n data_split.append(data[start:pos[0]+1])\n start = pos[0]+1\n if start=len(new_order):\n break\n else:\n data_out = new_order[start:]\n\n yield [self.data[0][d] for d in data_out], \\\n [self.data[1][d] for d in data_out], \\\n [self.data[2][d] for d in data_out],\\\n [self.data[3][d] for d in data_out],\n\nif __name__=='__main__':\n label = ['z']\n for t in TASKS:\n print(gen_substitution_dict(t))\n label.extend([a[1] for a in gen_substitution_dict(t).items()])\n label =list(set(label))\n print(label)\n d = Data_loader('/home/xinmatrix/TwoTB/tianchi/extras/train.json',)\n\n for asdas in d.load_data_batch(64):\n print(len(asdas[0]))\n pass","sub_path":"tianchi_extractor/lstm_extractor/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"457981685","text":"import json\nimport hashlib\n\ndef hash_sha256(file):\n buf_size = 65536 # lets read stuff in 64kb chunks!\n sha256 = hashlib.sha256()\n with open(file, 'rb') as f:\n while True:\n data = f.read(buf_size)\n if not data:\n break\n sha256.update(data)\n return sha256.hexdigest()\n\n\ndef get_dupes(a):\n # Duplicate and sort list\n a = a.copy()\n a.sort()\n\n dupes = set()\n last_item = \"\"\n # Find any consecutive items in the list\n for item in a:\n if last_item == item:\n dupes.add(item)\n last_item = item\n return dupes\n\n\ndef remover(rm_list=None, target=None):\n # Removes elements from 'target' list based on the parallel truth-table list 'rm_list'\n removed = 0\n for i, truefalse in enumerate(rm_list):\n if truefalse:\n del target[i - removed]\n removed += 1\n\n\ndef remove_dupes(authors=None, dq_age=None, dq_mult=None, cids=None, meta=None):\n mode = meta['Duplicate Action']\n # if (mode := meta['Duplicate Action']) 
not in {\"DQ\", \"FirstOnly\"}:\n if mode not in {\"DQ\", \"FirstOnly\"}:\n x = input(\"Invalid duplicate action! Only DQ or FirstOnly is acceptable!\")\n exit(1)\n print(\"Operating Mode: \" + mode)\n\n dq_age.update({'Null', 'NULL*'}) # Everything in dq_age gets marked for removal\n rm_list = []\n rm_cache = set()\n # Mark true/false in a parallel list to indicate if the element should be deleted later\n for author in authors:\n if author in dq_age:\n rm_list.append(True)\n elif author in dq_mult:\n if mode == \"DQ\" or author in rm_cache: # Being in rm_cache means author has appeared once\n rm_list.append(True)\n else:\n rm_cache.add(author) # Remember that author has been seen once already\n rm_list.append(False) # FirstOnly mode, so this comment is spared\n else:\n rm_list.append(False)\n\n # Split the full comment ID list into thread-specific sublists first before removing\n # This is to make it easy to calculate the truncated length of each thread's comment IDs\n sub_cids = []\n sub_rm = []\n prev = 0\n cur = 0\n for thread in meta['threads']:\n cur += thread['length']\n sub_cids.append(cids[prev:cur])\n sub_rm.append(rm_list[prev:cur])\n prev = cur\n\n # Remove marked elements from the sublists, and append new length of CIDs\n for i, (sub_cid, rm) in enumerate(zip(sub_cids, sub_rm)):\n remover(rm_list=rm, target=sub_cid)\n meta['threads'][i]['trunc_length'] = len(sub_cid)\n\n # Recombine the truncated sublists and return it\n ret_list = []\n for sub_cid in sub_cids:\n ret_list += sub_cid\n return ret_list\n\n\ndef main():\n # Load files\n with open('meta.json', 'r') as f:\n meta = json.load(f)\n\n file_name = meta['CID_Filename']\n\n with open(file_name, 'r') as f:\n comment_ids = [line.strip() for line in f]\n\n with open(file_name.rstrip('.txt') + '_Authors.txt', 'r') as f:\n authors = [line.strip() for line in f]\n\n with open(file_name.rstrip('.txt') + '_DQ-Age.txt', 'r') as f:\n dq_age = [line.strip() for line in f]\n\n # Find multiposters\n dq_mult = get_dupes(authors)\n print(\"{} users have young accounts!\\n{} users have multiple posts!\".format(len(dq_age), len(dq_mult)))\n\n # Remove all invalid comment IDs\n before = len(comment_ids)\n comment_ids = remove_dupes(authors=authors, dq_age=set(dq_age), dq_mult=set(dq_mult), cids=comment_ids, meta=meta)\n after = len(comment_ids)\n\n # Save multiposter usernames & the truncated comment ID list\n with open(file_name.rstrip('.txt') + '_DQ-MultiPost.txt', 'w') as f:\n f.write('\\n'.join(sorted(dq_mult, key=str.casefold)))\n\n with open(file_name.rstrip('.txt') + '_Truncated.txt', 'w') as f:\n f.write('\\n'.join(comment_ids))\n\n # Calculate file hashes and save\n meta['DQMULT_SHA256'] = hash_sha256(file_name.rstrip('.txt') + '_DQ-MultiPost.txt')\n meta['TRUNC_SHA256'] = hash_sha256(file_name.rstrip('.txt') + '_Truncated.txt')\n print(\"\\nDQMult SHA-256 Hash: {}\\nT_CID SHA-256 Hash: {}\".format(meta['DQMULT_SHA256'], meta['TRUNC_SHA256']))\n\n with open('meta.json', 'w') as outfile:\n json.dump(meta, outfile, indent=4)\n\n x = input(\"Removed {} comments!\".format(before - after))\n return\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2020 Feb~Oct Draws/MM54/removeInvalids.py","file_name":"removeInvalids.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"278292089","text":"import queue\nimport threading\nfrom tweet2video import Tweet2Video\n\n\nclass MultiThreadWorker():\n def __init__(self, num_workers, 
queue_size):\n self.num_workers = num_workers\n self.queue_size = queue_size\n self.q = queue.Queue(queue_size)\n self.active_works = []\n\n def add_task(self, username, num_tweets):\n t = threading.Thread(\n target=self.generate_video,\n args=(username, num_tweets))\n\n num_active_workers = threading.active_count() - 1\n print(num_active_workers)\n if num_active_workers < self.num_workers:\n t.start()\n self.active_works.append(t)\n else:\n self.q.put(t)\n print(f'{username} is waiting.')\n return t\n\n def generate_video(self, username, num_tweets):\n tv = Tweet2Video(username, num_tweets)\n print(f'{username} starts.')\n tv.run()\n print(f'{username} done.')\n\n def start(self):\n while not self.q.empty():\n num_active_workers = threading.active_count()\n # print('num_active_workers', num_active_workers)\n if num_active_workers < self.num_workers:\n t = self.q.get()\n t.start()\n\n for worker in self.active_works:\n if worker.is_alive():\n worker.join()\n return 0\n\n\ndef main():\n num_workers = 2\n queue_size = 100\n mtw = MultiThreadWorker(num_workers, queue_size)\n mtw.add_task('@Shakespeare', 10)\n mtw.add_task('@realDonaldTrump', 10)\n mtw.add_task('@Literature', 10)\n mtw.add_task('@langston_poems', 10)\n mtw.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"multi_thread_worker.py","file_name":"multi_thread_worker.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"380674646","text":"import math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import LabelEncoder\nimport seaborn as sns\nfrom collections import Counter\n\n\n\ndf1 = pd.read_csv(\"iris.csv\")\ndf2 = pd.read_csv(\"label.csv\")\ndf = pd.concat([df1,df2],axis = 1)\n\n\ndf.describe()\ndf.info()\ndf = df.drop(['label'], axis = 1)\ncorrm = df.corr()\nsns.heatmap(corrm,annot = True,vmax = .8, square = True)\n\n\n\nX = df.iloc[:,[0,2,3]].values\ny = df.iloc[:,-1].values\n\n\n'''\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX = sc.fit_transform(X)\n'''\n\n\ndef dist_func(x,y):\n \n ans = 0\n for i in range(len(x)-1):\n ans += pow(x[i] - y[i],2)\n \n \n return math.sqrt(ans)\n \n \n\n\n\ndef kmeans(ds,centroid,k):\n \n max_iter = 100\n \n for i in range(max_iter):\n \n d = {}\n for i in range(k):\n d[i]=[]\n \n d1 = {}\n for i in range(k):\n d1[i]=[]\n \n for j in range(len(ds)):\n l= []\n \n for m in centroid:\n \n l.append(dist_func(m,ds[j]))\n \n d[l.index(min(l))].append(ds[j])\n \n \n \n for z in d:\n centroid[z] = np.average(d[z],axis = 0)\n \n \n \n return d\n\n\n\n\n\n\ndef rec_kmeans(ds):\n \n \n li = set()\n \n for j in ds:\n \n if(j[3] != -1):\n li.add(j[3])\n \n \n \n if(len(li) == 1):\n final_clusters.append(ds)\n cluster_label.append(li)\n return\n\n \n if(len(li) == 0):\n final_clusters.append(ds)\n cluster_label.append(-1)\n return\n \n \n \n newk = len(li)\n centroid = []\n \n for i in range(newk):\n centroid.append(ds[i])\n \n r = kmeans(ds,centroid,newk)\n \n for z in r:\n z1 = r[z]\n rec_kmeans(z1)\n \n \n\n\n\n \nk = 3\n\nX = pd.DataFrame(X)\ny = pd.DataFrame(y)\nX1 = pd.concat([X,y],axis = 1)\n\nX_training = X1.iloc[:,0:4].values\n\nfinal_clusters = []\ncluster_label = []\ncentr = []\nfor i in range(k):\n centr.append(X_training[i])\n\n\n#initial Kmeans call \nres = kmeans(X_training,centr,k)\n\n'''\nfor i in res:\n ct0 = 0\n ct1 = 0\n ct2 = 0\n tot = 0\n for j in res[i]:\n tot 
= len(res[i])\n if(j[3] == 1):\n ct1 += 1\n elif(j[3] == 2):\n ct2 += 1\n elif(j[3] == 0):\n ct0 += 1\n \n per0 = 0\n per1 = 0\n per2 = 0\n \n if(ct0 != 0):\n per0 = (ct0/tot) * 100\n if(ct1 != 0):\n per1 = (ct1/tot) * 100\n if(ct2 != 0):\n per2 = (ct2/tot) * 100\n \n print(per0,per1,per2)\n \n \nthers = 6 \n'''\n\n\n\ndt = [\"cluster1\",\"cluster2\",\"cluster3\"]\ndty = []\nfor i in res:\n lbl = set()\n for j in res[i]:\n if(j[3] != -1):\n lbl.add(int(j[3]))\n \n print(len(lbl)) \n dty.append(len(lbl))\n\nplt.bar(dt,dty) \nplt.ylabel(\"No of class labels\")\n \n \n \n \n\ncolors = [\"r\", \"g\", \"c\", \"b\", \"k\"]\n\n\n\nfor c in res:\n\tcolor = colors[c]\n\tfor features in res[c]:\n\t\tplt.scatter(features[0], features[1], color = color,s = 30)\n\n\nfor c in centr:\n plt.scatter(c[0],c[1],marker = 'X',s = 150 ,color = 'm')\n \n\nplt.show()\n\n \n \n \n#recursive Kmeans call for every partition in res\nfor par in res: \n pi = res[par]\n rec_kmeans(pi)\n \n\n\nfinal_centroids = []\n\n\n#calculating final centroids\nfor i in final_clusters:\n final_centroids.append(np.average(i,axis = 0))\n\n\n\n#ploting final results\n \n \ndtf = [f'cluster{i}' for i in range(1,len(final_clusters) + 1)]\ndtfy = []\nfor i in final_clusters:\n lbl = set()\n for j in i:\n if(j[3] != -1):\n lbl.add(int(j[3]))\n \n \n dtfy.append(len(lbl))\n \nplt.bar(dtf,dtfy)\nplt.xticks(rotation = 60)\nplt.ylabel(\"No of class labels\")\n\nfor fea in final_clusters:\n for k in fea:\n plt.scatter(k[0], k[1], color = \"k\",s = 10)\n\n\n\nfor cen in final_centroids:\n plt.scatter(cen[0],cen[1],marker = \"X\",color = \"m\",s = 30)\n \n \nplt.show()\n ","sub_path":"rec_iris.py","file_name":"rec_iris.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"618741624","text":"import random\n\n\ndef targetNumber():\n target = random.randint(1, 12) * random.randint(1, 12)\n return target\n\n\ndef scoringNumbers():\n return random.randint(1, 6), random.randint(1, 6), random.randint(1, 6)\n\n\ndef evaluateExpression(expression, sn):\n numbers = []\n for char in expression:\n if char.isdigit() == True:\n numbers.append(int(char))\n numbers.sort()\n sn = list(sn)\n sn.sort()\n if numbers == sn:\n return True\n else:\n return False\n\n\ndef takeTurn(rollScoringNumbers, rollTargetNumber):\n solution = raw_input(\"Enter an expression: \")\n if evaluateExpression(solution, rollScoringNumbers) == False:\n return \"you did not enter a valid expression\"\n else:\n return eval(solution)\n\n\ndef main():\n numberOfRounds = int(input(\"Rounds: \"))\n p1count, p2count = 0, 0\n for i in range(numberOfRounds):\n rollScoringNumbers = scoringNumbers()\n rollTargetNumber = targetNumber()\n print(rollScoringNumbers, rollTargetNumber)\n p1 = takeTurn(rollScoringNumbers, rollTargetNumber)\n p2 = takeTurn(rollScoringNumbers, rollTargetNumber)\n print(p1, p2, rollTargetNumber, type(rollTargetNumber))\n if type(p1) == str:\n p1 = 0\n if type(p2) == str:\n p2 = 0\n if p2 == p1:\n p1count += 1\n p2count += 1\n elif abs(p1 - rollTargetNumber) < abs(p2 - rollTargetNumber):\n p1count += 1\n else:\n p2count += 1\n print(\"P1: \", p1count, \"P2: \", p2count)\n\n\nmain()\n","sub_path":"Lab/mathDice.py","file_name":"mathDice.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"651795998","text":"import data\nimport models\nimport numpy as np\nfrom keras.layers import Input\nfrom 
keras.layers import Dense\nfrom keras.layers import concatenate\nfrom keras.layers import Dropout\nfrom keras.layers import Dense\nfrom keras.models import Model\nfrom keras.utils.vis_utils import plot_model\nfrom keras.models import load_model\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import EarlyStopping\nimport sys\n\n# Copyright 2020 QCRI, HBKU (Author: Ahmed Ali)\n# Apache 2.0.\n\n\ntrain=\"data/dev_mgb2.feats\"\ndev=\"data/eval_mgb2.feats\"\ntest=\"data/summa_ar.feats\"\n_EPOCHS=50\n\ntrain_data = data.load_features (train,data.cols)\ndev_data = data.load_features (dev,data.cols)\ntest_data = data.load_features (test,data.cols)\n\n\n_feature=\"grapheme\"\n_ngram=3\nword_list_g = data.return_word_list (train_data[_feature].values.tolist(),_ngram)\ntrain_feats_g = data.make_features (train_data[_feature].values.tolist(), word_list_g, _ngram, True)\ndev_feats_g = data.make_features (dev_data[_feature].values.tolist(), word_list_g, _ngram, True)\ntest_feats_g = data.make_features (test_data[_feature].values.tolist(), word_list_g, _ngram, True)\n\n\n_feature=\"words\"\n_ngram=2\nword_list_w = data.return_word_list (train_data[_feature].values.tolist(), _ngram)\ntrain_feats_w = data.make_features (train_data[_feature].values.tolist(), word_list_w, _ngram, True)\ndev_feats_w = data.make_features (dev_data[_feature].values.tolist(), word_list_w, _ngram, True)\ntest_feats_w = data.make_features (test_data[_feature].values.tolist(), word_list_w, _ngram, True)\n\n\ntrain_feats = np.hstack((train_feats_g,train_feats_w,train_data[data.continuous_black_grapheme]))\ndev_feats = np.hstack((dev_feats_g,dev_feats_w,dev_data[data.continuous_black_grapheme]))\ntest_feats = np.hstack((test_feats_g,test_feats_w,test_data[data.continuous_black_grapheme]))\n\n\nprint (train_feats_g.shape,train_feats_w.shape,train_data[data.continuous_black_grapheme].shape,train_feats.shape)\n\n\n## call back \nmlp = models.create_mlp(train_feats.shape[1])\n\n\nwer = Dense(32, activation=\"relu\")(mlp.output)\nwer = Dropout(0.2) (wer)\nwer = Dense(1, kernel_initializer='normal')(wer)\n\n\n##\nfinal_model='eWER_black_box'\n\nmodel = Model(inputs=mlp.input, outputs= wer)\nplot_model(model, to_file='results/'+final_model+'_plots.pdf', show_shapes=True, show_layer_names=True)\nmodel.compile(loss=\"mean_squared_error\", optimizer='adam', metrics=['accuracy'])\n\n\n# TRAIN E-WER\n'''\nearlystopper = EarlyStopping(monitor='val_loss', min_delta=0,\n patience=1, verbose=1, mode='auto')\n\ncheckpoint = ModelCheckpoint(filepath=final_model+'.{epoch:02d}-{val_loss:.2f}.h5', monitor='val_accuracy', \n verbose=1, save_best_only=True, mode='auto')\n\n\nmodel.fit(train_feats, train_data[\"wer\"].to_numpy(),\n validation_data=(dev_feats, dev_data[\"wer\"].to_numpy()),\n batch_size=32, epochs=_EPOCHS, verbose=1, \n callbacks=[checkpoint,earlystopper]) \n \n'''\nmodel.summary()\n\n \n\nmodel.fit(train_feats, train_data[\"wer\"].to_numpy(),\n batch_size=32, epochs=_EPOCHS, verbose=1)\n\nmodel.save('results/'+final_model+'.h5', overwrite=True)\nmodel = load_model('results/'+final_model+'.h5')\nprint (dev_feats.shape[1])\n\n\n#dev\npred = model.predict(dev_feats).flatten()\ndata.test_wer (pred, dev_data, \"dev_mgb2_\"+final_model)\n\n#test\npred = model.predict(test_feats).flatten()\ndata.test_wer (pred, test_data, \"summa_\"+final_model)\n#### 
\n\n","sub_path":"scripts/train_ewer_black_box.py","file_name":"train_ewer_black_box.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"246766375","text":"class heap:\n def __init__(self,A=[]):\n self.size=len(A)\n self.A=A\n \n def swap(self,a,b):\n temp=self.A[a]\n self.A[a]=self.A[b]\n self.A[b]=temp\n \n def buildheap(self):\n n=self.size\n for i in range(n//2,-1,-1):\n self.min_heapify(i)\n\n def min_heapify(self,index):\n left=index*2+1\n right=index*2+2\n smallest=index\n\n if(left[^<]*',html):\n a.append(match.group()[16:-6])\n total_h=0\n total_s=0\n print(\"From a sample size of\", len(a), \"persons:\")\n for c in a:\n h=0\n s=0\n for i in happy:\n h += len(re.findall(i,c))\n for i in sad:\n s += len(re.findall(i,c))\n if(h>=s):\n togglehappy = \"happy\"\n else:\n togglehappy = \"sad\"\n total_h+=h\n total_s+=s\n print(\"This sentence is mostly\", togglehappy+\". It contained\",\n h, \"amount of happy keywords and\", s, \"amount of sad keywords.\")\n if(total_h>total_s):\n togglehappy = \"happy\"\n else:\n togglehappy = \"sad\"\n print( \"The general feelings towards this video were\", togglehappy)\n\ninp = input(\"Input url:\\n\")\ncomments(inp)\ninput(\"Click enter to quit\")\n","sub_path":"YouTube Comment Scraper.py","file_name":"YouTube Comment Scraper.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"45073313","text":"from django.core.management.base import BaseCommand\n\nfrom apps.interpreter.models import SingleScoreComputeTask\n\n\nclass Command(BaseCommand):\n help = '''\n Запуск вычислений неоконченных задач\n '''\n\n def handle(self, *args, **options):\n tasks = SingleScoreComputeTask.objects.filter(complete=False)\n for t in tasks:\n t.compute_async()\n","sub_path":"apps/interpreter/management/commands/update_single_score_tasks.py","file_name":"update_single_score_tasks.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"354836340","text":"from django.contrib import admin\nfrom .models import Covers\n\n\n# Register your models here.\n\nclass CoverInline(admin.TabularInline):\n model = Covers\n\n\nclass CoversAdmin(admin.ModelAdmin):\n inline = CoverInline\n\n\nadmin.site.register(Covers, CoversAdmin)\n","sub_path":"hcpresearch/covers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"436801419","text":"# -*- coding: utf-8 -*-\nimport models\n\nfrom rest_framework import serializers\n\nfrom apps.accounts.serializers import UserSerializer\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Product\n fields = (\n 'pk', \n 'name', \n 'point', \n )\n\n\nclass BucketItemSerializer(serializers.ModelSerializer):\n\n item = ProductSerializer()\n\n class Meta:\n model = models.BucketItem\n fields = (\n 'pk', \n 'quantity', \n 'item',\n 'bucket',\n )\n\n\nclass BucketSerializer(serializers.ModelSerializer):\n\n items = BucketItemSerializer(source='bucketitem_set', many=True)\n user = UserSerializer(many=False, read_only=True)\n\n class Meta:\n model = models.Bucket\n fields = (\n 'pk', \n 'user',\n 'items',\n )\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n\n bucket = BucketSerializer()\n\n class Meta:\n model = 
models.Order\n        fields = (\n            'pk',\n            'timestamp', \n            'status', \n            'bucket',\n        )\n\n","sub_path":"toNature/apps/recycle/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"115852260","text":"from django.conf.urls import patterns, include, url\n\nfrom med_social.decorators import vendor_required\nfrom .status_views import (list_status, create_status, edit_status,\n                           status_details, create_flow, edit_flow)\n\n\n# namespace = locations\nurlpatterns = patterns('projects.status_views',\n    url(r'^$', list_status, name='list'),\n    url(r'^create/$', create_status, name='create'),\n    url(r'^(?P<pk>\d+)/edit/$', edit_status, name='edit'),\n    url(r'^(?P<pk>\d+)/$', status_details, name='details'),\n    url(r'^(?P<pk>\d+)/flow/add/$', create_flow,\n        name='create_flow'),\n    url(r'^flow/(?P<pk>\d+)/edit/$', edit_flow,\n        name='edit_flow'),\n)\n","sub_path":"apps/projects/status_urls.py","file_name":"status_urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"68644312","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 25 15:50:55 2020\n\n@author: tilak\n\"\"\"\n\nimport subprocess\nimport os\nimport pandas as pd\nimport cryptography\n\nclass Annonymise:\n    def __init__(self,path=\"\"):\n        if path == \"\":\n            raise Exception(\"Path Empty error\")\n            return\n        \n        path = path.replace(\"\\\\\",'/')\n        os.chdir(path)\n        with open('commands.txt','r') as c:\n            alpha = c.read().splitlines()\n        \n        for i in alpha:\n            subprocess.call(i,shell=True)\n    def encryptexcelfile(self,name=\"\",sheet_name=\"\"):\n        \n        if name == \"\" or sheet_name ==\"\":\n            raise Exception(\"Empty path exception\")\n            return\n        \n        df = pd.read_excel(name, sheet_name=sheet_name)\n        return df\n    \n    def encryptcsvfile(self,name=\"\"):\n        \n        if name == \"\" :\n            raise Exception(\"Empty csv path exception\")\n            return\n        \n        df = pd.read_csv(name)\n        return df\n    \n    def __checkdataspread__(self,df):\n        #check the dataframe data spread in this function\n        columns = df.columns\n        for i in columns:\n            \n            keys = df[i].value_counts().keys().tolist()\n            vals = df[i].value_counts().tolist()\n            hofdf = len(df[i])/10 #10% of len\n            if len(vals) < hofdf:\n                df[i] = self.__encryptcolumn__(df[i])\n        return df\n    \n    def __encryptcolumn__(self,df):\n        #encrypt the column and return \n        \n        return df\n    \n    def __decryptcolumn__(self,df):\n        \n        #need to check if using a public private key architecture\n        return df\n    \n    \n    \n    \n    \n    \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"328186211","text":"import urllib\n\nCLIENT_ID = '2C2OMEYXHWI5LSAJ4EMLFMVYEWVOMH5B5NS50T3GB1UIT354'\nSECRET = 'QO5WJBRCUBWMRVWWNFIWHQHSFSDCQWKGFJACSJ0MEO22LSPY'\n\ndef url(endpoint='/venues/explore', queryDict={'near': 'Chicago, IL', 'query': 'night club'}):\n    full_url = 'https://api.foursquare.com/v2%s?client_id=%s&client_secret=%s' % (\n        endpoint, CLIENT_ID, SECRET)\n    \n    for k, v in queryDict.iteritems():\n        full_url += '&%s=%s' % (k, urllib.quote(v))\n    \n    return full_url\n    \n\n\n","sub_path":"py/fs_api.py","file_name":"fs_api.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"581649630","text":"\"\"\"Filters service and clutter links\"\"\"\nimport 
re\n\nIS_LONG_NAME = re.compile(r\"^\\w{3,},\\s\\w{3,}\\s\\w{3,}$\", re.IGNORECASE)\nIS_SHORT_NAME = re.compile(r\"^\\w{3,},\\s\\w{3,}$\", re.IGNORECASE)\nIS_INITIALS_NAME = re.compile(r\"^\\w\\.\\s\\w\\.\\s \\w{3,}$\", re.IGNORECASE)\nIS_YEAR_NAME = re.compile(r\"^\\w{3,},\\s\\w{3,}$\", re.IGNORECASE)\nIS_YEAR = re.compile(r\"^\\d\\d\\d\\d$\", re.IGNORECASE)\nIS_LONG_YEAR = re.compile(r\"^\\d\\d\\d\\d год$\", re.IGNORECASE)\nIS_BC_YEAR = re.compile(r\"^\\d\\d\\d до н. э.$\", re.IGNORECASE)\nIS_DATE = re.compile(r\"^\\d\\d \\w{2,9}$\", re.IGNORECASE)\n\nMONTHS = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', \n 'Июль', 'Август', 'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь']\n\n\ndef is_irrelevant(link):\n \"\"\"Filters service and clutter links\"\"\"\n return (\n link == 'Викисловарь' or\n link == 'Москва' or\n link.startswith('Википедия:') or\n link.startswith('Шаблон:') or\n link.startswith('Категория:') or\n link.startswith('Обсуждение:') or\n link.startswith('Обсуждение шаблона:') or\n link.startswith('Модуль:') or\n link.startswith('Проект:') or\n link.startswith('Портал:') or\n link.startswith('Энциклопедический словарь') or\n link.startswith('ISO') or\n link.endswith(' язык') or\n link.endswith(' (языки)') or\n link.endswith(' (издательство)') or\n link.endswith(' (книга)') or\n link.endswith(' (город)') or\n link.endswith(' (фильм)') or\n link.endswith(' век') or\n link in MONTHS or\n IS_LONG_NAME.match(link) or\n IS_SHORT_NAME.match(link) or\n IS_INITIALS_NAME.match(link) or\n IS_YEAR.match(link) or\n IS_LONG_YEAR.match(link) or\n IS_BC_YEAR.match(link) or\n IS_DATE.match(link) or\n is_name(link)\n )\n\n\ndef is_name(link):\n \"\"\"Check for names\"\"\"\n return (\n 'Алексей' in link or\n 'Чарльз' in link\n )\n","sub_path":"data/links_filter.py","file_name":"links_filter.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"519570441","text":"__author__ = 'ShravanB'\r\n\r\n'''\r\nMetro is the \"node\" of the graph\r\nholds metro info and routes of each metro\r\n'''\r\n\r\nclass Metro:\r\n def __init__(self, metros):\r\n self.code = metros['code']\r\n self.name = metros['name']\r\n self.country = metros['country']\r\n self.continent = metros['continent']\r\n self.timezone = metros['timezone']\r\n self.coordinates = metros['coordinates']\r\n self.population = metros['population']\r\n self.region = metros['region']\r\n self.routes = {}\r\n self.distance = 0\r\n '''\r\n Add route to routes\r\n @param destination: destination code\r\n @param distance: distance to destination\r\n '''\r\n def add_route(self, destination, distance):\r\n \"\"\"\r\n\r\n :param destination:\r\n :param distance:\r\n :return:\r\n \"\"\"\r\n if destination not in self.routes:\r\n self.routes[destination] = distance\r\n\r\n '''\r\n returns all routes from this metro\r\n '''\r\n def get_routes(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n destinations = ''\r\n for d in self.routes:\r\n destinations += '\\n' + str(d) + ' - ' + str(self.routes[d])\r\n return destinations\r\n\r\n '''\r\n returns all info about metro\r\n includes routes\r\n '''\r\n def get_info(self):\r\n \"\"\"\r\n\r\n :return:\r\n \"\"\"\r\n info = 'Name: ' + self.name \\\r\n + '\\nCode: ' + self.code \\\r\n + '\\nCountry: ' + self.country \\\r\n + '\\nContinent: ' + self.continent \\\r\n + '\\nTime Zone: ' + str(self.timezone) \\\r\n + '\\nCoordinates: ' + str(self.coordinates) \\\r\n + '\\nPopulation: ' + str(self.population) \\\r\n + 
'\\nRegion: ' + str(self.region) \\\r\n               + '\\nRoutes: ' + self.get_routes() + '\\n'\r\n\r\n        return info\r\n","sub_path":"Assignment2.1/src/Metro.py","file_name":"Metro.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"190394038","text":"#!/usr/bin/python\n\nimport urllib\nfrom bs4 import BeautifulSoup\n\nhttpResponse = urllib.urlopen(\"http://www.securitytube.net/video/3000\")\n\nhttpResponse.code\nhtml = httpResponse.read()\nhtml\n\nbs = BeautifulSoup(html, \"lxml\")\n\ndescr = bs.find('div', id='description')\n\nallLinks = bs.find_all('a')\nallLinks\n\nbs.get_text()\n\nvideoLink = bs.find('iframe', {'title' : 'YouTube video player' }) \nvideoLink\n\nvideoLink['src']\n\nforms = bs.find_all('form')\nforms\n\n","sub_path":"Python/Pentesting with Python/Module 4/Module-4-Video-Code/Module-4-Video-3-Code.py","file_name":"Module-4-Video-3-Code.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"36904423","text":"# -*- coding: UTF-8 -*-\nimport logging\nimport os\nimport time\n\nimport pymysql\nfrom bs4 import BeautifulSoup\n\nlogger = logging.getLogger(\"import_tmall_data\")\nlogger.setLevel(level=logging.INFO)\nhandler = logging.FileHandler(\"log.log\")\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\n\nlogger.addHandler(handler)\nlogger.addHandler(console)\n\n\ndef parse_url(url):\n    url_dict = {}\n    query = url[url.find('?') + 1:]\n    query_strs = query.split('&')\n    for query_str in query_strs:\n        strs = query_str.split('=')\n        url_dict[strs[0]] = strs[1]\n    return url_dict\n\n\ndef select_item_info():\n    result = []\n    # Open the database connection\n    db = pymysql.connect(\"127.0.0.1\", \"root\", \"luog@1989\", \"cup\", charset=\"utf8mb4\")\n\n    # Get an operation cursor with the cursor() method\n    cursor = db.cursor()\n\n    # SQL query statement\n    sql = \"select seller_id, item_id from item_info\"\n\n    # Execute the SQL statement\n    try:\n        cursor.execute(sql)\n        # Fetch all rows\n        results = cursor.fetchall()\n        for row in results:\n            result.append(row)\n    except Exception as e:\n        logger.info(\"error! error msg: %s\" % e)\n        # Roll back on error\n        db.rollback()\n        return []\n    logger.info(\"select record! sql: \" + sql)\n    # Close the database connection\n    db.close()\n    return result\n\n\ndef save_data(item_list):\n    \"\"\" Save data to MySQL \"\"\"\n    # Open the database connection\n    db = pymysql.connect(\"127.0.0.1\", \"root\", \"luog@1989\", \"cup\", charset=\"utf8mb4\")\n\n    # Get an operation cursor with the cursor() method\n    cursor = db.cursor()\n\n    # SQL insert statement\n    sql = \"INSERT INTO `item_info`(`collection_time`, `seller_id`, `item_id`, `item_name`, `shop_name`) \" \\\n          \"VALUES (%s, %s, %s, %s, %s)\"\n    list = []\n    for item in item_list:\n        list.append([item[\"collectionTime\"], item[\"sellerId\"], item[\"itemId\"],\n                     item[\"itemName\"], item[\"shopName\"]])\n\n    try:\n        # Execute the SQL statement\n        cursor.executemany(sql, list)\n        # Commit the transaction\n        db.commit()\n    except Exception as e:\n        logger.info(\"error! error msg: %s\" % e)\n        # Roll back on error\n        db.rollback()\n\n    logger.info(\"insert success! 
insert count: %d\" % len(item_list))\n    # Close the database connection\n    db.close()\n\n\ndef run():\n    base_path = os.getcwd() + os.path.sep\n    import_path = base_path + 'import' + os.path.sep\n\n    result = []\n\n    if not os.path.exists(import_path):\n        os.makedirs(import_path)\n\n    file_list = os.listdir(import_path)\n    if len(file_list) == 0:\n        logger.info(\"Input data is empty, terminating!\")\n        exit(0)\n\n    collection_time = time.strftime('%Y-%m-%d', time.localtime())\n    item_result = select_item_info()\n    item_set = set()\n    for data in item_result:\n        item_set.add(\"%s_%s\" % (data[0], data[1]))\n\n    total_count = 0\n    data_count = 0\n    for i in range(0, len(file_list)):\n        path = os.path.join(import_path, file_list[i])\n        logger.info(\"Start processing data [ %s ]\" % path)\n\n        html = \"\"\n        with open(path, 'r', encoding='gb18030') as f:\n            html = f.read()\n\n        soup = BeautifulSoup(html, \"html.parser\")\n        product_iWraps = soup.find_all(\"div\", \"product-iWrap\")\n        for product_iWrap in product_iWraps:\n            total_count = total_count + 1\n            data = {'collectionTime': collection_time}\n            productTitles = product_iWrap.find_all(\"p\", \"productTitle\")\n            for productTitle in productTitles:\n                children = productTitle.findChildren()\n                for child in children:\n                    if 'a' == child.name:\n                        url = child.attrs['href'].strip().replace('\\n', '')\n                        url_dict = parse_url(url)\n                        data['url'] = url\n                        data['sellerId'] = url_dict['user_id'].strip().replace('\\n', '')\n                        data['itemId'] = url_dict['id'].strip().replace('\\n', '')\n                        data['itemName'] = child.attrs['title'].strip().replace('\\n', '')\n\n            productShops = product_iWrap.find_all(\"a\", \"productShop-name\")\n            for productShop in productShops:\n                data['shopName'] = productShop.contents[0].strip().replace('\\n', '')\n\n            productPrices = product_iWrap.find_all(\"p\", \"productPrice\")\n            for productPrice in productPrices:\n                children = productPrice.findChildren()\n                for child in children:\n                    if 'em' == child.name:\n                        data['price'] = child.attrs['title'].strip().replace('\\n', '')\n            # os.remove(path)\n            if \"%s_%s\" % (data['sellerId'], data['itemId']) in item_set:\n                continue\n            if (\"背心\" in data['itemName'] and \"少女\" not in data['itemName']) or \"运动\" in data['itemName']:\n                continue\n\n            item_set.add(\"%s_%s\" % (data['sellerId'], data['itemId']))\n            result.append(data)\n        if len(result) == 0:\n            logger.info(\"Data is empty, nothing to insert!\")\n            continue\n        data_count = data_count + len(result)\n        save_data(result)\n        result = []\n        logger.info(\"Finished processing data [ %s ]\" % path)\n\n    logger.info(\"Processing done, total records: {}, inserted records: {}\".format(total_count, data_count))\n","sub_path":"import_tmall_data.py","file_name":"import_tmall_data.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"550610733","text":"# -*-coding:utf-8-*-\n\"\"\"\n****************************************\nauthor:善待今天\ntime:2019/06/20 23:37\nfile:module1.py\nsoftware:PyCharm Community Edition\nE-mail:2904504961@qq.com\nMotivational motto: Do difficult things and get something\n****************************************\n\"\"\"\n\"\"\"\n6. Create a package pack and add two modules, module1 and module2.\nIn module1, define a function that implements the earlier rock-paper-scissors game;\nthe function controls the number of rounds through its parameter.\nThen import the function from module1 into module2 and call it.\n\"\"\"\n\nimport random\n\ngame = {'1':'scissors','2':'rock','3':'paper'}\n\n\ndef play_game(a):\n    \"\"\"\n    Implements the rock-paper-scissors game.\n    :param a: maximum number of rounds, int\n    :return:\n    \"\"\"\n    if type(a) == int and a > 0:\n        count_play = 0  # number of rounds played\n        count_win = 0  # number of rounds won\n        print(\"Welcome to the ******** rock-paper-scissors ******** game\")\n        while count_play < a:\n            user_choice = input(\"[1] scissors [2] rock [3] paper [4] quit  Please choose: \")\n            
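# the computer picks 1-3 at random; the difference between the two picks decides win/lose/tie below\n            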
pc = random.randint(1,3)\n            if user_choice.isdigit() and int(user_choice) in range(1,5):\n                if int(user_choice) in range(1,4):\n                    count_play += 1\n                    if int(user_choice) - pc == -1 or int(user_choice) - pc == 2:\n                        print(\"You played: {}, the computer played: {}, you lose!\".format(game[user_choice],game[str(pc)]))\n                    elif int(user_choice) - pc == 0:\n                        print(\"You played: {}, the computer played: {}, it's a tie!\".format(game[user_choice], game[str(pc)]))\n                    else:\n                        count_win += 1\n                        print(\"You played: {}, the computer played: {}, you win!\".format(game[user_choice], game[str(pc)]))\n                else:\n                    if count_play == 0:\n                        print(\"No rounds were scored this game!\")\n                        return\n                    else:\n                        print(\"Your win rate this game: {:.2%}\".format(count_win / count_play))\n                        return\n            else:\n                print(\"Please choose a number from 1 to 4!\")\n        print(\"Your win rate this game: {:.2%}\".format(count_win / count_play))\n    else:\n        print(\"The number of rounds must be an integer greater than 0!\")\n\ndef main(a):\n    play_game(a)\n\n\n\n\n\n","sub_path":"20190508_007/pack/module1.py","file_name":"module1.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"266905257","text":"import namesDictionary as ND\nimport pandas as pd\nimport pandas_dataframes_functions as pdFunc\nimport csv_functionalities as csvFunc\nimport os\n\n\ndef main():\n    path = ND.MAIN_DIRECTORY\n    files = os.listdir(path)\n\n    global_data_frame = pd.DataFrame()\n    for year in sorted(files):\n        year_path = os.path.join(path, year)\n        year_months = os.listdir(year_path)\n        for month in sorted(year_months):\n            month_path = os.path.join(year_path, month)\n            month_days = os.listdir(month_path)\n            for day in sorted(month_days):\n                print(day)\n                day_path = os.path.join(month_path, day)\n                date_format = day.replace(\"_\", \"\") # getting date format as YYYYMMDD\n\n                # SECTION 1: Fast Lane Characteristics files\n                # Characteristics directory\n                characteristics_directory = os.path.join(\n                    day_path, ND.CHARACTERISTICS_DIR\n                )\n                # FastlaneCharacteristics file\n                characteristics_file = os.path.join(\n                    characteristics_directory,\n                    ND.FASTLANE_FILE_PREFIX\n                    + \"{}\".format(date_format)\n                    + ND.XML_FILES_ENDING,\n                )\n                if not os.path.exists(characteristics_file):\n                    raise FileNotFoundError(characteristics_file)\n\n                characteristics_reader = csvFunc.create_csv_reader(characteristics_file)\n\n                characteristics_table_list = csvFunc.to_table_list_without_empty_values(\n                    characteristics_reader\n                )\n\n                characteristics_data_frame = pdFunc.create_general_dataframe_from_table(\n                    characteristics_table_list\n                )\n\n                main_data_frame = characteristics_data_frame # This is the main dataframe per day (sorted by timestamp)\n                # SECTION 2: Typ_AQZustandArchiv files\n\n                # Typ_Archiv directory\n                typ_archive_directory = os.path.join(day_path, ND.TYP_ARCHIVE_DIR)\n                for arch_file_prefix, var_name in ND.GREEN_FILES_PREFIX.items():\n                    arch_file_name = (\n                        arch_file_prefix\n                        + \"{}\".format(date_format)\n                        + ND.XML_FILES_ENDING\n                    )\n                    archive_file = os.path.join(typ_archive_directory, arch_file_name)\n                    if not os.path.exists(archive_file):\n                        continue # if file is not found, continue with next one\n\n                    archive_reader = csvFunc.create_csv_reader(archive_file)\n\n                    archive_table_list = csvFunc.to_table_list_without_empty_values(\n                        archive_reader\n                    )\n\n                    archive_data_frame = pdFunc.create_general_dataframe_from_table(\n                        archive_table_list\n                    )\n\n                    if any(\n                        arch_file_prefix in file_pfx\n                        for file_pfx in ND.GREEN_FILES_PREFIX_BATCH_1\n                    ):\n                        # drop all the columns except for the date, time and C_Actors_0_Reason\n                        archive_data_frame = pdFunc.drop_all_columns_except(\n                            [\"C_Actors_0_Reason\"], archive_data_frame,\n                        )\n                        pdFunc.rename_columns_in_dataframe(\n                            
archive_data_frame, [\"C_Actors_0_Reason\"], [var_name],\n )\n else:\n # if file is found in GREEN_FILES_PREFIX_BATCH_2\n # drop all the columns except for the date, time and C_Actors_1_Reason\n archive_data_frame = pdFunc.drop_all_columns_except(\n [\"C_Actors_1_Reason\"], archive_data_frame,\n )\n pdFunc.rename_columns_in_dataframe(\n archive_data_frame, [\"C_Actors_1_Reason\"], [var_name],\n )\n # Here is where we add the columns that correspond to the discrete data\n # (i.e. those incidents reported only at specific times)\n main_data_frame = (\n pdFunc.merge_dataframes_on_fist_smaller(\n main_data_frame, archive_data_frame, tol=\"59s\",\n )\n .apply(pdFunc.propagate_values_until_next)\n .apply(pdFunc.fill_NaN_with_default)\n ) # propagate the values of one occurrence until a new one and fill with \"default\" the remaining\n\n # SECTION 3: RhoStruct files\n rho_struct_directory = os.path.join(day_path, ND.STRUCTURE_DIR)\n # RED FILES: Current time\n for rs_current_time_file_prefix in ND.RS_FILES_CURRENT_TIME:\n rs_current_time_file_name = (\n rs_current_time_file_prefix\n + \"{}\".format(date_format)\n + ND.XML_FILES_ENDING\n )\n rs_current_time_file = os.path.join(\n rho_struct_directory, rs_current_time_file_name\n )\n if not os.path.exists(rs_current_time_file):\n continue # if file is not found, continue with next one\n\n rs_current_time_reader = csvFunc.create_csv_reader(\n rs_current_time_file\n )\n\n rs_current_time_table_list = csvFunc.to_table_list_without_empty_values(\n rs_current_time_reader\n )\n\n rs_current_time_data_frame = pdFunc.create_general_dataframe_from_table(\n rs_current_time_table_list\n )\n\n rs_current_time_data_frame = pdFunc.drop_all_columns_except(\n [\n \"C_q_Lkw__wert\",\n \"C_q_Pkw_wert\",\n \"C_q_Kfz_wert\",\n \"C_v_Kfz_wert\",\n ],\n rs_current_time_data_frame,\n )\n\n pdFunc.rename_columns_in_dataframe(\n rs_current_time_data_frame,\n [\n \"C_q_Lkw__wert\",\n \"C_q_Pkw_wert\",\n \"C_q_Kfz_wert\",\n \"C_v_Kfz_wert\",\n ],\n ND.RS_FILES_CURRENT_TIME[rs_current_time_file_prefix],\n )\n\n main_data_frame = pdFunc.merge_dataframes_on_fist_smaller(\n main_data_frame, rs_current_time_data_frame,\n ) # adding data from current time\n\n # RED FILES: Previous time\n for (\n rs_previous_time,\n var_names_min,\n ) in ND.RHO_STRUCT_PREVIOUS_TIME.items():\n rs_previous_time_file_name = (\n rs_previous_time\n + \"{}\".format(date_format)\n + ND.XML_FILES_ENDING\n )\n rs_previous_time_file = os.path.join(\n rho_struct_directory, rs_previous_time_file_name\n )\n if not os.path.exists(rs_previous_time_file):\n continue # if file is not found, continue with next one\n\n rs_previous_time_reader = csvFunc.create_csv_reader(\n rs_previous_time_file\n )\n\n rs_previous_time_table_list = csvFunc.to_table_list_without_empty_values(\n rs_previous_time_reader\n )\n\n rs_previous_time_data_frame = pdFunc.create_general_dataframe_from_table(\n rs_previous_time_table_list\n )\n\n rs_previous_time_data_frame = pdFunc.drop_all_columns_except(\n [\"C_q_Kfz_wert\", \"C_v_Kfz_wert\"], rs_previous_time_data_frame,\n )\n\n # HERE WE START DOING STUFF FOR THE HANDLING OF THE MINUTES\n minutes_pattern = [\"_15\", \"_30\", \"_45\", \"_60\"]\n for minute in minutes_pattern:\n # Copy rs_previous_time_data_frame into a new variable per minute pattern\n rs_previous_time_df_new = rs_previous_time_data_frame.copy(\n deep=True\n )\n # Get those names that contain the minute pattern\n new_names_with_minutes = [\n name for name in var_names_min if minute in name\n ]\n 
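# relabel the kept traffic columns with the names that ND.RS_FILES_CURRENT_TIME maps to this file prefix\n                    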
pdFunc.rename_columns_in_dataframe(\n rs_previous_time_df_new,\n [\"C_q_Kfz_wert\", \"C_v_Kfz_wert\"],\n new_names_with_minutes,\n )\n # remove last rows from dataframe based on minutes\n min_mod = int(minute.replace(\"_\", \"\")) # in int\n if min_mod == 60:\n min_str_fmt = \"01:00:00\"\n else:\n min_str_fmt = \"0:{}:00\".format(\n min_mod\n ) # in str with format \"%H:%M:%S\"\n # Here, the last time in data frame will vary depending on the minute in the loop:\n # For:\n # 15 min -> 23:45:00\n # 30 min -> 23:30:00\n # 45 min -> 23:15:00\n # 60 min -> 23:00:00\n last_time_in_df = pdFunc.subtract_time_str(\n \"23:59:00\", min_str_fmt\n )\n rs_previous_time_df_new = rs_previous_time_df_new.between_time(\n \"00:00:00\", last_time_in_df\n )\n # Add the minutes to the timestamp so they match with the current time\n pdFunc.add_minutes_to_timestamp_idx(\n rs_previous_time_df_new, min_mod\n )\n # And, finally, adding only the values between 01:00:00 and 24:00:00\n rs_previous_time_df_new = rs_previous_time_df_new.between_time(\n \"01:00:00\", \"23:59:00\"\n )\n main_data_frame = pdFunc.merge_dataframes_on_fist_smaller(\n main_data_frame, rs_previous_time_df_new,\n ) # adding data from current time\n\n if global_data_frame.empty:\n global_data_frame = main_data_frame\n else:\n # Appending elements to the bottom of the data frame\n global_data_frame = pd.concat(\n [global_data_frame, main_data_frame],\n ignore_index=False,\n axis=0,\n )\n\n # deleting \"traffic_load\", \"fee\" and \"automatic_fee\" from table\n global_data_frame = pdFunc.drop_selected_columns(\n [\"traffic_load\", \"fee\", \"automatic_fee\"], global_data_frame\n )\n # fill empty cells with 0\n global_data_frame = global_data_frame.fillna(0)\n # export dataframe to csv\n pdFunc.convert_to_csv(\"table_with_data_FINAL_nueva.csv\", global_data_frame)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"564784519","text":"'''\nThis file gets the predicted tec maps by first loading the saved model and then running on the test input.\n'''\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport sys\nsys.path.append(\"../STResNet\")\nfrom st_resnet import STResNetShared, STResNetIndep\nimport tensorflow as tf\nfrom params import Params as param\nimport pandas as pd\nimport numpy as np\nimport sqlite3\nfrom tqdm import tqdm\nimport datetime as dt\nimport os\nimport shutil\nimport numpy\nimport time\nimport glob\nfrom omn_utils import OmnData\nfrom batch_utils import BatchDateUtils, TECUtils\n\n# Set batch size to 1 for texting\ntest_batch_size = 1\n\n# Set the test period\nparam.start_date = dt.datetime(2015, 3, 1)\nparam.end_date = dt.datetime(2015, 4, 1)\n\n# Select the model folder and model epock\n#saved_model_path = \"\" #enter the model name for getting the prediction, eg. 
\"model_batch8_epoch1_resnet10_nresfltr12_nfltr12_of1_otec12_cf1_csl12_pf12_psl24_tf36_tsl8_gs32_ks55_exoT_nrmT_1.250402212\n#param.saved_model_path = \"./model_results/model_batch64_epoch100_resnet100_nresfltr24_nfltr12_of2_otec24_cf2_csl72_pf12_psl72_tf36_tsl8_gs32_ks55_exoT_nrmT_yr_11_13_310.1902163028717\"\n#param.saved_model = '/epoch_24' #'/epoch_X', for loading the model saved at X epoch\n\nparam.saved_model_path = \"./model_results/model_batch64_epoch100_resnet100_nresfltr12_nfltr12_of2_otec12_cf2_csl72_pf12_psl72_tf36_tsl8_gs32_ks55_exoT_nrmT_w0_yr_11_13_379.3419065475464\"\nparam.saved_model = '/epoch_33' #'/epoch_X', for loading the model saved at X epoch\n\n#param.saved_model = '/current' # for loading the final saved model\n\n# Extract hyperparameter values from saved_model_path\nparam_values = param.saved_model_path.split(\"/\")[-1].split(\"_\")\nparam.batch_size = [int(x.replace(\"batch\", \"\")) for x in param_values if x.startswith(\"batch\")][0]\nparam.num_epochs = [int(x.replace(\"epoch\", \"\")) for x in param_values if x.startswith(\"epoch\")][0]\nparam.num_of_residual_units = [int(x.replace(\"resnet\", \"\")) for x in param_values if x.startswith(\"resnet\")][0]\nparam.resnet_out_filters = [int(x.replace(\"nresfltr\", \"\")) for x in param_values if x.startswith(\"nresfltr\")][0]\nparam.num_of_filters = [int(x.replace(\"nfltr\", \"\")) for x in param_values if x.startswith(\"nfltr\")][0]\nparam.output_freq = [int(x.replace(\"of\", \"\")) for x in param_values if x.startswith(\"of\")][0]\nparam.num_of_output_tec_maps = [int(x.replace(\"otec\", \"\")) for x in param_values if x.startswith(\"otec\")][0]\nparam.closeness_freq = [int(x.replace(\"cf\", \"\")) for x in param_values if x.startswith(\"cf\")][0]\nparam.closeness_sequence_length = [int(x.replace(\"csl\", \"\")) for x in param_values if x.startswith(\"csl\")][0]\nparam.period_freq = [int(x.replace(\"pf\", \"\")) for x in param_values if x.startswith(\"pf\")][0]\nparam.period_sequence_length = [int(x.replace(\"psl\", \"\")) for x in param_values if x.startswith(\"psl\")][0]\nparam.trend_freq = [int(x.replace(\"tf\", \"\")) for x in param_values if x.startswith(\"tf\")][0]\nparam.trend_sequence_length = [int(x.replace(\"tsl\", \"\")) for x in param_values if x.startswith(\"tsl\")][0]\nparam.gru_size = [int(x.replace(\"gs\", \"\")) for x in param_values if x.startswith(\"gs\")][0]\nks = [x.replace(\"ks\", \"\") for x in param_values if x.startswith(\"ks\")][0]\nparam.kernel_size = (int(ks[0]), int(ks[1]))\nexo = [x.replace(\"exo\", \"\") for x in param_values if x.startswith(\"exo\")][0]\nnrm = [x.replace(\"nrm\", \"\") for x in param_values if x.startswith(\"nrm\")][0]\nweight_num = [x for x in param_values if x.startswith(\"w\")]\nif weight_num:\n #param.weight_file = weight_num[0] + \"_mlat_45-70_1.0_mlat_80-90_1.0_mlon_None.npy\"\n param.loss_weight_matrix = glob.glob(os.path.join(param.weight_dir, weight_num[0] + \"*\"))[0]\nelse:\n pass\nif exo == \"T\":\n param.add_exogenous = True\nelse:\n param.add_exogenous = False \nif nrm == \"T\":\n param.imf_normalize = True\nelse:\n param.imf_normalize = False \n\n#closeness is sampled 12 times every 5 mins, lookback = (12*5min = 1 hour)\n#freq 1 is 5mins\n#size corresponds to the sample size\ncloseness_size = param.closeness_sequence_length\n\n#period is sampled 24 times every 1 hour (every 12th index), lookback = (24*12*5min = 1440min = 1day)\nperiod_size = param.period_sequence_length\n\n#trend is sampled 24 times every 3 hours (every 36th index), lookback = 
(8*36*5min = 1440min = 1day)\ntrend_size = param.trend_sequence_length\n\n# We need OMNI data for testing\n# setting appropriate vars \nomn_train=False\nstart_date_omni = param.start_date - dt.timedelta(days=param.load_window)\nend_date_omni = param.end_date + dt.timedelta(days=param.load_window)\n\n# Copy the trained model to current folder\nif not os.path.exists(param.saved_model_path):\n print(\"Please copy the model to ./model_results/\")\n print(\"Exitting... \")\n exit()\n\npath = param.saved_model_path+\"_values\"\n\n#getting the omni object\nomnObj = OmnData(start_date_omni, end_date_omni, param.omn_dbdir, param.omn_db_name, param.omn_table_name, omn_train, param.imf_normalize, path)\n\n# get all corresponding dates for batches\nbatchObj = BatchDateUtils(param.start_date, param.end_date, test_batch_size, param.tec_resolution, param.data_point_freq,\\\n param.closeness_freq, closeness_size, param.period_freq, period_size,\\\n param.trend_freq, trend_size, param.num_of_output_tec_maps, param.output_freq,\\\n param.closeness_channel, param.period_channel, param.trend_channel)\n \n#getting all the datetime from which prediction has to be made \n#date_arr_test = np.array( list(batchObj.batch_dict.keys()) )\npred_horizon = param.tec_resolution*param.output_freq*param.num_of_output_tec_maps # in minutes\nnum_test_iter = int((param.end_date - param.start_date).total_seconds() / 60. / pred_horizon)\ndate_arr_test = [param.start_date + dt.timedelta(minutes=i*pred_horizon) for i in range(num_test_iter)]\n\n\n# Bulk load TEC data\ntecObj = TECUtils(param.start_date, param.end_date, param.file_dir, param.tec_resolution, param.load_window,\\\n param.closeness_channel, param.period_channel, param.trend_channel)\n \nweight_matrix = np.load(param.loss_weight_matrix)\n#converting by repeating the weight_matrix into a desired shape of (B, O, H, W)\nweight_matrix_expanded = np.expand_dims(weight_matrix, 0)\nweight_matrix_tiled = np.tile(weight_matrix_expanded, [test_batch_size*param.num_of_output_tec_maps, 1, 1])\nloss_weight_matrix = np.reshape(weight_matrix_tiled, [test_batch_size, param.num_of_output_tec_maps, param.map_height, param.map_width])\n\n#converting the dimension from (B, O, H, W) -> (B, H, W, O)\nloss_weight_matrix = np.transpose(loss_weight_matrix, [0, 2, 3, 1])\n\n#creating directory inside the model_path_values folder for those datetime variables for which prediction is made\npath_pred = path+'/'+\"predicted_tec/\"\nif not os.path.exists(path_pred):\n os.makedirs(path_pred)\n\n# Parameters for tensor flow\nif(param.independent_channels == True): \n g = STResNetIndep()\n print (\"Computation graph for ST-ResNet with independent channels loaded\\n\")\n\nelse:\n g = STResNetShared()\n print (\"Computation graph for ST-ResNet with shared channels loaded\\n\")\n\nwith tf.Session(graph=g.graph) as sess:\n #loading the trained model whose path is given in the params file\n g.saver.restore(sess, param.saved_model_path+param.saved_model)\n \n loss_values = []\n for te_ind, current_datetime in tqdm(enumerate(date_arr_test)):\n print(\"Testing data point-->\" + current_datetime.strftime(\"%Y%m%d-%H%M\"))\n\n #get the batch of data points\n curr_batch_time_dict = batchObj.batch_dict[current_datetime]\n \n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n \n #if we need to use the exogenous module\n if (param.add_exogenous == True):\n imf_batch = omnObj.get_omn_batch(current_datetime, test_batch_size, param.trend_freq, trend_size )\n \n 
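# run the graph with whichever of the closeness/period/trend input channels are enabled in params\n            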
if(param.closeness_channel == True and param.period_channel == True and param.trend_channel == True):\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, period, trend = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_period, g.exo_trend],\n feed_dict={g.c_tec: data_close,\n g.p_tec: data_period,\n g.t_tec: data_trend,\n g.output_tec: data_out,\n g.exogenous: imf_batch,\n g.loss_weight_matrix: loss_weight_matrix})\n elif(param.closeness_channel == True and param.period_channel == True and param.trend_channel == False):\n #here the data_trend will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, period = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_period],\n feed_dict={g.c_tec: data_close,\n g.p_tec: data_period,\n g.output_tec: data_out,\n g.exogenous: imf_batch,\n g.loss_weight_matrix: loss_weight_matrix})\n elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == True): \n #here the data_period will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, trend = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close, g.exo_trend],\n feed_dict={g.c_tec: data_close,\n g.t_tec: data_trend,\n g.output_tec: data_out,\n g.exogenous: imf_batch,\n g.loss_weight_matrix: loss_weight_matrix})\n elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == False):\n #here the data_period, data_trend will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness = sess.run([g.loss, g.x_res, g.output_tec, g.exo_close],\n feed_dict={g.c_tec: data_close,\n g.output_tec: data_out,\n g.exogenous: imf_batch,\n g.loss_weight_matrix: loss_weight_matrix})\n #if we dont want to use the exogenous module \n else:\n if(param.closeness_channel == True and param.period_channel == True and param.trend_channel == True):\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, period, trend = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.period_output, g.trend_output],\n feed_dict={g.c_tec: data_close,\n g.p_tec: data_period,\n g.t_tec: data_trend,\n g.output_tec: data_out,\n g.loss_weight_matrix: loss_weight_matrix})\n elif(param.closeness_channel == True and param.period_channel == True and param.trend_channel == False):\n #here the data_trend will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, period = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.period_output],\n feed_dict={g.c_tec: data_close,\n g.p_tec: data_period,\n g.output_tec: data_out,\n g.loss_weight_matrix: loss_weight_matrix}) \n elif(param.closeness_channel == True and param.period_channel == False and param.trend_channel == True):\n #here the data_period will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness, trend = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output, g.trend_output],\n feed_dict={g.c_tec: data_close,\n g.t_tec: data_trend,\n g.output_tec: data_out,\n g.loss_weight_matrix: loss_weight_matrix}) \n elif(param.closeness_channel == True and 
param.period_channel == False and param.trend_channel == False):\n #here the data_period,data_trend will be empty\n data_close, data_period, data_trend, data_out = tecObj.create_batch(curr_batch_time_dict)\n loss_v, pred, truth, closeness = sess.run([g.loss, g.x_res, g.output_tec, g.closeness_output],\n feed_dict={g.c_tec: data_close,\n g.output_tec: data_out,\n g.loss_weight_matrix: loss_weight_matrix})\n loss_values.append(loss_v) \n print(\"val_loss: {:.3f}\".format(loss_v)) \n \n #saving the predictions, one file for one TEC map\n for j, point_dtm_key in enumerate(curr_batch_time_dict.keys()):\n for k, dtm in enumerate(curr_batch_time_dict[point_dtm_key][\"future_dtm\"]):\n tec_pred = dtm.strftime(\"%Y%m%d.%H%M\") + \"_pred.npy\"\n np.save(path_pred+tec_pred, pred[j, :, :, k])\n\n tec_close = point_dtm_key.strftime(\"%Y%m%d.%H%M\") + \"_close.npy\"\n tec_period = point_dtm_key.strftime(\"%Y%m%d.%H%M\") + \"_period.npy\"\n tec_trend = point_dtm_key.strftime(\"%Y%m%d.%H%M\") + \"_trend.npy\"\n if(param.closeness_channel == True):\n np.save(path_pred+tec_close, closeness[j])\n if(param.period_channel == True):\n np.save(path_pred+tec_period, period[j])\n if(param.trend_channel == True): \n np.save(path_pred+tec_trend, trend[j])\n \n loss_values = np.array(loss_values)\n print ('Saving loss values in the .npy file ...') \n np.save(path_pred+'prediction_loss.npy', loss_values) \n","sub_path":"ModelValidation/gen_pred_tec.py","file_name":"gen_pred_tec.py","file_ext":"py","file_size_in_byte":16152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"591535423","text":"from tensorflow.keras import layers\nfrom tensorflow.keras.models import Model\nfrom config import IMAGE_INPUT_SHAPE, AUX_INPUT_SHAPE\nfrom network_output_classes import NetworkOutputClasses\n\ndef create_model():\n image_input = layers.Input(name = 'image_input', shape = IMAGE_INPUT_SHAPE)\n aux_input = layers.Input(name = 'aux_input', shape = AUX_INPUT_SHAPE)\n\n # in: 768 x 1024\n y = conv(image_input, filters = 8, strides = (2, 2))\n\n # in: 384 x 512\n y = conv(y, filters = 12, strides = (2, 2))\n\n # in: 192 x 256\n y = conv(y, filters = 16, strides = (2, 2))\n\n # in: 96 x 128\n y = conv(y, filters = 16)\n y = layers.MaxPool2D(pool_size = (2, 2))(y)\n\n # in: 48 x 64\n y = conv(y, filters = 24)\n y = layers.MaxPool2D(pool_size = (2, 2))(y)\n\n # in: 24 x 32\n y = conv(y, filters = 24)\n y = layers.MaxPool2D(pool_size = (2, 2))(y)\n\n # in: 12 x 16\n y = conv(y, filters = 48)\n y = layers.MaxPool2D(pool_size = (2, 2))(y)\n\n # in: 6 x 8\n y = conv(y, filters = 64)\n y = layers.AveragePooling2D(pool_size = (6, 8))(y)\n y = layers.Flatten()(y)\n\n aux = dense(aux_input, 12)\n aux = dense(aux, 12)\n\n y = layers.Concatenate()([y, aux])\n y = dense(y, 64)\n y = dense(y, 32)\n\n y = layers.Dense(len(NetworkOutputClasses), activation = 'softmax')(y)\n\n return Model(\n inputs = [image_input, aux_input],\n outputs = y\n )\n\ndef conv(x, filters, kernel_size = (3, 3), strides = (1, 1), padding = 'same'):\n y = layers.Conv2D(\n filters = filters,\n kernel_size = kernel_size,\n strides = strides,\n padding = padding\n )(x)\n\n y = layers.BatchNormalization()(y)\n y = layers.ReLU()(y)\n\n return y\n\ndef dense(x, size):\n y = layers.Dense(size)(x)\n y = layers.ReLU()(y)\n\n return y\n\nif __name__ == '__main__':\n 
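# quick smoke test: build the model and print its layer summary\n    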
print(create_model().summary())\n","sub_path":"classifier/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"408263693","text":"T = int(input())\nfor hhh in range(T):\n L = input().strip().split()\n s = L[0]\n k = int(L[1])\n aList = [s[i:j] for i in range(len(s)) for j in range(i + 1, len(s) + 1)]\n set1 = set(aList)\n bList = [i for i in set1 if aList.count(i) == k]\n bList = [len(i) for i in bList]\n set1 = set(bList)\n bList = [(bList.count(i), i) for i in set1]\n bList.sort(reverse=True)\n if bList:\n print(bList[0][1])\n else:\n print(-1)","sub_path":"Code/CodeRecords/2190/60799/239053.py","file_name":"239053.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"78945798","text":"\"\"\" helpers.py: helpers for customer \"\"\"\n\nimport logging\nfrom flask import jsonify, current_app, request, g\n\nfrom models import models\n\nlog = logging.getLogger(__name__)\n\ndef get_customer_by_name(name):\n\n customer = models.CustomerModel.query.filter_by(name=name).first()\n\n return customer\n\ndef get_customer_by_uuid(uuid):\n\n customer = models.CustomerModel.query.filter_by(uuid=uuid).first()\n\n return customer\n","sub_path":"apiroot/customer/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"650759957","text":"import numpy as np\nfrom glob import glob\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as fits\nimport astropy.units as unit\nimport astropy.constants as const\nsys.path.append('/home/robert/Documents/python/python_tools')\nfrom spec_tools import *\nfrom normalize_tools import *\n\nnormalize = True\n\nregion = '6678'\nnbins = 2048\n\nfits_files = sorted(glob('spectra/ondrejov_COUDE700/ha/*.fit'))\nfiles = sorted(glob('spectra/ondrejov_COUDE700/ha/*.dat'))\n\nprint('number of files: ', len(files))\n\ndatfile = 'KOREL/' + region + '/korel.dat_ondrejov_COUDE700_'+ str(nbins)\n\nif os.path.isfile(datfile):\n os.remove(datfile)\n\nSNRs = []\nMJDs = []\n\nfor i, fits_file in enumerate(fits_files):\n\n wav, flux, MJD, dateobs, datereduc, fitsfile = loadfits(fits_file)\n MJDs.append(MJD)\n #print(MJD, wav[0], wav[-1], wav[0]/(wav[1]-wav[0]))\n\n SNRs.append(get_spec_snr(wav, flux, (6658, 6698)))\n\nprint('maximum SNR: ', np.amax(SNRs))\n\nfor i, file in enumerate(files):\n\n wav, flux = np.loadtxt(file, unpack=True)\n\n flux = flux[(wav>6653) & (wav<6703)]\n wav = wav[(wav>6653) & (wav<6703)]\n\n print(SNRs[i])\n\n if normalize:\n\n cont = normalize_line_polynom(wav, flux, 6668, 6688, polynom=1)\n flux = flux / cont(wav)\n\n plt.plot(wav, flux, alpha=0.5, color='black', lw=0.5)\n plt.text(6655, 1.2, 'SNR = {:.2f}'.format(SNRs[i]))\n plt.axhline(y=1, lw=0.5, color='grey', ls=':')\n # ax.set_xlim(3995, 4045)\n # ax.set_xlim(4450, 4498)\n plt.ylim(0.5, 1.3)\n plt.savefig(file+'.png', format='png', dpi=300)\n\n plt.close()\n\n # plt.text(wav[-1]-10, 0.97, 'SNR = {:.2f}'.format(SNRs[i]))\n weight = (SNRs[i]/854.4600241890643)**2\n # plt.text(wav[-1]-10, 0.93, 'weight = {:.2f}'.format(weight))\n\n print('datapoints in region: ', len(wav))\n\n wav_new = np.linspace(6653., 6702.5, num=nbins)\n\n x = 3e5 * np.log(wav_new / wav_new[0])\n delta_x_min = x[-1] - x[-2]\n print('smallest RV step: ', delta_x_min)\n rv_step 
= delta_x_min\n\n equidistant_log_scale = np.empty(nbins)\n for j in range(len(equidistant_log_scale)):\n equidistant_log_scale[j] = x[0] + j * rv_step\n\n wav_equi = np.exp(equidistant_log_scale / 3e5) * wav_new[0]\n flux_equi = np.interp(wav_equi, wav, flux)\n\n # for i in range(1, len(wav_equi)):\n # print(3e5 * (np.log(wav_equi[i]) - np.log(wav_equi[i-1])))\n\n wav, flux = wav_equi, flux_equi\n\n flux[0], flux[-1] = 1.0, 1.0\n\n with open(datfile, 'a') as koreldat:\n if i != 0:\n koreldat.write('\\n')\n # koreldat.write('{:12.5f}{:10.4f}{:7.3f} {:5.3f} {}\\n'.format(MJD, wav[0], rv_step, weight, nbins))\n koreldat.write('{:12.5f}{:10.4f}{:7.3f} {:5.3f} {}\\n'.format(\n MJDs[i], wav[0], rv_step, weight, nbins))\n for j in range(len(flux)):\n if j != 0:\n if j % 10 == 0:\n koreldat.write('\\n')\n koreldat.write(' {:7.5f}'.format(flux[j]))\n\nprint(np.amax(SNRs))\n","sub_path":"PREKOR.py","file_name":"PREKOR.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"378090064","text":"# -*- coding: utf-8 -*-\nfrom DofLog import *\n\nfrom wget import download\nfrom zipfile import ZipFile\n\nfrom sys import argv, exit\nfrom PyQt5 import QtCore, QtGui, QtWidgets, Qt\n\nclass CheckList(QtWidgets.QListWidget):\n def __init__(self, strings, parent=None):\n super().__init__(parent)\n for text in strings:\n self.createItems(text)\n\n def createItems(self, text):\n item = QtWidgets.QListWidgetItem(text)\n item.setFlags(item.flags() | Qt.Qt.ItemIsUserCheckable)\n item.setCheckState(Qt.Qt.Unchecked)\n self.addItem(item)\n\nclass DofLogWindow(QtWidgets.QMainWindow):\n def closeEvent(self, event):\n toaster_thread.isRunning = False\n discord_thread.stop()\n event.accept()\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n self.icon = QtGui.QIcon(\"res/icon.ico\")\n MainWindow.setMaximumSize(QtCore.QSize(315, 185))\n\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(363, 182)\n MainWindow.setWindowIcon(self.icon)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)\n self.gridLayout_2.setObjectName(\"gridLayout_2\")\n self.gridLayout = QtWidgets.QGridLayout()\n self.gridLayout.setObjectName(\"gridLayout\")\n self.nameLE = QtWidgets.QLineEdit(self.centralwidget)\n self.nameLE.setMinimumSize(QtCore.QSize(153, 0))\n self.nameLE.setMaximumSize(QtCore.QSize(153, 16777215))\n self.nameLE.setObjectName(\"nameLE\")\n self.gridLayout.addWidget(self.nameLE, 0, 1, 1, 1)\n self.passwordLbl = QtWidgets.QLabel(self.centralwidget)\n self.passwordLbl.setMaximumSize(QtCore.QSize(97, 16777215))\n self.passwordLbl.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n self.passwordLbl.setObjectName(\"passwordLbl\")\n self.gridLayout.addWidget(self.passwordLbl, 2, 0, 1, 1)\n self.connectBtn = QtWidgets.QPushButton(self.centralwidget)\n self.connectBtn.setObjectName(\"connectBtn\")\n self.gridLayout.addWidget(self.connectBtn, 4, 2, 1, 1, QtCore.Qt.AlignTop)\n self.organiserBtn = QtWidgets.QPushButton(self.centralwidget)\n self.organiserBtn.setObjectName(\"organiserBtn\")\n self.gridLayout.addWidget(self.organiserBtn, 2, 2, 1, 1, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignLeft)\n self.gridLayout_3 = QtWidgets.QGridLayout()\n self.gridLayout_3.setObjectName(\"gridLayout_3\")\n self.upBtn = QtWidgets.QPushButton(self.centralwidget)\n 
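# arrow buttons for moving the selected account up or down in the list\n        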
self.upBtn.setMaximumSize(QtCore.QSize(30, 16777215))\n self.upBtn.setObjectName(\"upBtn\")\n self.gridLayout_3.addWidget(self.upBtn, 2, 0, 1, 1, QtCore.Qt.AlignRight)\n self.downBtn = QtWidgets.QPushButton(self.centralwidget)\n self.downBtn.setMaximumSize(QtCore.QSize(30, 16777215))\n self.downBtn.setObjectName(\"downBtn\")\n self.gridLayout_3.addWidget(self.downBtn, 3, 0, 1, 1, QtCore.Qt.AlignRight)\n self.gridLayout.addLayout(self.gridLayout_3, 4, 0, 1, 1)\n self.addBtn = QtWidgets.QPushButton(self.centralwidget)\n self.addBtn.setMaximumSize(QtCore.QSize(75, 16777215))\n self.addBtn.setObjectName(\"addBtn\")\n self.gridLayout.addWidget(self.addBtn, 0, 2, 1, 1, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignLeft)\n self.settingsBtn = QtWidgets.QPushButton(self.centralwidget)\n self.settingsBtn.setMaximumSize(QtCore.QSize(75, 16777215))\n self.settingsBtn.setObjectName(\"settingsBtn\")\n self.gridLayout_3.addWidget(self.settingsBtn, 0, 0, 1, 1, QtCore.Qt.AlignRight)\n self.passwordLE = QtWidgets.QLineEdit(self.centralwidget)\n self.passwordLE.setMinimumSize(QtCore.QSize(153, 0))\n self.passwordLE.setMaximumSize(QtCore.QSize(153, 16777215))\n self.passwordLE.setFrame(True)\n self.passwordLE.setEchoMode(QtWidgets.QLineEdit.Password)\n self.passwordLE.setObjectName(\"passwordLE\")\n self.gridLayout.addWidget(self.passwordLE, 2, 1, 1, 1)\n self.listWidget = CheckList([])\n self.listWidget.setMinimumSize(QtCore.QSize(0, 0))\n self.listWidget.setMaximumSize(QtCore.QSize(153, 75))\n self.listWidget.setObjectName(\"listWidget\")\n self.gridLayout.addWidget(self.listWidget, 4, 1, 1, 1)\n self.usernameLbl = QtWidgets.QLabel(self.centralwidget)\n self.usernameLbl.setMaximumSize(QtCore.QSize(97, 16777215))\n self.usernameLbl.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n self.usernameLbl.setObjectName(\"usernameLbl\")\n self.gridLayout.addWidget(self.usernameLbl, 1, 0, 1, 1)\n self.nameLbl = QtWidgets.QLabel(self.centralwidget)\n self.nameLbl.setMaximumSize(QtCore.QSize(97, 16777215))\n self.nameLbl.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\n self.nameLbl.setObjectName(\"nameLbl\")\n self.gridLayout.addWidget(self.nameLbl, 0, 0, 1, 1)\n self.usernameLE = QtWidgets.QLineEdit(self.centralwidget)\n self.usernameLE.setMinimumSize(QtCore.QSize(153, 0))\n self.usernameLE.setMaximumSize(QtCore.QSize(153, 16777215))\n self.usernameLE.setObjectName(\"usernameLE\")\n self.gridLayout.addWidget(self.usernameLE, 1, 1, 1, 1)\n self.deleteBtn = QtWidgets.QPushButton(self.centralwidget)\n self.deleteBtn.setMaximumSize(QtCore.QSize(75, 16777215))\n self.deleteBtn.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)\n self.deleteBtn.setObjectName(\"deleteBtn\")\n self.gridLayout.addWidget(self.deleteBtn, 1, 2, 1, 1, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignLeft)\n self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n self.addBtn.clicked.connect(self.addAction)\n self.settingsBtn.clicked.connect(self.settings)\n self.deleteBtn.clicked.connect(self.remAction)\n self.connectBtn.clicked.connect(self.connectAction)\n self.upBtn.clicked.connect(self.upList)\n self.downBtn.clicked.connect(self.downList)\n self.organiserBtn.clicked.connect(self.organizerLink)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n MainWindow.setTabOrder(self.nameLE, self.usernameLE)\n MainWindow.setTabOrder(self.usernameLE, self.passwordLE)\n MainWindow.setTabOrder(self.passwordLE, 
self.listWidget)\n MainWindow.setTabOrder(self.listWidget, self.addBtn)\n MainWindow.setTabOrder(self.addBtn, self.connectBtn)\n MainWindow.setTabOrder(self.connectBtn, self.organiserBtn)\n\n self.error_msg = QtWidgets.QMessageBox()\n self.error_msg.setIcon(QtWidgets.QMessageBox.Critical)\n self.error_msg.setWindowTitle(df_windowTitle)\n self.error_msg.setWindowIcon(self.icon)\n\n self.setImages()\n self.reloadList()\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", df_windowTitle + \" - v\" + df_version))\n self.passwordLbl.setText(_translate(\"MainWindow\", \"Mot de passe :\"))\n #self.organiserBtn.setText(_translate(\"MainWindow\", \"Organiser\n #(nAiO)\"))\n self.organiserBtn.setToolTip(_translate(\"MainWindow\", \"Lance l'Organiser (nAiO)\"))\n #self.addBtn.setText(_translate(\"MainWindow\", \"Ajouter\"))\n self.addBtn.setToolTip(_translate(\"MainWindow\", \"Ajoute le compte dans le fichier sauvegarde\"))\n self.usernameLbl.setText(_translate(\"MainWindow\", \"Nom de compte :\"))\n self.nameLbl.setText(_translate(\"MainWindow\", \"Nom du raccourci :\"))\n self.connectBtn.setToolTip(_translate(\"MainWindow\", \"Connecte le/les compte(s) sélectionné(s)\"))\n #self.connectBtn.setText(_translate(\"MainWindow\", \"Connecter !\"))\n self.deleteBtn.setToolTip(_translate(\"MainWindow\", \"Supprime le compte sélectionné\"))\n #self.deleteBtn.setText(_translate(\"MainWindow\", \"Supprimer\"))\n #self.upBtn.setText(_translate(\"MainWindow\", \"▲\"))\n self.upBtn.setToolTip(_translate(\"MainWindow\", \"Monte le compte sélectionné dans la liste\"))\n #self.downBtn.setText(_translate(\"MainWindow\", \"▼\"))\n self.downBtn.setToolTip(_translate(\"MainWindow\", \"Descend le compte sélectionné dans la liste\"))\n\n def setImages(self):\n self.organiserBtn.setIcon(QtGui.QIcon('res/organiser.ico'))\n self.organiserBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.addBtn.setIcon(QtGui.QIcon('res/add.png'))\n self.addBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.deleteBtn.setIcon(QtGui.QIcon('res/rem.png'))\n self.deleteBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.connectBtn.setIcon(QtGui.QIcon('res/login.png'))\n self.connectBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.connectBtn.setIconSize(Qt.QSize(32,32))\n self.upBtn.setIcon(QtGui.QIcon('res/up.png'))\n self.upBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.downBtn.setIcon(QtGui.QIcon('res/down.png'))\n self.downBtn.setCursor(QtCore.Qt.PointingHandCursor)\n self.settingsBtn.setIcon(QtGui.QIcon('res/settings.png'))\n self.settingsBtn.setCursor(QtCore.Qt.PointingHandCursor)\n\n def settings(self):\n settings_window.show()\n settings_window.reload()\n\n def addAction(self):\n savelogsThread = saveLogs()\n\n nameLE_txt = self.nameLE.text()\n usernameLE_txt = self.usernameLE.text()\n passwordLE_txt = self.passwordLE.text()\n\n if nameLE_txt == \"\" or (' ' in nameLE_txt) or \\\n usernameLE_txt == \"\" or (' ' in usernameLE_txt) or \\\n passwordLE_txt == \"\" or (' ' in passwordLE_txt):\n self.error_msg.setText(\"L'un des champs requis est vide ou contient un espace !\")\n self.error_msg.exec_()\n else:\n savelogsThread.name = nameLE_txt\n savelogsThread.raw_username = usernameLE_txt\n savelogsThread.raw_password = passwordLE_txt\n\n savelogsThread.start()\n\n self.nameLE.clear()\n self.usernameLE.clear()\n self.passwordLE.clear()\n\n self.reloadList()\n def remAction(self):\n try:\n deletelogsThread = deleteLogs()\n deletelogsThread.name = 
self.listWidget.currentItem().text().lower()\n deletelogsThread.start()\n self.reloadList()\n except AttributeError:\n self.error_msg.setText(\"Aucun compte sélectionné !\")\n self.error_msg.exec_()\n\n def organizerLink(self):\n if not exists(r'Modules/Organizer.exe'):\n toaster_thread.message = \"Téléchargement d'Organizer en cours...\"\n toaster_thread.isShowing = True\n download(r\"http://update.naio.fr/v2/Organizer/1.4/Organizer.zip\", getcwd())\n with ZipFile('Organizer.zip', 'r') as zipObj:\n zipObj.extractall()\n remove(join(getcwd(), 'Organizer.zip'))\n toaster_thread.message = \"Téléchargement d'Organizer terminé !\"\n toaster_thread.isShowing = True\n Popen(join(getcwd(), r'Modules/Organizer.exe'), stdout=DEVNULL)\n\n def connectAction(self):\n connexionThread = logDof()\n\n for i in range(self.listWidget.count()):\n if self.listWidget.item(i).checkState():\n connexionThread.accNames.append(self.listWidget.item(i).text().lower())\n\n if len(connexionThread.accNames) > 0:\n if not exists(config[\"General\"][\"al_path\"]):\n self.error_msg.setText(\"Le chemin vers Ankama Launcher est invalide !\\nUne fenêtre vous demandant d'ouvrir le .exe d'Ankama Launcher va s'ouvrir...\")\n self.error_msg.exec_()\n fileName = QtWidgets.QFileDialog.getOpenFileName(caption=\"Sélectionnez Ankama Launcher\", directory=\"C:\\\\\",filter=\"Exe Files (*.exe)\")\n config.set(\"General\",\"al_path\",fileName[0].replace('/','\\\\'))\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n connexionThread.start()\n else:\n self.error_msg.setText(\"Aucun compte sélectionné !\")\n self.error_msg.exec_()\n\n def reloadList(self):\n self.listWidget.clear()\n for acc in config['Accounts']:\n self.listWidget.createItems(upper_str(acc))\n\n if config[\"General\"][\"retro_mode\"] == \"True\":\n self.nameLbl.setStyleSheet(\"QLabel{color:black;}\")\n self.usernameLbl.setStyleSheet(\"QLabel{color:black;}\")\n self.passwordLbl.setStyleSheet(\"QLabel{color:black;}\")\n MainWindow.setStyleSheet(\"\"\"\n QPushButton{\n background-color:transparent;\n }\n\n QListWidget, QLineEdit{\n background-color: transparent;\n border-style: outset;\n border-color: transparent;\n }\n\n #MainWindow{\n background-image:url('res/dr/bg.jpg');\n }\n \"\"\")\n self.listWidget.setStyleSheet(\"\"\"\n QListWidget::indicator:unchecked{\n image: url('res/dr/checkbox_uc.jpg');\n }\n QListWidget::indicator:checked{\n image: url('res/dr/checkbox_c.jpg');\n }\n QListWidget::item{\n color:black;\n }\n \"\"\")\n else:\n self.nameLbl.setStyleSheet(\"QLabel{color:white;}\")\n self.usernameLbl.setStyleSheet(\"QLabel{color:white;}\")\n self.passwordLbl.setStyleSheet(\"QLabel{color:white;}\")\n MainWindow.setStyleSheet(\"\"\"\n QPushButton{\n background-color:transparent;\n }\n\n QListWidget, QLineEdit{\n background-color: transparent;\n border-style: outset;\n border-color: transparent;\n }\n #MainWindow{\n background-image:url('res/d2/bg.jpg');\n }\n \"\"\")\n self.listWidget.setStyleSheet(\"\"\"\n QListWidget::indicator:unchecked{\n image: url('res/d2/checkbox_uc.jpg');\n }\n QListWidget::indicator:checked{\n image: url('res/d2/checkbox_c.jpg');\n }\n QListWidget::item{\n color:white;\n }\n \"\"\")\n\n def upList(self):\n try:\n name = self.listWidget.currentItem().text().lower()\n accounts = list(config.items('Accounts'))\n for i in range(len(accounts)):\n if accounts[i][0] == name:\n id = i\n break\n if id > 0:\n accounts[id], accounts[id - 1] = accounts[id - 1], accounts[id]\n config.remove_section('Accounts')\n 
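# rebuild the Accounts section so the swapped order is written back to config.ini\n                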
config.add_section(\"Accounts\")\n                for i in range(len(accounts)):\n                    config.set(\"Accounts\", accounts[i][0], accounts[i][1])\n                with open('config.ini', 'w') as configfile:\n                    config.write(configfile)\n                self.reloadList()\n        except AttributeError:\n            self.error_msg.setText(\"Aucun compte sélectionné !\")\n            self.error_msg.exec_()\n    def downList(self):\n        try:\n            name = self.listWidget.currentItem().text().lower()\n            accounts = list(config.items('Accounts'))\n            for i in range(len(accounts)):\n                if accounts[i][0] == name:\n                    id = i\n                    break\n            if id < len(accounts) - 1:\n                accounts[id], accounts[id + 1] = accounts[id + 1], accounts[id]\n                config.remove_section('Accounts')\n                config.add_section(\"Accounts\")\n                for i in range(len(accounts)):\n                    config.set(\"Accounts\", accounts[i][0], accounts[i][1])\n                with open('config.ini', 'w') as configfile:\n                    config.write(configfile)\n                self.reloadList()\n        except AttributeError:\n            self.error_msg.setText(\"Aucun compte sélectionné !\")\n            self.error_msg.exec_()\n    \nclass SettingsWindow(QtWidgets.QWidget):\n    def __init__(self, parent=None):\n        super(SettingsWindow,self).__init__(parent=parent)\n\n        self.width = 255\n        self.height = 80\n        self.left = screenSize.width() / 2 - self.width / 2\n        self.top = screenSize.height() / 2 - self.height / 2\n\n        self.setup()\n\n    def setup(self):\n        \"Initialize the window\"\n        self.setObjectName(\"SettingsWindow\")\n        self.setWindowIcon(QtGui.QIcon(\"res/icon.ico\"))\n        self.setWindowTitle(df_windowTitle + \" - v\" + df_version)\n        self.setGeometry(self.left, self.top, self.width, self.height)\n        self.setFixedSize(self.size())\n        \n        self.createGrid()\n        self.constructGrid()\n        self.setupEvents()\n\n        self.reload()\n\n        self.setLayout(self.grid)\n\n    def createGrid(self):\n        \"Create the grid\"\n        self.title = QtWidgets.QLabel(\"Paramètres\")\n        self.title.setAlignment(QtCore.Qt.AlignRight)\n        font = QtGui.QFont()\n        font.setPointSize(8)\n        font.setBold(True)\n        font.setWeight(75)\n        self.title.setFont(font)\n\n        self.image = QtWidgets.QLabel(self)\n        pixmap = QtGui.QPixmap('res/settings.png')\n        self.image.setPixmap(pixmap)\n\n        self.stayLogCB = QtWidgets.QCheckBox()\n        self.stayLogCB.setObjectName(\"stayLogCB\")\n        self.stayLogCB.setToolTip(\"Reste connecté à Ankama Launcher après la connexion à Dofus\")\n        self.stayLogCB.setText(\"Rester co\")\n        self.stayLogCB.setCursor(QtCore.Qt.PointingHandCursor)\n        \n        self.upperAccountsCB = QtWidgets.QCheckBox()\n        self.upperAccountsCB.setObjectName(\"upperAccountsCB\")\n        self.upperAccountsCB.setToolTip(\"Met une majuscule au début du nom dans la liste des comptes\")\n        self.upperAccountsCB.setText(\"Majuscule au début\")\n        self.upperAccountsCB.setCursor(QtCore.Qt.PointingHandCursor)\n\n        self.retroModeCB = QtWidgets.QCheckBox()\n        self.retroModeCB.setObjectName(\"retroModeCB\")\n        self.retroModeCB.setToolTip(\"Pour Dofus Retro ou non\")\n        self.retroModeCB.setText(\"Dofus Retro\")\n        self.retroModeCB.setCursor(QtCore.Qt.PointingHandCursor)\n\n        self.grid = QtWidgets.QGridLayout()\n\n    def constructGrid(self):\n        \"Display the widgets\"\n        self.grid.addWidget(self.title,0,0)\n        self.grid.addWidget(self.image,0,1)\n        self.grid.addWidget(self.stayLogCB,1,0)\n        self.grid.addWidget(self.upperAccountsCB,1,1)\n        self.grid.addWidget(self.retroModeCB,2,0)\n\n    def setupEvents(self):\n        \"Wire up the button events\"\n        self.stayLogCB.clicked.connect(self.staylogAction)\n        self.upperAccountsCB.clicked.connect(self.upperAccountsAction)\n        self.retroModeCB.clicked.connect(self.retroModeAction)\n\n    def staylogAction(self):\n        
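# persist the stay-logged checkbox state to config.ini\n        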
config.set(\"General\",\"stay_logged\", str(self.stayLogCB.isChecked()))\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n def upperAccountsAction(self):\n config.set(\"General\",\"upper_accounts\", str(self.upperAccountsCB.isChecked()))\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n self.reload()\n def retroModeAction(self):\n config.set(\"General\",\"retro_mode\", str(self.retroModeCB.isChecked()))\n with open('config.ini', 'w') as configfile:\n config.write(configfile)\n self.reload()\n\n def reload(self):\n if config[\"General\"][\"retro_mode\"] == \"True\":\n self.setStyleSheet(\"\"\"\n QCheckBox::indicator:unchecked{\n image: url('res/dr/checkbox_uc.jpg');\n }\n QCheckBox::indicator:checked{\n image: url('res/dr/checkbox_c.jpg');\n }\n\n #SettingsWindow{\n background-color: #d5cfab;\n }\n \"\"\")\n else:\n self.setStyleSheet(\"\"\"\n QCheckBox::indicator:unchecked{\n image: url('res/d2/checkbox_uc.jpg');\n }\n QCheckBox::indicator:checked{\n image: url('res/d2/checkbox_c.jpg');\n }\n\n QLabel,QCheckBox{\n color:white;\n }\n\n #SettingsWindow{\n background-color: #181119;\n }\n \"\"\")\n\n self.stayLogCB.setChecked(False)\n if config[\"General\"][\"stay_logged\"] == \"True\":\n self.stayLogCB.setChecked(True)\n self.upperAccountsCB.setChecked(False)\n if config[\"General\"][\"upper_accounts\"] == \"True\":\n self.upperAccountsCB.setChecked(True)\n self.retroModeCB.setChecked(False)\n if config[\"General\"][\"retro_mode\"] == \"True\":\n self.retroModeCB.setChecked(True)\n\n ui.reloadList()\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(argv)\n screenSize = app.desktop().screenGeometry()\n MainWindow = DofLogWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n settings_window = SettingsWindow()\n MainWindow.show()\n exit(app.exec_())\n","sub_path":"DofLog/DofLog_UI.py","file_name":"DofLog_UI.py","file_ext":"py","file_size_in_byte":21964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"453087161","text":"# Problem Set 2, hangman.py\n# Name: \n# Collaborators:\n# Time spent:\n\n# Hangman Game\n# -----------------------------------\n# Helper code\n# You don't need to understand this helper code,\n# but you will have to know how to use the functions\n# (so be sure to read the docstrings!)\nimport random\nimport string\n\nWORDLIST_FILENAME = \"words.txt\"\n\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. 
Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print(\" \", len(wordlist), \"words loaded.\")\n return wordlist\n\n\n\ndef choose_word(wordlist):\n \"\"\"\n wordlist (list): list of words (strings)\n \n Returns a word from wordlist at random\n \"\"\"\n return random.choice(wordlist)\n\n# end of helper code\n\n# -----------------------------------\n\n# Load the list of words into the variable wordlist\n# so that it can be accessed from anywhere in the program\nwordlist = load_words()\n\n\ndef is_word_guessed(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing; assumes all letters are\n lowercase\n letters_guessed: list (of letters), which letters have been guessed so far;\n assumes that all letters are lowercase\n returns: boolean, True if all the letters of secret_word are in \n letters_guessed;\n False otherwise\n '''\n for l in secret_word:\n if l not in letters_guessed:\n return False\n else:\n continue\n return True\n\n\n\ndef get_guessed_word(secret_word, letters_guessed):\n '''\n secret_word: string, the word the user is guessing\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string, comprised of letters, underscores (_), and spaces that \n represents which letters in secret_word have been guessed so far.\n '''\n word_guessed = ''\n for l in secret_word:\n if l in letters_guessed:\n word_guessed += l + ' '\n else:\n word_guessed += '_ '\n return word_guessed\n\n\n\ndef get_available_letters(letters_guessed):\n '''\n letters_guessed: list (of letters), which letters have been guessed so far\n returns: string (of letters), comprised of letters that represents which \n letters have not yet been guessed.\n '''\n alphabet_list = list(string.ascii_lowercase)\n for l in letters_guessed:\n if l in alphabet_list:\n alphabet_list.pop(alphabet_list.index(l))\n else:\n continue\n return ', '.join(alphabet_list)\n\n\ndef hangman(secret_word):\n '''\n secret_word: string, the secret word to guess.\n \n Starts up an interactive game of Hangman.\n \n * At the start of the game, let the user know how many \n letters the secret_word contains and how many guesses s/he starts with.\n \n * The user should start with 6 guesses\n\n * Before each round, you should display to the user how many guesses\n s/he has left and the letters that the user has not yet guessed.\n \n * Ask the user to supply one guess per round. 
Remember to make\n sure that the user puts in a letter!\n \n * The user should receive feedback immediately after each guess \n about whether their guess appears in the computer's word.\n\n * After each guess, you should display to the user the \n partially guessed word so far.\n \n Follows the other limitations detailed in the problem write-up.\n '''\n warnings = 3\n guesses = 6\n print(f'The secret word is {len(secret_word)} letters long!')\n letters_guessed = []\n while True:\n print(f'You have {guesses} left!')\n print('Your available letters are: ', \n get_available_letters(letters_guessed))\n print(f'{get_guessed_word(secret_word, letters_guessed)}')\n letter = input('Enter your guess:\\n>').lower()\n if letter in get_available_letters(letters_guessed):\n letters_guessed.append(letter)\n else:\n print('Please choose a letter from the list')\n \n warnings -= 1\n if warnings <= 0:\n guesses -= 1\n elif warnings > 0:\n print(f'Be careful, you have {warnings} left.')\n guesses -= 1\n if guesses == 0:\n print('Sorry you\\'ve ran out of guesses!')\n print(f'The secret word was {secret_word}')\n break\n continue\n if letter not in secret_word:\n guesses -= 1\n print(f'Sorry! {letter} is not in the secret word.')\n else:\n print(f'Yes! {letter} is in the secret word!')\n print('--------------------------------------')\n if is_word_guessed(secret_word, letters_guessed) is True:\n print(f'Correct! The secret word was {secret_word}')\n print(f'You had {guesses} left!')\n break\n elif guesses == 0:\n print('Sorry you\\'ve ran out of guesses!')\n print(f'The secret word was {secret_word}')\n break\n else:\n pass\n\n\n# When you've completed your hangman function, scroll down to the bottom\n# of the file and uncomment the first two lines to test\n#(hint: you might want to pick your own\n# secret_word while you're doing your own testing)\n\n\n# -----------------------------------\n\n\n\ndef match_with_gaps(my_word, other_word, letters_guessed):\n '''\n my_word: string with _ characters, current guess of secret word\n other_word: string, regular English word\n returns: boolean, True if all the actual letters of my_word match the \n corresponding letters of other_word, or the letter is the special symbol\n _ , and my_word and other_word are of the same length;\n False otherwise: \n '''\n i = 0\n for char1 in my_word.replace(' ', ''):\n if char1 != other_word[i] and char1 != '_':\n return False\n elif char1 == '_' and other_word[i] in letters_guessed:\n return False\n i += 1\n return True\n\n\n\ndef show_possible_matches(my_word, letters_guessed):\n '''\n my_word: string with _ characters, current guess of secret word\n returns: nothing, but should print out every word in wordlist that matches \n my_word Keep in mind that in hangman when a letter is guessed, all the\n positions at which that letter occurs in the secret word are revealed.\n Therefore, the hidden letter(_ ) cannot be one of the letters in the word\n that has already been revealed.\n '''\n wl = []\n for word in wordlist:\n if len(word) == len(my_word.replace(' ', '')):\n if match_with_gaps(my_word, word, letters_guessed) is True:\n wl.append(word)\n print(wl)\n\n\n\ndef hangman_with_hints(secret_word):\n '''\n secret_word: string, the secret word to guess.\n \n Starts up an interactive game of Hangman.\n \n * At the start of the game, let the user know how many \n letters the secret_word contains and how many guesses s/he starts with.\n \n * The user should start with 6 guesses\n \n * Before each round, you should display to the user how 
many guesses\n s/he has left and the letters that the user has not yet guessed.\n \n * Ask the user to supply one guess per round. Make sure to check that the \n user guesses a letter\n \n * The user should receive feedback immediately after each guess \n about whether their guess appears in the computer's word.\n\n * After each guess, you should display to the user the \n partially guessed word so far.\n \n * If the guess is the symbol *, print out all words in wordlist that\n matches the current guessed word. \n \n Follows the other limitations detailed in the problem write-up.\n '''\n warnings = 3\n guesses = 6\n print(f'The secret word is {len(secret_word)} letters long!')\n letters_guessed = []\n while True:\n print(f'You have {guesses} left!')\n print(f'Your available letters are: ',\n get_available_letters(letters_guessed))\n print(f'{get_guessed_word(secret_word, letters_guessed)}')\n print('Enter \\'*\\' to see all possible matches in words list')\n letter = input('Enter your guess:\\n>').lower()\n if letter == '*':\n show_possible_matches(get_guessed_word(secret_word, \n letters_guessed), letters_guessed)\n continue\n if letter in get_available_letters(letters_guessed):\n letters_guessed.append(letter)\n else:\n print('Please choose a letter from the list')\n \n warnings -= 1\n if warnings <= 0:\n guesses -= 1\n elif warnings > 0:\n print(f'Be careful, you have {warnings} left.')\n guesses -= 1\n if guesses == 0:\n print('Sorry you\\'ve ran out of guesses!')\n print(f'The secret word was {secret_word}')\n break\n continue\n if letter not in secret_word:\n guesses -= 1\n print(f'Sorry! {letter} is not in the secret word.')\n else:\n print(f'Yes! {letter} is in the secret word!')\n print('--------------------------------------')\n if is_word_guessed(secret_word, letters_guessed) is True:\n print(f'Correct! The secret word was {secret_word}')\n print(f'You had {guesses} left!')\n break\n elif guesses == 0:\n print('Sorry you\\'ve ran out of guesses!')\n print(f'The secret word was {secret_word}')\n break\n\n\n\n# When you've completed your hangman_with_hint function, comment the two similar\n# lines above that were used to run the hangman function, and then uncomment\n# these two lines and run this file to test!\n# Hint: You might want to pick your own secret_word while you're testing.\n\n\nif __name__ == \"__main__\":\n secret_word = choose_word(wordlist)\n hangman_with_hints(secret_word)\n while True:\n print('Game Over. Would you like to play again? 
\\'y\\' ',\n          'for yes or any key for no')\n        play_again = input(\">\")\n        if play_again.lower() == 'y':\n            secret_word = choose_word(wordlist)\n            hangman_with_hints(secret_word)\n        else:\n            break\n\n","sub_path":"ps2/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":10547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"374445123","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, ConversationHandler, RegexHandler\nfrom app.db import create_poll, end_poll, vote_poll, get_votes, get_poll_options\n\n\npobj = {}\n\ndef pollbegin(bot, update):\n    global pobj\n    pobj[update.message.chat_id] = {} \n    update.message.reply_text('What is your poll question?')\n    return 1\n\ndef pollquestion(bot,update):\n    global pobj\n    pobj[update.message.chat_id][\"question\"] = update.message.text\n    update.message.reply_text('Enter the options on new lines: ')\n    return 2\n\ndef polloptions(bot,update):\n    global pobj\n    chat_id = update.message.chat_id\n    options = update.message.text.split(\"\\n\")\n    options_str = \"\\n\".join([str(1+options.index(op)) + \") \" + op for op in options])\n\n    update.message.reply_text('Call for Vote : \\n'+\n        pobj[update.message.chat_id][\"question\"] + '\\n' + options_str + \n        '\\nVote by using:\\n pls vote 2\\nShow votes:\\n pls vote\\nEnd vote and show results:\\n pls vote end')\n    create_poll(chat_id, pobj[chat_id][\"question\"], options)\n\n    return ConversationHandler.END\n\n\ndef pollcancel(bot,update):\n    chat_id = update.message.chat_id\n    end_poll(chat_id)\n    update.message.reply_text('Poll cancelled.')\n    return ConversationHandler.END\n\npoll_handler = ConversationHandler(\n    entry_points = [RegexHandler(\"^(?i)(pls poll)$\", pollbegin)],\n\n    states = {\n        1 : [MessageHandler(Filters.text, pollquestion)],\n        2 : [MessageHandler(Filters.text,polloptions)]\n    },\n\n    fallbacks = [RegexHandler(\"^(pls end|pls cancel)$\", pollcancel)]\n\n)\n\ndef poll_extras_handler(bot,update,msg_list):\n    chat_id = update.message.chat_id\n    username = update.message.from_user.username\n    if msg_list[1] == \"vote\":\n        \n\n        # view poll\n        if len(msg_list) == 2:\n            view_poll(bot,update)\n        \n        # vote end or vote \n        elif len(msg_list) > 2 and msg_list[2]==\"end\":\n            view_poll(bot, update)\n            end_poll(chat_id)\n            update.message.reply_text(\"Poll has been ended.\")\n        \n        # add vote\n        else:\n            n = int(msg_list[2]) -1\n            options = get_poll_options(chat_id)\n            if n in range(0,len(options)): \n                vote_poll(chat_id, username, 1+n)\n                update.message.reply_text(\"Voted!\")\n            else:\n                update.message.reply_text(\"Option out of range!\")\n\n\ndef view_poll(bot,update):\n    votes = get_votes(update.message.chat_id)\n    votes_str = \"Votes :\\n\"\n    for option in votes:\n        index, option, users = option\n        votes_str += str(index) + \") \" + option\n        if len(users) > 0:\n            votes_str += \" (\"+ \", \".join(users) +\")\"\n        votes_str += \"\\n\"\n    update.message.reply_text(votes_str)\n\n    ","sub_path":"app/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
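# A minimal sketch of wiring poll_handler (defined in the record above) into a running bot,
# assuming the same pre-v13 python-telegram-bot API (Updater/RegexHandler) the record itself
# imports; the "<BOT_TOKEN>" value is a placeholder, not from the original source.
from telegram.ext import Updater
from app.poll import poll_handler

updater = Updater(token="<BOT_TOKEN>")        # placeholder token, supply your own
updater.dispatcher.add_handler(poll_handler)  # register the conversation handler
updater.start_polling()                       # begin fetching updates
updater.idle()                                # block until interrupted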
+{"seq_id":"398408544","text":"import os\nimport sys\nimport pygame\n\n\ndef check_any_joystick() -> bool:\n    \"\"\"\n    Standalone function that checks whether at least one joystick is present\n    :return: True if a joystick is connected, False if not\n    \"\"\"\n    return pygame.joystick.get_count() != 0\n\n\ndef get_joystick() -> pygame.joystick.Joystick:\n    \"\"\"\n    Standalone function that returns the single connected joystick\n    :return: The joystick\n    \"\"\"\n    joystick = pygame.joystick.Joystick(0)\n    joystick.init()\n    return joystick\n\n\ndef load(filename):\n    image = load_image(filename, 'assets\\\\tiles')\n    image = pygame.transform.scale(image, (TILE_SIZE, TILE_SIZE))\n    return image\n\n\ndef load_image(filename: str, path_to_folder=\"assets\", colorkey=None):\n    fullname = os.path.join(path_to_folder, filename)\n    # if the file does not exist, bail out\n    if not os.path.isfile(fullname):\n        print(f\"ERROR! Could not load image {filename}\")\n        print(f\"At path {fullname}\")\n        sys.exit(-1)\n    image = pygame.image.load(fullname)\n\n    if colorkey is not None:\n        image = image.convert()\n        if colorkey == -1:\n            colorkey = image.get_at((0, 0))\n        image.set_colorkey(colorkey)\n    else:\n        image = image.convert_alpha()\n    return image\n","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"318213139","text":"'''\nGood morning! Here's your coding interview problem for today.\n\nThis problem was recently asked by Google.\n\nGiven a list of numbers and a number k, return whether any two numbers from the list add up to k.\n\nFor example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n\nBonus: Can you do this in one pass?\n'''\n\nnumber_list = [10, 15, 3, 7]\nk = 17\n\n\n# First attempt.\ndef find_k(number_list, k):\n    for i in range(0, len(number_list)):\n        curr = number_list[i]\n        tmp = number_list.copy()\n        tmp.pop(i)\n        for j in range(0, len(tmp)):\n            sum = curr + tmp[j]\n            if sum == k:\n                return True\n    return False\n\ndef sum_exists(numbers, target):\n    differences = {target - number for number in numbers}\n    return bool(differences.intersection(numbers))\n\n\n\n","sub_path":"11-25.py","file_name":"11-25.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
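# The record above asks (as a bonus) for a one-pass solution. sum_exists makes one pass to
# build the set but has an edge case: for numbers=[5], target=10 it returns True even though
# 5 can only be used once. A minimal sketch of a true one-pass variant that avoids this,
# assuming nothing beyond the problem statement:

def sum_exists_one_pass(numbers, target):
    seen = set()                   # values encountered so far
    for n in numbers:
        if target - n in seen:     # the complement was already seen -> a valid pair exists
            return True
        seen.add(n)
    return False

# sum_exists_one_pass([10, 15, 3, 7], 17) -> True; sum_exists_one_pass([5], 10) -> False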
+{"seq_id":"563092931","text":"# coding=utf-8\nimport time\nimport unittest\nimport HTMLTestRunner\n\nfrom app.student.homework.object_page.home_page import HomePage\nfrom app.student.homework.object_page.homework_page import Homework\nfrom app.student.homework.object_page.guess_word_page import GuessWord\nfrom app.student.login.object_page.login_page import LoginPage\nfrom app.student.login.test_data.login_failed_toast import VALID_LOGIN_TOAST\nfrom app.student.homework.test_data.homework_title_type_yb import GetVariable as gv\nfrom conf.base_page import BasePage\nfrom utils.toast_find import Toast\nfrom utils.yb_dict import no_yb_operate_word\nfrom conf.decorator import setup, teardown, testcase, teststeps\n\n\nclass Games(unittest.TestCase):\n    \"\"\"Guess-word game - yb font\"\"\"\n\n    @classmethod\n    @setup\n    def setUp(cls):\n        \"\"\"Launch the app\"\"\"\n        cls.login_page = LoginPage()\n        cls.home_page = HomePage()\n        cls.homework = Homework()\n        cls.guess_word = GuessWord()\n\n    @classmethod\n    @teardown\n    def tearDown(cls):\n        pass\n\n    @testcase\n    def test_guess_word_noyb(self):\n        self.login_page.app_status()  # check the app's current state\n\n        if self.home_page.wait_check_page():\n            print(\"Entered the main screen:\")\n            var = self.home_page.homework_count()\n            if gv.GUE_WOR in var[0]:  # the homework exists\n                for i in range(0, len(var[0])):\n                    if var[0][i] == gv.GUE_WOR:\n                        var[1][i].click()\n                        time.sleep(3)\n                        count = self.homework.games_count(0, '猜词游戏')\n                        self.game_exist(count[0])\n                        if count[1] == 10:\n                            game_count = self.homework.swipe_screen('猜词游戏')\n                            self.game_exist(game_count)\n                    else:\n                        print('Current page does not have this homework')\n                game = self.home_page.swipe(var[0], gv.GUE_WOR, '猜词游戏')  # page through the homework list\n                self.game_exist(game)\n                print('Game Over')\n            else:\n                try:\n                    Toast().find_toast(VALID_LOGIN_TOAST.login_failed())\n                except Exception:\n                    print(\"Did not enter the main screen\")\n                    raise\n\n    @teststeps\n    def game_exist(self, count):\n        \"\"\"Concrete play steps for the guess-word game, plus the screen swipe afterwards\"\"\"\n        if len(count) != 0:\n            for index_1 in count:\n                print('####################################################')\n                print('Found mini game', index_1)\n                homework_type = self.homework.tv_testbank_name(index_1)  # get the mini-game mode\n                self.homework.games_type()[index_1].click()  # enter the mini game\n                self.diff_type_no(homework_type)  # play flow for the different mini-game modes\n\n                # self.vocab_select.result_page()  # result page\n                # self.vocab_select.result_detail_page()  # result page: View Answers button\n                # self.vocab_select.study_again(homework_type)  # result page: Retry Mistakes button\n\n                print('####################################################')\n                self.homework.back_up_button()\n                self.homework.back_up_button()  # back to the main screen\n        else:\n            print('No guess-word mini games found')\n\n    @teststeps\n    def diff_type_no(self, tpe):\n        \"\"\"Choose the play method for each mini-game mode\"\"\"\n        print(tpe)\n        if tpe == '有发音':\n            self.voice_pattern_no()\n        elif tpe == '无发音':\n            self.no_voice_pattern_no()\n\n    @teststeps\n    def voice_pattern_no(self):\n        \"\"\"Play flow for the guess-word game, with-pronunciation mode\"\"\"\n        if self.guess_word.wait_check_page():  # page checkpoint\n            rate = self.guess_word.rate()\n            for i in range(int(rate)):\n                content = self.guess_word.chinese()  # the displayed question content\n                letters = self.guess_word.keyboard()  # the on-screen keyboard letters\n                if len(content) == 3:\n                    value = no_yb_operate_word(content)\n                    if len(value) == 1:\n                        for k in range(len(letters)):\n                            if letters[k].text == value:\n                                letters[k].click()  # click the matching keyboard letter\n                                break\n                    else:\n                        for k in range(len(value)):\n                            for z in range(len(letters)):\n                                if letters[z].text == value[k]:\n                                    letters[z].click()  # click the matching keyboard letter\n                                    break\n                else:\n                    for j in range(len(content)):\n                        for k in range(len(letters)):\n                            if letters[k].text == content[j]:\n                                letters[k].click()  # click the matching keyboard letter\n                                break\n                time.sleep(2)\n\n    @teststeps\n    def no_voice_pattern_no(self):\n        \"\"\"Play flow for the guess-word game, no-pronunciation mode\"\"\"\n        if self.guess_word.wait_check_page():  # page checkpoint\n            rate = self.guess_word.rate()\n            for i in range(int(rate)):\n                content = self.guess_word.chinese()  # the displayed question content\n                letters = self.guess_word.keyboard()  # the on-screen keyboard letters\n                if len(content) == 3:\n                    value = no_yb_operate_word(content)\n                    if len(value) == 1:\n                        for k in range(len(letters)):\n                            if letters[k].text == value:\n                                letters[k].click()  # click the matching keyboard letter\n                                break\n                    else:\n                        for k in range(len(value)):\n                            for z in range(len(letters)):\n                                if letters[z].text == value[k]:\n                                    letters[z].click()  # click the matching keyboard letter\n                                    break\n                else:\n                    for j in range(len(content)):\n                        for k in range(len(letters)):\n                            if letters[k].text == content[j]:\n                                letters[k].click()  # click the matching keyboard letter\n                                break\n                time.sleep(2)\n\n\nif __name__ == '__main__':\n    suite = unittest.TestSuite()\n    suite.addTest(Games('test_guess_word_noyb'))\n\n    report_title = u'Automated test execution report'\n    desc = 'Demonstrates the restyled HTMLTestRunner'\n    timestr = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n    filename = r'C:/Users/V/Desktop/Testreport/Result_' + timestr + '.html'\n\n    fp = open(filename, 'wb')\n    runner = HTMLTestRunner.HTMLTestRunner(\n        stream=fp,\n        title=report_title,\n        description=desc)\n    runner.run(suite)\n    fp.close()\n","sub_path":"app/student/homework/test_cases/yb_script/test003_guess_word_noyb.py","file_name":"test003_guess_word_noyb.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
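# The test record above repeats the same scan-the-keyboard-and-click loop six times.
# A hedged sketch of a helper that could replace those loops; the elements in `letters`
# are assumed to expose only the .text attribute and .click() method used in the original:

def tap_letter(letters, ch):
    """Click the on-screen key whose label equals ch; return True if it was found."""
    for key in letters:
        if key.text == ch:
            key.click()
            return True
    return False

# e.g. for ch in value: tap_letter(letters, ch)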
+{"seq_id":"350683151","text":"\"\"\"\nThree ways to select an element from a drop-down list\n\"\"\"\nfrom selenium import webdriver\nimport unittest\nimport time\nfrom selenium.webdriver.support.ui import Select\n\n\nclass VisitSogouByChrome(unittest.TestCase):\n    def setUp(self):\n        # launch the Chrome browser\n        self.driver = webdriver.Chrome(executable_path=\"D:\\\\chromedriver\")\n\n    def test_operateDropList(self):\n        url = r\"C:\\Users\\Mr雷的电脑\\Desktop\\selenium_python\\test_html\\select_text.html\"\n        self.driver.get(url)\n        select_element = Select(self.driver.find_element_by_xpath(\"//select\"))\n        print(select_element.first_selected_option.text)\n        # get all option elements\n        all_options = select_element.options\n        # print how many options there are\n        print(len(all_options))\n        \"\"\"\n        is_enabled() tells whether the element can be operated\n        is_selected() tells whether it is currently selected\n        \"\"\"\n        # Method 1: select by index, starting from 0\n        select_element.select_by_index(1)\n        print(select_element.all_selected_options[0].text)\n        time.sleep(2)\n        # Method 2: select the option by its visible text\n        select_element.select_by_visible_text(\"科比\")\n        self.assertEqual(select_element.all_selected_options[0].text, u\"科比\")\n\n    def tearDown(self):\n        # quit the Chrome browser\n        self.driver.quit()\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"webdriver_api/operate_droplist.py","file_name":"operate_droplist.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"640047704","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom index.models import Icon\nfrom django.core.cache import cache\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.http import require_http_methods\nimport json\n# Create your views here.\n# called when the page is opened\n'''\n    code = 1 means the query succeeded and returned data\n    code = -1 means the database query returned no data\n    code = -2 means the database query raised an exception\n'''\ndef index(request):\n    list = {}\n    array = []\n    try:\n        icons = Icon.objects.filter(iconId = 1)\n        if len(icons) > 0:\n            list['code'] = 1\n            list['message'] = '查询正常'\n            for icon in icons:\n                array.append(icon)\n            list['data'] = array\n        else:\n            list['code'] = -1\n            list['message'] = '暂无数据'\n        icons.close()\n        return render(request, 'pages/index.html', list)\n    except:\n        list['code'] = -2\n        list['message'] = '数据查询异常'\n        return render(request, 'pages/index.html', list)\n    \n'''\n    Fetch the home page info and assemble it into a dict\n'''\n@csrf_exempt\n@require_http_methods([\"POST\"])\ndef indexInfo(request):\n    list = {}\n    array = []\n    try:\n        icons = Icon.objects.filter(iconId = 1)\n        if len(icons) > 0:\n            list['code'] = 1\n            list['message'] = '查询正常'\n            for icon in icons:\n                print(icon)\n                array.append(Icon.getIconList(icon))\n            list['data'] = array\n        else:\n            list['code'] = -1\n            list['message'] = '查询失败'\n        return HttpResponse(json.dumps(list), content_type=\"application/json\")\n    except Exception as e:\n        list['code'] = -2\n        list['message'] = '暂无数据'\n        return HttpResponse(json.dumps(list), content_type=\"application/json\")","sub_path":"index/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
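# Both views in the record above build a dict and serialize it by hand with json.dumps +
# HttpResponse. A sketch of the same response using Django's built-in JsonResponse
# (django.http.JsonResponse, a real shortcut); payload keys mirror the record, and naming
# the dict `payload` avoids shadowing the builtin `list` as the original does:

from django.http import JsonResponse

def index_info_response(payload):
    # JsonResponse serializes the dict and sets content_type="application/json" for us
    return JsonResponse(payload)

# e.g. index_info_response({'code': 1, 'message': '查询正常', 'data': []})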
+{"seq_id":"340184104","text":"import torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom runnorm import RunNorm\n\n\ndef leaky_tanh(input, slope=0.01):\n    return th.tanh(input * (2 / 3)) * 1.7159 + input * slope\n\n\nclass Model(nn.Module):\n    def __init__(self, input_size, output_size, hidden_size=None, window_size=10, input_dropout=0.5, input_noise=0.1,\n                 activation=F.leaky_relu, **kwargs):\n        super().__init__()\n        if (hidden_size is None):\n            hidden_size = input_size*window_size // 2\n\n        self.input_size = input_size\n        self.output_size = output_size\n        self.hidden_size = hidden_size\n        self.window_size = window_size\n        self.input_dropout = input_dropout\n        self.input_noise = input_noise\n        self.activation = activation\n        \n        self.fc1 = nn.Linear(window_size*input_size, hidden_size, bias=True)\n        self.fc2 = nn.Linear(hidden_size, hidden_size // 4, bias=True)\n        self.fc3 = nn.Linear(hidden_size // 4, output_size, bias=True)\n\n        self.input_frames = []\n\n    def reset(self):\n        self.input_frames.clear()\n\n    @property\n    def sequential(self):\n        return True\n\n    def forward(self, inputs):\n        one_sample_mode = False\n        if (inputs.dim() <= 2):\n            one_sample_mode = True\n            inputs = inputs.unsqueeze(dim=0)\n        \n        outputs = []\n        for i in range(inputs.size(0)):\n            # Get next frame\n            frame = inputs[i]\n            \n            # Apply dropout if needed\n            if (self.input_dropout is not None):\n                frame = F.dropout(frame, p=self.input_dropout, training=self.training)\n\n            # Add white noise if needed (only at training time)\n            if (self.input_noise is not None) and self.training:\n                frame = frame + th.randn_like(frame) * self.input_noise\n\n            # Get last frames of window_size\n            self.input_frames.append(frame)\n            if (len(self.input_frames) > self.window_size):\n                self.input_frames.pop(0)\n            \n            if (len(self.input_frames) < self.window_size):\n                continue\n            \n            x = th.stack(self.input_frames, dim=-1)\n            x = x.view(x.size(0), -1)\n\n            # Pass x through model\n            x = self.fc1(x)\n            x = self.activation(x)\n            \n            # Apply dropout if needed\n            if (self.input_dropout is not None):\n                x = F.dropout(x, p=(self.input_dropout/4), training=self.training)\n\n            # Add white noise if needed (only at training time)\n            if (self.input_noise is not None) and self.training:\n                x = x + th.randn_like(x) * (self.input_noise / 2)\n            \n            x = self.fc2(x)\n            x = self.activation(x)\n\n            yhat = self.fc3(x)\n            outputs.append(yhat)\n        \n        # Combine sequence of outputs\n        outputs = th.stack(outputs, dim=0)\n        if one_sample_mode:\n            outputs = outputs.squeeze(dim=0)\n        return outputs\n\n    def extra_repr(self):\n        return '''input_size={}, output_size={}, hidden_size={}, window_size={},\ninput_dropout={}, input_noise={},\nactivation={}'''.format(self.input_size, self.output_size, self.hidden_size,\n                        self.window_size, self.input_dropout, self.input_noise, str(self.activation))\n","sub_path":"models/winfc3.py","file_name":"winfc3.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"269661583","text":"import re\n\nimport pandas as pd \nimport numpy as np \n\nfrom awesome_print import ap \n\nCUTOFF = 1\n\ndef parse_line(line):\n\ttry:\n\t\tarea,gene_level = re.split(r'[ ](?=[A-Z])',line)\n\t\tgene,level = gene_level.split()\n\t\t#area,gene,level = line.split('|')\n\t\treturn {'Structure':area,'ID':gene,'Level':float(level)}\n\texcept:\n\t\treturn None\n\n\ncontrol = pd.DataFrame(filter(None,[parse_line(line) \n\tfor line in open('../docs/gene-expression-by-area-snapshot.txt').read().splitlines()]))\n\nroi = pd.read_csv('../docs/gene-expression-by-area-filtered.txt',sep='|',\n\tnames=['Structure','ID','Level'])\n\ncontrol['zscore'] = np.absolute((control['Level'] - control['Level'].mean())/control['Level'].std(ddof=0))\nroi['zscore'] = np.absolute((roi['Level'] - roi['Level'].mean())/roi['Level'].std(ddof=0))\n\n\n#Selectively upregulated genes are those in ROI with |zscore| >CUTOFF and with |zscore| < CUTOFF in Brain\nroi_upregulated_genes = roi[roi['zscore']>CUTOFF]\ncontrol_not_upregulated_genes = control[control['zscore']<CUTOFF]\n\nclass NumArray(object):\n    def __init__(self, nums):\n        self.count = len(nums)\n        if self.count > 0:\n            self.matrix = [0] *4*self.count\n        def buildTree(i, j, index, nums): \n            
if i == j: \n self.matrix[index] = nums[i] \n else:\n mid = (i + j)/2\n buildTree(i, mid, 2*index+1, nums)\n buildTree(mid+1, j, 2*index+2, nums)\n self.matrix[index] = self.matrix[2*index+1] + self.matrix[2*index+2]\n\n buildTree(0, self.count-1, 0, nums)\n else:\n self.matrix = None\n\n def update(self, i, val):\n \"\"\"\n :type i: int\n :type val: int\n :rtype: int\n \"\"\"\n def updateNode(i, val, index, left, right):\n if left == right == i:\n v = self.matrix[index]\n self.matrix[index] = val\n return v\n mid = (left+right)/2\n if left <= i and mid >= i:\n v = updateNode(i, val, 2*index+1, left, mid)\n self.matrix[index] = self.matrix[index] - v + val\n return v\n elif mid+1 <= i and right >= i:\n v = updateNode(i, val, 2*index+2, mid+1, right)\n self.matrix[index]= self.matrix[index] - v + val\n return v\n \n if self.count > 0:\n return updateNode(i, val, 0, 0, self.count-1)\n else:\n return 0\n\n def sumRange(self, i, j):\n \"\"\"\n sum of elements nums[i..j], inclusive.\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n if self.count == 0:\n return 0\n def calculate(i, j, index, left, right):\n if i < left or j > right:\n return 0\n if i == left and j == right:\n return self.matrix[index]\n value = 0\n mid = (left+right)/2\n if left <= i and mid >= i:\n if mid >= j:\n return calculate(i, j, 2*index+1, left, mid)\n else:\n value += calculate(i, mid, 2*index+1, left, mid)\n if right >= j and mid+1 <= j:\n if mid + 1 <= i:\n return calculate(i, j, 2*index+2, mid+1, right)\n else:\n value += calculate(mid+1, j, 2*index+2, mid+1, right)\n return value\n return calculate(i,j, 0, 0, self.count-1)\n\n# Your NumArray object will be instantiated and called as such:\n# numArray = NumArray(nums)\n# numArray.sumRange(0, 1)\n# numArray.update(1, 10)\n# numArray.sumRange(1, 2)","sub_path":"LeetCode/Solved/oj307.py","file_name":"oj307.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"533411531","text":"#!/usr/bin/env python3\n\nimport argparse\ntry:\n import xmlrpclib\nexcept ImportError:\n import xmlrpc.client as xmlrpclib\nimport pprint\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--port\",\n default=\"8888\",\n help=\"server port(8888)\")\n\n parser.add_argument(\n \"--stop-channel\",\n help=\"channel name\")\n\n parser.add_argument(\n \"--stop-channels\",\n action='store_true',\n help=\"stop all channels\")\n\n parser.add_argument(\n \"--start-channels\",\n action='store_true',\n help=\"start all channels\")\n\n parser.add_argument(\n \"--start-channel\",\n help=\"channel name\")\n\n parser.add_argument(\n \"--host\",\n default=\"localhost\",\n help=\"host name (localhost)\")\n\n parser.add_argument(\n \"--stop\",\n action='store_true',\n help=\"stop server\")\n\n parser.add_argument(\n \"--show-config\",\n action='store_true',\n help=\"print configuration\")\n\n parser.add_argument(\n \"--show-channel-config\",\n help=\"print channel configuration\")\n\n parser.add_argument(\n \"--show-de-config\",\n action='store_true',\n help=\"print Decision Engine configuration\")\n\n parser.add_argument(\n \"--reload-config\",\n action=\"store_true\",\n help=\"reload configuration\")\n\n parser.add_argument(\n \"--status\",\n action='store_true',\n help=\"print status server\")\n\n parser.add_argument(\n \"--print-product\",\n help=\"product name\")\n\n parser.add_argument(\n \"--print-products\",\n action='store_true',\n help=\"print products\")\n\n 
parser.add_argument(\n \"--query\",\n help=\"panda query, e.g. \\\" FigureOfMerit != inf \\\"\")\n\n parser.add_argument(\n \"--columns\",\n help=\"comma separated list of columns\")\n\n parser.add_argument(\n \"--print-engine-loglevel\",\n action='store_true',\n help=\"print engine log level\")\n\n parser.add_argument(\n \"--get-channel-loglevel\",\n help=\"print channel log level\")\n\n parser.add_argument(\n \"--set-channel-loglevel\",\n nargs=2,\n help=\" loglevel, (possible levels are NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL)\")\n\n parser.add_argument(\n \"--reaper-start\",\n action='store_true',\n help=\"start the database cleanup process\")\n\n parser.add_argument(\n \"--reaper-start-delay-secs\",\n default=\"0\",\n type=int,\n help=\"Delay the database cleanup process start\")\n\n parser.add_argument(\n \"--reaper-stop\",\n action='store_true',\n help=\"stop the database cleanup process\")\n\n parser.add_argument(\n \"--reaper-status\",\n action='store_true',\n help=\"show the database cleanup process status\")\n\n return parser\n\ndef build_xmlrpc_connection(host, port):\n con_string = \"http://{}:{}\".format(host, port)\n s = xmlrpclib.ServerProxy(con_string, allow_none=True)\n\n return s\n\ndef execute_command_from_args(argsparsed, xmlrpcsocket):\n '''argsparsed should be from create_parser in this file'''\n\n if argsparsed.status:\n return xmlrpcsocket.status()\n\n if argsparsed.stop_channel:\n return xmlrpcsocket.stop_channel(argsparsed.stop_channel)\n\n if argsparsed.start_channel:\n return xmlrpcsocket.start_channel(argsparsed.start_channel)\n\n if argsparsed.stop_channels:\n return xmlrpcsocket.stop_channels()\n\n if argsparsed.start_channels:\n return xmlrpcsocket.start_channels()\n\n if argsparsed.print_engine_loglevel:\n return xmlrpcsocket.get_log_level()\n\n if argsparsed.get_channel_loglevel:\n level = argsparsed.get_channel_loglevel\n if level == \"UNITTEST\":\n return \"NOTSET\"\n else:\n return xmlrpcsocket.get_channel_log_level(argsparsed.get_channel_loglevel)\n\n if argsparsed.set_channel_loglevel:\n return xmlrpcsocket.set_channel_log_level(argsparsed.set_channel_loglevel[0], argsparsed.set_channel_loglevel[1])\n\n if argsparsed.show_config:\n return pprint.pformat(xmlrpcsocket.show_config(\"all\"))\n\n if argsparsed.show_channel_config:\n channel = argsparsed.show_channel_config\n return pprint.pformat(xmlrpcsocket.show_config(channel))\n\n if argsparsed.show_de_config:\n return xmlrpcsocket.show_de_config()\n\n if argsparsed.reload_config:\n return xmlrpcsocket.reload_config()\n\n if argsparsed.print_products:\n return xmlrpcsocket.print_products()\n\n if argsparsed.print_product:\n return xmlrpcsocket.print_product(argsparsed.print_product,\n argsparsed.columns,\n argsparsed.query)\n\n if argsparsed.stop:\n return xmlrpcsocket.stop()\n\n if argsparsed.reaper_stop:\n return xmlrpcsocket.reaper_stop()\n\n if argsparsed.reaper_start:\n return xmlrpcsocket.reaper_start(argsparsed.reaper_start_delay_secs)\n\n if argsparsed.reaper_status:\n return xmlrpcsocket.reaper_status()\n\ndef main(args_to_parse=None):\n '''If you pass a list of args, they will be used instead of sys.argv'''\n\n parser = create_parser()\n\n if args_to_parse:\n args = parser.parse_args(args_to_parse)\n else:\n args = parser.parse_args()\n\n socket = build_xmlrpc_connection(args.host, args.port)\n\n return execute_command_from_args(args, socket)\n\n\nif __name__ == \"__main__\":\n 
print(main())\n","sub_path":"framework/engine/de_client.py","file_name":"de_client.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"567845370","text":"import os\nimport re\nimport requests\n\npath = u\"/Users/objcat/markdown/objcat_blog_markdown/markdown/Python/[Python] import自定义类\"\n\n\"\"\"\nConvert Jianshu markdown to GitHub markdown (old version: deprecated)\nThe main job is to download the images and then replace the paths in the markdown with absolute paths \n\nUsage\nCreate the folder structure: article name (folder) containing\n           img  - stores the images\n           md   - stores the markdown\n           \nSingle article: call download_replace with the folder of one article\nMultiple articles: one big folder containing several article folders; just call batch with the big folder\n\n\"\"\"\n\n\n# download the images and rewrite the article\ndef download_replace(dir_path):\n    md_path = dir_path + \"/md\"\n    img_path = dir_path + \"/img\"\n    for filename in os.listdir(md_path):\n        md = md_path + \"/\" + filename\n        f = open(md)\n        content = f.readlines()\n        f.close()\n        f = open(md)\n        content2 = f.read()\n        f.close()\n        i = 1\n        for url in content:\n            if \"https://upload-images\" in url:\n                pattern = re.compile(r\"]\\((.*?)\\)\")\n                item = re.findall(pattern, url)\n                print(item[0])\n                result = requests.get(item[0])\n                with open(img_path + \"/\" + str(i) + \".png\", \"wb\") as f2:\n                    f2.write(result.content)\n                content2 = content2.replace(url, \"![](\" + \"../img/\" + str(i) + \".png\" + \")\" + \"\\n\")\n                i += 1\n        # write out the new document\n        with open(md_path + \"/new.md\", \"w\") as f2:\n            f2.write(content2)\n\n\ndef batch(root_path):\n    for filename in os.listdir(root_path):\n        target_path = root_path + \"/\" + filename\n        if os.path.isdir(target_path):\n            for filename2 in os.listdir(target_path):\n                target_path2 = target_path + \"/\" + filename2\n                if os.path.isdir(target_path2):\n                    print(\"Processing: \" + filename2)\n                    download_replace(target_path2)\n\n\nif __name__ == '__main__':\n    # download_replace(path)\n    batch(\"/Users/objcat/markdown/objcat_blog_markdown/markdown\")\n","sub_path":"src/skill/jianshu_to_github/conver2.py","file_name":"conver2.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
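# download_replace (above) pulls image URLs out of markdown lines with the pattern
# r"]\((.*?)\)". A small self-contained check of that exact pattern; the sample line
# is illustrative, not taken from the original repo:

import re

sample = "![](https://upload-images.example.com/123.png)"
pattern = re.compile(r"]\((.*?)\)")
print(re.findall(pattern, sample))  # ['https://upload-images.example.com/123.png']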
+{"seq_id":"303335633","text":"import sys\nfrom pathlib import Path\nimport shutil\nimport re\n\nCYRILLIC_SYMBOLS = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяєіїґ'\nTRANSLATION = (\"a\", \"b\", \"v\", \"g\", \"d\", \"e\", \"e\", \"j\", \"z\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"r\", \"s\", \"t\", \"u\",\n               \"f\", \"h\", \"ts\", \"ch\", \"sh\", \"sch\", \"\", \"y\", \"e\", \"u\", \"ja\")\n\nTRANS = {}\n\nfor k, v in zip(CYRILLIC_SYMBOLS, TRANSLATION):\n    TRANS[ord(k)] = v\n    TRANS[ord(k.upper())] = v.upper()\n\n\ndef normalize(name: str) -> str:\n    translated_name = name.translate(TRANS)\n    translated_name = re.sub(r\"\\W\", \"_\", translated_name)\n    return translated_name\n\n\nJPEG_IMAGES = []\nJPG_IMAGES = []\nPNG_IMAGES = []\nSVG_IMAGES = []\nDOC_DOC = []\nDOCX_DOC = []\nXLSX_DOC = []\nPPTX_DOC = []\nPDF_DOC = []\nZIP_ARCH = []\nTAR_ARCH = []\nMP3_MUSIC = []\nFOLDERS = []\nOTHER = []\nEXTENSIONS = set()\n\nREGISTERED_EXTENSIONS = {\n    'JPEG': JPEG_IMAGES,\n    'JPG': JPG_IMAGES,\n    'PNG': PNG_IMAGES,\n    'SVG': SVG_IMAGES,\n    'DOC': DOC_DOC,\n    'DOCX': DOCX_DOC,\n    'XLSX': XLSX_DOC,\n    'PPTX': PPTX_DOC,\n    'PDF': PDF_DOC,\n    'ZIP': ZIP_ARCH,\n    \"TAR\": TAR_ARCH,\n    \"MP3\": MP3_MUSIC,\n    'OTHER': OTHER\n}\n\n\ndef parse_folder(path):\n    p = Path(path)\n    for file in p.iterdir():\n        if file.is_dir():\n            if file.name not in ['IMAGES', 'DOCS', 'ARCH', 'OTHER', 'VIDEOS', 'MUSIC']:\n                FOLDERS.append(file)\n                parse_folder(file)\n\n            continue\n        else:\n            ext = file.suffix[1:].upper()\n            EXTENSIONS.add(ext)\n            if ext in REGISTERED_EXTENSIONS.keys():\n                REGISTERED_EXTENSIONS[ext].append(file)\n            else:\n                REGISTERED_EXTENSIONS['OTHER'].append(file)\n\n    return REGISTERED_EXTENSIONS\n\n\ndef handle_image(file: Path, root_folder: Path, dist: str):\n    target_folder = root_folder / dist\n    target_folder.mkdir(exist_ok=True)\n    ext = Path(file).suffix\n    new_target_folder = target_folder / ext.upper()\n    new_target_folder.mkdir(exist_ok=True)\n    new_name = normalize(file.name.replace(ext, \"\")) + ext\n    file.replace(new_target_folder / new_name)\n\n\ndef handle_doc(file: Path, root_folder: Path, dist: str):\n    target_folder = root_folder / dist\n    target_folder.mkdir(exist_ok=True)\n    ext = Path(file).suffix\n    new_target_folder = target_folder / ext.upper()\n    new_target_folder.mkdir(exist_ok=True)\n    new_name = normalize(file.name.replace(ext, \"\")) + ext\n    file.replace(new_target_folder / new_name)\n\n\ndef handle_music(file: Path, root_folder: Path, dist: str):\n    target_folder = root_folder / dist\n    target_folder.mkdir(exist_ok=True)\n    ext = Path(file).suffix\n    new_target_folder = target_folder / ext.upper()\n    new_target_folder.mkdir(exist_ok=True)\n    new_name = normalize(file.name.replace(ext, \"\")) + ext\n    file.replace(new_target_folder / new_name)\n\n\ndef handle_other(file: Path, root_folder: Path, dist: str):\n    target_folder = root_folder / dist\n    target_folder.mkdir(exist_ok=True)\n    ext = Path(file).suffix\n    new_name = normalize(file.name.replace(ext, \"\")) + ext\n    file.replace(target_folder / new_name)\n\n\ndef handle_archive(file: Path, root_folder: Path, dist: str):\n    target_folder = root_folder / dist\n    target_folder.mkdir(exist_ok=True)\n    ext = Path(file).suffix\n    folder_for_arch = normalize(file.name.replace(ext, \"\"))\n    archive_folder = target_folder / folder_for_arch\n    archive_folder.mkdir(exist_ok=True)\n    try:\n        shutil.unpack_archive(str(file.resolve()), str(archive_folder.resolve()))\n    except shutil.ReadError:\n        archive_folder.rmdir()\n        return\n    file.unlink()\n\n\ndef handle_folder(folder: Path):\n    try:\n        folder.rmdir()\n    except OSError:\n        print(f\"Failed to delete folder {folder}\")\n\n\ndef main(folder):\n    parse_folder(folder)\n\n    for file in DOC_DOC:\n        handle_doc(file, folder, \"DOCS\")\n\n    for file in DOCX_DOC:\n        handle_doc(file, folder, \"DOCS\")\n\n    for file in PPTX_DOC:\n        handle_doc(file, folder, \"DOCS\")\n\n    for file in XLSX_DOC:\n        handle_doc(file, folder, \"DOCS\")\n\n    for file in PDF_DOC:\n        handle_doc(file, folder, \"DOCS\")\n\n    for file in TAR_ARCH:\n        handle_archive(file, folder, \"ARCH\")\n\n    for file in MP3_MUSIC:\n        handle_music(file, folder, \"MUSIC\")\n\n    for file in JPEG_IMAGES:\n        handle_image(file, folder, \"IMAGES\")\n\n    for file in JPG_IMAGES:\n        handle_image(file, folder, \"IMAGES\")\n\n    for file in PNG_IMAGES:\n        handle_image(file, folder, \"IMAGES\")\n\n    for file in SVG_IMAGES:\n        handle_image(file, folder, \"IMAGES\")\n\n    for file in OTHER:\n        handle_other(file, folder, \"OTHER\")\n\n    for file in ZIP_ARCH:\n        handle_archive(file, folder, \"ARCH\")\n\n    for f in FOLDERS:\n        handle_folder(f)\n\n\nscan_path = sys.argv[1]\nprint(f\"Start in folder {scan_path}\")\nsearch_folder = Path(scan_path)\nsearch_folder = search_folder.resolve()\nmain(search_folder)\n","sub_path":"clean_folder/clean_folder/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"141610439","text":"\"\"\"This file is a Uhaul spider created on top of the 
ATSSpider\nscrapy crawl uhaul -a mining_job_id=9999 -a iteration=1 -a url=\"http://jobs.uhaul.com\"\n\nsample url:\n http://jobs.uhaul.com\n\"\"\"\nimport re\nfrom urlparse import urljoin, urlparse, parse_qs\n\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\n\nclass Uhaul(ATSSpider):\n\n name = 'uhaul' # unique identifier for this spider\n\n allowed_domains = ['uhaul.com'] # set allowed domain to crawl\n\n el_raw_html = \"//div[@id='ctl00_contentMain_pnlJobDetail']\"\n\n def parse(self, response):\n self.set_meta_language(response)\n hxs = HtmlXPathSelector(response)\n\n hiddens = {}\n hiddens['__EVENTTARGET'] = ''\n hiddens['__EVENTARGUMENT'] = ''\n hiddens['__EVENTVALIDATION'] = hxs.select(\"//input[contains(@name, '__EVENTVALIDATION')]/@value\").extract()\n hiddens['__VIEWSTATE'] = hxs.select(\"//input[contains(@name, '__VIEWSTATE')]/@value\").extract()\n hiddens['ctl00$btnSearch'] = hxs.select(\"//input[contains(@name, 'ctl00$btnSearch')]/@value\").extract()\n hiddens['ctl00$txtLocation'] = ''\n hiddens['ctl00$txtjobsearch'] = ''\n\n yield FormRequest(formdata=hiddens, url=response.url, callback=self.parse_search)\n\n def parse_search(self, response):\n hxs = HtmlXPathSelector(response)\n\n url = \"http://jobs.uhaul.com/\"\n links = hxs.select(\"//a[contains(@href, 'job_detail.aspx')]/../..\")\n\n for link in links:\n location = ''.join(link.xpath(\"td[2]/text()\").extract()).strip()\n path = link.xpath(\"td[1]/a/@href\").extract()[0]\n request = Request(urljoin(url, path), callback=self.parse_job_callback())\n request.meta['location'] = location\n yield request\n\n next_page = hxs.select(\"//a[contains(text(), 'Next')]/@href\").extract()\n if len(next_page) > 0:\n hiddens = {}\n \n event_target = re.findall(r\"__doPostBack\\('(.+)',''\\)\", next_page[0])[0]\n hiddens['__EVENTTARGET'] = event_target\n\n hiddens['__EVENTARGUMENT'] = ''\n hiddens['__EVENTVALIDATION'] = hxs.select(\"//input[contains(@name, '__EVENTVALIDATION')]/@value\").extract()\n hiddens['__VIEWSTATE'] = hxs.select(\"//input[contains(@name, '__VIEWSTATE')]/@value\").extract()\n hiddens['ctl00$txtLocation'] = ''\n hiddens['ctl00$txtjobsearch'] = ''\n\n yield FormRequest(formdata=hiddens, url=response.url, callback=self.parse_search)\n\n def parse_job(self, response):\n hxs = HtmlXPathSelector(response)\n loader = BrightcorpItemLoader(selector=hxs)\n url_prse = urlparse(response.url)\n job_id = parse_qs(url_prse.query)['aval_job_id'][0]\n\n loader.add_value(\"referencenumber\", \"%s-%s\" % (url_prse.netloc.split('.')[1], job_id))\n loader.add_xpath(\"title\", '//span[@id=\"ctl00_contentMain_lblJobTitle\"]/text()')\n loader.add_xpath(\"description\", '//span[@id=\"ctl00_contentMain_lblDescription\"]')\n loader.add_xpath(\"qualifications\", '//span[@id=\"ctl00_contentMain_lblReq\"]/text()')\n loader.add_value(\"url\", response.url)\n loader.add_value(\"company\", \"U-Haul\")\n loader.add_value(\"location\", response.meta['location'])\n loader.add_xpath(\"jobcategory\", '//span[@id=\"ctl00_contentMain_lblWkStatus\"]/text()')\n loader.add_xpath(\"educationrequirements\", '//span[@id=\"ctl00_contentMain_lblEdu\"]/text()')\n\n return loader.load_item()\n\n def set_custom_item(self, response):\n url_parse = urlparse(response.url)\n job_id = parse_qs(url_parse.query)['aval_job_id'][0]\n\n self.loader.add_value(\"company\", \"U-Haul\")\n self.loader.add_value('referencenumber', 
\"%s-%s\" % (url_parse.netloc.split('.')[1], job_id))\n\n","sub_path":"brightcorp/brightcorp/spiders/uhaul.py","file_name":"uhaul.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"435146441","text":"from flask_restful import Resource\nfrom backend.common import util\nfrom gensim import corpora, models, similarities\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom elasticsearch import Elasticsearch\n\nimport requests\nimport re\n\n\nclass SimilarWines(Resource):\n def get(self, wine_id):\n print(\"Call for: GET /beers/%s\" % wine_id)\n url = util.es_base_url['wines'] + '/' + wine_id\n resp = requests.get(url)\n data = resp.json()\n wine = data['_source']\n\n query = wine['description']\n id_list = self.find_similar_docs(query)\n query = {\n \"query\": {\n \"terms\": {\n \"_id\": id_list\n }\n }\n }\n client = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n result = client.search(index='findmywine', body=query)\n\n wines = []\n for hit in result['hits']['hits']:\n wine = hit['_source']\n wine['id'] = hit['_id']\n wines.append(wine)\n return wines\n\n @staticmethod\n def normalize(review):\n review_letters = re.sub('[^a-zA-Z]', ' ', str(review))\n review_letters = review_letters.lower()\n return \" \".join(review_letters.split())\n\n @staticmethod\n def remove_stopwords(review):\n stop_words = set(stopwords.words('english'))\n ls = [word for word in review.split() if word not in stop_words]\n txt = \" \".join(ls)\n return txt\n\n @staticmethod\n def find_similar_docs(query):\n index = similarities.MatrixSimilarity.load(\"backend/wines.index\")\n dictionary = corpora.Dictionary.load('backend/wines.dict')\n lsi = models.LsiModel.load('backend/model.lsi')\n vec_bow = dictionary.doc2bow(word_tokenize(SimilarWines.remove_stopwords(SimilarWines.normalize(query))))\n vec_lsi = lsi[vec_bow] # convert the query to LSI space\n sims = index[vec_lsi]\n sims = sorted(enumerate(sims), key=lambda item: -item[1])[:8]\n\n wine_titles = [doc[0] for doc in sims]\n return wine_titles\n","sub_path":"backend/resources/SimilarWines.py","file_name":"SimilarWines.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"423466462","text":"from aoc import get_ints, read_file, timer\n\ndef draw(points):\n gapX = min(list(zip(*points))[0])\n gapY = min(list(zip(*points))[1])\n sizeX = max(list(zip(*points))[0]) - gapX + 1\n sizeY = max(list(zip(*points))[1]) - gapY + 1\n \n field = [[\" \" for y in range(sizeX)] for x in range(sizeY)]\n\n for point in points:\n field[point[1]-gapY][point[0]-gapX] = \"*\"\n \n for y in range(sizeY):\n print(''.join(field[y]))\n\n@timer\ndef solve():\n points = [get_ints(line) for line in read_file(\"10\")]\n\n found, size = 0, 200\n\n while found < len(points):\n found = 0\n for point in points:\n point[0] += point[2]\n point[1] += point[3]\n if point[0] >= 0 and \\\n point[0] < size and \\\n point[1] >= 0 and \\\n point[1] < size:\n found += 1\n \n return points\n\nresult = solve() \ndraw(result)","sub_path":"2018_10_p1.py","file_name":"2018_10_p1.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"} +{"seq_id":"448345434","text":"# Create 2 new lists height and weight\nheight = [1.87, 1.87, 1.82, 1.91, 1.90, 1.85, 1.83]\nweight = [81.65, 97.52, 95.25, 92.98, 86.18, 88.45, 
80.00]\n\nimport numpy as np\n\n# Create 2 numpy arrays from height and weight\nnp_height = np.array(height)\nnp_weight = np.array(weight)\n\nprint(type(np_height))\n\n# Calculate bmi\nbmi = np_weight / np_height ** 2\n\n# Print the result\nprint(bmi)\n\n# For a boolean response\nprint(bmi > 25)\n\n# Print only those observations above 25\nprint(bmi[bmi > 25])\n\nweight_kg = [81.65, 97.52, 95.25, 92.98, 86.18, 88.45]\n\nnp_weight_kg = np.array(weight_kg)\nnp_weight_lbs = np_weight_kg * 2.2\nprint(np_weight_lbs)\n","sub_path":"LearnNumpyArray.py","file_name":"LearnNumpyArray.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"198516247","text":"# Java reference solution (prefix sums + HashMap):\n# public class Solution {\n#     public int subarraySum(int[] nums, int k) {\n#         int count = 0, sum = 0;\n#         HashMap<Integer, Integer> map = new HashMap<>();\n#         map.put(0, 1);\n#         for (int i = 0; i < nums.length; i++) {\n#             sum += nums[i];\n#             if (map.containsKey(sum - k))\n#                 count += map.get(sum - k);\n#             map.put(sum, map.getOrDefault(sum, 0) + 1);\n#         }\n#         return count;\n#     }\n# }\n\nclass Solution:\n    def subarraySum(self, nums, k):\n        # Count subarrays summing to k via running prefix sums:\n        # a subarray ending at j sums to k iff some earlier prefix equals s - k.\n        preSums = {0: 1}\n        s = 0\n        res = 0\n        for num in nums:\n            s += num\n            res += preSums.get(s - k, 0)\n            preSums[s] = preSums.get(s, 0) + 1\n        return res\n\n# O(n^2) brute force for comparison:\n# class Solution:\n#     def subarraySum(self, nums: List[int], k: int) -> int:\n#         count = 0\n#         for i in range(len(nums)):\n#             sub_sum = 0\n#             for j in range(i, len(nums)):\n#                 sub_sum += nums[j]\n#                 if sub_sum == k:\n#                     count += 1\n#         return count\n","sub_path":"SubarraySumEqualsK.py","file_name":"SubarraySumEqualsK.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"39536557","text":"\"\"\"code from:\nhttps://blog.dreamshire.com/project-euler-57-solution/\"\"\"\nfrom math import log10\n\n# Expand the continued fraction for sqrt(2); count expansions whose\n# numerator has more digits than the denominator.\nL, n, d, c = 1000, 3, 2, 0\n\nfor x in range(2, L + 1):\n    n, d = n + 2 * d, n + d\n    if int(log10(n)) > int(log10(d)):\n        c += 1\nprint(c)\n","sub_path":"codes/problems/problem57.py","file_name":"problem57.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"480263257","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom astropy.io import fits\nfrom astropy.io.fits import getheader\nfrom scipy.optimize import curve_fit\nfrom scipy.interpolate import interp1d\nimport mpyfit\nfrom scipy.signal import argrelextrema\nimport csv\nimport pickle\nimport math as m\nimport pandas as pd\nimport os\n\n\n# Create a master shifting function with a lot of flexibility\n# Specifically, it is able to:\n# - shift to the same reference frame (option, can be disabled)\n# - remove a specific planet signal (option, can be disabled)\n# - inject a specific planet signal (option, can be disabled)\n# - shift to zero or to the mean RV (the \"zero\" and \"median\" options)\n\ndef master_shifting_planet(bjd, ccfBary, rvh,\n                           ref_frame_shift,  # \"off\" or a specific value in km/s\n                           removed_planet_rvs,  # array of rv values for planet signal in km/s OR \"NULL\"\n                           injected_planet_params,  # array of amplitude (km/s), 2pi/period, phase\n                           zero_or_median):  # \"zero\" or \"median\"\n    number_of_ccfs = len(ccfBary)\n\n    # HARPS direct data lists\n    BJD_list = []\n    og_ccf_list = []\n    rv_from_HARPS_list = []\n    v_rad_raw_list = []\n\n    # injected planet list\n    planet_signal_list = []\n\n    # mpyfit lists\n    mu_og_list = []\n    mu_jup_list = []\n    mu_planet_list = []\n    mu_zero_list = []\n    CCF_normalized_list = []\n\n    # CCF lists\n    compiled_ccf_list = []\n    jup_shifted_CCF_data_list = []\n    planet_shifted_CCF_data_list = []\n    shifted_CCF_list = []\n    final_ccf_list = []\n\n    def planet_signal(x):\n        return injected_planet_params[0] * np.sin(injected_planet_params[1] * x + injected_planet_params[2])\n\n    spline_method = 'quadratic'\n    for i in range(0, number_of_ccfs):\n        day_of_observation = bjd[i]\n        BJD_list.append(day_of_observation)\n\n        # extracts the CCF data and rv from fits\n        CCF_data = ccfBary[i]\n        og_ccf_list.append(CCF_data)\n        rv_from_HARPS = rvh[i]  # - bsrv[i]\n        rv_from_HARPS_list.append(rv_from_HARPS)\n\n        # Finds the local minimum using a Gaussian fit\n        # Define the actual function where A = p[0], mu = p[1], sigma = p[2], c = p[3]\n        def gauss(x, p):\n            return -p[0] * np.exp(-(x - p[1]) ** 2 / (2. * p[2] ** 2)) + p[3]\n\n        # A simple minimization function:\n        def least(p, args):\n            x, y = args\n            return gauss(x, p) - y\n\n        parinfo = [{'fixed': False, 'step': 1e-6},\n                   {'fixed': False, 'step': 1e-4},\n                   {'fixed': False, 'step': 1e-14},\n                   {'fixed': False, 'step': 1e-9}]\n\n        # no_shift fit\n        rv_data = np.linspace(-20, 20, 161)\n        p_no_shifted = [1., 0.1, 1., 0.5]\n        pfit_no_shift, results_no_shift = mpyfit.fit(least, p_no_shifted, (rv_data, CCF_data), parinfo)\n        mu_og = pfit_no_shift[1]\n        mu_og_list.append(mu_og)\n        compiled_ccf_list.append(CCF_data)\n\n        # A reference frame shift would be applied here; note that the\n        # ref_frame_shift argument is accepted but not yet used below.\n\n        if removed_planet_rvs[0] != \"NULL\":\n            jupiter_shift = removed_planet_rvs[i]\n            v_rad_raw = rvh[i] + removed_planet_rvs[i]\n            v_rad_raw_list.append(v_rad_raw)\n\n            # planet removal shift\n            rv_data_jupiter_shift = rv_data + jupiter_shift  # minus sign\n            f_jup = interp1d(rv_data_jupiter_shift, CCF_data, kind=spline_method, fill_value='extrapolate')\n            jupiter_shifted_CCF_data = f_jup(rv_data)\n            jup_shifted_CCF_data_list.append(jupiter_shifted_CCF_data)\n            compiled_ccf_list.append(jupiter_shifted_CCF_data)\n\n            # fits the data shifted by jupiter (mu_jup avoids shadowing the math-as-m import)\n            p_shifted_jup = [1., 0.1 + jupiter_shift, 1., 0.5]\n            pfit_jup, results_jup = mpyfit.fit(least, p_shifted_jup, (rv_data, jupiter_shifted_CCF_data), parinfo)\n            mu_jup = pfit_jup[1]\n            mu_jup_list.append(mu_jup)\n\n        if injected_planet_params[0] != \"NULL\":\n            # inject a planet (k=0.3 m/s, p = 365.24d)\n            ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n\n            bjd_array = np.asarray(day_of_observation)\n            inj_planet_shift = planet_signal(bjd_array)  # km/s\n            planet_signal_list.append(inj_planet_shift)\n            rv_data_planet_shift = rv_data + inj_planet_shift\n            f_planet = interp1d(rv_data_planet_shift, ccf_to_use, kind='cubic', fill_value='extrapolate')\n            planet_shifted_CCF_data = f_planet(rv_data)\n            planet_shifted_CCF_data_list.append(planet_shifted_CCF_data)\n            compiled_ccf_list.append(planet_shifted_CCF_data)\n\n            # fits the data shifted by the injected planet\n            p_shifted_planet = [1., 0.1 + inj_planet_shift, 1., 0.5]\n            pfit_planet, results_planet = mpyfit.fit(least, p_shifted_planet, (rv_data, planet_shifted_CCF_data), parinfo)\n\n            mu_planet = pfit_planet[1]\n            mu_planet_list.append(mu_planet)\n\n            if zero_or_median == \"zero\":\n                # Shift to zero, after planet shift\n                ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n\n                shift_to_zero = -(rv_from_HARPS + inj_planet_shift)\n                rv_data_shifted = rv_data + shift_to_zero\n\n                f = interp1d(rv_data_shifted, ccf_to_use, kind='cubic', fill_value='extrapolate')\n                shifted_CCF_data = f(rv_data)\n                shifted_CCF_list.append(shifted_CCF_data)\n                compiled_ccf_list.append(shifted_CCF_data)\n\n                # fits the shifted data\n                p_shifted = [1., 0.1 - shift_to_zero, 1., 0.5]\n                pfit, results = mpyfit.fit(least, p_shifted, (rv_data, shifted_CCF_data), parinfo)\n                m_zero = pfit[1]\n                mu_zero_list.append(m_zero)  # -0.1)\n            else:\n                # Shift to the mean RV (the \"median\" option), after planet shift\n                ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n\n                shift_to_median = ((np.mean(rvh) - rv_from_HARPS) - inj_planet_shift)\n                rv_data_shifted = rv_data + shift_to_median\n\n                f = interp1d(rv_data_shifted, ccf_to_use, kind='cubic', fill_value='extrapolate')\n                shifted_CCF_data = f(rv_data)\n                shifted_CCF_list.append(shifted_CCF_data)\n                compiled_ccf_list.append(shifted_CCF_data)\n\n                # fits the shifted data\n                p_shifted = [1., 0.1 - shift_to_median, 1., 0.5]\n                pfit, results = mpyfit.fit(least, p_shifted, (rv_data, shifted_CCF_data), parinfo)\n                m_zero = pfit[1]\n                mu_zero_list.append(m_zero)  # -0.1)\n        else:\n            if zero_or_median == \"zero\":\n                # Shift to zero\n                ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n\n                shift_to_zero = -(rv_from_HARPS)\n                rv_data_shifted = rv_data + shift_to_zero\n\n                f = interp1d(rv_data_shifted, ccf_to_use, kind='cubic', fill_value='extrapolate')\n                shifted_CCF_data = f(rv_data)\n                shifted_CCF_list.append(shifted_CCF_data)\n                compiled_ccf_list.append(shifted_CCF_data)\n\n                # fits the shifted data\n                p_shifted = [1., 0.1 - shift_to_zero, 1., 0.5]\n                pfit, results = mpyfit.fit(least, p_shifted, (rv_data, shifted_CCF_data), parinfo)\n                m_zero = pfit[1]\n                mu_zero_list.append(m_zero)  # -0.1)\n            else:  # the \"median\" option: recenter on the mean RV\n                ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n                shift_to_median = (np.mean(rvh) - rv_from_HARPS)\n                rv_data_shifted = rv_data + shift_to_median\n\n                f = interp1d(rv_data_shifted, ccf_to_use, kind='cubic', fill_value='extrapolate')\n                shifted_CCF_data = f(rv_data)\n                shifted_CCF_list.append(shifted_CCF_data)\n                compiled_ccf_list.append(shifted_CCF_data)\n\n                # fits the shifted data\n                p_shifted = [1., 0.1 - shift_to_median, 1., 0.5]\n                pfit, results = mpyfit.fit(least, p_shifted, (rv_data, shifted_CCF_data), parinfo)\n                m_zero = pfit[1]\n                mu_zero_list.append(m_zero)  # -0.1)\n        ccf_to_use = compiled_ccf_list[len(compiled_ccf_list) - 1]\n        final_ccf_list.append(ccf_to_use)\n\n        # normalize the CCFs by the mean of the continuum on either side of the line core\n        x_left = ccf_to_use[0:40]\n        x_right = ccf_to_use[121:161]\n        x_norm_range = list(x_left) + list(x_right)\n        CCF_normalized = ccf_to_use * (1 / np.mean(x_norm_range))\n        CCF_normalized_list.append(CCF_normalized)\n\n    # Create a dataframe\n    # NOTE: if the removal/injection options were disabled above, some of these\n    # lists stay empty and the DataFrame constructor will raise on mismatched lengths.\n    d = {'BJD': BJD_list,\n         'vrad_star': rvh,\n         'vrad_plan_star': rvh + planet_signal_list,\n         'og_ccf_list': og_ccf_list,\n         'jup_shifted_CCF_data_list': jup_shifted_CCF_data_list,\n         'planet_shifted_CCF_data_list': planet_shifted_CCF_data_list,\n         'zero_shifted_CCF_list': shifted_CCF_list,\n         'CCF_normalized_list': CCF_normalized_list,\n         'mu_og_list': mu_og_list,\n         'mu_jup_list': mu_jup_list,\n         'mu_planet_list': mu_planet_list,\n         'mu_zero_list': mu_zero_list\n         }\n    df = pd.DataFrame(data=d)\n\n    return df\n\n\nif __name__ == '__main__':\n    # master_shifting_planet() needs CCF data and shift parameters,\n    # so this module is meant to be imported rather than run directly.\n    pass\n","sub_path":"exoplanet-ml/rv_net/master_shifting_planet.py","file_name":"master_shifting_planet.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"84"}
+{"seq_id":"127785882","text":"import numpy as np\n\ndef share_error(Dithered, quant_error, x, y):\n    # Floyd-Steinberg-style error diffusion: push the quantization error\n    # onto the neighbouring pixels that have not been processed yet.\n    Xmax = Dithered.shape[0]\n    Ymax = Dithered.shape[1]\n    if(x + 1 < Xmax):\n        Dithered[x + 1, y ] = Dithered[x + 1, y ] + (quant_error * 7 / 16)\n\n    if(x - 1 >= 0 and (y + 1