diff --git "a/4634.jsonl" "b/4634.jsonl" new file mode 100644--- /dev/null +++ "b/4634.jsonl" @@ -0,0 +1,642 @@ +{"seq_id":"188345812","text":"import json\nimport os\nfrom datetime import datetime\nfrom sqlalchemy import create_engine, Column, Integer, String, DateTime\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.dialects.postgresql import JSON, JSONB\n#from health_check.database import session\n#from health_check.models import Item\n\nengine = create_engine('postgresql://ubuntu:thinkful@localhost:5432/server-health')\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\n\nclass Item(Base):\n __tablename__ = \"server_health\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n json_data = Column(JSON)\n start_time = Column(DateTime, default=datetime.utcnow) \n\n\nBase.metadata.create_all(engine)\n\ndef open_json(file_name):\n try:\n output_json = json.load(open(file_name))\n return output_json\n except ValueError:\n return None\n\ndef main():\n for root, dirs, files in os.walk('/home/ubuntu/workspace/thinkful/projects/health_check/data', topdown=True, onerror=None, followlinks=False):\n if files: # Check the files list and verify there is any data in it. If there is no data in the list then skip to Else: statement. \n for name in files:\n target_file_name = os.path.join(root, name)\n server_name = name.split(\".\")[0]\n output_json = open_json(target_file_name)\n \n check_server_record = session.query(Item).filter_by(name=server_name).first() \n if not check_server_record:\n server_name = Item(name=server_name, json_data=output_json)\n session.add(server_name)\n session.commit()\n # print(server_name.name)\n # print(server_name.id)\n # # print(server_name.json_data)\n # print(server_name.start_time)\n else:\n check_server_record.json_data=output_json\n check_server_record.start_time=(datetime.utcnow())\n session.add(check_server_record)\n session.commit()\n # print(\"Updated record for {}\".format(check_server_record.name))\n # print(\"It still has the same id: {}\".format(check_server_record.id))\n # print(\"The update time / date has chenged to {}\".format(check_server_record.start_time))\n \n else:\n # print(\"No Data\")\n exit()\n\n# if __name__ == '__main__' and __package__ is None:\n# from os import sys, path\n# sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n# main()\nif __name__ == '__main__':\n main()","sub_path":"health_check/update_database.py","file_name":"update_database.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"413329687","text":"import nltk\nfrom nltk import WordNetLemmatizer\nfrom nltk.corpus import stopwords\n\ntext = \"\"\"APJ Abdul kalam came into this world on 15th October 1931, at a time when India was under British \noccupation. He was born to a Tamil Muslim family in Tamil Nadu. His father was a boat owner while his mother was a \nhousewife. Furthermore, Kalam had five siblings and was the youngest of the lot. In school, Kalam was an average \nstudent but was still hardworking and bright. I think this certainly is a great motivation for all the average \nstudents out there. Being average, you must never ever underestimate yourself and continue doing the hard work. 
\"\"\"\n\nsentences = nltk.sent_tokenize(text)\nstemmer = WordNetLemmatizer()\n\nfor word in range(len(sentences)):\n word_tokens = nltk.word_tokenize(sentences[word])\n words = [stemmer.lemmatize(word) for word in word_tokens if word not in set(stopwords.words('english'))]\n sentences[word] = \" \".join(words)\n\nprint(sentences)","sub_path":"stemming_lemmatization/lemmatization.py","file_name":"lemmatization.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"403060492","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport rospy, sys\nimport moveit_commander\nfrom geometry_msgs.msg import PoseStamped\nimport copy\nimport os\nfrom nn_vs.srv import Save_image, Save_imageRequest\nimport random\n\npi = 3.14159\n\nX0 = 0.3\nY0 = -0.045\n# 期望抓取位置\nX = [X0, X0-0.025, X0-0.025, X0+0.025, X0+0.025]\nY = [Y0, Y0-0.025, Y0+0.025, Y0+0.025, Y0-0.025]\nZ = 0.085 + 0.25\n\nroll = [x for x in range(-5,6,1)]\npitch = [x for x in range(-5,6,1)]\nyaw = [x for x in range(-10,11,1)]\n\npz = [float(x)/1000 for x in range(0,11,1)]\npx = [float(x)/1000 for x in range(-5,6,1)]\npy = [float(x)/1000 for x in range(-5,6,1)]\npxy = [[-0.005, 0], [0.005, 0], [-0.0025, 0], [0.0025, 0], [0, 0], \\\n [0, 0.005], [0, -0.005], [0, 0.0025], [0, -0.0025], \\\n [0.0025, 0.0025], [-0.0025, 0.0025], [0.0025, -0.0025], [-0.0025, -0.0025]]\n\nYaw = [0, 30, 60, 90] # Yaw = [0, 30, 60, 90]\n\nROOT_PATH = '/home/li/ROS/probot_ws/src/PROBOT_Anno/nn_vs/poses'\n\ndef Quaternion_multiply(q1,q2):\n \"\"\"\n q1 * q2 = \n (w1*w2 - x1*x2 - y1*y2 - z1*z2) + (w1*x2 + x1*w2 + y1*z2 - z1*y2) i +\n (w1*y2 - x1*z2 + y1*w2 + z1*x2) j + (w1*z2 + x1*y2 - y1*x2 + z1*w2) k\n \"\"\"\n x1,y1,z1,w1 = q1\n x2,y2,z2,w2 = q2\n x = w1*x2 + x1*w2 + y1*z2 - z1*y2\n y = w1*y2 - x1*z2 + y1*w2 + z1*x2\n z = w1*z2 + x1*y2 - y1*x2 + z1*w2\n w = w1*w2 - x1*x2 - y1*y2 - z1*z2\n return [x,y,z,w]\n\n\ndef Quaternion_normal(q):\n x,y,z,w = q\n mod = pow((pow(x,2)+pow(y,2)+pow(z,2)+pow(w,2)), 1/2)\n return [x/mod, y/mod, z/mod, w/mod]\n\nclass Robot:\n def __init__(self):\n # 初始化move_group的API\n moveit_commander.roscpp_initialize(sys.argv)\n # 初始化ROS节点\n rospy.init_node('test_move', anonymous=True)\n # 初始化需要使用move group控制的机械臂中的arm group\n self.arm = moveit_commander.MoveGroupCommander('manipulator')\n # 设置机械臂运动的允许误差值\n self.arm.set_goal_joint_tolerance(0.0002)\n # 设置允许的最大速度和加速度\n self.arm.set_max_acceleration_scaling_factor(0.8)\n self.arm.set_max_velocity_scaling_factor(0.8)\n # base\n self.base_frame_id = \"/base_footprint\"\n self.end_effector_link = self.arm.get_end_effector_link()\n # print(self.end_effector_link)\n self.id = 1 # 1, 937, 1873, 2809, 3745, \n self.save_request = Save_imageRequest()\n \n # 控制机械臂先回到初始化位置\n # self.arm.set_named_target('home')\n # self.arm.go()\n # rospy.sleep(1)\n\n rospy.wait_for_service('/image_save')\n self.image_save_client = rospy.ServiceProxy('/image_save', Save_image)\n rospy.sleep(0.2)\n \n def pose_transform(self, curr_pose, delte_pose):\n \"\"\" pose transform return pose \"\"\"\n target_pose = copy.deepcopy(curr_pose)\n # target_pose.pose.position.x = 0.235270741065\n # target_pose.pose.position.y = 0.097024760869\n # target_pose.pose.position.z = 0.203904230734\n\n \n\n target_pose.pose.orientation.x = -0.999513345318\n target_pose.pose.orientation.y = 0.0311825508069\n target_pose.pose.orientation.z = -0.000829331830071\n target_pose.pose.orientation.w = 0.000182385374545\n target_pose.header.stamp = 
rospy.Time.now() # update time in ROS\n return target_pose\n\n def write_pose(self, file_name, current_pose, rpy):\n \"\"\" write current pose and rpy in file \"\"\"\n # write the pose in file\n file_handle = open(os.path.join(ROOT_PATH, file_name),mode='w+')\n file_handle.write('# position\\n')\n s = str(current_pose.pose.position.x)\n file_handle.write('x='+s+'\\n')\n s = str(current_pose.pose.position.y)\n file_handle.write('y='+s+'\\n')\n s = str(current_pose.pose.position.z)\n file_handle.write('z='+s+'\\n')\n file_handle.write('# quaternion\\n')\n s = str(current_pose.pose.orientation.x)\n file_handle.write('x='+s+'\\n')\n s = str(current_pose.pose.orientation.y)\n file_handle.write('y='+s+'\\n')\n s = str(current_pose.pose.orientation.z)\n file_handle.write('z='+s+'\\n')\n s = str(current_pose.pose.orientation.w)\n file_handle.write('w='+s+'\\n')\n file_handle.write('# rpy\\n')\n s = str(rpy[0])\n file_handle.write('r='+s+'\\n')\n s = str(rpy[1])\n file_handle.write('p='+s+'\\n')\n s = str(rpy[2])\n file_handle.write('y='+s+'\\n')\n\n file_handle.close()\n\n def auto_simple(self):\n # vertical cylinder with radius 5mm and height 10 mm\n # sampled uniformly from -5 degrees to 5 degrees for roll and pitch \n # and -10 degrees to 10 degrees for yaw.\n # 正方形四个顶点加中心点 共五个点, 每个点旋转0°,30°,60°,90°, 每次均匀采样200个数据\n # 5*4*200 = 4000\n rpys = []\n for r in roll:\n for p in pitch:\n for y in yaw:\n rpys.append([r,p,y])\n random.shuffle(rpys)\n # pos = []\n # for z in pz:\n # for x in px:\n # for y in py:\n # pos.append([x,y,z])\n # print(len(rpys), len(pos))\n n = len(rpys)\n idx = 0\n for YAW in Yaw:\n for z in pz:\n for x in px:\n for y in py:\n # 遍历位置, 姿态随机选3个\n for _ in range(3):\n r = rpys[idx][0]\n p = rpys[idx][1]\n ya = rpys[idx][2]\n idx += 1\n if idx == n: idx = 0\n print(self.id,x,y,z,r,p,ya)\n # move to the target position\n self.arm.set_pose_target([X[0]+x, Y[0]+y, Z+z, (-180+r)*pi/180, (0+p)*pi/180, (YAW+ya)*pi/180])\n self.arm.go()\n rospy.sleep(0.2)\n # read current pose from simulation\n current_pose = self.arm.get_current_pose()\n # print(current_pose)\n rpy = self.arm.get_current_rpy()\n # print(rpy)\n # write the pose in file\n self.write_pose(str(self.id)+'.txt', current_pose, rpy)\n self.save_request.file_name = str(self.id)+'.jpg'\n self.image_save_client.call(self.save_request)\n self.id += 1\n\n def auto_simple1(self):\n # vertical cylinder with radius 5mm and height 10 mm\n # sampled uniformly from -5 degrees to 5 degrees for roll and pitch \n # and -10 degrees to 10 degrees for yaw.\n # 正方形四个顶点加中心点 共五个点, 每个点旋转0°,30°,60°,90°, 每次均匀采样200个数据\n # 5*4*200 = 4000\n for YAW in Yaw:\n for z in pz:\n for x,y in pxy:\n # 遍历位置, 姿态随机选3个\n for _ in range(3):\n r = random.choice(roll)\n p = random.choice(pitch)\n ya = random.choice(yaw)\n print(self.id,x,y,z,r,p,ya)\n # move to the target position\n self.arm.set_pose_target([X[0]+x, Y[0]+y, Z+z, (-180+r)*pi/180, (0+p)*pi/180, (YAW+ya)*pi/180])\n self.arm.go()\n rospy.sleep(0.2)\n # read current pose from simulation\n current_pose = self.arm.get_current_pose()\n # print(current_pose)\n rpy = self.arm.get_current_rpy()\n # print(rpy)\n # write the pose in file\n self.write_pose(str(self.id)+'.txt', current_pose, rpy)\n self.save_request.file_name = str(self.id)+'.jpg'\n self.image_save_client.call(self.save_request)\n self.id += 1 \n\n def test_move(self):\n \"\"\" Testing move \"\"\"\n # target_pose = PoseStamped()\n # target_pose.header.frame_id = self.base_frame_id # must have this, pose is based on \"base_footprint\"\n # 
target_pose.header.stamp = rospy.Time.now() # update time in ROS\n # target_pose.pose.position.x = 0.235270741065\n # target_pose.pose.position.y = 0.097024760869\n # target_pose.pose.position.z = 0.203904230734\n # target_pose.pose.orientation.x = 0.999513345318\n # target_pose.pose.orientation.y = -0.0311825508069\n # target_pose.pose.orientation.z = 0.000829331830071\n # target_pose.pose.orientation.w = -0.000182385374545\n # self.arm.set_pose_target(target_pose)\n # self.arm.go()\n # rospy.sleep(1)\n\n # rpy = self.arm.get_current_rpy()\n # print(rpy)\n\n self.arm.set_pose_target([X[0], Y[0], Z, -180*pi/180, 0*pi/180, 0*pi/180])\n self.arm.go()\n rospy.sleep(0.5)\n \n # # self.arm.set_pose_target(target_pose)\n # # self.arm.go() # 机械臂运动\n # # rospy.sleep(1)\n # current_pose = self.arm.get_current_pose ()\n # print(current_pose)\n # rpy = self.arm.get_current_rpy()\n # print(rpy)\n\n # # 设置机械臂的目标位置,使用六轴的位置数据进行描述(单位:弧度)\n # joint_positions = [0.391410, -0.676384, -0.376217, 0.0, 1.052834, 0.454125]\n # self.arm.set_joint_value_target(joint_positions)\n \n # 控制机械臂先回到初始化位置\n # self.arm.set_named_target('home')\n # self.arm.go()\n # rospy.sleep(1)\n \n \n\nif __name__ == \"__main__\":\n\n # q1 = [-0.999513345318,0.0311825508069,-0.000829331830071,0.000182385374545]\n # q2 = [0,0,1,0]\n # q = Quaternion_multiply(q1, q2)\n # q = Quaternion_normal(q)\n # print(q)\n\n rb = Robot()\n\n try:\n rb.test_move()\n # rb.auto_simple()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"nn_vs/scripts/visual_servo/vs_test_move.py","file_name":"vs_test_move.py","file_ext":"py","file_size_in_byte":10285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"92414380","text":"import csv\nimport numpy as np\n\nevent_cause_vals = ['Random System Fault', 'Network Fault',\n 'Upgrade/Maintenance', 'Design Understanding',\n 'Design Problem', 'Environment or External', 'Induced',\n 'Informational', 'Documentation']\n\n# TODO: Add all other lists that contain label options\n\n\n# This function iterates through the alarm data to create a master set of alarm\n# code values. This set will be used as input for the NN\ndef make_alarm_hex_master_set():\n alarm_hex_master = set()\n\n with open('alarm.csv', encoding='utf8') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for row in csv_reader:\n if row[3] != \"NULL\":\n alarm_hex_master.add(row[3])\n\n return sorted(alarm_hex_master)\n\n\n# This function iterates through the ticket data file to create set of incident\n# ID values that will be used to get corresponding hex values\ndef make_incident_id_master_set():\n incident_id_master = set()\n\n with open('ticketdata.csv', encoding='utf8') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for row in csv_reader:\n if row[0] != \"NULL\":\n incident_id_master.add(row[0])\n\n return sorted(incident_id_master)\n\n\n# Returns list of 0's that has a length of n\ndef zerolistmaker(n):\n listofzeros = [0] * n\n return listofzeros\n\n\n# Function that creates a set of the incident ID's in the alarm file\ndef get_alarm_file_incident_ids():\n result_set = set()\n\n with open('alarm.csv', encoding='utf8') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for row in csv_reader:\n result_set.add(row[0])\n\n return sorted(result_set)\n\n\n# Function will iterate through the set of incident ID's in the alarm file and\n# the incident ID's in the ticket file. If there is a match, it is added to the\n# set. 
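One pitfall in the vs_test_move.py helpers above: Quaternion_normal computes the magnitude with pow(..., 1/2), and under Python 2 (the usual ROS interpreter) 1/2 is integer division, so the exponent is 0, the modulus is always 1, and the quaternion comes back unnormalized. A sketch with an explicit square root, plus a quick sanity check built on the same Hamilton-product formula the script documents, is below.

import math

def quaternion_normalize(q):
    """Return q scaled to unit length; q is [x, y, z, w]."""
    x, y, z, w = q
    mod = math.sqrt(x * x + y * y + z * z + w * w)  # avoids the 1/2 -> 0 trap
    return [x / mod, y / mod, z / mod, w / mod]

def quaternion_multiply(q1, q2):
    """Hamilton product q1 * q2, both in [x, y, z, w] order."""
    x1, y1, z1, w1 = q1
    x2, y2, z2, w2 = q2
    return [
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
    ]

# Two 90-degree rotations about Z compose to 180 degrees: expect ~[0, 0, 1, 0]
half = math.sin(math.pi / 4)
q90z = [0.0, 0.0, half, math.cos(math.pi / 4)]
print(quaternion_normalize(quaternion_multiply(q90z, q90z)))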
The set is a union of ID's in both files\ndef get_id_hex_set():\n result_set = set()\n ticket_id_set = make_incident_id_master_set()\n incident_id_in_alarm_file = get_alarm_file_incident_ids()\n for item in ticket_id_set:\n for value in incident_id_in_alarm_file:\n if item == value:\n result_set.add(str(item))\n return sorted(result_set)\n\n\n# Function associates all related alarm values for an incident ID. Store the\n# result in a list\ndef get_associated_hex_vals(id_val):\n id_and_hex_list = []\n id_and_hex_list.append(str(id_val))\n hex_val_set = set()\n with open('alarm.csv', encoding='utf8') as csv_file:\n csv_reader = csv.reader(csv_file)\n next(csv_reader)\n for row in csv_reader:\n if str(id_val) in row and row[3] != 'NULL':\n hex_val_set.add(str(row[3]))\n id_and_hex_list.append(sorted(hex_val_set))\n\n return id_and_hex_list\n\n\n# This will create a 2-D matrix with incident ids mapped to corresponding\n# alarm hex values\ndef make_incident_id_to_alarm_hex_list():\n result_list = []\n set_of_ids = get_id_hex_set()\n\n for values in set_of_ids:\n result_list.append(get_associated_hex_vals(values))\n\n return result_list\n\n\n# Function that will add the labels(class values) to a list that contains\n# ID and hex codes. Returns list containing ID's, features, and labels\n# (Example: [id, [hex_vals], [labels]]\ndef create_id_label_feature_list():\n id_hex_list = make_incident_id_to_alarm_hex_list()\n ticket_data_list = create_ticket_data_list()\n for data in id_hex_list:\n for ticket in ticket_data_list:\n end = len(ticket)\n start = end - 6\n if data[0] == ticket[0]:\n data.append(ticket[start:end])\n\n return id_hex_list\n\n\n# Function iterates through the ticket data in a given csv file and stores the\n# ticket data in an array\ndef create_ticket_data_list():\n ticket_data = []\n with open('ticketdata.csv', encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n for row in reader:\n ticket_data.append(row)\n\n return ticket_data\n\n\n# Gets the hex codes in the master array at index 1, and makes a result list\n# that contains all of the hex values for each ticket\ndef get_hex_codes():\n ticket_arr = create_id_label_feature_list()\n result_list = []\n for values in ticket_arr:\n hex_vals = values[1]\n result_list.append(hex_vals)\n return result_list\n\n\n# This function will accept the array of values, from get_hex_codes, and return\n# and array of 0 and 1 depending on the hex values present in the array. 
Length\n# will be same as length of alarm master set\ndef encode_hex_values(data_arr):\n hex_list = list(make_alarm_hex_master_set())\n list_len = len(hex_list)\n temp_list = zerolistmaker(list_len)\n for index in range(len(hex_list)):\n if hex_list[index] in data_arr:\n temp_list[index] = 1\n return temp_list\n\n\n# Iterates through each hex_arr in get_hex_codes and will encode the data as an\n# array of 0 and 1 for each ticket\ndef encode_ticket_hex_codes():\n result_list = []\n for hex_arr in get_hex_codes():\n result_list.append(encode_hex_values(hex_arr))\n return result_list\n\n\n# Gets all of the values from the master array at index 2, and makes a result\n# list that contains all of the label options for each ticket\ndef get_label_options():\n ticket_arr = create_id_label_feature_list()\n result_list = []\n for values in ticket_arr:\n hex_vals = values[2]\n result_list.append(hex_vals)\n return result_list\n\n\n# encodes event cause and returns an array the length of event_cause_vals\ndef encode_event_cause_options(value):\n list_len = len(event_cause_vals)\n temp_list = zerolistmaker(list_len)\n for option in range(len(event_cause_vals)):\n # TODO: May need to revert index(value) back to data_arr[option]\n if value == event_cause_vals[option]:\n temp_list[option] = 1\n else:\n temp_list[option] = 0\n return temp_list\n\n\n# Target the specific index value inside the label array, within the array of\n# incident ID, [hex_vals], [label option] --> This retrieves event cause labels\ndef get_event_cause_val():\n index_val = 0\n option_list = get_label_options()\n result_list = []\n for values in option_list:\n result_list.append(encode_event_cause_options(values[index_val]))\n return result_list\n\n\n# Takes in a data set and writes that set, line by line, to the filename\ndef write_to_file(data, filename):\n with open(filename, 'w') as result_file:\n try:\n for row in data:\n result_file.write(\"%s\\n\" % row)\n except Exception:\n raise ValueError(\"Failed to write to file\")\n\n\n# TODO: Make this function more generic to accept different input params\ndef convert_array_to_np_array(input_data):\n numpy_array = np.array(input_data)\n numpy_array = numpy_array.astype(int)\n return numpy_array\n\n\n# Function calls that write to output files. 
This will help the team verify the\n# data correlates to tickets appropriately\nencode_hex = convert_array_to_np_array(encode_ticket_hex_codes())\nencode_event_cause = convert_array_to_np_array(get_event_cause_val())\nwrite_to_file(encode_hex, 'hexnumpyarray.txt')\nwrite_to_file(encode_event_cause, 'eventnumpyarray.txt')\n\n","sub_path":"emelia/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"351727917","text":"import pickle\nimport os\nimport shutil\nimport mysql\nimport logic\n\n\ndef prepare():\n #first time to run\n sync()\n logic.idle_desks = logic.Idle_desks()\n\ndef save():\n #save data for resume\n data_dir = os.path.expanduser(logic.data_dir)\n if not os.path.isdir(data_dir):\n os.mkdir(data_dir)\n company_file = os.path.expanduser(logic.company_file)\n with open(company_file, 'wb') as f:\n pickle.dump(logic.info, f)\n data_file = os.path.expanduser(logic.data_file)\n with open(data_file, 'wb') as f:\n for k, v in logic.waiting.items():\n v.waiters = set()\n for k, v in logic.tables.items():\n v.waiters = set()\n for k, v in logic.cooks.items():\n v.waiters = set()\n logic.mask.waiters = set()\n data = {'waiting': logic.waiting, 'tables': logic.tables, 'uids': logic.uids, 'cooks': logic.cooks, 'diet': logic.diet,\n 'category': logic.category, 'desks': logic.desks, 'cook_do': logic.cook_do, 'uid': logic.global_uid,\n 'pid': logic.global_pid}\n pickle.dump(data, f)\n\n\ndef resume():\n #consider the difference between first time and not first time\n if not os.path.isfile(logic.data_file):\n sync()\n return\n # not first time\n if os.path.exists(logic.company_file):\n with open(logic.company_file, 'rb') as f:\n info = pickle.load(f)\n else:\n info = {'company': '', 'shop': '', 'location': '', 'heading': '', 'welcome': '', 'desp': ''}\n logic.info = info\n\n with open(logic.data_file, 'rb') as f:\n data = pickle.load(f)\n logic.waiting = data['waiting']\n logic.tables = data['tables']\n logic.uids = data['uids']\n logic.cooks = data['cooks']\n logic.diet = data['diet']\n logic.category = data['category']\n logic.desks = data['desks']\n logic.cook_do = data['cook_do']\n logic.global_pid = data['pid']\n logic.global_uid = data['uid']\n \n\ndef sync():\n #company_file\n #import pdb\n #pdb.set_trace()\n #print logic.company_file\n sync_info()\n sync_pid()\n sync_uid()\n #desks and tables\n sync_tables()\n #printers\n sync_printers()\n #category\n sync_category()\n #diet\n sync_diet()\n #mask\n #mask = mysql.get_all('mask')\n #logic.mask.content = set()\n #for one in mask:\n # logic.mask.add(one['did'])\n #faculty\n sync_faculty()\n #cook_do\n sync_cookdo()\n\ndef sync_pid():\n result = mysql.get('id', {'name': 'pid'})\n if len(result) == 0:\n mysql.insert('id', {'name': 'pid', 'num': 0})\n logic.global_pid = 0\n else:\n logic.global_pid = result[0]['num']\n\ndef sync_uid():\n result = mysql.get('id', {'name': 'uid'})\n if len(result) == 0:\n mysql.insert('id', {'name': 'uid', 'num': 0})\n logic.global_uid = 0\n else:\n logic.global_uid = result[0]['num']\n\ndef sync_info():\n if os.path.isfile(logic.company_file):\n with open(logic.company_file, 'rb') as f:\n info = pickle.load(f)\n else:\n info = {'company': '', 'shop': '', 'location': '', 'heading': '', 'welcome': '', 'desp': ''}\n logic.info = info\n\ndef sync_tables():\n desks = mysql.get_all('desks')\n logic.desks = set()\n logic.tables = {}\n for one in desks:\n 
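Two helpers in the data_processing.py record above can be expressed more directly: get_id_hex_set's nested loops compute a set intersection, and encode_hex_values is a per-position membership test over the master list. A minimal sketch of both, with made-up sample data in place of the CSV files, follows.

def common_ids(ticket_ids, alarm_ids):
    """IDs present in both files -- same result as the nested-loop version."""
    return sorted(set(ticket_ids) & set(alarm_ids))

def one_hot(master_list, present):
    """0/1 vector over master_list marking which values appear in `present`."""
    present = set(present)
    return [1 if value in present else 0 for value in master_list]

master = ["0x01", "0x02", "0x03", "0x04"]
print(common_ids(["A", "B", "C"], ["B", "C", "D"]))  # ['B', 'C']
print(one_hot(master, ["0x02", "0x04"]))             # [0, 1, 0, 1]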
logic.desks.add(one['desk'])\n logic.tables[one['desk']] = logic.Table(one['desk'])\n\ndef sync_printers():\n printers = mysql.get_all('printers')\n logic.printers = {}\n for one in printers:\n logic.printers[one['name']] = one['ip']\n\ndef sync_category():\n category = mysql.get_all('category')\n logic.category = {}\n for one in category:\n logic.category[one['cid']] = one\n\ndef sync_diet():\n diet = mysql.get_all('diet')\n logic.diet = {}\n for one in diet:\n logic.diet[one['did']] = one\n\ndef sync_faculty():\n faculty = mysql.get_all('faculty')\n for one in faculty:\n logic.faculty[one['fid']] = one\n\ndef sync_cookdo():\n cook_do = mysql.get_all('cook_do')\n logic.cook_do = {}\n for one in cook_do:\n if one['fid'] not in logic.cook_do:\n logic.cook_do[one['fid']] = set()\n logic.cook_do.get(one['fid']).add(one['did'])\n for k, v in logic.cook_do.items():\n if u'all' in v:\n v = set([u'all'])\n","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"603495840","text":"import os, sys, glob\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport healpy as hp\nimport matplotlib \nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt \n\nc_light = 299792.458 # in km/s\n\n\ndef get_args(argv=None):\n parser = argparse.ArgumentParser(\n description=\"Sky-map creation settings.\"\n )\n parser.add_argument(\n \"--inmap\",\n dest='inmap',\n action=\"store\",\n type=str,\n default=\"/cosma7/data/dp004/dc-beck3/ISW_Ree_Sciama/03Maps/Ray_maps_zrange_0.17_0.57.h5\",\n help=\"File in which map is saved.\",\n )\n parser.add_argument(\n \"--outdir\",\n dest='outdir',\n action=\"store\",\n type=str,\n default=\"/cosma7/data/dp004/dc-beck3/ISW_Ree_Sciama/05Correlations/healpy/\",\n help=\"File in which map is saved.\",\n )\n parser.add_argument(\n \"--nside\",\n dest='nside',\n action=\"store\",\n type=int,\n default=1024*4,\n )\n parser.add_argument(\n \"--label\",\n dest='label',\n action=\"store\",\n type=str,\n default=\"zrange_0.17_0.57_new\",\n )\n args = parser.parse_args()\n\n return args\n\ndef run(args):\n maps = pd.read_hdf(args.inmap)\n maps = maps.drop(columns=[\"chi_co\", \"kappa_1\", \"shear_x\", \"shear_y\"])\n\n # smoothing\n #maps = hp.smoothing(maps, fwhm=np.radians(2.5))\n\n maps.loc[:, [\"deflt_x\", \"deflt_y\", \"kappa_2\"]] /= c_light ** 2\n maps.loc[:, \"isw_rs\"] /= c_light ** 3\n nsources = maps.index.values\n\n maps[\"pix\"] = hp.ang2pix(\n args.nside,\n maps[\"the_co\"].values,\n maps[\"phi_co\"].values,\n lonlat=False,\n )\n hp_maps = maps.groupby(['pix']).mean()\n\n # Initate the map and fill it with the values\n f_sky = 20*20 / (4*180*180/np.pi) # sky coverage\n #f_sky = len(hp_maps.index)/hp.nside2npix(args.nside) # sky coverage\n print(\"The sky coverage is %f\" % f_sky)\n hp_kappa = np.zeros(hp.nside2npix(args.nside), dtype=np.float)\n hp_kappa[\n list(\n set(np.arange(\n hp.nside2npix(args.nside))).symmetric_difference(set(hp_maps.index.values)\n )\n )\n ] = hp.UNSEEN\n for pix in hp_maps.index.values:\n hp_kappa[pix] = hp_maps.loc[pix][\"kappa_2\"]\n \n hp_iswrs = np.zeros(hp.nside2npix(args.nside), dtype=np.float)\n hp_iswrs[\n list(\n set(np.arange(\n hp.nside2npix(args.nside))).symmetric_difference(set(hp_maps.index.values)\n )\n )\n ] = hp.UNSEEN\n for pix in hp_maps.index.values:\n hp_iswrs[pix] = hp_maps.loc[pix][\"isw_rs\"]\n \n cl_kk = hp.anafast(hp_kappa, hp_kappa)\n cl_kt = hp.anafast(hp_kappa, hp_iswrs)\n 
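prepare.py above persists module-level state with pickle and restores it on resume. The save/load round-trip, reduced to a self-contained Python 3 sketch (the state keys here are illustrative stand-ins for the module's globals), looks like this:

import os
import pickle
import tempfile

def save_state(path, state):
    """Write the state dict to path, creating the directory if needed."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(state, f)

def resume_state(path, default):
    """Load the state dict, falling back to default on a first run."""
    if not os.path.isfile(path):
        return default
    with open(path, "rb") as f:
        return pickle.load(f)

path = os.path.join(tempfile.gettempdir(), "demo_state", "data.pkl")
save_state(path, {"uid": 42, "desks": {"A1", "A2"}})
print(resume_state(path, {"uid": 0, "desks": set()}))  # restored state dict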
cl_tt = hp.anafast(hp_iswrs, hp_iswrs)\n ell = np.arange(len(cl_tt))\n\n np.save(args.outdir+\"ell_nside%d_%s\" % (args.nside, args.label), ell)\n np.save(args.outdir+\"clkk_nside%d_%s\" % (args.nside, args.label), cl_kk/f_sky)\n np.save(args.outdir+\"clkt_nside%d_%s\" % (args.nside, args.label), cl_kt/f_sky)\n np.save(args.outdir+\"cltt_nside%d_%s\" % (args.nside, args.label), cl_tt/f_sky)\n\n\nif __name__ == \"__main__\":\n args = get_args()\n run(args)\n","sub_path":"src/sky_maps/correlations/corr_hspace_healpy.py","file_name":"corr_hspace_healpy.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"302958974","text":"\"\"\" Quantum Inspire SDK\n\nCopyright 2018 QuTech Delft\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom typing import List, Union, Dict, Any\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.result import postprocess, Result\nfrom qiskit.result.models import ExperimentResult\n\nfrom quantuminspire.exceptions import QisKitBackendError\n\n\nclass QIResult(Result): # type: ignore\n \"\"\"\n A result object returned by QIJob:\n qi_backend = QI.get_backend('QX single-node simulator')\n qi_job = qi_backend.retrieve_job(job_id)\n qi_result = qi_job.result()\n \"\"\"\n def __init__(self, backend_name: str, backend_version: str, qobj_id: str, job_id: str, success: bool,\n results: List[ExperimentResult], date: Any = None, status: Any = None, header: Any = None,\n **kwargs: str) -> None:\n \"\"\"\n Construct a new QIResult object. Not normally called directly, use a QIJob to get the QIResult.\n\n Args:\n backend_name: backend name.\n backend_version: backend version, in the form X.Y.Z.\n qobj_id: user-generated Qobj id.\n job_id: unique execution id from the backend.\n success: True if complete input qobj executed correctly. (Implies each experiment success)\n results: corresponding results for array of experiments of the input qobj\n date: date to be added to the result object\n status: status to be added to the result object\n header: header to be added to the result object\n kwargs: other parameters (added as metadata to the result object)\n \"\"\"\n super().__init__(backend_name, backend_version, qobj_id, job_id, success,\n results, date, status, header, **kwargs)\n\n def get_probabilities(self, experiment: Any = None) -> Union[Dict[str, float], List[Dict[str, float]]]:\n\n \"\"\"Get the probability data of an experiment. The probability data is added as a separate result by\n Quantum Inspire backend. 
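corr_hspace_healpy.py above corrects each measured spectrum for partial sky coverage by dividing the pseudo-Cl by f_sky. A small synthetic-map sketch of that correction is below; it runs at low nside for speed and zeroes unobserved pixels explicitly before anafast, where the script marks them with hp.UNSEEN instead.

import healpy as hp
import numpy as np

nside = 32
npix = hp.nside2npix(nside)
sky = np.random.default_rng(0).normal(size=npix)

# Keep only the northern hemisphere (colatitude theta <= pi/2)
theta, _ = hp.pix2ang(nside, np.arange(npix))
mask = theta <= np.pi / 2
sky = np.where(mask, sky, 0.0)

f_sky = mask.mean()             # observed fraction of the sky (~0.5 here)
cl = hp.anafast(sky) / f_sky    # first-order f_sky correction, as in the script
print(f_sky, cl[:5])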
Based on Qiskit get_count method from Result.\n\n Args:\n experiment (str or QuantumCircuit or Schedule or int or None): the index of the\n experiment, as specified by ``get_data()``.\n\n Returns:\n One or more dictionaries which holds the states and probabilities for each result.\n\n Raises:\n QisKitBackendError: raised if there are no probabilities in a result for the experiment(s).\n \"\"\"\n if experiment is None:\n exp_keys = range(len(self.results))\n else:\n exp_keys = [experiment] # type: ignore\n\n dict_list: List[Dict[str, float]] = []\n for key in exp_keys:\n exp = self._get_experiment(key)\n try:\n header = exp.header.to_dict()\n except (AttributeError, QiskitError): # header is not available\n header = None\n\n probabilities = getattr(self._get_experiment(key).data, 'probabilities', None)\n if probabilities is not None:\n dict_list.append(postprocess.format_counts(self._get_experiment(key).data.probabilities, header))\n else:\n raise QisKitBackendError('No probabilities for experiment \"{0}\"'.format(key))\n\n # Return first item of dict_list if size is 1\n if len(dict_list) == 1:\n return dict_list[0]\n else:\n return dict_list\n","sub_path":"src/quantuminspire/qiskit/qi_result.py","file_name":"qi_result.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"279220580","text":"# -*- coding:utf-8 -*-\r\n# project config\r\nimport os\r\n\r\nfrom project.settings import UPLOAD_COMOFINDER_ROOT\r\n\r\n\r\n# project data directory name\r\nDATA_NAME = 'data'\r\n# project output driectory name\r\nOUTPUT_NAME = 'output'\r\n\r\nRESULT_NAME = 'result'\r\n# project log file driectory name\r\nLOG_NAME = 'log'\r\n# project version\r\nVERSION = \"1.0.1v\"\r\n# project usage help language: en, zh\r\nLN = \"en\"\r\n# project debug trigger\r\nDEBUG = False\r\n# motif class types: 0-miRNA, 1-TF, 2-gene\r\nMOTIFTYPE = 3\r\n# Maxrium Trials\r\nMAXTRIALS = 100\r\n\r\n# project current directory root path\r\nPROJECT_DIR = UPLOAD_COMOFINDER_ROOT\r\n# project data directory path\r\nFILE_DIR = os.path.join(PROJECT_DIR, DATA_NAME)\r\n# project running output directory path\r\nOUTPUT_DIR = os.path.join(PROJECT_DIR, OUTPUT_NAME)\r\n\r\nRESULT_DIR = os.path.join(PROJECT_DIR, RESULT_NAME)\r\n# project logger file directory path\r\nLOG_DIR = os.path.join(PROJECT_DIR, LOG_NAME)\r\n","sub_path":"comofinder/Conf.py","file_name":"Conf.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"603854588","text":"import unittest\nimport sqlite3\nimport sys\nsys.path.append('../')\nimport classes\nimport Sprint04\n\n\nclass TestUserStory22(unittest.TestCase):\n\n def test01(self):\n individual1 = classes.Individual('I2', 'Marge /Bouvier/ ', 'F', '1980-03-19', 'F', None, None, 'F1')\n individual2 = classes.Individual('I2', 'Buck /Simpson/ ', 'F', '1980-03-19', 'F', None, None, 'F1')\n\n Sprint04.unique_ids(individual1.id)\n self.assertEqual(Sprint04.unique_ids(individual2.id), individual2.id)\n\nclass TestUserStory23(unittest.TestCase):\n\n def test01(self):\n individual1 = classes.Individual('I1', 'Marge /Bouvier/ ', 'F', '1980-03-19', 'F', None, None, 'F1')\n individual2 = classes.Individual('I2', 'Marge /Bouvier/ ', 'F', '1980-03-19', 'F', None, None, 'F4')\n\n Sprint04.unique_name_and_birthday(individual1)\n self.assertEqual(Sprint04.unique_name_and_birthday(individual2), individual2.id)\n\nclass 
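Tying the QIResult docstring above together: the retrieval flow plus the class's extra get_probabilities accessor reads as the sketch below. It assumes an already-authenticated Quantum Inspire session, and job_id is a placeholder for the id of a finished job.

from quantuminspire.qiskit import QI

# assumes QI authentication has already been set up for this session
qi_backend = QI.get_backend('QX single-node simulator')
qi_job = qi_backend.retrieve_job(job_id)  # job_id: placeholder, a finished job's id
qi_result = qi_job.result()

# One dict per experiment (or a single dict when there is only one),
# mapping measured states to probabilities, e.g. {'00': 0.5, '11': 0.5}
print(qi_result.get_probabilities())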
TestUserStory24(unittest.TestCase):\n\n def test01(self):\n wife = classes.Individual('I2', 'Marge Simpson', 'F', '1900-01-01', 'F', '1920-01-01', None, 'F1')\n children = classes.Individual('I1', 'Homer Simpson', 'M', '2022-01-01', 'T', None, None, 'F1')\n husband = classes.Individual('I3', 'Abraham Simpson', 'M', '2020-01-01', 'T', '2021-01-1', 'I1', 'F1')\n\n #passing in objects as the husband and wife instead of their IDs\n fam1 = classes.Family('F1', '2010-09-08', None, husband, wife, [children])\n fam2 = classes.Family('F2', '2010-09-08', None, husband, wife, [children])\n\n\n self.assertIsNone(Sprint04.unique_family(fam1))\n self.assertEqual(Sprint04.unique_family(fam2), fam2.id)\n\n\n\nclass TestUserStory25(unittest.TestCase):\n\n def test01(self):\n wife = classes.Individual('I2', 'Marge Simpson', 'F', '1900-01-01', 'F', '1920-01-01', None, 'F1')\n child = classes.Individual('I1', 'Bart Simpson', 'M', '2022-01-01', 'T', None, None, 'F1')\n husband = classes.Individual('I3', 'Abraham Simpson', 'M', '2020-01-01', 'T', '2021-01-1', 'I1', 'F1')\n\n #passing in objects as the husband and wife instead of their IDs\n fam1 = classes.Family('F1', '2010-09-08', None, husband, wife, [child, child])\n\n\n self.assertEqual(Sprint04.unique_names_in_family(fam1), fam1.id)\n\nclass TestUserStory14(unittest.TestCase):\n\n def test01(self):\n #US 14 - No more than five siblings should be born at the same time\n wife = classes.Individual('I2', 'Marge Simpson', 'F', '1900-01-01', 'F', '1920-01-01', 'I1', 'F1')\n child = classes.Individual('I1', 'Bart Simpson', 'M', '2022-01-01', 'T', None, None, None)\n husband = classes.Individual('I3', 'Abraham Simpson', 'M', '2020-01-01', 'T', '2021-01-1', 'I1', 'F1')\n\n # passing in objects as the husband and wife instead of their IDs\n fam1 = classes.Family('F1', '2010-09-08', None, husband, wife, [child, child, child, child, child])\n\n self.assertEqual(Sprint04.multiple_births_in_family(fam1), fam1.id)\n\nclass TestUserStory21(unittest.TestCase):\n\n def test01(self):\n #US 21 - Husband in family should be male and wife in family should be female\n wife = classes.Individual('I2', 'Marge Simpson', 'M', '1900-01-01', 'F', '1920-01-01', 'I1', 'F1')\n child = classes.Individual('I1', 'Bart Simpson', 'M', '2022-01-01', 'T', None, None, None)\n husband = classes.Individual('I3', 'Abraham Simpson', 'M', '2020-01-01', 'T', '2021-01-1', 'I1', 'F1')\n\n # passing in objects as the husband and wife instead of their IDs\n fam1 = classes.Family('F1', '2010-09-08', None, husband, wife, [child, child, child, child, child])\n\n self.assertEqual(Sprint04.correct_gender_role(fam1), fam1.id)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/testSprint04.py","file_name":"testSprint04.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"622477912","text":"import re\n\nfrom conans.test.assets.genconanfile import GenConanfile\nfrom conans.test.utils.tools import TestClient, NO_SETTINGS_PACKAGE_ID\n\n\ndef test_private_skip():\n # app -> pkg -(private)-> dep\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"create . --name=dep --version=1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"dep/1.0\", visible=False)})\n client.run(\"create . 
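The tests above call Sprint04.unique_ids without its implementation being shown; from the call pattern (a first call registers the id, a repeated call returns it as a duplicate), one plausible shape is sketched below. This is a hypothetical reconstruction for illustration, not the project's actual code -- the module-level _seen_ids set is an assumption.

_seen_ids = set()

def unique_ids(individual_id):
    """Return the id if it was seen before (a duplicate); otherwise record it."""
    if individual_id in _seen_ids:
        return individual_id
    _seen_ids.add(individual_id)
    return None

assert unique_ids("I2") is None     # first sighting registers the id
assert unique_ids("I2") == "I2"     # the repeat is flagged, as test01 expects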
--name=pkg --version=1.0\")\n client.run(\"remove dep/1.0:* -c\") # Dep binary is removed not used at all\n\n client.save({\"conanfile.py\": GenConanfile().with_requires(\"pkg/1.0\")})\n client.run(\"create . --name=app --version=1.0 -v\")\n client.assert_listed_binary({\"dep/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n\n\ndef test_private_no_skip():\n # app -> pkg -(private)-> dep\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"create . --name=dep --version=1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"dep/1.0\", visible=False)})\n client.run(\"create . --name=pkg --version=1.0\")\n\n # But if we want to build pkg, no skip\n client.run(\"create . --name=app --version=1.0 --build=app/* --build=pkg/*\")\n client.assert_listed_binary({\"dep/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Cache\")})\n\n client.run(\"remove dep/1.0:* -c\") # Dep binary is removed not used at all\n client.run(\"create . --name=app --version=1.0 --build=app/* --build=pkg/*\", assert_error=True)\n client.assert_listed_binary({\"dep/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Missing\")})\n\n\ndef test_consumer_no_skip():\n # app -(private)-> pkg -> dep\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"create . --name=dep --version=1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requires(\"dep/1.0\")})\n client.run(\"create . --name=pkg --version=1.0\")\n package_id = client.created_package_id(\"pkg/1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"pkg/1.0\", visible=False)})\n\n client.run(\"install . \")\n\n client.assert_listed_binary({f\"dep/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Cache\")})\n client.assert_listed_binary({f\"pkg/1.0\": (package_id, \"Cache\")})\n\n\ndef test_shared_link_static_skip():\n # app -> pkg (shared) -> dep (static)\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile().with_shared_option(False)})\n client.run(\"create . --name=dep --version=1.0\")\n package_id = client.created_package_id(\"dep/1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"dep/1.0\").\n with_shared_option(True)})\n client.run(\"create . --name=pkg --version=1.0\")\n client.run(\"remove dep/1.0:* -c\") # Dep binary is removed not used at all\n\n client.save({\"conanfile.py\": GenConanfile().with_requires(\"pkg/1.0\")})\n client.run(\"create . --name=app --version=1.0 -v\")\n client.assert_listed_binary({\"dep/1.0\": (package_id, \"Skip\")})\n\n\ndef test_test_requires():\n # Using a test_requires can be skipped if it is not necessary to build its consumer\n # app -> pkg (static) -(test_requires)-> gtest (static)\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile().with_shared_option(False)})\n client.run(\"create . --name=gtest --version=1.0\")\n package_id = client.created_package_id(\"gtest/1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_test_requires(\"gtest/1.0\").\n with_shared_option(False)})\n client.run(\"create . --name=pkg --version=1.0\")\n client.run(\"remove gtest/1.0:* -c\") # Dep binary is removed not used at all\n\n client.save({\"conanfile.py\": GenConanfile().with_requires(\"pkg/1.0\")})\n # Checking list of skipped binaries\n client.run(\"create . --name=app --version=1.0\")\n assert re.search(r\"Skipped binaries(\\s*)gtest/1.0\", client.out)\n # Showing the complete information about the skipped binary\n client.run(\"create . 
--name=app --version=1.0 -v\")\n client.assert_listed_binary({\"gtest/1.0\": (package_id, \"Skip\")}, test=True)\n\n\ndef test_build_scripts_no_skip():\n c = TestClient()\n c.save({\"scripts/conanfile.py\": GenConanfile(\"script\", \"0.1\").with_package_type(\"build-scripts\"),\n \"app/conanfile.py\": GenConanfile().with_tool_requires(\"script/0.1\")})\n c.run(\"create scripts\")\n c.assert_listed_binary({\"script/0.1\": (\"da39a3ee5e6b4b0d3255bfef95601890afd80709\", \"Build\")},\n build=True)\n c.run(\"install app\")\n c.assert_listed_binary({\"script/0.1\": (\"da39a3ee5e6b4b0d3255bfef95601890afd80709\", \"Cache\")},\n build=True)\n\n\ndef test_list_skip_printing():\n \"\"\" make sure that when a package is required in the graph, it is not marked as SKIP, just\n because some other part of the graph is skipping it. In this case, a tool_require might be\n necessary for some packages building from soures, but not for others\n \"\"\"\n c = TestClient()\n c.save({\"tool/conanfile.py\": GenConanfile(\"tool\", \"0.1\"),\n \"pkga/conanfile.py\": GenConanfile(\"pkga\", \"0.1\").with_tool_requires(\"tool/0.1\"),\n \"pkgb/conanfile.py\": GenConanfile(\"pkgb\", \"0.1\").with_requires(\"pkga/0.1\")\n .with_tool_requires(\"tool/0.1\"),\n \"app/conanfile.py\": GenConanfile().with_requires(\"pkgb/0.1\")})\n c.run(\"create tool\")\n c.run(\"create pkga\")\n c.run(\"create pkgb\")\n c.run(\"remove pkga:* -c\")\n c.run(\"install app --build=missing\")\n c.assert_listed_binary({\"tool/0.1\": (\"da39a3ee5e6b4b0d3255bfef95601890afd80709\", \"Cache\")},\n build=True)\n\n\ndef test_conf_skip():\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile()})\n client.run(\"create . --name=maths --version=1.0\")\n client.run(\"create . --name=ai --version=1.0\")\n\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"maths/1.0\", visible=False)})\n client.run(\"create . --name=liba --version=1.0\")\n client.save({\"conanfile.py\": GenConanfile().with_requirement(\"ai/1.0\", visible=False)})\n client.run(\"create . --name=libb --version=1.0\")\n\n client.save({\"conanfile.py\": GenConanfile().with_requires(\"liba/1.0\", \"libb/1.0\")})\n client.run(\"create . --name=app --version=0.0 -v\")\n client.assert_listed_binary({\"maths/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n client.assert_listed_binary({\"ai/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n\n client.run(\"create . --name=app --version=1.0 -v -c *:tools.graph:skip_binaries=False\")\n client.assert_listed_binary({\"maths/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Cache\")})\n client.assert_listed_binary({\"ai/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Cache\")})\n\n client.run(\"create . --name=app --version=2.0 -v -c maths/*:tools.graph:skip_binaries=False\")\n client.assert_listed_binary({\"maths/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Cache\")})\n client.assert_listed_binary({\"ai/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n\n client.run(\"create . 
--name=app --version=3.0 -v -c *:tools.graph:skip_binaries=True\")\n client.assert_listed_binary({\"maths/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n client.assert_listed_binary({\"ai/1.0\": (NO_SETTINGS_PACKAGE_ID, \"Skip\")})\n","sub_path":"conans/test/integration/graph/test_skip_binaries.py","file_name":"test_skip_binaries.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"150377828","text":"# -*- coding: utf-8 -*-\n\nfrom flask import request, render_template\nfrom app import app\n# from app.users import *\n\n@app.route('/', methods = ['GET', 'POST'])\n@app.route('/index', methods = ['GET', 'POST'])\ndef index():\n if request.method == \"GET\":\n return render_template(\"index.html\", title = u\"Flask + Skeleton Template\")\n elif request.method == \"POST\":\n resp = \"\"\n for item in request.form:\n response = response + request.form[item] + \"\\n\"\n return response","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"196130518","text":"import tensorflow as tf\n\nclass DP_CNN:\n\n def __init__(self,batch_size=128,total_sequence_length=400):\n self.hpcnn_filter_size=3\n self.hpcnn_number_filters=64\n self.stride_length=1\n self.vocab_size=10000\n self.embed_size=64\n self.initializer=tf.random_normal_initializer(stddev=0.1)\n self.num_repeat=4\n self.is_training_flag=True\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],initializer=self.initializer) # [vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)\n\n self.input_x = tf.placeholder(tf.int32, [batch_size, total_sequence_length], name=\"input_x\")\n #embedding_documents = tf.nn.embedding_lookup(self.Embedding,input_x) # [None,num_sentences,sentence_length,embed_size]\n #self.dpcnn_two_layers_conv(embedding_documents)\n result=self.inference_deep_pyramid_cnn()\n print(\"result:\",result)\n\n def inference_deep_pyramid_cnn(self):\n \"\"\"\n deep pyramid cnn for text categorization\n region embedding-->two layers of convs-->repeat of building block(Pooling,/2-->conv-->conv)--->pooling\n for more check: http://www.aclweb.org/anthology/P/P17/P17-1052.pdf\n :return: logits_list\n \"\"\"\n #1.region embedding\n embedding_documents=self.region_embedding() #shape:[batch_size,total_sequence_length,embedding_size]\n\n #2.two layers of convs\n embedding_documents = tf.expand_dims(embedding_documents ,-1) # [batch_size,total_sequence_length,embed_size,1). expand dimension so meet input requirement of 2d-conv\n conv=self.dpcnn_two_layers_conv(embedding_documents,double_num_filters=False) #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n #skip connection: add and activation\n conv=conv+embedding_documents #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n b = tf.get_variable(\"b-inference\", [self.hpcnn_number_filters])\n print(\"conv:\",conv,\";b:\",b)\n conv = tf.nn.relu(tf.nn.bias_add(conv, b),\"relu-inference\") #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n #3. repeat of building blocks\n for i in range(self.num_repeat):\n conv=self.dpcnn_pooling_two_conv(conv,i) #shape:[batch_size,total_sequence_length/np.power(2,i),hpcnn_number_filters]\n\n #4. 
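test_conf_skip above is the canonical exercise of the tools.graph:skip_binaries conf. Stripped down to the toggle itself, and reusing the suite's own TestClient/GenConanfile helpers, the three cases look like this sketch (package names are illustrative):

from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient

c = TestClient()
c.save({"conanfile.py": GenConanfile()})
c.run("create . --name=dep --version=1.0")
c.save({"conanfile.py": GenConanfile().with_requirement("dep/1.0", visible=False)})
c.run("create . --name=pkg --version=1.0")
c.save({"conanfile.py": GenConanfile().with_requires("pkg/1.0")})

c.run("install . -v")                                           # default: dep/1.0 binary is skipped
c.run("install . -v -c *:tools.graph:skip_binaries=False")      # disable skipping everywhere
c.run("install . -v -c dep/*:tools.graph:skip_binaries=False")  # disable it for dep only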
max pooling\n seq_length1=conv.get_shape().as_list()[1] #sequence length after multiple layers of conv and pooling\n seq_length2=conv.get_shape().as_list()[2] #sequence length after multiple layers of conv and pooling\n print(\"before.final.pooling:\",conv)\n pooling=tf.nn.max_pool(conv, ksize=[1,seq_length1,seq_length2,1], strides=[1,1,1,1], padding='VALID',name=\"pool\") #[batch_size,hpcnn_number_filters]\n pooling=tf.squeeze(pooling)\n print(\"pooling.final:\",pooling)\n\n #5. classifier\n\n return pooling\n\n def dpcnn_pooling_two_conv(self, conv, layer_index):\n \"\"\"\n pooling followed with two layers of conv, used by deep pyramid cnn.\n pooling-->conv-->conv-->skip connection\n conv:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n :return:[batch_size,total_sequence_length/2,embed_size/2,hpcnn_number_filters]\n \"\"\"\n with tf.variable_scope(\"pooling_two_conv_\" + str(layer_index)):\n # 1. pooling:max-pooling with size 3 and stride 2==>reduce shape to half\n pooling = tf.nn.max_pool(conv, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME',name=\"pool\") # [batch_size,total_sequence_length/2,embed_size/2,hpcnn_number_filters]\n print(layer_index,\"dpcnn_pooling_two_conv.pooling:\", pooling)\n\n # 2. two layer of conv\n conv = self.dpcnn_two_layers_conv(pooling)\n #print(\"dpcnn_pooling_two_conv.layer_index\", layer_index, \"conv:\", conv)\n\n # 3. skip connection and activation\n conv = conv + pooling\n b = tf.get_variable(\"b-poolcnn%s\" % self.hpcnn_number_filters, [self.hpcnn_number_filters])\n conv = tf.nn.relu(tf.nn.bias_add(conv, b),\"relu-poolcnn\") # shape:[batch_size,total_sequence_length/2,embed_size/2,hpcnn_number_filters]\n return conv\n\n def dpcnn_two_layers_conv(self, inputs,double_num_filters=True):\n \"\"\"\n two layers of conv\n inputs:[batch_size,total_sequence_length,embed_size,dimension]. 
e.g.(128, 400, 64,1)-->[128,200,32,250]\n :return:[batch_size,total_sequence_length,embed_size,num_filters]\n \"\"\"\n # conv1:\n # filter1's first three dimension apply to [total_sequence_length, embed_size, 1] of embedding_documents\n print(\"dpcnn_two_layers_conv.inputs:\", inputs) # (128, 400, 64, 250)\n channel = inputs.get_shape().as_list()[-1]\n if double_num_filters:\n hpcnn_number_filters =channel * 2\n else:\n hpcnn_number_filters=self.hpcnn_number_filters\n filter1 = tf.get_variable(\"filter1-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, channel, hpcnn_number_filters],initializer=self.initializer)\n conv1 = tf.nn.conv2d(inputs, filter1, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n conv1 = tf.contrib.layers.batch_norm(conv1, is_training=self.is_training_flag, scope='cnn1')\n\n print(\"dpcnn_two_layers_conv.conv1:\", conv1) # (128, 400, 64, 250)\n b1 = tf.get_variable(\"b-cnn-%s\" % hpcnn_number_filters, [hpcnn_number_filters])\n conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b1),\"relu1\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n # conv2\n # filter2's first three dimension apply to:[total_sequence_length,embed_size,hpcnn_number_filters] of conv1\n filter2 = tf.get_variable(\"filter2-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, hpcnn_number_filters, hpcnn_number_filters],initializer=self.initializer)\n conv2 = tf.nn.conv2d(conv1, filter2, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv2\") # shape:[batch_size,stotal_sequence_length,embed_size,hpcnn_number_filters]\n conv2 = tf.contrib.layers.batch_norm(conv2, is_training=self.is_training_flag, scope='cnn2')\n\n print(\"dpcnn_two_layers_conv.conv2:\", conv2) # (128, 400, 64, 250)\n return conv2 # shape:[batch_size,total_sequence_length,embed_size,num_filters]\n\n def region_embedding(self):\n \"\"\"\n region embedding for hp_cnn: embedding of a region of text covering one or more words.\n check: Enhancing region embedding with unsuper- vised embeddings in paper: deep pyramid cnn for text categorization\n instead of follow the way in the paper, here we just use pretrained word embedding\n :return:#[batch_size,sequence_length,embed_size]\n \"\"\"\n embedded_document = tf.nn.embedding_lookup(self.Embedding, self.input_x) #[batch_size,sequence_length,embed_size]\n return embedded_document #[batch_size,sequence_length,embed_size]\n\n\nx=DP_CNN()\nprint(x)","sub_path":"log_and_test/deep_pyramid_cnn_test.py","file_name":"deep_pyramid_cnn_test.py","file_ext":"py","file_size_in_byte":7193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"356967194","text":"# Hangman Game (Jogo da Forca)\n# Programação Orientada a Objetos\n\n# Import\nimport random\n\ntabuleiro = ['''\n+---+\n| |\n |\n |\n |\n |\n=========\n\\t* Você pode errar até 6 vezes''', '''\n+---+\n| |\nO |\n |\n |\n |\n=========\n\\t* Você pode errar até 5 vezes''', '''\n+---+\n| |\nO |\n| |\n |\n |\n=========\n\\t* Você pode errar até 4 vezes''', '''\n +---+\n | |\n O |\n/| |\n |\n |\n=========\n\\t* Você pode errar até 3 vezes''', '''\n +---+\n | |\n O |\n/|\\ |\n |\n |\n=========\n\\t* Você pode errar até 2 vezes''', '''\n +---+\n | |\n O |\n/|\\ |\n/ |\n |\n=========\n\\t* Você pode errar mais 1 vez''', '''\n +---+\n | |\n O |\n/|\\ |\n/ \\ |\n |\n=========''']\n\n\n# Classe\nclass Forca:\n\n # Método Construtor\n def __init__(self, word):\n\n 
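The pyramid in the deep_pyramid_cnn_test.py record above halves both pooled axes per repeated block (max_pool with ksize 3, stride 2, SAME padding, so each axis becomes ceil(n/2)). A tiny helper tracing the shapes for the constructor defaults -- sequence length 400, embed size 64, 64 filters, 4 repeats -- is below.

import math

def dpcnn_shape_trace(seq_len=400, embed=64, filters=64, num_repeat=4):
    """Return the [batch, seq, embed, filters] shape after each pooling block."""
    shapes = [("after region embedding + two convs", seq_len, embed, filters)]
    for i in range(num_repeat):
        seq_len = math.ceil(seq_len / 2)  # SAME padding, stride 2 -> ceil(n / 2)
        embed = math.ceil(embed / 2)
        shapes.append(("after block %d" % i, seq_len, embed, filters))
    return shapes

for label, s, e, f in dpcnn_shape_trace():
    print("%-34s [batch, %3d, %2d, %d]" % (label, s, e, f))
# ends at [batch, 25, 4, 64]; the final VALID max_pool collapses 25 x 4 to one vector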
self.palavra = word.lower()\n self.letrasDigitadas = []\n self.letrasCorretas = []\n self.letrasErradas = []\n self.listaOculta = []\n\n for k in range(len(self.palavra)):\n self.listaOculta.append(\" * \")\n\n\n # Método para checar o status do game e imprimir o board na tela\n def imprimeTabuleiro(self):\n print(tabuleiro[len(self.letrasErradas)])\n\n # Método para adivinhar a letra\n def verificaLetra(self, letter):\n\n letra = letter.lower()\n\n if letra not in self.letrasDigitadas:\n self.letrasDigitadas.append(letra)\n\n if letra in self.palavra:\n print(\"\\t* Possui a letra: \", letra, \"\\n\")\n self.letrasCorretas.append(letra)\n\n else:\n print(\"\\t* Não possui a letra: \", letra, \"\\n\")\n self.letrasErradas.append(letra)\n\n else:\n print(\"\\nErro: Digite alguma letra diferente destas: \", self.letrasDigitadas, \"\\n\")\n\n print(\"\\t* Letras Digitadas: \", self.letrasDigitadas)\n print(\"\\t* Letras Corretas: \", self.letrasCorretas)\n print(\"\\t* Letras Erradas: \", self.letrasErradas)\n\n\n # Método para não mostrar a letra no board\n def palavraOculta(self, letter):\n\n letra = letter.lower()\n\n if letra in self.palavra:\n for ind in range(len(self.palavra)):\n if self.palavra[ind] == letra:\n self.listaOculta[ind] = letra\n\n print(\"\\n\\t* Palavra: \", self.listaOculta)\n\n\n # Método para verificar se o jogo terminou\n def continuaJogando(self):\n if len(self.letrasErradas) == 6:\n False\n else:\n return True\n\n # Método para verificar se o jogador venceu\n def ganhou(self):\n if \" * \" in self.listaOculta:\n return False\n else:\n return True\n\n def acabou(self):\n if self.ganhou() or not self.continuaJogando():\n return True\n\n# ------------------------------------------------------------------------------------------\n\n# Função para ler uma palavra de forma aleatória do banco de palavras\ndef palavraAleatoria():\n\n with open(\"palavras.txt\", \"rt\", encoding='utf8') as arquivo:\n bank = arquivo.readlines()\n return bank[random.randint(0, len(bank))].strip()\n\n\n# ------------------------------------------------------------------------------------------\n\n# Função Main - Execução do Programa\ndef main():\n\n # Objeto\n jogo = Forca(palavraAleatoria())\n\n # Enquanto o jogo não tiver terminado, print do status, solicita uma letra e faz a leitura do caracter\n while not jogo.acabou():\n\n print(\"\\n-------------------------------------------\")\n print(\"\\t---- Jogo da Forca ----\")\n print(\"\\tA palavra é: \", jogo.palavra)\n print(\"-------------------------------------------\")\n\n entrada = input(\"\\n\\t* Digite a letra: \")\n\n jogo.verificaLetra(entrada[0])\n jogo.imprimeTabuleiro()\n jogo.palavraOculta(entrada[0])\n\n # De acordo com o status, imprime mensagem na tela para o usuário\n if jogo.ganhou():\n print('\\n\\t---- Parabéns! Você venceu! ----')\n else:\n print('\\n\\t---- Game over! 
----')\n print('\\t---- A palavra era: ' + jogo.palavra, \" ----\")\n\n\n# Executa o programa\nif __name__ == \"__main__\":\n main()\n","sub_path":"forca_v1.py","file_name":"forca_v1.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"504891788","text":"import math as m\n\nx = m.e\n\ny_natural = m.log(x)\ny_base10 = m.log(x, 10)\ny_log10 = m.log10(x)\n\nprint(x, y_natural, y_base10, y_log10)\n\ndeg = 60\n\nrads = deg * m.pi / 180\nrads_fromfunc = m.radians(deg)\n\nprint(deg, m.pi, rads, rads_fromfunc)\n\ndeg = 180\n\ncos_deg = m.cos(deg)\ncos_rad = m.cos(m.radians(deg))\n\nprint(deg, cos_deg, cos_rad)\n\n# factorial\nprint(m.factorial(5))","sub_path":"mathtest.py","file_name":"mathtest.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"309668315","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nfrom functools import partial\r\nimport multiprocessing \r\nfrom multiprocessing import Pool\r\nimport time\r\nimport re\r\n\r\ncount=0\r\n\r\ndef fetch(url):\r\n\tresp=requests.get(url).text\r\n\treturn resp\r\n\r\ndef getHref(url):\r\n\threfL=[]\r\n\tsoup= BeautifulSoup(fetch(url), 'html.parser')\r\n\ttitleL=soup.find('ul','list imm').find_all('a','tit')\r\n\tfor item in titleL:\r\n\t\threfL.append(item.get('href'))\r\n\r\n\treturn hrefL\r\n\r\ndef getContent(url,temp):\r\n\tsoup=BeautifulSoup(fetch(url), 'html.parser')\r\n\tcontents=soup.find('div','text').find_all('p')\r\n\tfileT=open(f'src/src{temp+1}.txt','a',encoding='utf-8')\r\n\tfileT.write(soup.find('h1').text.strip()+'\\n')\r\n\t#fileT.write('\\n') 如需簡易排版\r\n\t\r\n\tfor content in contents:\r\n\t\tif('不用抽 不用搶 現在用APP看新聞 保證天天中獎' in content.text or '點我下載APP' in content.text or '按我看活動辦法' in content.text):\r\n\t\t\tcontinue\r\n\t\tfileT.write(content.text)\r\n\t\tfileT.write('\\n')\r\n\t\t#fileT.write('\\n') 如需簡易排版\r\n\r\n\r\n\r\ndef mkLink(purl):\r\n\treturn 'https:'+purl\r\n\r\ndef mulTa(hrefs,count,temp):\r\n\tgetContent(mkLink(hrefs[temp]),count+temp)\r\n\t\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\tif(not os.path.exists('src')):\t\r\n\t\tos.mkdir('src')\r\n\t\r\n\tcount=0\r\n\tpg=5 #下載頁數\r\n\tcateg='society' #'politics'政治 'society'社會 'life'生活 'world'國際 'local'地方 'people'人物 'novelty'蒐奇\r\n\t\r\n\tfor i in range(1,pg+1): #多線檔案製造\r\n\t\turl=f'https://news.ltn.com.tw/list/breakingnews/{categ}/{i}'\r\n\t\threfs=getHref(url)\r\n\t\t\r\n\t\tpool = multiprocessing.Pool()\r\n\t\tpool.map(partial(mulTa,hrefs,count), range(0,len(hrefs)))\r\n\t\tpool.close()\r\n\t\tcount+=len(hrefs)\r\n\t\tprint(count)\r\n\r\n\tif(not os.path.exists('src2')):\t\r\n\t\t\tos.mkdir('src2')\r\n\t\r\n\tfileAll=open('src2/all.txt','a',encoding='utf-8')\r\n\t\r\n\tfor i in range(1, count+1):\r\n\t\tfileT=open(f'src/src{i}.txt','r',encoding='utf-8')\r\n\t\tfileAll.write(fileT.read())\r\n\r\n#src存放為單篇 src2為多篇合一","sub_path":"ltnNewsCr.py","file_name":"ltnNewsCr.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"183562076","text":"GLOBAL_X = 80\r\nGLOBAL_Y = GLOBAL_X\r\nTILE_SIZE = 10\r\n\r\nTIME_RANGE = 180\r\nHUMANS = int(GLOBAL_X * GLOBAL_X * .9)\r\nHUMAN_MOVEMENT = int(HUMANS * .2)\r\n\r\n\r\n# Cure\r\nCHANCE_CURE = .2\r\nTIME_UNTIL_CURED = int(TIME_RANGE * .1)\r\n\r\n# Death\r\nCHANCE_DEATH = .03\r\nTIME_UNTIL_DEATH = int(TIME_RANGE * 
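The ltnNewsCr.py record above fans page downloads out with multiprocessing.Pool, using functools.partial to pin the shared arguments while the pool maps over an index range. The same pattern in isolation, with a trivial worker standing in (as an assumption) for the network fetch, is sketched below.

from functools import partial
from multiprocessing import Pool

def worker(shared_list, offset, index):
    """Stand-in for mulTa: combines the pinned args with the mapped index."""
    return "%s -> item %d" % (shared_list[index], offset + index)

if __name__ == "__main__":
    hrefs = ["/news/1", "/news/2", "/news/3"]
    with Pool() as pool:
        # partial fixes (shared_list, offset); map supplies the final index arg
        results = pool.map(partial(worker, hrefs, 100), range(len(hrefs)))
    print(results)  # ['/news/1 -> item 100', '/news/2 -> item 101', ...]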
.033)\r\n","sub_path":"utils/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"18225452","text":"# Copyright (c) OpenMMLab. All rights reserved.\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer, collate, scatter\nfrom mmdet.datasets import replace_ImageToTensor\nfrom torch import nn\nfrom torch.utils.data import Dataset\n\nfrom mmdeploy.codebase.base import BaseTask\nfrom mmdeploy.utils import Task, get_input_shape\nfrom .mmocr import MMOCR_TASK\n\n\ndef process_model_config(model_cfg: mmcv.Config,\n                         imgs: Union[Sequence[str], Sequence[np.ndarray]],\n                         input_shape: Optional[Sequence[int]] = None):\n    \"\"\"Process the model config.\n\n    Args:\n        model_cfg (mmcv.Config): The model config.\n        imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted\n            data types are List[str], List[np.ndarray].\n        input_shape (list[int]): A list of two integers in (width, height)\n            format specifying input shape. Default: None.\n\n    Returns:\n        mmcv.Config: the model config after processing.\n    \"\"\"\n    if model_cfg.data.test['type'] == 'ConcatDataset':\n        model_cfg.data.test.pipeline = \\\n            model_cfg.data.test['datasets'][0].pipeline\n\n    is_ndarray = isinstance(imgs[0], np.ndarray)\n\n    if is_ndarray:\n        model_cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray'\n\n    test_pipeline = model_cfg.data.test.pipeline\n    test_pipeline = replace_ImageToTensor(test_pipeline)\n    # for static exporting\n    if input_shape is not None:\n        resize = {\n            'height': input_shape[1],\n            'min_width': input_shape[0],\n            'max_width': input_shape[0],\n            'keep_aspect_ratio': False\n        }\n        if 'transforms' in test_pipeline[1]:\n            if test_pipeline[1].transforms[0].type == 'ResizeOCR':\n                test_pipeline[1].transforms[0].height = input_shape[1]\n                test_pipeline[1].transforms[0].max_width = input_shape[0]\n            else:\n                raise ValueError(f'Transforms[0] should be ResizeOCR, but got\\\n                    {test_pipeline[1].transforms[0].type}')\n        else:\n            test_pipeline[1].update(resize)\n    model_cfg.data.test.pipeline = test_pipeline\n    return model_cfg\n\n\n@MMOCR_TASK.register_module(Task.TEXT_RECOGNITION.value)\nclass TextRecognition(BaseTask):\n    \"\"\"Text recognition task class.\n\n    Args:\n        model_cfg (mmcv.Config): Original PyTorch model config file.\n        deploy_cfg (mmcv.Config): Loaded deployment config object.\n        device (str): A string representing the device type.\n    \"\"\"\n\n    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,\n                 device: str):\n        super(TextRecognition, self).__init__(model_cfg, deploy_cfg, device)\n\n    def init_backend_model(self,\n                           model_files: Optional[str] = None,\n                           **kwargs) -> torch.nn.Module:\n        \"\"\"Initialize backend model.\n\n        Args:\n            model_files (Sequence[str]): Input model files.\n\n        Returns:\n            nn.Module: An initialized backend model.\n        \"\"\"\n        from .text_recognition_model import build_text_recognition_model\n        model = build_text_recognition_model(\n            model_files, self.model_cfg, self.deploy_cfg, device=self.device)\n        return model.eval()\n\n    def init_pytorch_model(self,\n                           model_checkpoint: Optional[str] = None,\n                           cfg_options: Optional[Dict] = None,\n                           **kwargs) -> torch.nn.Module:\n        \"\"\"Initialize torch model.\n\n        Args:\n            model_checkpoint (str): The checkpoint file of torch model,\n                defaults to `None`.\n            cfg_options (dict): Optional config key-pair parameters.\n\n        Returns:\n            nn.Module: An initialized torch 
model generated by OpenMMLab\n                codebases.\n        \"\"\"\n        from mmocr.apis import init_detector\n        model = init_detector(self.model_cfg, model_checkpoint, self.device,\n                              cfg_options)\n\n        return model.eval()\n\n    def create_input(self,\n                     imgs: Union[str, np.ndarray],\n                     input_shape: Sequence[int] = None) \\\n            -> Tuple[Dict, torch.Tensor]:\n        \"\"\"Create input for text recognizer.\n\n        Args:\n            imgs (str | np.ndarray): Input image(s), accepted data types are\n                `str`, `np.ndarray`.\n            input_shape (list[int]): A list of two integers in (width, height)\n                format specifying input shape. Defaults to `None`.\n\n        Returns:\n            tuple: (data, img), meta information for the input image and input.\n        \"\"\"\n        if isinstance(imgs, (list, tuple)):\n            if not isinstance(imgs[0], (np.ndarray, str)):\n                raise AssertionError('imgs must be strings or numpy arrays')\n\n        elif isinstance(imgs, (np.ndarray, str)):\n            imgs = [imgs]\n        else:\n            raise AssertionError('imgs must be strings or numpy arrays')\n\n        from mmdet.datasets.pipelines import Compose\n        from mmocr.datasets import build_dataset  # noqa: F401\n        cfg = process_model_config(self.model_cfg, imgs, input_shape)\n        test_pipeline = Compose(cfg.data.test.pipeline)\n\n        data_list = []\n        for img in imgs:\n            # prepare data\n            if isinstance(imgs[0], np.ndarray):\n                # directly add img\n                data = dict(img=img)\n            else:\n                # add information into dict\n                data = dict(img_info=dict(filename=img), img_prefix=None)\n\n            # build the data pipeline\n            data = test_pipeline(data)\n            # get tensor from list to stack for batch mode (text recognition)\n            data_list.append(data)\n\n        if isinstance(data_list[0]['img'], list) and len(data_list) > 1:\n            raise Exception('aug test does not support '\n                            f'inference with batch size '\n                            f'{len(data_list)}')\n\n        batch_data = collate(data_list, samples_per_gpu=len(imgs))\n\n        for k, v in batch_data.items():\n            # batch_size > 1\n            if isinstance(v, DataContainer):\n                batch_data[k] = v.data[0]\n\n        if self.device != 'cpu':\n            batch_data = scatter(batch_data, [self.device])[0]\n\n        return batch_data, batch_data['img']\n\n    def visualize(self,\n                  model: nn.Module,\n                  image: Union[str, np.ndarray],\n                  result: list,\n                  output_file: str,\n                  window_name: str = '',\n                  show_result: bool = False):\n        \"\"\"Visualize predictions of a model.\n\n        Args:\n            model (nn.Module): Input model.\n            image (str | np.ndarray): Input image to draw predictions on.\n            result (list): A list of predictions.\n            output_file (str): Output file to save drawn image.\n            window_name (str): The name of visualization window. 
Defaults to\n                an empty string.\n            show_result (bool): Whether to show result in a window, defaults\n                to `False`.\n        \"\"\"\n        show_img = mmcv.imread(image) if isinstance(image, str) else image\n        output_file = None if show_result else output_file\n        model.show_result(\n            show_img,\n            result,\n            out_file=output_file,\n            win_name=window_name,\n            show=show_result)\n\n    @staticmethod\n    def run_inference(model: nn.Module,\n                      model_inputs: Dict[str, torch.Tensor]) -> list:\n        \"\"\"Run inference once for a text recognition model of mmocr.\n\n        Args:\n            model (nn.Module): Input model.\n            model_inputs (dict): A dict containing model inputs tensor and\n                meta info.\n\n        Returns:\n            list: The predictions of model inference.\n        \"\"\"\n        return model(**model_inputs, return_loss=False, rescale=True)\n\n    @staticmethod\n    def get_partition_cfg(partition_type: str) -> Dict:\n        \"\"\"Get a certain partition config.\n\n        Args:\n            partition_type (str): A string specifying partition type.\n\n        Returns:\n            dict: A dictionary of partition config.\n        \"\"\"\n        raise NotImplementedError('Not supported yet.')\n\n    @staticmethod\n    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:\n        \"\"\"Get input tensor from input data.\n\n        Args:\n            input_data (dict): Input data containing meta info and image\n                tensor.\n        Returns:\n            torch.Tensor: An image in `Tensor`.\n        \"\"\"\n        if isinstance(input_data['img'], DataContainer):\n            return input_data['img'].data[0]\n        return input_data['img'][0]\n\n    @staticmethod\n    def evaluate_outputs(model_cfg: mmcv.Config,\n                         outputs: Sequence,\n                         dataset: Dataset,\n                         metrics: Optional[str] = None,\n                         out: Optional[str] = None,\n                         metric_options: Optional[dict] = None,\n                         format_only: bool = False,\n                         log_file: Optional[str] = None):\n        \"\"\"Perform post-processing to predictions of model.\n\n        Args:\n            model_cfg (mmcv.Config): The model config.\n            outputs (list): A list of predictions of model inference.\n            dataset (Dataset): Input dataset to run test.\n            metrics (str): Evaluation metrics, which depends on\n                the codebase and the dataset, e.g., \"acc\" for text\n                recognition, and \"hmean-iou\" for text detection.\n            out (str): Output result file in pickle format, defaults to `None`.\n            metric_options (dict): Custom options for evaluation, will be\n                kwargs for dataset.evaluate() function. Defaults to `None`.\n            format_only (bool): Format the output results without performing\n                evaluation. It is useful when you want to format the result\n                to a specific format and submit it to the test server. 
Defaults\n                to `False`.\n            log_file (str | None): The file to write the evaluation results.\n                Defaults to `None` and the results will only print on stdout.\n        \"\"\"\n        from mmcv.utils import get_logger\n        logger = get_logger('test', log_file=log_file)\n\n        if out:\n            logger.debug(f'writing results to {out}')\n            mmcv.dump(outputs, out)\n        kwargs = {} if metric_options is None else metric_options\n        if format_only:\n            dataset.format_results(outputs, **kwargs)\n        if metrics:\n            eval_kwargs = model_cfg.get('evaluation', {}).copy()\n            # hard-coded way to remove EvalHook args\n            for key in [\n                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',\n                    'rule'\n            ]:\n                eval_kwargs.pop(key, None)\n            eval_kwargs.update(dict(metric=metrics, **kwargs))\n            logger.info(dataset.evaluate(outputs, **eval_kwargs))\n\n    def get_preprocess(self) -> Dict:\n        \"\"\"Get the preprocess information for SDK.\n\n        Return:\n            dict: Composed of the preprocess information.\n        \"\"\"\n        input_shape = get_input_shape(self.deploy_cfg)\n        model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n        preprocess = model_cfg.data.test.pipeline\n        return preprocess\n\n    def get_postprocess(self) -> Dict:\n        \"\"\"Get the postprocess information for SDK.\n\n        Return:\n            dict: Composed of the postprocess information.\n        \"\"\"\n        postprocess = self.model_cfg.label_convertor\n        return postprocess\n\n    def get_model_name(self) -> str:\n        \"\"\"Get the model name.\n\n        Return:\n            str: the name of the model.\n        \"\"\"\n        assert 'type' in self.model_cfg.model, 'model config contains no type'\n        name = self.model_cfg.model.type.lower()\n        return name\n","sub_path":"src/mmdeploy/mmdeploy/codebase/mmocr/deploy/text_recognition.py","file_name":"text_recognition.py","file_ext":"py","file_size_in_byte":12129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"58804039","text":"__author__ = 'mohammad'\n\nfrom AdminManager.dateManager import ShamsiToMiladi, MiladiToShamsi\n\nfrom django import template\n\nimport datetime\n\n\nregister = template.Library()\n\n@register.filter()\ndef convert_to_shamsi(value):\n    miladi_date = {'year':value.year, 'month':value.month, 'day':value.day}\n    shamsi_date = MiladiToShamsi.convert_miladi_to_shamsi(miladi_date)\n    return shamsi_date\n\n\n\n\n@register.filter()\ndef convert_to_miladi(value):\n    shamsi_date = {'year':value.year, 'month':value.month, 'day':value.day}\n    miladi_date = ShamsiToMiladi.convert_shamsi_to_miladi(shamsi_date)\n    return miladi_date\n\n\n\n\n\n","sub_path":"IMDB3/AdminManager/templatetags/DateFilters.py","file_name":"DateFilters.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"414117372","text":"import pygame, sys\n\n\nclass IntroBackground(object):\n    def __init__(self, screen):\n        self.screen = screen\n        self.background = pygame.image.load(\"zombies.png\")\n        self.initAnimation()\n\n    def initAnimation(self):\n        self.background = pygame.transform.scale(self.background, (800, 600))\n        bg_width = self.background.get_rect().right\n        bg_height = self.background.get_rect().bottom\n        self.bg_slice = pygame.Surface((800, 600))\n        self.buttonColor = (0, 0, 0)\n        self.buttonTextColor = (255, 255, 255)\n        self.showtips = False\n\n    \n\n    def startButton(self):\n        # draw the button\n        pygame.draw.ellipse(self.screen, self.buttonColor, (325, 460, 150, 70))\n        # set the start word\n        font = pygame.font.SysFont(\"Arial\", 30, True)\n        text = font.render(\"Start\", True, self.buttonTextColor)\n        
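# note: (360, 475) places the label inside the 150x70 ellipse drawn at (325, 460); the exact offset looks hand-tuned (assumption) rather than computed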
self.screen.blit(text, (360, 475))\n\n    # return true if (x, y) is in the button and change the color of the button\n    def inStartButton(self, x, y):\n        if (325 <= x < 325 + 150 and 460 <= y < 460 + 70):\n            self.buttonColor = (255, 255, 255)\n            self.buttonTextColor = (255, 0, 0)\n            self.showtips = True\n            return True\n        self.buttonColor = (0, 0, 0)\n        self.buttonTextColor = (255, 255, 255)\n        self.showtips = False\n        return False\n\n\n    def title(self):\n        font = pygame.font.SysFont(\"Arial\", 50, True, True)\n        text = font.render(\"Killing Zombies\", True, (255, 0, 0))\n        self.screen.blit(text, (200, 70))\n\n    def tips(self):\n        content = []\n        content += [\"Tips:\"]\n        content += [\"Swipe the blade to kill zombies.\"]\n        content += [\"Press left or right to avoid damage.\"]\n        content += [\"Press \\\"p\\\" if you need a more powerful weapon\"]\n        content += [\"Press \\\"q\\\" to exit the shop\"]\n        for i in range(len(content)):\n            font = pygame.font.SysFont(\"Arial\", 25, True, True)\n            text = font.render(content[i], True, (0, 0, 255))\n            self.screen.blit(text, (200, 200 + i * 50))\n\n\n    def draw(self):\n        self.bg_slice.blit(self.background, (0, 0), (0, 0, 800, 600))\n        self.screen.blit(self.bg_slice, (0, 0))\n        self.startButton()\n        self.title()\n        if (self.showtips):\n            self.tips()\n\n\n    ","sub_path":"IntroBackgroundClass.py","file_name":"IntroBackgroundClass.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"629941370","text":"# mandelbrot.py\r\n# Lab 9\r\n#\r\n# Name: Rob Herley\r\n\r\n# keep this import line...\r\nfrom cs5png import *\r\nimport turtle\r\n\r\n# start your Lab 8 functions here:\r\ndef mult( c, n ):\r\n    \"\"\" mult uses only a loop and addition\r\n        to multiply c by the integer n\r\n    \"\"\"\r\n    result = 0\r\n    for x in range(n):\r\n        result += c\r\n    return result\r\n\r\ndef update(c,n):\r\n    \"\"\" update starts with z=0 and runs z = z**2 + c\r\n        for a total of n times. It returns the final z.\r\n    \"\"\"\r\n    z = 0\r\n    for x in range(n):\r\n        z = z**2 + c\r\n    return z\r\n\r\n\r\ndef inmset(c,n):\r\n    # returns True if z stays bounded (|z| <= 2) through n iterations of z = z**2 + c\r\n    z = 0\r\n    for x in range(n):\r\n        z = z**2 + c\r\n        if abs(z) > 2:\r\n            return False\r\n    return True\r\n\r\ndef scale(pix,pixMax,floatMin,floatMax):\r\n    # map pix in [0, pixMax] linearly onto [floatMin, floatMax]\r\n    return ((pix / pixMax) * (floatMax - floatMin)) + floatMin\r\n\r\ndef mset():\r\n    width=300\r\n    height=200\r\n    turtle.speed(0)\r\n    turtle.tracer(0,0)\r\n    turtle.screensize(width,height)\r\n    turtle.setworldcoordinates(0,0,width,height)\r\n    turtle.setpos(0,0)\r\n    turtle.penup()\r\n    image=PNGImage(width,height)\r\n    for col in range(width):\r\n        for row in range(height):\r\n            turtle.setpos(col,row)\r\n            x = scale(col, width, -2.0, 1.0)\r\n            y = scale(row, height, -1.0, 1.0)\r\n            c = x+y*1j\r\n            if inmset(c,25):\r\n                turtle.dot()\r\n    turtle.update()\r\n    turtle.done()\r\n\r\nprint(mult(3,4))\r\nprint(update(-1,3))\r\nc = 3+4*1j\r\nprint(inmset(c,25))\r\nprint(scale(100,300,-2,1))\r\n\r\nmset()","sub_path":"lab9new.py","file_name":"lab9new.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"4262690","text":"#Windows\n# #! python3\n#Mac\n#! /usr/local/var/lib/pyenv/versions/anaconda3-2.0.1/python3.4\n#Linux\n# #! 
/usr/bin/python3\n\n## readCensusExcel.py - 12.4 Tallies population and number of census tracts for each county\n# Usage:\n# python readCensusExcel.py\n\nimport openpyxl, pprint\nprint('Opening workbook...')\nwb = openpyxl.load_workbook('censuspopdata.xlsx')\nsheet = wb.get_sheet_by_name('Population by Census Tract')\ncounty_data = {}\n\n# Store each county's population and number of tracts in county_data\nprint('Reading rows...')\nfor row in range(2, sheet.max_row + 1):\n\t# Each row in the spreadsheet holds data for one census tract\n\tstate = sheet['B' + str(row)].value\n\tcounty = sheet['C' + str(row)].value\n\tpop = sheet['D' + str(row)].value\n\n\t# Make sure the key for this state exists (setdefault() only sets the dict when the key is missing)\n\tcounty_data.setdefault(state,{})\n\t# Make sure the key for this county in this state exists\n\tcounty_data[state].setdefault(county, {'tracts': 0, 'pop': 0})\n\n\t# Each row represents one census tract, so increment the tract count\n\tcounty_data[state][county]['tracts'] += 1\n\t# Increase the county population by this census tract's population\n\tcounty_data[state][county]['pop'] += int(pop)\n\n# Open a new text file and write the contents of county_data to it\nprint('Writing results...')\nresult_file = open('census2010.py', 'w')\nresult_file.write('all_data = ' + pprint.pformat(county_data))\nresult_file.close()\nprint('Done.')\n\n","sub_path":"ch12/readCensusExcel.py","file_name":"readCensusExcel.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"516682746","text":"\nimport cv2\nimport os\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom joblib import dump\nfrom sklearn.model_selection import GridSearchCV\n\n\ndef load_data(class_a_path, class_b_path):\n\n\tlabels = []\n\tgenerate_arrays = True # Create arrays where we store the dataset\n\tfor img_name in os.listdir(class_a_path):\n\t\tif not img_name.startswith('.'):\n\t\t\t# Read the image\n\t\t\timg = cv2.imread(class_a_path + img_name, 0)\n\t\t\timg = np.reshape(img, (1, img.shape[0], img.shape[1]))\n\t\t\timg_vector = np.reshape(img.ravel(), (1, -1))\n\n\t\t\t# Create arrays where we store the dataset executed only at beginning\n\t\t\tif generate_arrays:\n\t\t\t\timages = img\n\t\t\t\timages_vector = img_vector\n\t\t\t\tgenerate_arrays = False\n\t\t\telse:\n\t\t\t\timages = np.concatenate((images, img), axis=0)\n\t\t\t\timages_vector = np.concatenate((images_vector, img_vector), axis=0)\n\n\t\t\tlabels.append(0)\n\n\tfor img_name in os.listdir(class_b_path):\n\t\tif not img_name.startswith('.'):\n\t\t\t# Read the image\n\t\t\timg = cv2.imread(class_b_path + img_name, 0)\n\t\t\timg = np.reshape(img, (1, img.shape[0], img.shape[1]))\n\t\t\timg_vector = np.reshape(img.ravel(), (1, -1))\n\t\t\timages = np.concatenate((images, img), axis=0)\n\t\t\timages_vector = np.concatenate((images_vector, img_vector), axis=0)\n\n\t\t\tlabels.append(1)\n\n\treturn images, images_vector, labels\n\n\ndef train_svm():\n\t# Load data\n\tclass_names = ['a', 'b']\n\n\timages, images_vector, labels = load_data(class_a_path='images/class_a/', class_b_path='images/class_b/')\n\n\tpca = PCA(n_components=150, svd_solver='randomized', whiten=True, random_state=42)\n\tsvc = SVC(kernel='rbf', class_weight='balanced')\n\tmodel = make_pipeline(pca, svc)\n\n\txtrain, xtest, ytrain, ytest = train_test_split(images_vector, labels, random_state=42)\n\n\tparam_grid = {'svc__C': [1, 5, 10, 50], 'svc__gamma': [0.0001, 0.0005, 0.001, 0.005]}\n\tgrid = GridSearchCV(model, param_grid)\n\n\tprint('Fit the SVM model')\n\tgrid.fit(xtrain, 
ytrain)\n\tprint(grid.best_params_)\n\n\tmodel = grid.best_estimator_\n\n\t# Save the model\n\tdump(model, 'modelSVM.joblib')\n\n\treturn model\n\n\n#yfit = model.predict(xtest)\n\n#\tprint(classification_report(ytest, yfit, target_names=class_names))\n","sub_path":"Labs/Lab5_CognitiveAgents_Part1/Exercise I/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"69289792","text":"\"\"\"\n Pygame base template for opening a window\n \n Sample Python/Pygame Programs\n Simpson College Computer Science\n http://programarcadegames.com/\n http://simpson.edu/computer-science/\n \n Explanation video: http://youtu.be/vRB_983kUMc\n\"\"\"\nimport time\n \nimport pygame\nfrom Classes import COLORS\n\nfrom Classes.Background import *\nfrom Classes.MSquare import *\nfrom Classes.TUnit import *\n\n\ntodoList = \"Test TUnit \"\nprint(todoList)\n\n\npygame.init()\n \n# Set the width and height of the screen [width, height]\nsize = CONSTANTS.WINDOW_SIZE\nscreen = pygame.display.set_mode(size)\n \npygame.display.set_caption(\"My Game\")\n\n#Objects Being Tested\ntUnit1 = TUnit()\ntUnit1.setPosition([1,2])\n\ntUnit2 = TUnit()\ntUnit2.setPosition([20, 2])\ntUnit2.setP2Controls()\n\n#Background\nbackground = Background(size)\n \n \n# Loop until the user clicks the close button.\ndone = False\n \n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\nt0 = time.time()\nfall_time = 1.0\n\nfont = pygame.font.Font(\"C:/Windows/Fonts/BRUSHSCI.TTF\", 25)\nscore = 100\ntext_position = [20,20]\n \n# -------- Main Program Loop -----------\nwhile not done:\n\t# --- Main event loop\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\tdone = True\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tprint(\"mouse click\")\n\t\t# User pressed down on a key\n\t\telif event.type == pygame.KEYDOWN:\n\t\t\t#\n\t\t\tprint(event)\n\t\t\ttUnit1.processInput(event)\n\t\t\ttUnit2.processInput(event)\n\n \n # --- Game logic should go here\n\tif time.time() - t0 > fall_time:\n\t\tt0 = time.time()\n\t\t\n\t\ttUnit1.tick()\n\t\ttUnit2.tick()\n\n\t\t\n\t\t\n # --- Screen-clearing code goes here\n \n # Here, we clear the screen to white. 
Don't put other drawing commands\n # above this, or they will be erased with this command.\n \n # If you want a background image, replace this clear with blit'ing the\n # background image.\n\tscreen.fill(COLORS.BLACK)\n \n # --- Drawing code should go here\n\tbackground.display(screen)\n\ttUnit1.display(screen)\n\ttUnit2.display(screen)\n\t\n\t\n\ttext = font.render(\"Score: \" +str(score), True, COLORS.YELLOW)\n\tscreen.blit(text, text_position)\n\t\n \n # --- Go ahead and update the screen with what we've drawn.\n\tpygame.display.flip()\n \n # --- Limit to 60 frames per second\n\tclock.tick(60)\n \n# Close the window and quit.\npygame.quit()\n","sub_path":"test_Input.py","file_name":"test_Input.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418898576","text":"import unittest\nfrom tests.unit_test_helper.console_test_helper import *\n\n\nclass TestOutput(unittest.TestCase):\n\n def test(self):\n result = get_script_output(\"lab02/ch02_t09_dot_notation.py\")\n self.assertEqual(\"27\\nTHE MINISTRY OF SILLY WALKS\\n\", result)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/lab02/test_ch02_t09_dot_notation.py","file_name":"test_ch02_t09_dot_notation.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"235965151","text":"import os\nfrom google.appengine.api import app_identity\n\n\nAPP_ROOT_PATH = os.path.dirname(__file__)\n\nimport jinja2\nfrom authomatic.providers import oauth2\n\n''' Common configuration '''\n\nSERVER_SOFTWARE = os.environ.get('SERVER_SOFTWARE')\n\nIS_LOCAL_DEV_SERVER = True if not SERVER_SOFTWARE or SERVER_SOFTWARE.startswith('Development') else False\n\n\nAUTH_CONFIG = {\n 'fb': {\n 'class_': oauth2.Facebook,\n # Facebook is an AuthorizationProvider too.\n 'consumer_key': '1503589679910417',\n 'consumer_secret': '1a9610402f49e6d708bbd7849aaf8ec5',\n # But it is also an OAuth 2.0 provider and it needs scope.\n 'scope': ['user_about_me', 'email', 'publish_stream', 'read_stream'],\n }\n}\n\nENVIRONMENTS = {\n 'cofficeapp': {'environment': 'PROD',\n 'site_url': 'http://app.coffeeup.co',\n 'auth_config': AUTH_CONFIG\n }\n}\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(APP_ROOT_PATH),\n extensions=['jinja2.ext.autoescape'])\n\nrunning_app_id = app_identity.get_application_id() if SERVER_SOFTWARE else 'cofficeapp'\n\nis_production = True if ENVIRONMENTS[running_app_id]['environment'] == 'PROD' else False\n\ncurrent_environment = ENVIRONMENTS[running_app_id]\nauth_config = current_environment['auth_config']\n\n# Authorisation\nCACHE_G_APPS_USER_ID_KEY_PREFIX = '__gae_userid_'\nCACHE_G_APPS_USER_ID_EXPIRY_MINS = 20\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"377895150","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n for i in range(len(matrix)):\n if i==len(matrix)-1:\n if target in matrix[i]:\n return True\n else:\n return False\n if target>=matrix[i][0] and target 0):\n return color\n elif (color_list.index(color) == len(color_list) - 2 and diff > 1):\n diff = 1\n elif (color_list.index(color) == 1 and diff < -1):\n diff = -1\n return color_list[color_list.index(color) + diff]\n\n def colorize_chart(barlist, values):\n colors = 
['b'] * len(barlist)\n starters = values[:3]\n if min(starters) == values[0] and values[0] == 0:\n colors = Korodata.set_color(0, 'green', colors, barlist)\n elif min(starters) == values[0] and values[0] != 0:\n colors = Korodata.set_color(0, 'yellowgreen', colors, barlist)\n elif min(starters) == values[1]:\n colors = Korodata.set_color(0, 'yellowgreen', colors, barlist)\n elif min(starters) == values[2]:\n if values[0] < values[1] and values[0] == 0:\n colors = Korodata.set_color(0, 'green', colors, barlist)\n elif values[0] < values[1] and values[0] != 0:\n colors = Korodata.set_color(0, 'yellowgreen', colors, barlist)\n elif values[0] > values[1]:\n colors = Korodata.set_color(0, 'yellowgreen', colors, barlist)\n elif values[0] == values[1]:\n colors = Korodata.set_color(0, 'yellowgreen', colors, barlist)\n\n i = 1\n while i < len(barlist):\n if values[i] == 0:\n colors = Korodata.set_color(i, 'green', colors, barlist)\n elif values[i - 1] < values[i]:\n diff = 1\n if (values[i - 1] * 2 < values[i] and values[i - 1] != 0) or (4 < values[i] and values[i - 1] == 0):\n diff = 2\n colors = Korodata.set_color(i, Korodata.select_color(\n colors[i - 1], diff), colors, barlist)\n elif values[i - 1] > values[i]:\n diff = -1\n colors = Korodata.set_color(i, Korodata.select_color(\n colors[i - 1], diff), colors, barlist)\n else:\n colors = Korodata.set_color(i, colors[i - 1], colors, barlist)\n i += 1\n\n def format_filename(s):\n valid_chars = \"-_%s%s\" % (string.ascii_letters, string.digits)\n filename = ''.join(c for c in unidecode(s) if c in valid_chars)\n return filename\n\n def has_today_data():\n with urllib.request.urlopen(\"https://opendata.euskadi.eus/contenidos/ds_informes_estudios/covid_19_2020/opendata/generated/covid19-bymunicipality.json\") as url:\n url_str = url.read().decode('unicode_escape').encode('utf-8')\n data = json.loads(url_str)\n today = datetime.datetime.strftime(\n datetime.datetime.now(), '%Y-%m-%d')\n lastUpdate = datetime.datetime.strftime(\n datetime.datetime.strptime(\n data['lastUpdateDate'], '%Y-%m-%dT%H:%M:%SZ'),\n '%Y-%m-%d %H:%M'\n )\n return today in lastUpdate\n\n def herria(herriname):\n how_may_days_original = 24\n days = 0\n file = None\n with urllib.request.urlopen(\"https://opendata.euskadi.eus/contenidos/ds_informes_estudios/covid_19_2020/opendata/generated/covid19-bymunicipality.json\") as url:\n f = open('tmp.json', 'w')\n url_str = url.read().decode('unicode_escape').encode('utf-8')\n data = json.loads(url_str)\n f.write(str(json.dumps(data)))\n lastUpdate = datetime.datetime.strftime(\n datetime.datetime.strptime(\n data['lastUpdateDate'], '%Y-%m-%dT%H:%M:%SZ'),\n '%Y-%m-%d %H:%M'\n )\n\n herritot = 0\n datesforplot = []\n positivesforplot = []\n i = 0\n herria = ''\n for herri in data['newPositivesByDateByMunicipality'][len(data['newPositivesByDateByMunicipality']) - 1]['items']:\n datesforplot = []\n positivesforplot = []\n herritot = 0\n how_may_days = how_may_days_original\n if 'geoMunicipality' in herri.keys():\n if herriname.lower() in herri['geoMunicipality']['officialName'].lower():\n herria = herri['geoMunicipality']['officialName']\n i += 1\n print('#' + herriname)\n date = data['newPositivesByDateByMunicipality'][\n len(data['newPositivesByDateByMunicipality']) - 1]['date']\n while days <= how_may_days:\n herrisum, datetoplot = Korodata.getHerriBefore(\n herria, date, how_may_days)\n positivesforplot.append(herrisum)\n datesforplot.append(datetoplot)\n herritot += herrisum\n how_may_days -= 1\n Korodata.draw_figure(\n i, herria, 
datesforplot, positivesforplot)\n                lastDays = 'Total over the last ' + \\\n                    str(len(positivesforplot)) + \\\n                    ' days: ' + str(sum(positivesforplot))\n\n    def zerrenda():\n        min_positives = 2\n        how_may_days_original = 24\n        days = 0\n\n        with urllib.request.urlopen(\"https://opendata.euskadi.eus/contenidos/ds_informes_estudios/covid_19_2020/opendata/generated/covid19-bymunicipality.json\") as url:\n            f = open('tmp.json', 'w')\n            url_str = url.read().decode('unicode_escape').encode('utf-8')\n            data = json.loads(url_str)\n            f.write(str(json.dumps(data)))\n            lastUpdate = datetime.datetime.strftime(\n                datetime.datetime.strptime(\n                    data['lastUpdateDate'], '%Y-%m-%dT%H:%M:%SZ'),\n                '%Y-%m-%d %H:%M'\n            )\n            print('Last updated:', lastUpdate)\n            herritot = 0\n            datesforplot = []\n            positivesforplot = []\n            i = 0\n            for herri in data['newPositivesByDateByMunicipality'][len(data['newPositivesByDateByMunicipality']) - 1]['items']:\n                datesforplot = []\n                positivesforplot = []\n                herritot = 0\n                how_may_days = how_may_days_original\n                herriname = herri['geoMunicipality']['officialName']\n                if herri['positiveCount'] >= min_positives:\n                    i += 1\n                    print('#' + herriname)\n                    date = data['newPositivesByDateByMunicipality'][\n                        len(data['newPositivesByDateByMunicipality']) - 1]['date']\n                    while days <= how_may_days:\n                        herrisum, datetoplot = Korodata.getHerriBefore(\n                            herriname, date, how_may_days)\n                        positivesforplot.append(herrisum)\n                        datesforplot.append(datetoplot)\n                        herritot += herrisum\n                        how_may_days -= 1\n                    Korodata.draw_figure(\n                        i, herriname, datesforplot, positivesforplot)\n                    lastDays = 'Total over the last ' + \\\n                        str(len(positivesforplot)) + \\\n                        ' days: ' + str(sum(positivesforplot))\n","sub_path":"korodata.py","file_name":"korodata.py","file_ext":"py","file_size_in_byte":11096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"543726759","text":"import importlib\nfrom models import *\nimport core\n\nexper = importlib.import_module('exper-final-mnist-cnn-loss-based-lr', __package__)\n\nFLAGS = {\n    'task': 'attack',\n    'eid': [10000],\n    'attack_type': core.AttackType.L_2_DEEPFOOL,\n    'attack_N': 'all',\n    'attack_B': 128,\n    **exper.FLAGS\n}\n\ndef main(argv):\n    core.run(FLAGS)\n\nif __name__ == '__main__':\n    core.app_start()","sub_path":"eval-final-mnist-l2-deepfool.py","file_name":"eval-final-mnist-l2-deepfool.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"349241607","text":"# coding: utf-8\r\n\r\nimport time\r\n\r\nfrom com.android.monkeyrunner import MonkeyRunner, MonkeyDevice\r\n\r\nAPK = \"example.apk\"\r\nPACKAGE = \"com.example.package\"\r\nACTIVITY = \"com.example.package.activity\"\r\n\r\ndef log(fn, device):\r\n    msg = device.shell('logcat -d')\r\n    f_log = open(fn, 'at')\r\n    if msg is None:\r\n        msg = 'None'\r\n    f_log.write(msg.encode('utf-8'))\r\n    f_log.close() \r\n    device.shell('logcat -c')\r\n\r\nif __name__ == '__main__':\r\n    device = MonkeyRunner.waitForConnection()\r\n    device.removePackage(PACKAGE) # Uninstall package if already installed\r\n    device.shell('logcat -c') # Clear log buffer\r\n    device.installPackage(APK) # Install the application\r\n    log('install.log', device) # Write install logs\r\n    run_component = PACKAGE + '/' + ACTIVITY\r\n    device.startActivity(component=run_component) # Launch the application\r\n    time.sleep(10) # Wait 10 seconds\r\n    log('start.log', device) # Write launch logs\r\n    device.press('KEYCODE_MENU', MonkeyDevice.DOWN_AND_UP) # Open a 
menu\r\n    screen = device.takeSnapshot() # Capture a screenshot\r\n    screen.writeToFile('screenshot.png', 'png') # Save to screenshot.png\r\n    log('run.log', device) # Write test logs\r\n    device.removePackage(PACKAGE) # Uninstall the application\r\n    log('uninstall.log', device) # Write uninstall logs\r\n","sub_path":"RL-DOU/monkeyrunner_test.py","file_name":"monkeyrunner_test.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"174031030","text":"class Solution:\n    def openLock(self, deadends, target):\n        # BFS over lock states; each turn moves one wheel by +/-1 with wrap-around.\n        # Returns the minimum number of turns, or -1 if the target is unreachable.\n        dead = set(deadends)\n        if \"0000\" in dead:\n            return -1\n        queue = [(\"0000\", 0)]\n        seen = {\"0000\"}\n        while queue:\n            state, turns = queue.pop(0)\n            if state == target:\n                return turns\n            for i in range(4):\n                digit = int(state[i])\n                for move in (-1, 1):\n                    nxt = state[:i] + str((digit + move) % 10) + state[i + 1:]\n                    if nxt not in dead and nxt not in seen:\n                        seen.add(nxt)\n                        queue.append((nxt, turns + 1))\n        return -1\n\n\nif __name__ == \"__main__\":\n    print(Solution().openLock([\"0201\", \"0101\", \"0102\", \"1212\", \"2002\"], \"0202\"))\n","sub_path":"OpenLock.py","file_name":"OpenLock.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"332988049","text":"#! /usr/bin/env python\n\nimport sys\nfrom gluon.html import *\nfrom gluon.template import render\n\nmember_pages = [\n    [\"New Members\", \"newmembers.html\"],\n    [\"Plot Rules\", \"rules.html\"],\n    [\"Documents/Archive\", \"documents.html\"],\n    [\"Committee\", \"committee.html\"],\n    [\"Using the Level Crossing\", \"level_crossing.html\"]\n    ]\nservice_pages = [\n    [\"Trap Trading\", \"traptrading.html\"],\n    [\"Links & Resources\", \"links.html\"],\n    [\"Shared Equipment\", \"sharedequipment.html\"],\n    [\"Growing Tips\", \"toptips.html\"],\n    [\"Pollution\", \"pollution.html\"],\n    [\"Composting\", \"compost.html\"],\n    [\"Location of Wells\", \"wells.html\"],\n    ]\ncalendar_pages = [\n    [\"January\", \"calendar.html#jan\"],\n    [\"February\", \"calendar.html#feb\"],\n    [\"March\", \"calendar.html#mar\"],\n    [\"April\", \"calendar.html#apr\"],\n    [\"May\", \"calendar.html#may\"],\n    [\"June\", \"calendar.html#jun\"],\n    [\"July\", \"calendar.html#jul\"],\n    [\"August\", \"calendar.html#aug\"],\n    [\"September\", \"calendar.html#sep\"],\n    [\"October\", \"calendar.html#oct\"],\n    [\"November\", \"calendar.html#nov\"],\n    [\"December\", \"calendar.html#dec\"],\n    ]\nnews_pages = [\n    [\"Chiltern Railways Project\", \"evergreen3.html\"],\n    [\"Photo Competition 2011\", \"photocompetition2011entryform.html\"],\n    [\"Photo Competition 2010\", \"photocompetition2010.html\"],\n    [\"Photo Competition 2009\", \"photocompetition2009.html\"],\n    [\"Photo Competition 2008\", \"photocompetition2008.html\"],\n    ]\ntop_pages = [\n    [\"Home\", \"index.html\"],\n    [\"History\", \"history.html\"],\n    [\"Membership\", \"membership.html\", member_pages],\n    [\"Services\", \"services.html\", service_pages],\n    [\"Garden Calendar\", \"calendar.html\", calendar_pages],\n    [\"News & Events\", \"news.html\", news_pages],\n    ]\n\ndef build_menu_list(pages, target_page=None):\n    '''\n    Build a menu list from lists\n    '''\n    menu = []\n    for page in pages:\n        menu.append([page[0], page[1] == target_page, page[1]])\n        if len(page) > 2:\n            menu[-1].append(build_menu_list(page[2], target_page))\n    return menu\n\ndef lookup_title(list, target_page):\n    for item in list:\n        if item[1] == target_page:\n            return item[0]\n        if len(item)>2:\n            #import pdb; pdb.set_trace()\n            ans = lookup_title(item[2], target_page)\n            if ans:\n                return ans\n    return None\n\ndef build_menu(menu_list, 
li_active='current', _class='nav', ul_class=None,\n        li_class=None):\n    menu = MENU(menu_list, _id=_class, _class=_class, li_active=li_active,\n        ul_class=ul_class, li_class=li_class)\n    return menu\n\n\nif __name__ == \"__main__\":\n    source_dir = '.'\n    target_dir = '..'\n    if len(sys.argv) != 2:\n        sys.exit(\"Usage: make_page.py page (e.g. index.html)\")\n    target_page = sys.argv[1]\n    menu = build_menu(build_menu_list(top_pages, target_page))\n    #print menu\n    from gluon.storage import Storage\n    vars = dict(batch_title='Trap Ground Allotment Association',\n        batch_sub_title=lookup_title(top_pages, target_page), batch_menu=menu)\n    output_page = render(filename=target_page, path=source_dir,\n        context=vars)\n    dest = open('%s/%s'%(target_dir,target_page), 'w')\n    dest.write(output_page)\n    dest.close()\n\n","sub_path":"public_html/applications/pc/modules/make_page.py","file_name":"make_page.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"285441107","text":"# pylint: disable=W0621\n\"\"\"Asynchronous Python client for the P1 Monitor API.\"\"\"\n\nimport asyncio\n\nfrom p1monitor import P1Monitor, Phases, Settings, SmartMeter\n\n\nasync def main():\n    \"\"\"Test.\"\"\"\n    async with P1Monitor(\n        host=\"example\",\n    ) as p1mon:\n        smartmeter: SmartMeter = await p1mon.smartmeter()\n        settings: Settings = await p1mon.settings()\n        phases: Phases = await p1mon.phases()\n        print(f\"P1 Monitor - SmartMeter: {smartmeter}\")\n        print()\n        print(f\"P1 Monitor - Settings: {settings}\")\n        print()\n        print(f\"P1 Monitor - Phases: {phases}\")\n\n\nif __name__ == \"__main__\":\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main())\n","sub_path":"test_output.py","file_name":"test_output.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"412548982","text":"import errno\nimport json\nimport os\n\nfrom collections import OrderedDict\n\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\n\nVALID_MODIFIERS = set((\n    '_rotation',\n    '_size',\n))\n\nSIZE_MAP = {\n    'thumb': (256, 256),\n    'small': (512, 512),\n    'medium': (1024, 1024),\n    'large': (2048, 2048),\n}\n\n\ndef load_config(config_filepath='~/.cigac.json'):\n    \"\"\"\n    Load the cigac json config.\n    \"\"\"\n    config_filepath = os.path.expanduser(config_filepath)\n    with open(config_filepath, 'r') as config_file:\n        return json.loads(config_file.read())\n\n\ndef extract_modifiers_from_url(url):\n    \"\"\"\n    Extract an ordered dictionary of modifiers from a url\n\n    Modifiers are _key:value pairs specified as path parameters with the key\n    always starting with an underscore. 
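    For example (illustrative path, not from the original code): a url of '/img/.cigac/_size:thumb/_foo:bar/cat.png' yields OrderedDict([('_size', 'thumb')]), since '_foo' is not in VALID_MODIFIERS.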
Invalid modifiers will be removed.\n    \"\"\"\n    pairs = map(lambda x: x.split(':'), filter(\n        lambda x: x.startswith('_'), url.split('/')))\n    return OrderedDict((k, v) for k, v in pairs if k in VALID_MODIFIERS)\n\n\ndef construct_ordered_modifier_url(url):\n    \"\"\"\n    Reorder any modifiers requested to be in alphabetical order\n    \"\"\"\n    modifiers = extract_modifiers_from_url(url)\n    sorted_keys = sorted(modifiers.keys())\n\n    if list(modifiers.keys()) == sorted_keys:\n        return url\n\n    ordered = OrderedDict((key, modifiers[key]) for key in sorted_keys)\n\n    non_modifiers = list(filter(lambda x: not x.startswith('_'), url.split('/')))\n    # Build up our new URL after .cigac in the URL\n    new_split = non_modifiers[0:non_modifiers.index('.cigac') + 1]\n    new_split += [\n        \"{}:{}\".format(key, val) for key, val in ordered.items()]\n    new_split += non_modifiers[non_modifiers.index('.cigac') + 1:]\n\n    return \"/\".join(new_split)\n\n\ndef modify_image(image_path, modifiers):\n    image = Image.open(image_path)\n    exif = image._getexif()\n    if exif:\n        for tag, value in exif.items():\n            decoded = TAGS.get(tag, tag)\n            if decoded == 'Orientation':\n                if value == 3: image = image.rotate(180)\n                if value == 6: image = image.rotate(270)\n                if value == 8: image = image.rotate(90)\n                break\n\n    for modifier, value in modifiers.items():\n        if modifier == \"_size\":\n            image.thumbnail(SIZE_MAP[value], Image.ANTIALIAS)\n\n    return image\n\n\ndef mkdir_p(path):\n    try:\n        os.makedirs(path)\n    except OSError as exc:\n        # only swallow \"already exists\"; re-raise anything else\n        if exc.errno != errno.EEXIST:\n            raise\n","sub_path":"cigac/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"323903957","text":"#!/usr/bin/env python3\n# File: chaos.py\n#A simple program illustrating chaotic behavior.\n\n\n\n#Brittany Manuel -- CTEC 121 -- April 7, 2014\n#1.6 Python Homework\n#Programming Exercises\n#Problem 2\n\ndef main():\n\n\tprint (\"This program illustrates a chaotic function\")\n\n\tx = eval(input(\"Enter a number between 0 and 1: \"))\n\n\tfor i in range(10):\n\t\tx = 3.9 * x * (1 - x)\n\t\tprint (x)\n\nmain ()\n\n\n\n\n\n\n#Problem 3\n\ndef main():\n\n\tprint (\"This program illustrates a chaotic function\")\n\n\tx = eval(input(\"Enter a number between 0 and 1: \"))\n\n\tfor i in range(10):\n\t\tx = 2.0 * x * (1 - x)\n\t\tprint (x)\n\nmain ()\n\n\n\n\n\n#Problem 4\n\ndef main():\n\n\tprint (\"This program illustrates a chaotic function\")\n\n\tx = eval(input(\"Enter a number between 0 and 1: \"))\n\t\n\tfor i in range(20):\n\t\tx = 2.0 * x * (1 - x)\n\t\tprint (x)\n\nmain ()\n\n\n\n\n\n#Problem 5\n\ndef main():\n\n\tprint (\"This program illustrates a chaotic function\")\n\n\tx = eval(input(\"Enter a number between 0 and 1: \"))\n\tn = eval (input(\"How many numbers should I print? \"))\n\n\tfor i in range(n):\n\t\tx = 2.0 * x * (1 - x)\n\t\tprint (x)\n\nmain ()\n\n\n\n\n\n#Problem 6\n\ndef main ():\n\n\tprint (\"This program illustrates a chaotic function\")\n\n\tx = eval (input(\"Function X: Enter a number between 0 and 1: \"))\n\ty = eval (input(\"Function Y: Enter a number between 0 and 1: \"))\n\tz = eval (input(\"Function Z: Enter a number between 0 and 1: \"))\n\t\n\tfor i in range (100) :\n\t\tx = 3.9 * x * (1 - x)\n\t\tprint (\"Function X: \", x)\n\n\tfor i in range (100) :\n\t\ty = 3.9 * (x - x * x)\n\t\tprint (\"Function Y: \", y)\n\n\tfor i in range (100) :\n\t\tz = 3.9 * x - 3.9 * x * x\n\t\tprint (\"Function Z: \", z)\n\n\tprint (\"The final results for functions X, Y, Z are as follows: \", x, \",\", y, \", and\", z)\n\n\tif x == y:\n\t\tprint (\"Functions X and Y are equal\")\n\n\tif x == z:\n\t\tprint (\"Functions X and Z are equal\")\n\n\tif y == z:\n\t\tprint (\"Functions Y and Z are equal\")\n\n\telse:\n\t\tprint (\"None of the functions are equal to each other.\")\n\nmain()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Week 1/Chaos.py","file_name":"Chaos.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"205218628","text":"from django.contrib.admin.views.main import ChangeList\nfrom django.db import connections\n\nclass RawChangeList(ChangeList):\n    \"\"\"\n    Extended Django ChangeList to be able to show data from a RawQuerySet.\n    \"\"\"\n    def get_count(self):\n        connection = connections[self.queryset.db]\n        with connection.cursor() as c:\n            if connection.vendor == 'microsoft': # CTE in subquery is not working in SQL Server\n                c.execute(self.queryset.raw_query)\n                c.execute('SELECT @@ROWCOUNT')\n            else:\n                query = 'SELECT COUNT(*) FROM ({query}) AS sq'\n                c.execute(query.format(query=self.queryset.raw_query))\n\n            return c.fetchone()[0]\n\n    def get_queryset_slice(self):\n        connection = connections[self.queryset.db]\n        if connection.vendor == 'microsoft':\n            # SQL Server needs ordered query for slicing\n            if hasattr(self.queryset, 'ordered') and self.queryset.ordered:\n                query = '{query}'\n            else:\n                query = '{query} ORDER BY 1'\n            query += ' OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY'\n        else:\n            query = '{query} LIMIT {limit} OFFSET {offset}'\n\n        return self.queryset.model.objects.raw(\n            query.format(\n                query=self.queryset.raw_query,\n                offset=self.page_num * self.list_per_page,\n                limit=self.list_per_page,\n            )\n        )\n\n    def get_queryset(self, request):\n        \"\"\"\n        Overriding to avoid applying filters in ChangeList because RawQuerySet has no filter method.\n        So any filters have to be applied manually for now.\n        \"\"\"\n        qs = self.root_queryset\n        if not hasattr(qs, 'count'):\n            qs.count = lambda: self.get_count()\n        return qs\n\n    def get_results(self, request):\n        if self.show_all:\n            qs = self.queryset\n        else:\n            qs = self.get_queryset_slice()\n\n        paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)\n\n        self.result_count = paginator.count\n        self.show_full_result_count = False\n        self.show_admin_actions = True\n        self.full_result_count = 0\n        self.result_list = list(qs)\n        self.can_show_all = True\n        self.multi_page = True\n        self.paginator = paginator","sub_path":"Rice/RawChangeList.py","file_name":"RawChangeList.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"162467184","text":"import torch\nimport torchvision\nimport torchvision.datasets as tdata\nimport torchvision.transforms as tTrans\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optm\nimport matplotlib.pyplot as plt\n# Python Imaging Library\nimport PIL\nimport numpy as np\nimport sys as sys\n\n# Index definitions to be used with history log.\ntrainAcc = 0\ntrainLoss= 1\ntestAcc = 2\ntestLoss = 3\n\n\n# Parameters\nbatch = 64\nepochs = 100\ngamma = 0.01\nmomnt = 0.5\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# bundle common args to the Dataloader module as a keyword list.\n# pin_memory reserves memory to act as a buffer for cuda memcopy \n# operations\ncomArgs = {'shuffle': True,'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}\n\n# Data Loading -----------------------\n# ******************\n# At this point the data come in Python tuples, a 28x28 image and a label.\n# While the label is a tensor, the image is not; it needs to be converted. \n# So we need to transform PIL image to tensor and then normalize it.\n# Normalization is quite a good practice to avoid numerical and convergence\n# problems. For that we need the dataset's mean and std which fortunately\n# can be computed!\n# ******************\nmean = 0.1307\nstd = 0.3081\n# Bundle our transforms sequentially, one after another. This is important.\n# Convert images to tensors + normalize\ntransform = tTrans.Compose([tTrans.ToTensor(), tTrans.Normalize( (mean,), (std,) )])\n# Load data set\nmnistTrainset = tdata.MNIST(root='../data', train=True, download=True, transform=transform)\nmnistTestset = tdata.MNIST(root='../data', train=False, download=True, transform=transform)\n\n# Once we have a dataset, torch.utils has a very nice library for iterating on that\n# dataset, with shuffle AND batch logic. Very useful in larger datasets.\ntrainLoader = torch.utils.data.DataLoader(mnistTrainset, batch_size = batch, **comArgs )\ntestLoader = torch.utils.data.DataLoader(mnistTestset, batch_size = 10*batch, **comArgs)\n# End of DataLoading -------------------\n\n\n# Sanity Prints---\n# print(len(mnistTrainset))\n# print(type(mnist_trainset[0]))\n\n# ----------------\n\n\n# Model Definition\n#-----------------------------\n# define network as a class\nclass Net(nn.Module):\n\n    # Class variables for measures.\n    trainAcc = 0\n    trainLoss= 0\n    testAcc = 0\n    testLoss = 0\n    # History Log\n    # train acc, train loss, test acc, test loss\n    history = [[],[],[],[]]\n    # Mod init + boiler plate code\n    # Skeleton of this network; the blocks to be used.\n    # Similar to Fisher-Price building blocks!\n    def __init__(self):\n        super(Net, self).__init__()\n        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n        self.fc1 = nn.Linear(64*144, 128)\n        self.fc2 = nn.Linear(128, 10)\n\n    # Set the above defined building blocks as an\n    # organized, meaningful architecture here.\n    def forward(self, x):\n        x = F.relu(self.conv1(x), 2)\n        x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n        x = x.view(-1, 64*144)\n        x = F.relu(self.fc1(x))\n        x = self.fc2(x)\n        x = F.log_softmax(x, dim=1)\n        return x\n\n    # This function will return the number of features\n    # required to turn a tensor to a 1D tensor. We exclude batch\n    # size. So a [64, 64, 5, 5] tensor will have 1600 features and\n    # as such requires a 1, 1600 place holder. 
Notice we ignore the first\n    # 64, as that is the batch size which is irrelevant for this operation.\n    def num_flat_features(self, x):\n        size = x.size()[1:] # all dimensions except the batch dimension\n        num_features = 1\n        for s in size: # Get the products\n            num_features *= s\n        return num_features\n    \n    # Call this function to facilitate the training process\n    # While there are many ways to go on about calling the\n    # training and testing code, defining a function within\n    # a \"Net\" class seems quite intuitive. Many examples\n    # do not have a class function; rather they set up\n    # the training logic as a block script laid out.\n    # Perhaps the object oriented approach leads to less \n    # anguish in large projects...\n    def train(self, args, device, indata, optim):\n        true = 0\n        acc = 0\n        for idx, (img, label) in enumerate(indata):\n            data, label = img.to(device), label.to(device)\n            # forward pass calculate output of model\n            output = self.forward(data)\n            pred = output.max(dim = 1, keepdim = True)\n            true += label.eq(pred[1].view_as(label)).sum().item()\n            # print(data.shape)\n            # print(output.shape)\n            # compute loss\n            # loss = F.nll_loss(output, label)\n            loss = F.cross_entropy(output, label)\n            # Backpropagation part\n            # 1. Zero out Grads\n            optim.zero_grad()\n            # 2. Perform the backpropagation based on loss\n            loss.backward() \n            # 3. Update weights \n            optim.step()\n\n            # Training Progress report for sanity purposes! \n            # if idx % 20 == 0: \n            #     print(\"Epoch: {}->Batch: {} / {}. Loss = {}\".format(args, idx, len(indata), loss.item() ))\n        # Log the current train loss (as a plain float, not a tensor)\n        acc = true/len(indata.dataset)\n        self.history[trainLoss].append(loss.item()) \n        self.history[trainAcc].append(acc)\n    # Testing and error reports are done here\n\n    def test(self, device, testLoader):\n        print(\"Commence Testing!\") \n        loss = 0 \n        true = 0\n        acc = 0\n        # Inform Pytorch that keeping track of gradients is not required in\n        # testing phase.\n        with torch.no_grad():\n            for data, label in testLoader:\n                data, label = data.to(device), label.to(device)\n                # output = self.forward(data)\n                output = self.forward(data)\n                # Sum all loss terms and turn them into a numpy number for later use.\n                loss += F.cross_entropy(output, label, reduction = 'elementwise_mean').item()\n                # Find the max along a row but maintain the original dimensions.\n                # in this case a 10-dimensional array.\n                pred = output.max(dim = 1, keepdim = True)\n                # Select the indexes of the prediction maxes.\n                # Reshape the output vector in the same form of the label one, so they \n                # can be compared directly; from batchsize x 10 to batchsize. Compare\n                # predictions with label; 1 indicates equality. Sum the correct ones\n                # and turn them into a numpy number. 
In this case the index of the maximum \n                # prediction coincides with the label as we are predicting numbers 0-9.\n                # So the index of the max output of the network is essentially the predicted\n                # label (number).\n                true += label.eq(pred[1].view_as(label)).sum().item()\n        acc = true/len(testLoader.dataset)\n        self.history[testAcc].append(acc)\n        self.history[testLoss].append(loss/len(testLoader.dataset)) \n        # Print accuracy report!\n        print(\"Accuracy: {} ({} / {})\".format(acc, true,\n            len(testLoader.dataset)))\n\n    def report(self):\n\n        print(\"Current stats of MNIST_NET:\")\n        print(\"Accuracy: {}\" .format(self.history[trainAcc]))\n        print(\"Training Loss: {}\" .format(self.trainLoss))\n        print(\"Test Accuracy: {}\" .format(self.testAcc))\n        print(\"Test Loss: {}\" .format(self.testLoss))\n\n\n# Execution\n#-----------------------------\ndef main():\n    print(\"######### Initiating MNIST NO-DROP Network Training #########\\n\")\n\n    model = Net().to(device)\n    # optim = optm.SGD(model.parameters(), lr=gamma, momentum=momnt)\n    optim = optm.Adam(model.parameters())\n    tTotal = 0\n    testIters = 1000\n    for e in range(epochs):\n        print(\"Epoch: {} start ------------\\n\".format(e))\n        # print(\"Dev {}\".format(device))\n        args = e\n        model.train(args, device, trainLoader, optim)\n        model.test(device, testLoader)\n\n    # Final report\n    # model.report()\n\n    with open('PyTorch_no_drop_rep.txt', 'w') as f:\n        for i in range(len(model.history[0])):\n            f.write(\"{:.4f} {:.4f} {:.4f} {:.4f}\\n\".format(model.history[0][i], model.history[1][i],\n                model.history[2][i], model.history[3][i]))\n    for t in range(testIters):\n        model.test(device, testLoader)\n    with open('PyTorch_no_drop_eval.txt', 'w') as f:\n        for i in range(len(model.history[testAcc])):\n            f.write(\"{:.4f} {:.4f} \\n\".format(model.history[testAcc][i], model.history[testLoss][i]))\n\n# Define behavior if this module is the main executable.\nif __name__ == '__main__':\n    main()\n","sub_path":"Code/Architecture/forward_simple.py","file_name":"forward_simple.py","file_ext":"py","file_size_in_byte":8983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"215812055","text":"# coding: utf-8\nimport os\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport re\nfrom ebooklib import epub\ntutorial_name = 'javascript'\nhome_page_url = '/wiki/001434446689867b27157e896e74d51a89c25cc8b43bdb3000'\n\nwebsite_domain = 'http://www.liaoxuefeng.com'\ntutorial_name_prefix = tutorial_name + '_'\ntemp_folder = tutorial_name_prefix + 'temp'\nparent_folder = tutorial_name_prefix + 'htmls'\ndef get_detailed_content():\n    home_html_str = requests.get(website_domain + home_page_url).content\n    home_soup = BeautifulSoup(home_html_str, 'lxml')\n    content_ul = home_soup.find('ul', {'class':'uk-nav uk-nav-side', 'style':'margin-right:-15px;'})\n    is_first_level_l = []\n    for li_tag in content_ul.find_all('li'):\n        if li_tag.has_attr('style'):\n            indent_style = li_tag['style']\n            if indent_style.endswith('1em;'):\n                is_first_level_l.append(True)\n            else:\n                is_first_level_l.append(False)\n        else:\n            is_first_level_l.append(True)\n    return is_first_level_l\n\nfirst_level_indicator_l = get_detailed_content()\nhtml_l_file_name = tutorial_name_prefix + 'html_l_file.txt'\n\nwith open(html_l_file_name, 'r') as f:\n    html_str_l = json.load(f, encoding='utf-8')\n    \ntitle = tutorial_name.capitalize() + u'教程'\nauthor = u'廖雪峰'\nepub_name = '{}_tutorial.epub'.format(tutorial_name)\nbook = epub.EpubBook()\n# set 
metadata\n#book.set_identifier('id123456')\nbook.set_language('zh')\nbook.set_title(title)\nbook.add_author(author)\n# pull the chapter title out of its <h4> heading (pattern reconstructed; original tags were lost in extraction)\nget_title = lambda _str: re.compile('<h4>(.*)</h4>
').search(_str).group(1)\n\ntoc_list = []\nspine_list = ['cov', 'nav']\n\n# add css\nhight_css = epub.EpubItem(uid=\"style_nav\", file_name=\"style.css\", media_type=\"text/css\")\nhight_css.content = open(parent_folder + '/styles.css', 'r').read()\nbook.add_item(hight_css)\n\nsection_list = None\nfor chap_index, html_str in enumerate(html_str_l):\n chap_title = get_title(html_str)\n chap_uid = 'chapter_{:d}'.format(chap_index)\n chap_file_name = chap_uid + '.xhtml'\n # create chapter\n one_chap = epub.EpubHtml(title=chap_title, file_name=chap_file_name, lang='cn', content=html_str)\n one_chap.add_item(hight_css)\n # add chapter\n book.add_item(one_chap)\n # toc\n if first_level_indicator_l[chap_index]:\n if not (section_list is None):\n toc_list.append(section_list)\n section_list = []\n section_list.append(epub.Section(chap_title))\n section_list.append([])\n section_list[1].append(one_chap)\n else:\n section_list[1].append(one_chap)\n #toc_list.append(epub.Link(chap_file_name, chap_title, chap_uid))\n # spine\n spine_list.append(one_chap)\n # add picture and video\n chap_pic_dir = parent_folder + '/' + chap_uid\n if os.path.exists(chap_pic_dir):\n for media_file_name in os.listdir(chap_pic_dir):\n media_file_full_path = chap_pic_dir + '/' + media_file_name\n media_file_save_path = chap_uid + '/' + media_file_name\n media_data = open(media_file_full_path, 'rb').read()\n if media_file_name.endswith('mp4'):\n # video\n media_type = 'video/mp4'\n else:\n # pic\n media_type = 'image/png'\n one_media = epub.EpubItem(uid=media_file_name, file_name=media_file_save_path, \n media_type=media_type, content=media_data)\n book.add_item(one_media)\ntoc_list.append(section_list)\n\n# define Table Of Contents\nbook.toc = toc_list\n\n# add default NCX and Nav file\nbook.add_item(epub.EpubNcx())\nbook.add_item(epub.EpubNav())\n\n# define css style\nstyle = '''\n@namespace epub \"http://www.idpf.org/2007/ops\";\nbody {\n font-family: Cambria, Liberation Serif, Bitstream Vera Serif, Georgia, Times, Times New Roman, serif;\n}\nh2 {\n text-align: left;\n text-transform: uppercase;\n font-weight: 200; \n}\nol {\n list-style-type: none;\n}\nol > li:first-child {\n margin-top: 0.3em;\n}\nnav[epub|type~='toc'] > ol > li > ol {\n list-style-type:square;\n}\nnav[epub|type~='toc'] > ol > li > ol > li {\n margin-top: 0.3em;\n}\n'''\n\n# add css file\nnav_css = epub.EpubItem(uid=\"style_nav\", file_name=\"style/nav.css\", media_type=\"text/css\", content=style)\nbook.add_item(nav_css)\n\n\n# basic spine\n#book.set_cover(\"image.png\", pic_data)\nbook.spine = spine_list\n# write to the file\nepub.write_epub(epub_name, book, {})\n","sub_path":"blog_lxf/convert_to_epub_javascript.py","file_name":"convert_to_epub_javascript.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"355075095","text":"import numpy as np\nimport pandas as pd\nimport streamlit as st\n\nfrom .constants import RANDOM_STATE\n\n\ndef app():\n gpt = pd.read_csv('data/gpt.csv')\n gpt['file_path'] = gpt['audio_path'].str[1:]\n\n # gpt_split = pd.read_csv('data/gpt_split.csv')\n # gpt_split['file_path'] = gpt_split['file_path'].str[1:]\n\n st.title('Dataset')\n st.write('
', unsafe_allow_html=True)\n\n st.write(\n 'The Guitar Playing Technique (GPT) datasets from the work of [Su et al. (2014)](http://mac.citi.sinica.edu.tw/GuitarTranscription/) was utilized.')\n st.write('This dataset comprises `7 playing techniques` of the electrical guitar, including: `bending`, `hamming`, `mute`, `normal`, `pulling`, `slide`, and `trill`')\n # st.write('There are two sets of data:')\n # st.write('1. A `complete dataset`, which includes complete audio signals of each guitar sound with a duration of `4.0 s`.')\n st.write('This dataset includes complete audio signals of each guitar sound with a duration of `4.0 s`.')\n # st.write('2. A `split dataset`, which includes only portions of the waveform signals on the onsets of each guitar sound, obtained by clipping them from `0.1 s` before the onset to `0.2 s` after the onset.')\n st.write('To make the quality of the sound recordings akin to that of real-world performance, `7 different guitar tones` are used with differences in effect and equalizer settings.')\n st.markdown(' \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n \\\n
<tr><th>Tone name</th><th>Effect</th><th>Equalizer</th></tr> \
<tr><td>1 (Normal tone)</td><td>moderate distortion</td><td>no modification on EQ</td></tr> \
<tr><td>2 (Solo tone)</td><td>moderate distortion and moderate reverb</td><td>mid-frequency is emphasized</td></tr> \
<tr><td>3 (Solo tone)</td><td>moderate distortion, intense chorus, slight reverb</td><td>mid-frequency is emphasized</td></tr> \
<tr><td>4 (Solo tone)</td><td>moderate distortion, intense delay, moderate reverb</td><td>mid-frequency is emphasized</td></tr> \
<tr><td>5 (Riff tone)</td><td>intense distortion</td><td>mid-frequency is suppressed while high-frequency and low-frequency are emphasized</td></tr> \
<tr><td>6 (Country tone)</td><td>very slight distortion</td><td>no modification on EQ</td></tr> \
<tr><td>7 (Funk tone)</td><td>slight distortion, slight delay, and slight reverb</td><td>high-frequency component is emphasized a little</td></tr> \
', unsafe_allow_html=True)\n st.write('
', unsafe_allow_html=True)\n\n st.header('GPT Dataset')\n st.subheader(\"Number of Sound Clips in GPT Dataset\")\n st.write('*Total:', gpt.shape[0], ' audio files.*')\n st.bar_chart(pd.value_counts(gpt['technique']))\n\n st.subheader('Play an Audio Clip of GPT Dataset')\n techniques = gpt['technique'].unique()\n tones = gpt['tone_type'].unique()\n selected_technique = st.selectbox('Select Technique:', np.sort(techniques))\n selected_tone = st.selectbox('Select Tone Type:', np.sort(tones))\n files = gpt['audio_path'].loc[(gpt['technique'] == selected_technique) & (\n gpt['tone_type'] == selected_tone)].sort_values()\n df_files = files.to_frame()\n df_files['value'] = np.array(files.str.split('/').tolist())[:, 6]\n df_files['audio_path'] = df_files['audio_path'].str[3:]\n # st.dataframe(df_files)\n selected_file = st.selectbox('Select File:', df_files['value'].tolist())\n selected_file_path = df_files['audio_path'].loc[df_files['value'] == selected_file].item()\n st.write('`Play: ', selected_file_path, '`')\n audio_file = open(selected_file_path, 'rb')\n audio_bytes = audio_file.read()\n st.audio(audio_bytes)\n st.write('
', unsafe_allow_html=True)\n\n # st.header('2. GPT-split Dataset')\n # st.subheader(\"Number of Sound Clips in GPT-split Dataset\")\n # st.write('*Total:', gpt_split.shape[0], ' audio files.*')\n # st.bar_chart(pd.value_counts(gpt_split['technique']))\n\n # st.subheader('Play an Audio Clip of GPT-split Dataset')\n # techniques2 = gpt_split['technique'].unique()\n # tones2 = gpt_split['tone_type'].unique()\n # selected_technique2 = st.selectbox(\n # 'Select Technique', np.sort(techniques2))\n # selected_tone2 = st.selectbox('Select Tone Type', np.sort(tones2))\n # files2 = gpt_split['file_path'].loc[(gpt_split['technique'] == selected_technique2) & (\n # gpt_split['tone_type'] == selected_tone2)].sort_values()\n # df_files2 = files2.to_frame()\n # df_files2['value'] = np.array(files2.str.split('/').tolist())[:, 5]\n # selected_file2 = st.selectbox('Select File', df_files2['value'].tolist())\n # selected_file_path2 = df_files2['file_path'].loc[df_files2['value']\n # == selected_file2].item()\n # st.write('`Play: ', selected_file_path2, '`')\n # audio_file2 = open(selected_file_path2, 'rb')\n # audio_bytes2 = audio_file2.read()\n # st.audio(audio_bytes2)\n # st.write('
', unsafe_allow_html=True)\n\n st.header('Extracted Features of GPT Datasets')\n st.write('To represent musical signal, the `mean`, `std`, `variance`, `skewness`, and `kurtosis` as the statistics measure of various audio descriptors including: *MFCC-13*, *$\\Delta$MFCC-13* (first-order derivative), *$\\Delta$2MFCC-13* (second-order derivative) was utilized.', unsafe_allow_html=True)\n st.latex(r'''\n Total = 5 \\times 13 \\times 3 = 195D \\ Feature \\ Vector\n ''')\n # st.write('The audio descriptors are computed using python package for music and audio analysis, [librosa](https://librosa.org/doc/latest/index.html).')\n st.markdown('### GPT dataset (1% sampling)')\n sample_gpt = gpt.sample(frac=0.01, random_state=RANDOM_STATE)\n st.dataframe(sample_gpt)\n\n # st.markdown('### GPT-split dataset (1% sampling)')\n # sample_gpt_split = gpt_split.sample(frac=0.01, random_state=RANDOM_STATE)\n # st.dataframe(sample_gpt_split)\n","sub_path":"apps/app_dataset.py","file_name":"app_dataset.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"367988596","text":"#!/usr/bin/env python3\n\nkey = sum(ord(i) for i in 'aaooeeuu') % 0xff\ndec = 'flag{women_they_are_a_complete_mystery}'\nenc = ''.join(chr(ord(c) ^ key) for c in dec)\n\nprint(', '.join(f'0x{ord(c):=02x}' for c in enc) + ', 0x00')\n\n# Test decode\nassert ''.join(chr(ord(i) ^ key) for i in enc) == dec\n","sub_path":"rev2 (Ivan Tham)/Resource/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"35070202","text":"#!/usr/bin/python3\n\n\nclass Trader:\n \"\"\"Modélise un trader\"\"\"\n listTraders = []\n\n def __init__(self, name=None, capital=None, *args, **kwargs):\n \"\"\"Un trader est représenté par son nom, son capital,\n son porte action, son historique\"\"\"\n self.name = name\n self.capital = capital\n self.porte_action = {} #{Action: int}\n self.history = [] #[(date, Trader)]\n Trader.listTraders.append(self)\n\n def __str__(self):\n return \"{} : {}\\n \\t {}\\n Total = {}\"\\\n .format(self.name, self.capital, self.porte_action,\n self.total_values())\n\n def save(self): #fonction qui doit être utilisé plus souvent.\n \"\"\"Sauvegarde les données dans history et dans un fichier\"\"\"\n self.history.append((ctime(time()), self))\n #must save in a file\n\n def total_values(self):\n return self.capital \\\n + sum(action.price * number\n for action, number in self.porte_action.items())\n\n def buy(self, action, number):\n \"\"\"Buy and action\"\"\"\n action = [x for x in action.listActions if x.name == action][0]\n action.update()\n self.capital -= action.price * number\n try:\n self.porte_action[action] += number\n except KeyError:\n self.porte_action[action] = number\n\n def sell(self, action, number):\n \"\"\"Sell and action\"\"\"\n action = [x for x in action.listActions if x.name == action][0]\n action.update()\n self.capital += action.price * number\n try:\n self.porte_action[action] -= number\n except KeyError:\n self.porte_action[action] = number\n","sub_path":"trader.py","file_name":"trader.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"406465516","text":"import sys, math\nlines = [s.rstrip(\"\\n\") for s in sys.stdin.readlines()]\nx, = [int(num) for num in lines.pop(0).split(\" \")]\nmax_base = 
int(math.sqrt(x))\nmax = 1\nfor base in range(2, max_base + 1):\n i = 2\n while True:\n tmp_max = base ** i\n if tmp_max <= x:\n if tmp_max > max:\n max = tmp_max\n i += 1\n else:\n break\nprint(max)\n","sub_path":"Python_codes/p03352/s555925771.py","file_name":"s555925771.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"507256340","text":"import torch\nfrom torch import nn, einsum\nfrom torch.nn import parameter\nimport torch.nn.functional as F\nfrom vit_pytorch import ViT\n\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\n\n\nclass ViT_feature(ViT):\n def __init__(self, *, args, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool='cls', channels=3, dim_head=64, dropout=0., emb_dropout=0.):\n super().__init__(image_size=image_size, patch_size=patch_size, num_classes=num_classes, dim=dim, depth=depth,\n heads=heads, mlp_dim=mlp_dim, pool=pool, channels=channels, dim_head=dim_head, dropout=dropout, emb_dropout=emb_dropout)\n # 为计算EMD距离做准备\n self.args = args\n self.patch_size = patch_size\n self.image_size = image_size\n num_patches = (image_size // patch_size) ** 2\n patch_dim = channels * patch_size ** 2\n assert image_size % patch_size == 0\n # 每行的patch数\n self.n_patch = self.image_size // self.patch_size\n\n # 使用命令行参数覆盖pool方法 [\"mean\", \"cls\"]\n self.pool = self.args.vit_mode\n if self.args.not_use_clstoken:\n print(\"不使用cls token\")\n # self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))\n print(\"预测分类时使用{}模式\".format(self.args.vit_mode))\n\n \n # 在forward pass中除去最后一层,不修改网络参数也许可以方便存储与读取\n def forward(self, img):\n x = self.to_patch_embedding(img)\n b, n, _ = x.shape\n\n if not self.args.not_use_clstoken:\n cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b)\n x = torch.cat((cls_tokens, x), dim=1)\n x += self.pos_embedding[:, :(n + 1)]\n else:\n # 不用添加额外的cls token维度\n x += self.pos_embedding[:, :n]\n\n x = self.dropout(x)\n\n x = self.transformer(x) # x输出形状为[batch_size, patch_size+1, dim] or [batch_size, patch_size, dim]\n\n # x = x.mean(dim=1) if self.pool == 'mean' else x[:, 0]\n\n # x = self.to_latent(x)\n return x # 作为特征提取器最终输出为[batch_size, dim] eg [64, 1024]\n\n\nif __name__ == \"__main__\":\n model = ViT(\n image_size=256,\n patch_size=32,\n num_classes=1000,\n dim=512,\n depth=4,\n heads=16,\n mlp_dim=2048,\n dropout=0.1,\n emb_dropout=0.1\n )\n img = torch.randn((1, 3, 256, 256))\n out = model(img)\n print(out.shape)\n print()\n","sub_path":"Models/models/vit_feature.py","file_name":"vit_feature.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"416582647","text":"import cv2\n\nimg = cv2.imread(\"test.jpg\", cv2.IMREAD_UNCHANGED)\n# (多少列 行) 一定要是整数\ndst_img = cv2.resize(img, (200, 150))\ncv2.imshow(\"dst_img\", dst_img)\ndst_img = cv2.resize(img, None, fx=0.5, fy=0.7) # 宽设置成原图像的0.5倍 高设置成原图像的0.7倍 None处填写希望缩放到的图像大小元组\ncv2.imshow(\"dst_img_\", dst_img)\ncv2.imshow(\"img\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"图像缩放.py","file_name":"图像缩放.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"585536025","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\" Sherpany application urls \"\"\"\n\nfrom django.conf.urls import url\nfrom .views import index, store, reset\n\nurlpatterns = [\n\n 
url(r'^$', index, name='index'),\n url(r'^store/$', store, name='store'),\n url(r'^reset/$', reset, name='reset'),\n # url(r'^login/$', views.login, name='login'),\n # url(r'^logout/$', views.logout, name='logout'),\n # url(r'^catalog/pack/$', views.packs, name='packs'),\n # url(r'^catalog/photo/$', views.photos, name='photos'),\n\n # url(r'^catalog/pack/add/$', views.add_pack, {}, name='add_pack'),\n # url(r'^catalog/pack/edit/(?P\\d+)/$', views.add_pack, {}, name='edit_pack'),\n]\n","sub_path":"sherpany/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"449148896","text":"# encoding: utf-8\nimport json\n\nfrom project import Cache\nfrom project.users.models import User\nfrom project.users.tests.factories import UserFactory, UrlFactory\nfrom project.users.tests.views import BaseViewTestCase\n\n\nclass TestViewUrl(BaseViewTestCase):\n def tearDown(self):\n User.drop_collection()\n Cache.redis.flushall()\n\n def test_post_url_without_shorten(self):\n user = UserFactory.create()\n\n post_data = {\n 'original': 'http://globo.com',\n }\n\n response = self.client.post(\n \"/user/hugo/url\",\n data=json.dumps(post_data),\n content_type='application/json; charset=utf-8'\n )\n self.assertEquals(response.status_code, 201)\n user.reload()\n\n self.assertEquals(user.urls.count(), 1)\n self.assertNotEquals(user.urls[0].shortened, None)\n\n def test_post_url_with_shorten(self):\n user = UserFactory.create()\n my_shortened = u'myurl'\n post_data = {\n 'original': 'http://testtest.com',\n 'shortened': my_shortened,\n }\n\n response = self.client.post(\n \"/user/hugo/url\",\n data=json.dumps(post_data),\n content_type='application/json; charset=utf-8')\n self.assertEquals(response.status_code, 201)\n user.reload()\n\n self.assertEquals(user.urls.count(), 1)\n self.assertEquals(user.urls[0].shortened, my_shortened)\n\n def test_post_url_should_return_409_for_duplicated_shortened(self):\n user = UserFactory.build()\n url = UrlFactory.build()\n user.urls.append(url)\n user.save()\n\n post_data = {\n 'original': 'http://mysecondeurl.com',\n 'shortened': 'myurl'\n }\n\n response = self.client.post(\n \"/user/hugo/url\",\n data=json.dumps(post_data),\n content_type='application/json; charset=utf-8'\n )\n self.assertEquals(response.status_code, 409)\n\n user.reload()\n self.assertEquals(user.urls.count(), 1)\n\n def test_get_url_should_return_200_when_url_exist(self):\n user = UserFactory.build()\n url = UrlFactory.build()\n user.urls.append(url)\n user.save()\n\n response = self.client.get(\"/user/hugo/url/myurl\")\n self.assertEquals(response.status_code, 200)\n self.assertIn('clicks', response.data)\n\n def test_get_url_should_return_404_when_url_does_not_exist(self):\n user = UserFactory.build()\n url = UrlFactory.build()\n user.urls.append(url)\n user.save()\n\n response = self.client.get(\"/user/hugo/url/myanotherurl\")\n self.assertEquals(response.status_code, 404)\n\n def test_put_url_should_return_200(self):\n user = UserFactory.build()\n url = UrlFactory.build()\n user.urls.append(url)\n user.save()\n\n post_data = {\n 'shortened': 'myanotherurl',\n }\n\n response = self.client.put(\n \"/user/hugo/url/myurl\",\n data=json.dumps(post_data),\n content_type='application/json; charset=utf-8'\n )\n self.assertEquals(response.status_code, 200)\n\n content = json.loads(response.data)\n self.assertEquals('myanotherurl', content['shortened'])\n\n user.reload()\n 
self.assertEquals(user.urls.count(), 1)\n\n cache = Cache.redis.get('myurl:clicks')\n self.assertEquals(cache, None)\n cache = Cache.redis.get('myurl:original')\n self.assertEquals(cache, None)\n cache = Cache.redis.get('myanotherurl:clicks')\n self.assertNotEquals(cache, None)\n cache = Cache.redis.get('myanotherurl:original')\n self.assertNotEquals(cache, None)\n\n def test_delete_url_should_return_200_when_url_exist(self):\n user = UserFactory.build()\n url = UrlFactory.build()\n user.urls.append(url)\n user.save()\n\n response = self.client.delete(\"/user/hugo/url/myurl\")\n self.assertEquals(response.status_code, 200)\n\n user.reload()\n self.assertEquals(user.urls.count(), 0)\n\n cache = Cache.redis.get('myurl:clicks')\n self.assertEquals(cache, None)\n cache = Cache.redis.get('myurl:original')\n self.assertEquals(cache, None)\n\n def test_delete_url_should_return_204_when_url_does_not_exist(self):\n UserFactory.create()\n response = self.client.delete(\"/user/hugo/url/myurl\")\n self.assertEquals(response.status_code, 204)\n","sub_path":"project/users/tests/views/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":4495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"258054252","text":"import uuid\nfrom datetime import datetime\nfrom unittest.mock import Mock, ANY\n\nimport pytest\nfrom assertpy import assert_that\nfrom hca_ingest.api.ingestapi import IngestApi\nfrom hca_ingest.utils.date import date_to_json_string, parse_date_string\nfrom openpyxl.workbook import Workbook\n\nfrom exporter.ingest.service import IngestService\nfrom exporter.metadata.descriptor import FileDescriptor\nfrom exporter.metadata.resource import MetadataResource\nfrom exporter.schema.resource import SchemaResource\nfrom exporter.terra.spreadsheet.exporter import SpreadsheetExporter\nfrom exporter.terra.storage import TerraStorageClient\n\n\n@pytest.fixture\ndef ingest_api(mocker):\n ingest_api: IngestApi = mocker.Mock(spec=IngestApi)\n ingest_api.get_latest_schema_url.return_value = 'https://schema.humancellatlas.org/type/file/2.5.0/supplementary_file'\n return ingest_api\n\n\n@pytest.fixture\ndef ingest_service(mocker, ingest_api, project, spreadsheet_dcp_version):\n ingest_service: IngestService = mocker.Mock(spec=IngestService)\n ingest_service.api = ingest_api\n ingest_service.get_metadata.return_value = project\n ingest_service.get_submission_dcp_version_from_uuid.return_value = spreadsheet_dcp_version\n return ingest_service\n\n\n@pytest.fixture\ndef service_with_new_spreadsheet(ingest_service, new_spreadsheet_version):\n ingest_service.get_submission_dcp_version_from_uuid.return_value = new_spreadsheet_version\n return ingest_service\n\n\n@pytest.fixture\ndef spreadsheet_dcp_version() -> str:\n return \"2022-06-13T14:32:59.593000Z\"\n\n\n@pytest.fixture\ndef new_spreadsheet_version() -> str:\n return date_to_json_string(datetime.now())\n\n\n@pytest.fixture\ndef submission_uuid():\n return str(uuid.uuid4())\n\n\n@pytest.fixture\ndef new_submission_uuid():\n return str(uuid.uuid4())\n\n\n@pytest.fixture\ndef project_uuid():\n return str(uuid.uuid4())\n\n\n@pytest.fixture(params=[{}, {\"project_short_name\": \"Test_Project\"}], ids=[\"project without short name\", \"project with short name\"])\ndef project_dict(project_uuid, request):\n return {\n \"uuid\": {\"uuid\": project_uuid},\n \"dcpVersion\": \"2022-05-29T13:51:08.593000Z\",\n \"content\": {\n \"describedBy\": 
\"https://schema.humancellatlas.org/type/project/17.0.0/project\",\n \"project_core\": request.param\n },\n \"type\": \"project\",\n \"submissionDate\": \"2022-03-28T13:51:08.593000Z\",\n \"updateDate\": \"2022-05-28T13:51:08.593000Z\",\n }\n\n\n@pytest.fixture\ndef project(project_dict) -> MetadataResource:\n return MetadataResource.from_dict(project_dict)\n\n\n@pytest.fixture\ndef terra_client(mocker):\n terra_client: TerraStorageClient = mocker.Mock(spec=TerraStorageClient)\n return terra_client\n\n\n@pytest.fixture()\ndef workbook():\n return Workbook()\n\n\n@pytest.fixture\ndef exporter(ingest_service, terra_client, workbook, mocker):\n exporter = SpreadsheetExporter(ingest_service, terra_client)\n exporter.downloader.get_workbook_from_submission = mocker.Mock(return_value=workbook)\n return exporter\n\n\n@pytest.fixture\ndef exporter_with_new_spreadsheet(service_with_new_spreadsheet, terra_client, workbook, mocker):\n exporter = SpreadsheetExporter(service_with_new_spreadsheet, terra_client)\n exporter.downloader.get_workbook_from_submission = mocker.Mock(return_value=workbook)\n return exporter\n\n\n@pytest.fixture()\ndef failing_exporter(ingest_service, terra_client, mocker):\n exporter = SpreadsheetExporter(ingest_service, terra_client)\n exporter.downloader.get_workbook_from_submission = mocker.Mock(\n side_effect=RuntimeError('spreadsheet generation problem')\n )\n return exporter\n\n\n@pytest.fixture\ndef initial_supplementary_file(terra_client, exporter, project, submission_uuid):\n exporter.export_spreadsheet(project.uuid, submission_uuid)\n return check_file_metadata(project, terra_client=terra_client)\n\n\n@pytest.fixture\ndef supplementary_file_from_new_export(terra_client, exporter_with_new_spreadsheet, project, submission_uuid):\n exporter_with_new_spreadsheet.export_spreadsheet(project.uuid, submission_uuid)\n return check_file_metadata(project, terra_client=terra_client)\n\n\n@pytest.fixture\ndef supplementary_file_from_new_submission(terra_client, exporter_with_new_spreadsheet, project, new_submission_uuid):\n exporter_with_new_spreadsheet.export_spreadsheet(project.uuid, new_submission_uuid)\n return check_file_metadata(project, terra_client=terra_client)\n\n\ndef test_happy_path(exporter: SpreadsheetExporter,\n terra_client: Mock,\n project: MetadataResource,\n submission_uuid: str,\n caplog):\n # when\n exporter.export_spreadsheet(project.uuid, submission_uuid)\n\n # then\n actual_file_metadata = check_file_metadata(project, terra_client=terra_client)\n check_generated_links(terra_client, project, actual_file_metadata)\n check_spreadsheet_copied_to_terra(actual_file_metadata, project, terra_client)\n assert \"Generating Spreadsheet\" in caplog.text\n\n\ndef test_exception_during_export(failing_exporter: SpreadsheetExporter, project_uuid, submission_uuid, caplog):\n # given an exception is thrown while generating the spreadsheet\n\n # when\n with pytest.raises(RuntimeError):\n failing_exporter.export_spreadsheet(project_uuid, submission_uuid)\n\n\ndef test_spreadsheet_metadata_on_submission_update(initial_supplementary_file, supplementary_file_from_new_export):\n check_file_prefix_matches(initial_supplementary_file, supplementary_file_from_new_export)\n check_uuids_match(initial_supplementary_file, supplementary_file_from_new_export)\n check_dates_differ(initial_supplementary_file, supplementary_file_from_new_export)\n\n\ndef test_spreadsheet_metadata_on_new_submission(initial_supplementary_file, supplementary_file_from_new_submission):\n 
check_file_prefix_matches(initial_supplementary_file, supplementary_file_from_new_submission)\n check_uuids_differ(initial_supplementary_file, supplementary_file_from_new_submission)\n check_dates_differ(initial_supplementary_file, supplementary_file_from_new_submission)\n\n\ndef check_file_prefix_matches(initial, new):\n assert_that(get_file_info(initial)['project_shortname_or_uuid']).is_equal_to(get_file_info(new)['project_shortname_or_uuid'])\n\n\ndef check_uuids_match(initial, new):\n assert_that(initial.uuid).is_equal_to(new.uuid)\n assert_that(initial.full_resource['dataFileUuid']).is_equal_to(new.full_resource['dataFileUuid'])\n\n\ndef check_uuids_differ(initial, new):\n assert_that(initial.uuid).is_not_equal_to(new.uuid)\n assert_that(initial.full_resource['dataFileUuid']).is_not_equal_to(new.full_resource['dataFileUuid'])\n\n\ndef check_dates_differ(initial, new):\n assert_that(initial.dcp_version).is_not_equal_to(new.dcp_version)\n assert_that(get_file_info(initial)['date']).is_not_equal_to(get_file_info(new)['date'])\n\n\ndef check_spreadsheet_copied_to_terra(actual_file_metadata: MetadataResource,\n project: MetadataResource, terra_client):\n terra_client.write_to_staging_bucket.assert_called_with(\n object_key=f'{project.uuid}/data/{actual_file_metadata.full_resource[\"fileName\"]}',\n data_stream=ANY,\n overwrite=True\n )\n\n\ndef check_generated_links(terra_client, project_metadata: MetadataResource, file_metadata: MetadataResource):\n terra_client.write_links.assert_called_with(\n ANY,\n file_metadata.uuid,\n file_metadata.dcp_version,\n project_metadata.uuid,\n )\n\n\ndef check_file_metadata(project_metadata: MetadataResource, file_metadata=None, terra_client=None) -> MetadataResource:\n if terra_client and not file_metadata:\n terra_client.write_metadata.assert_called_with(ANY, project_metadata.uuid, overwrite=True)\n if file_metadata:\n raise ValueError('Bad input. 
Use only one of terra_client or file_metadata arguments')\n file_metadata = terra_client.write_metadata.call_args.args[0]\n assert_that(file_metadata.metadata_json['file_core']) \\\n .has_format('xlsx') \\\n .has_file_source(\"DCP/2 Ingest\") \\\n .has_content_description([{\n \"text\": \"metadata spreadsheet\",\n \"ontology\": \"data:2193\",\n \"ontology_label\": \"Database entry metadata\"\n }])\n filename = file_metadata.full_resource['fileName']\n short_name = project_metadata.metadata_json.get('project_core', {}).get('project_short_name')\n assert_that(filename) \\\n .starts_with(short_name if short_name else project_metadata.uuid) \\\n .contains('_metadata_') \\\n .contains(parse_date_string(file_metadata.dcp_version).strftime('%d-%m-%Y')) \\\n .ends_with('.xlsx')\n TerraStorageClient.validate_json_doc(file_metadata.get_content())\n TerraStorageClient.update_schema_info_and_validate(\n FileDescriptor.from_file_metadata(file_metadata).to_dict(),\n SchemaResource(schema_url='https://schema.humancellatlas.org/system/2.0.0/file_descriptor', schema_version='2.0.0'))\n return file_metadata\n\n\ndef get_file_info(file: MetadataResource):\n name_split = file.full_resource['fileName'].split('_metadata_')\n return {\n 'project_shortname_or_uuid': name_split[0],\n 'date': name_split[1]\n }\n","sub_path":"tests/exporter/terra/spreadsheet/test_exporter.py","file_name":"test_exporter.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"338118611","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport matplotlib.patches as mpatches\nimport time\n\nlist1 = []\nlist2 = []\na = np.arange(100,10000,1)\n\ndef time1(n):\n start1 = time.time()\n y = F2(n)\n end1 = time.time()\n temp1 = end1-start1\n list1.append(temp1)\n\n start2 = time.time()\n z = F3(n)\n end2 = time.time()\n temp2 = end2-start2\n list2.append(temp2)\n\n\n\ndef F2(n):\n if(n <= 2 ):\n return 1\n else:\n p = 1\n q = 1\n r = 0\n for i in range (3,n+1):\n r = p + q\n p = q\n q = r\n return r\n\ndef power_of_G(n):\n G = np.array([[0,1], [1,1]])\n if(n == 1):\n return G\n else:\n if( n%2 == 0):\n H = power_of_G(n/2)\n C = np.dot(H , H)\n return C #returning H X H\n else:\n H = power_of_G((n-1)/2) \n C = np.dot(H , H)\n temp = np.dot(C , G)\n return temp #returning H x H x G\n\n\ndef F3(n):\n temp = power_of_G(n)\n return temp[0][1]\n\n\nfor i in a:\n time1(i)\n\n\nplt.plot(a,list1, label=\"O(n)\", color = 'red')\nplt.plot(a,list2, label=\"O(log(n))\", color = 'black' )\n# plt.ylim(0,0.0020)\n# plt.xlim(0,4000)\nplt.xlabel('n')\nplt.ylabel('Running time')\nplt.title('Running time of F2(n) vs F3(n)')\nplt.legend()\nplt.show()\n","sub_path":"q2_graph.py","file_name":"q2_graph.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"132358968","text":"from color import color_dist, calculate_background_color\n\ndef remove_color(pixels, color, threshold=35):\n \"\"\" Sets every pixel with a near color to the given one to black \"\"\"\n\n width = len(pixels[0])\n height = len(pixels)\n\n # output calculation\n out = pixels.copy()\n\n for x in range(0, width):\n for y in range(0, height):\n current_pixel = pixels[y][x]\n\n if color_dist(color, current_pixel) < threshold:\n out[y][x] = [0, 0, 0]\n\n return out 
","sub_path":"color_removal.py","file_name":"color_removal.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"10743653","text":"import socket\nimport sys\n\nHOST, PORT = \"localhost\", 8888\n# data = \" \".join(sys.argv[1:])\ndata = \" LOLiLOL\"\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# sock.sendto(data + \"\\n\", (HOST, PORT))\nsock.sendto((data+\"\\n\").encode(), (HOST, PORT))\nreceived = sock.recv(1024)\n\nprint(\"Sent: {}\".format(data))\nprint(\"Received: {}\".format(received))","sub_path":"tests/client_gist.py","file_name":"client_gist.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"402190580","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float64\n\nn = 0\n\ndef cb(message):\n global n\n n = message.data*3\n\nif __name__ == '__main__': \n rospy.init_node('three')\n sub = rospy.Subscriber('count_up', Float64, cb) \n pub = rospy.Publisher('three', Float64, queue_size=1) \n rate = rospy.Rate(1)\n while not rospy.is_shutdown():\n pub.publish(n)\n rate.sleep(100)\n \n \n","sub_path":"mypkg/scripts/three.py","file_name":"three.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"203849697","text":"import json\nfrom jsonschema import validate\n\n\nclass Procurer(object):\n schema = {\n 'type': 'object',\n 'properties': {\n 'name': {\n 'type': 'string'\n },\n 'url': {\n 'type': 'string',\n 'pattern': '^https?://\\[?\\w'\n },\n 'procurementPageUrl': {\n 'type': 'string',\n 'pattern': '^https?://\\[?\\w'\n },\n 'selectors': {\n 'type': 'object',\n 'properties': {\n 'title': {\n 'type': 'string',\n },\n 'url': {\n 'type': 'string'\n },\n 'publishDate': {\n 'type': 'string'\n },\n 'closureDate': {\n 'type': 'string'\n }\n },\n 'required': ['title']\n }\n },\n 'required': ['name', 'url', 'procurementPageUrl', 'selectors']\n }\n\n def __init__(self, data):\n self.data = data\n\n def validate(self):\n validate(self.data, self.schema)\n\n return self\n\n\ndef get_procurers():\n fd = open('./sources.json')\n\n return [Procurer(data=procurer).validate() for procurer in json.load(fd)]\n","sub_path":"bagatela/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"144059291","text":"# -*- coding:utf-8 -*-\n\n''' \n 链表的结点类\n'''\n\nclass Node:\n def __init__(self,value):\n self.data = value\n self.next = None\n\n'''\n 根据生成的节点列表,把各个节点连接成一个单向链表\n'''\n\ndef linked_node(nodes):\n length = len(nodes)\n for note_index in range(length - 1):\n if note_index == length - 1:\n nodes[note_index].next = None\n else:\n nodes[note_index].next = nodes[note_index + 1]\n return nodes[0]\n\n'''\n 判断链表中是否有重复元素,有则返回True\n'''\n\ndef is_dumlicate(nodes):\n while True:\n if nodes.next != None:\n if nodes.next.data == nodes.data:\n return True\n nodes = nodes.next\n if nodes is None:\n break\n return False\n\n'''\n 根据已经排好序的数据集生成一个节点列表\n'''\n\ndef generate(values):\n nodes = []\n for value in values:\n node = Node(value)\n if node not in nodes:\n nodes.append(node)\n return nodes\n\n'''\n 删除节点中重复的数据,只能实现一趟重复元素的删除,也就是说只执行一次无法全部删除\n'''\n\ndef delete(nodes):\n while True:\n if nodes.next != None and nodes.next.data == nodes.data:\n if nodes.next.next is None:\n nodes.next = 
None\n else:\n nodes.next = nodes.next.next\n nodes = nodes.next\n if nodes is None:\n break\n\n''' \n 打印链表\n'''\n\ndef print_nodes(node):\n print(f\"{node.data} --> \",end=\"\")\n while True:\n node = node.next\n if node is None:\n break\n print(f\"{node.data} --> \",end=\"\")\n\n print(\"end\")\n\ndef run():\n data = [1,1,2,2,2,2,2,2,2,2,2]\n nodes = generate(data)\n linked_table = linked_node(nodes)\n print_nodes(linked_table)\n while True:\n if is_dumlicate(linked_table):\n delete(linked_table)\n else:\n break\n print_nodes(linked_table)\n\ndef main():\n run()\n\nif __name__ == '__main__':\n main()","sub_path":"Easy/Python/rm_list_dump.py","file_name":"rm_list_dump.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"68849196","text":"'''\n###############################################################################\nTools: Useful classes for use throughout the project\n###############################################################################\n'''\nimport scipy as _sp\nfrom collections import OrderedDict as _odict\n\nclass PrintableList(list):\n def __str__(self):\n count = 0\n header = '-'*60\n print('\\n')\n print(header)\n self.sort()\n for item in self:\n count = count + 1\n print(count,'\\t: ',item)\n return header\n\nclass PrintableDict(_odict):\n def __str__(self):\n header = '-'*60\n print(header)\n print(\"{a:<25s} {b:<25s}\".format(a='key', b='value'))\n print(header)\n for item in self.keys():\n print(\"{a:<25s} {b:<25s}\".format(a=item, b=self[item]))\n print(header)\n return ''\n \nclass AttributVeiew(object):\n def __init__(self, d):\n temp = {}\n for item in d:\n if type(d[item][0]) == _sp.bool_:\n key = 'label_'+item.replace('.','_')\n else:\n key = 'prop_'+item.replace('.','_')\n temp[key] =d[item]\n self.__dict__ = temp\n\nclass ClonedCore(dict):\n def __init__(self,obj):\n self.update(obj)\n self.name = obj.name\n","sub_path":"OpenPNM/Base/__Tools__.py","file_name":"__Tools__.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"247386133","text":"\r\n\r\nimport os\r\nimport time\r\nimport sys\r\nsys.path.append(\"./GraphSAGE/graphsage\")\r\n\r\nfrom edgeMinibatch import EdgeMinibatchIterator\r\nimport networkx as nx\r\nimport numpy as np\r\nimport tensorflow as tf\r\n#tf.config.experimental_run_functions_eagerly(True)\r\n\r\nedgelist = [\r\n [1,2, np.random.rand(10,500), 0],\r\n [3,4, np.random.rand(10,500), 0],\r\n [4,5, np.random.rand(10,500), 0],\r\n [5,6, np.random.rand(10,500), 0]\r\n]\r\n\r\nG = nx.Graph()\r\nfor i in range(1,7):\r\n G.add_node(i, title = \"test\", test = False, val = False)\r\n\r\n G.add_node(10*i, title = \"test\", test = False, val = False)\r\n G.add_node(10*i+1, title = \"test\", test = False, val = False)\r\n G.add_node(10*i+2, title = \"test\", test = False, val = False)\r\n G.add_node(10*i+3, title = \"test\", test = False, val = False)\r\n\r\n G.add_node(100*i, title = \"test\", test = False, val = False)\r\n G.add_node(100*i+1, title = \"test\", test = False, val = False)\r\n G.add_node(100*i+2, title = \"test\", test = False, val = False)\r\n G.add_node(100*i+3, title = \"test\", test = False, val = False)\r\n\r\n\r\n G.add_edge(i,10*i)\r\n G.add_edge(i,10*i+1)\r\n G.add_edge(i,10*i+2)\r\n G.add_edge(i,10*i+3)\r\n\r\n G.add_edge(10*i,100*i)\r\n G.add_edge(10*i+1,100*i+1)\r\n G.add_edge(10*i+2,100*i+2)\r\n 
G.add_edge(10*i+3,100*i+3)\r\n\r\ncompteur = 0\r\ndicoIdMap = {}\r\nfor node in G.nodes:\r\n dicoIdMap[node] = compteur\r\n compteur+=1\r\n\r\n\r\n#features = train_data[1]\r\nid_map = dicoIdMap\r\nprint(G.nodes[10][\"test\"])\r\nminibatch = EdgeMinibatchIterator(G, edgelist,\r\n id_map,\r\n batch_size=2,\r\n max_degree=25)\r\n\r\nprint(minibatch.next_minibatch_feed_dict())\r\n# adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)\r\n# adj_info = tf.Variable(adj_info_ph, trainable=False, name=\"adj_info\")\r\n\r\nfrom neigh_samplers import UniformNeighborSampler\r\nfrom models import SAGEInfo\r\nfrom classifier import EdgeClassifier\r\nprint(minibatch.adj)\r\nsampler = UniformNeighborSampler(minibatch.adj)\r\nlayer_infos = [SAGEInfo(\"node\", sampler, 4, 128),\r\n SAGEInfo(\"node\", sampler, 4, 256)]\r\nfeatures = []\r\nfor node in G.nodes:\r\n features.append(np.random.rand(5))\r\nfeatures = tf.Variable(features)\r\nlabels = [0 for i in range(len(G.nodes))]\r\n\r\nmodeleFinal = EdgeClassifier(features= features, adj= minibatch.adj, degrees = minibatch.deg,layer_infos=layer_infos)\r\n\r\n#print(modeleFinal(minibatch.next_minibatch_feed_dict()))\r\n\r\n#print(modeleFinal(minibatch.next_minibatch_feed_dict()))\r\n\r\n@tf.function\r\ndef normloss(vecA):\r\n return tf.norm(vecA)\r\ndef train_step(model, batch, labels):\r\n with tf.GradientTape() as tape:\r\n print(batch)\r\n # training=True is only needed if there are layers with different\r\n # behavior during training versus inference (e.g. Dropout).\r\n\r\n predictions = model(batch, training=True)\r\n print(predictions)\r\n loss = normloss(predictions)\r\n gradients = tape.gradient(loss, model.trainable_variables)\r\n print(\"LOSS:\",loss)\r\n model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))\r\n\r\na = minibatch.next_minibatch_feed_dict()\r\nfor i in range(500):\r\n train_step(modeleFinal,a,0)","sub_path":"fichier_debug.py","file_name":"fichier_debug.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"162403043","text":"import numpy, random\nfrom . 
import tools\n\ndef datasetiris(shuffle = None):\n\t'''\n\t\tMethod to return the Iris Dataset\n\t\t: param shuffle : parameter to control whether the dataset is randomly shuffled\n\t\t: returns : Iris Dataset\n\t\tThe dataset was obtained from http://archive.ics.uci.edu/ml/machine-learning-databases/iris/\n\t'''\n\tshuffle = shuffle if shuffle is not None else False\n\turl = 'http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n\tfilename = 'iris.data'\n\tclassnames = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}\n\tinputs = 4\n\toutputs = 3\n\ttools.download(url, filename)\n\tdataset = list()\n\twith open(filename, 'r') as datafile:\n\t\tfor line in datafile:\n\t\t\tif line == '\\n':\n\t\t\t\tcontinue\n\t\t\tsepallength, sepalwidth, petallength, petalwidth, classname = line.strip().split(',')\n\t\t\tinputvector = numpy.array([[float(sepallength)], [float(sepalwidth)], [float(petallength)], [float(petalwidth)]])\n\t\t\toutputvector = numpy.zeros((outputs, 1), dtype = float)\n\t\t\toutputvector[classnames[classname]][0] = 1.0\n\t\t\tdataset.append((inputvector, outputvector))\n\tif shuffle:\n\t\trandom.shuffle(dataset)\n\treturn dataset\n","sub_path":"data/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"47057574","text":"#!/bin/python3\n\n# import sys\n# from bisect import insort\n\n\n# def median(arr):\n# if len(arr) % 2 == 0:\n# left = arr[len(arr)//2]\n# right = arr[(len(arr)//2)-1]\n# m = (left + right) / 2.0\n# else:\n# m = arr[len(arr)//2]\n# return m\n\n# n = int(input().strip())\n# a = []\n# a_i = 0\n# for a_i in range(n):\n# insort(a, int(input().strip()))\n# print(float(median(a)))\n\n\nimport sys\nimport heapq\n\n\nn = int(input().strip())\nleft = [] # max heap\nright = [] # min heap\n\nfor _ in range(n):\n val = int(input().strip())\n\n pop_val = heapq.heappushpop(right, val)\n val = -heapq.heappushpop(left, -pop_val)\n\n if len(right) <= len(left):\n heapq.heappush(right, val)\n else:\n heapq.heappush(left, -val)\n\n if len(right) > len(left):\n print(format(right[0], '.1f'))\n else:\n print((right[0] - left[0]) / 2.0)\n","sub_path":"Cracking the Coding Interview/Heaps- Find the Running Median.py","file_name":"Heaps- Find the Running Median.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"535195651","text":"import os\nimport sys\nimport site\nimport platform\n\n\ndef under_virtualenv():\n return hasattr(sys, 'real_prefix')\n\nif hasattr(site, 'getsitepackages'):\n INSTALL_DIR = site.getsitepackages()[0]\n USER_SITE = site.getusersitepackages()\n ALL_SITE_PACKAGES = site.getsitepackages() + [USER_SITE]\nelse:\n # XXX: WORKAROUND for older python versions and some broken virtualenvs\n ## we have to guess site packages location...\n USER_SITE = site.USER_SITE\n INSTALL_DIR = None\n system = platform.system()\n if system == 'Linux':\n tmp = '{0}/local/lib/python{1}.{2}/'.format(sys.prefix, *sys.version_info[:2])\n d, s = os.path.join(tmp, 'dist-packages'), os.path.join(tmp, 'site-packages')\n if os.path.exists(d):\n INSTALL_DIR = d\n elif os.path.exists(s):\n INSTALL_DIR = s\n elif system == 'Windows':\n tmp = os.path.join(sys.prefix, 'site-packages')\n if os.path.exists(tmp):\n INSTALL_DIR = tmp\n if INSTALL_DIR is None:\n from distutils.sysconfig import get_python_lib\n INSTALL_DIR = 
get_python_lib(True)\n\n if under_virtualenv():\n ALL_SITE_PACKAGES = [INSTALL_DIR]\n else:\n ALL_SITE_PACKAGES = [INSTALL_DIR, USER_SITE]\n\n #try:\n # INSTALL_DIR = sorted([p for p in sys.path if p.endswith('dist-packages')],\n # key=lambda i: 'local' in i, reverse=True)[0]\n #except IndexError:\n # pass\n #if not INSTALL_DIR: ## Are we on Windows?\n # try:\n # INSTALL_DIR = sorted([p for p in sys.path if p.endswith('site-packages')],\n # key=lambda i: 'local' in i, reverse=True)[0]\n # except IndexError:\n # pass\n #if not INSTALL_DIR: ## We have to use /usr/lib/pythonx.y/dist-packages or something similar\n # from distutils.sysconfig import get_python_lib\n # INSTALL_DIR = get_python_lib()\n\n## Under virtualenv USER_SITE is the same as INSTALL_DIR\nif under_virtualenv():\n USER_SITE = INSTALL_DIR\n\nEASY_INSTALL = os.path.join(INSTALL_DIR, 'easy-install.pth')\nif not os.path.exists(EASY_INSTALL):\n d = os.path.dirname(EASY_INSTALL)\n try:\n if not os.path.exists(d):\n os.makedirs(d)\n open(EASY_INSTALL, 'w').close()\n ## We do not have root permissions...\n except IOError:\n ## So we do nothing!\n pass\n\nPYG_LINKS = os.path.join(USER_SITE, 'pyg-links.pth')\n\nif platform.system() == 'Windows':\n BIN = os.path.join(sys.prefix, 'Scripts')\n if not os.path.exists(BIN):\n BIN = os.path.join(sys.prefix, 'bin')\nelse:\n BIN = os.path.join(sys.prefix, 'bin')\n ## Forcing to use /usr/local/bin on standard Mac OS X\n if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':\n BIN = '/usr/local/bin'\n\n\n## If we are running on a Unix system and we are in a SUDO session the `SUDO_UID`\n## environment variable will be set. We can use that to get the user's home\n## We also set to None all the variables that depend on HOME.\n\nHOME = os.getenv('HOME')\nPYG_HOME = None\nCFG_FILES = [os.path.join(os.getcwd(), 'pyg.conf')]\n_sudo_uid = os.getenv('SUDO_UID')\nif _sudo_uid:\n import pwd\n _sudo_uid = int(_sudo_uid)\n HOME = pwd.getpwuid(_sudo_uid).pw_dir\n\n## Here is Pyg's HOME directory\n## If it does not exists we create it\nif HOME is not None:\n PYG_HOME = os.path.join(HOME, '.pyg')\n if not os.path.exists(PYG_HOME):\n os.makedirs(PYG_HOME)\n\n ## PACKAGES_CACHE has been removed because with `pkg_resources.working_set` we\n ## don't need a cache\n CFG_FILES.extend([os.path.join(HOME, 'pyg.conf'),\n os.path.join(PYG_HOME, 'pyg.conf')]\n )\n","sub_path":"pyg/locations.py","file_name":"locations.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"513015640","text":"class Strategy(): # Strategy no.3: Graville's rules\r\n # option setting needed\r\n def __setitem__(self, key, value):\r\n self.options[key] = value\r\n\r\n # option setting needed\r\n def __getitem__(self, key):\r\n return self.options.get(key, '')\r\n\r\n def __init__(self):\r\n # strategy property\r\n self.subscribedBooks = {\r\n 'Binance': {\r\n 'pairs': ['BTC-USDT'],\r\n },\r\n }\r\n self.period = 35*60\r\n self.options = {}\r\n\r\n # User defined constants\r\n self.buy_ratio = 0.7 # The ratio of the balance spent on buying BTC\r\n\r\n # user defined class attribute\r\n self.last_type = 'sell' # Last action other than waiting\r\n self.last_ma_trend = None # Trend of MA at the last period\r\n self.close_price_trace = np.array([]) # The array of recent prices\r\n self.ma_short = 20 # Range of coverage of the short MA, in periods\r\n self.ma_long = 50 # Range of coverage of the long MA, in periods\r\n self.last_buy = -1 # 
Last buy price (-1 if waiting to buy)\r\n self.last_s_ma = None # Value of the short MA at the last period\r\n\r\n # Representing the trends\r\n self.UP = 1\r\n self.sUP = 2\r\n self.FLAT = 3\r\n self.sDOWN = 4\r\n self.DOWN = 5\r\n Log(\"peroid: %d; ma: long %d, short %d; buy ratio %f\" %(self.period, self.ma_long, self.ma_short, self.buy_ratio))\r\n\r\n def get_current_ma_trend(self, s_ma, l_ma):\r\n # Trend determination method: MA Cross\r\n if np.isnan(s_ma) or np.isnan(l_ma):\r\n return None\r\n\r\n Log(\"s_ma %f l_ma %f\" %(s_ma, l_ma))\r\n if s_ma > l_ma:\r\n return self.UP\r\n elif s_ma < l_ma:\r\n return self.DOWN\r\n return self.FLAT\r\n \r\n def granville(self, close_price, s_ma, ma_trend):\r\n # Currently implemented: rules 1 and 5\r\n if np.isnan(close_price) or np.isnan(s_ma) or ma_trend == None:\r\n return -1\r\n\r\n # Log(\"cur price %f ma %f; last price %f ma %f\" %(close_price, ma, self.close_price_trace[-2], self.last_ma))\r\n if ma_trend == self.UP:\r\n if self.close_price_trace[-2] < self.last_s_ma and close_price > s_ma:\r\n return 1\r\n if ma_trend == self.DOWN:\r\n if self.close_price_trace[-2] > self.last_s_ma and close_price < s_ma:\r\n return 5\r\n return 0\r\n\r\n # called every self.period\r\n def trade(self, information):\r\n\r\n exchange = list(information['candles'])[0]\r\n pair = list(information['candles'][exchange])[0]\r\n close_price = information['candles'][exchange][pair][0]['close']\r\n\r\n # add latest price into trace\r\n self.close_price_trace = np.append(self.close_price_trace, [float(close_price)])\r\n # only keep max length of ma count elements\r\n self.close_price_trace = self.close_price_trace[-self.ma_long:]\r\n # calculate current ma cross status\r\n l_ma = talib.SMA(self.close_price_trace, self.ma_long)[-1]\r\n s_ma = talib.SMA(self.close_price_trace, self.ma_short)[-1]\r\n cur_ma_trend = self.get_current_ma_trend(s_ma, l_ma)\r\n if cur_ma_trend is None:\r\n return []\r\n cur_type = self.granville(float(close_price),s_ma, cur_ma_trend)\r\n # if cur_type > 0:\r\n # Log(\"current trend: %d\" %cur_type)\r\n self.last_s_ma = s_ma\r\n # Log('info: ' + str(information['candles'][exchange][pair][0]['time']) + ', ' + str(information['candles'][exchange][pair][0]['open']) + ', assets' + str(self['assets'][exchange]['BTC']))\r\n\r\n if self.last_ma_trend is None:\r\n self.last_ma_trend = cur_ma_trend\r\n return []\r\n self.last_ma_trend = cur_ma_trend\r\n \r\n action = 'wait' \r\n if self.last_type == 'sell' and cur_type > 0 and cur_type <= 4:\r\n action = 'buy' # Rules 1 to 4 => buy\r\n elif self.last_type == 'buy' and cur_type > 4:\r\n action = 'sell' # Rules 5 to 8 => sell\r\n\r\n if action == 'buy':\r\n # Log('buying, ' + exchange + ':' + pair)\r\n self.last_type = 'buy'\r\n self.last_buy = float(close_price)\r\n return [\r\n {\r\n 'exchange': exchange,\r\n 'amount': self['assets'][exchange]['USDT'] * self.buy_ratio / float(close_price),\r\n 'price': -1,\r\n 'type': 'MARKET',\r\n 'pair': pair,\r\n }\r\n ]\r\n elif action == 'sell':\r\n # Log('selling, ' + exchange + ':' + pair)\r\n self.last_type = 'sell'\r\n self.last_buy = -1\r\n return [\r\n {\r\n 'exchange': exchange,\r\n 'amount': -self['assets'][exchange]['BTC'],\r\n 'price': -1,\r\n 'type': 'MARKET',\r\n 'pair': pair,\r\n }\r\n ]\r\n return [] # Default case 'wait'\r\n","sub_path":"Strat_3.py","file_name":"Strat_3.py","file_ext":"py","file_size_in_byte":5155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"18795460","text":"# Author : 
Dhaval Harish Sharma\n# Red ID : 824654344\n# Assignment 5, Question 6D\n# Region Growing Function\n\n# Importing the required libraries\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2gray\nimport numpy as np\nimport cv2\n\ndef regiongrowing(img, threshold = 2):\n # This function performs \"region growing\" in an image from specified\n # seedpoints\n \n # J = regiongrowing(I, seeds, threshold) \n \n # I : input image \n # J : logical output image of region\n # seeds : the position of the seedpoints\n # threshold : maximum intensity distance (defaults to 2)\n\n # The region is iteratively grown by comparing all unallocated neighbouring pixels to the region. \n # The difference between a pixel's intensity value and the region's mean, \n # is used as a measure of similarity. The pixel with the smallest difference \n # measured this way is allocated to the respective region. \n # This process stops when the intensity difference between region mean and\n # new pixel become larger than a certain threshold (t)\n \n # Example:\n \n # I = plt.imread('Regions.jpg')\n # x = 175\n # y = 175\n # J = regiongrowing(I, [x,y], 4) \n # plt.imshow(J, cmap='gray')\n \n # Author: D. Kroon, University of Twente\n \n # Getting the red color of the beans from the image\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n mask1 = cv2.inRange(img_hsv, (120,110,100), (125,180,200))\n red_beans = cv2.bitwise_and(img, img, mask = mask1)\n \n # Getting the yellow color of the beans from the image\n mask2 = cv2.inRange(img_hsv, (90,150,200), (104,190,255))\n yellow_beans = cv2.bitwise_and(img, img, mask = mask2)\n \n # Getting the circular red and yellow beans from the image\n gray_img = (rgb2gray(red_beans + yellow_beans) * 255).astype(np.uint8)\n circles = cv2.HoughCircles(gray_img, cv2.HOUGH_GRADIENT, 1.4, 10)\n \n # Getting the seeds and the grayscale image\n img = (rgb2gray(img) * 255).astype(np.uint8)\n seeds = circles[0].astype(int)\n\n # Dimensions of input image\n dims = img.shape\n \n # Output\n reg = np.zeros(shape = (dims[0], dims[1])).astype('uint8')\n \n # Initializing the nearest elements array\n nearest_elements = []\n \n for seed in seeds:\n \n sum_of_xelements = 0\n sum_of_yelements = 0\n total_elements = 0\n \n # The mean of the segmented region\n mean_reg = float(img[seed[1], seed[0]])\n \n # Number of pixels in region\n size = 1\n pix_area = dims[0] * dims[1]\n \n # Free memory to store neighbours of the (segmented) region\n contour = []\n contour_val = []\n \n # Distance of the region newest pixel to the region mean\n dist = 0\n \n # Neighbor locations (footprint)\n orient = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n cur_pix = [seed[0], seed[1]]\n \n # Start region growing until distance between region and posible new pixels become\n # higher than a certain treshold\n while(dist < threshold and size < pix_area):\n # Add new neighbors pixels\n for j in range(4):\n # Calculate the neighbour coordinate\n temp_pix = [cur_pix[0] + orient[j][0], cur_pix[1] + orient[j][1]]\n \n # Check if neighbour is inside or outside the image\n is_in_img = dims[0] > temp_pix[0] > 0 and dims[1] > temp_pix[1] > 0\n \n # Add neighbor if inside and not already part of the segmented area\n if (is_in_img and (reg[temp_pix[1], temp_pix[0]] == 0)):\n contour.append(temp_pix)\n contour_val.append(img[temp_pix[1], temp_pix[0]] )\n reg[temp_pix[1], temp_pix[0]] = 150\n \n # Add pixel with intensity nearest to the mean of the region, to the region\n dist = abs(int(np.mean(contour_val)) - mean_reg)\n \n dist_list = [abs(i 
- mean_reg) for i in contour_val]\n dist = min(dist_list)\n index = dist_list.index(min(dist_list))\n size += 1\n reg[cur_pix[1], cur_pix[0]] = 255\n \n total_elements += 1\n sum_of_xelements += cur_pix[0]\n sum_of_yelements += cur_pix[1]\n \n # Calculate the new mean of the region\n mean_reg = (mean_reg * size + float(contour_val[index])) / (size+1)\n \n # Save the x and y coordinates of the pixel (for the neighbour add proccess)\n cur_pix = contour[index]\n \n # Remove the pixel from the neighbour (check) list\n del contour[index]\n del contour_val[index]\n \n # Calculating the distance array\n nearest_elements.append([(sum_of_xelements / total_elements), (sum_of_yelements / total_elements)])\n min_dist = abs(nearest_elements[0] - nearest_elements[1])\n \n # Return the segmented area as logical matrix\n return reg, min_dist\n\n# Converting the image into numpy array\nI = plt.imread('Beans.jpg')\n\n# Calling regiongrowing\nJ, min_dist = regiongrowing(I, 25)\n\n# Printing the minimum distance\nprint(min_dist)\n\n# Printing the output image\nfig, ax = plt.subplots(nrows = 1, ncols = 2)\nax[0].imshow(I, cmap = 'gray')\nax[1].imshow(J, cmap = 'gray')","sub_path":"Assignment 5/Question6D.py","file_name":"Question6D.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"202113400","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nimport os\nimport urllib\n\nclass DoubanMoviesPipeline(object):\n\n def process_item(self, item, spider):\n\n if not os.path.isdir('movies'):\n os.mkdir('movies')\n if not os.path.isdir('movies/'+item['name']):\n os.mkdir('movies/'+item['name'])\n\n filename = 'movies/'+item['name']+'/'+item['name']+'.txt'\n with open(filename, 'w+') as f:\n actors=''\n for actor in item['actors']:\n actors += actor+'|'\n info = '电影名:%s\\n导演:%s\\n主演:%s\\n评分:%s\\n简介:%s'%(item['name'],item['directer'],actors,item['score'],item['info'])\n f.write(info)\n #下载图片\n\n urllib.urlretrieve(item['cover'], 'movies/'+item['name']+'/'+item['name']+'.jpg')\n\n","sub_path":"douban_movies/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"617201062","text":"# Case 2 - Mapper using standard input and output\n# Easy to test locally with Bash\n\n\n\nimport sys \n\n\n# iterate through each line provided via standard input\nfor line in sys.stdin:\n datalist = line.strip().split(\" \")\n if (len(datalist) == 6) : \n date, time,store, department, cost, paymentType = datalist\n\n # print intermediate key-value pairs to standard output\n print(store + \"\\t\" + cost + \"\\n\")\n\n","sub_path":"21mapper.py","file_name":"21mapper.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"31557190","text":"\nimport numpy as np\nimport scipy.stats as sc\n\nUCB_factor = 1.0\n\nINIT_TEMP = 0.8\nFINAL_TEMP = 0.1\n\n# 0.8 - 0.2 is good\n\n# INIT_TEMP = 0.9\n# FINAL_TEMP = 0.1\n\nTOTAL_STATES = 21\n\ndef calculate_entropy(action_values):\n base = np.shape(action_values)[0]\n max_action_value = max(action_values)\n action_values = [i - max_action_value for i in action_values]\n 
action_probabilities = np.exp(action_values) / (np.sum(np.exp(action_values)))\n entropy = sc.entropy(action_probabilities, base = base)\n return np.power(entropy,1)\n\ndef boltzmann_probs(action_values):\n return np.exp(action_values) / np.sum(np.exp(action_values))\n\n# train using Q-learning\ndef Q_learning_train(env, n_episodes, max_steps, alpha, gamma, epsilon_profile, USE_ENTROPY=False, USE_BZ=False, USE_UCB=False):\n correct_Q_tablle = [[0.,0.],[gamma**0 , gamma**2],[gamma**1, gamma**3],[gamma**2, gamma**4],\n [gamma**3, gamma**5],[gamma**4, gamma**6],[gamma**5, gamma**7],[gamma**6, gamma**8],\n [gamma**7, gamma**9],[gamma**8, gamma**10],[gamma**9, gamma**9],[gamma**10, gamma**8],\n [gamma**9, gamma**7],[gamma**8, gamma**6],[gamma**7 , gamma**5],[gamma**6 , gamma**4],\n [gamma**5 , gamma**3],[gamma**4 , gamma**2],[gamma**3 , gamma**1],[gamma**2 , gamma**0],[0. , 0.] ]\n correct_Q_tablle =np.array(correct_Q_tablle)\n \n mse = []\n Q = np.zeros([env.n_states, env.n_actions])\n action_visitation_counts = np.zeros([env.n_states, env.n_actions]) + 1\n state_visitation_count = np.zeros(TOTAL_STATES)\n\n avg_entropy_per_episode = []\n test_rewards = []\n\n n_steps = np.zeros(n_episodes) + max_steps\n sum_rewards = np.zeros(n_episodes) # total reward for each episode\n epsilon = epsilon_profile.init\n temperature = INIT_TEMP\n for k in range(n_episodes):\n s = env.init_state\n entropys_for_all_states = []\n for i in range(TOTAL_STATES):\n entropys_for_all_states.append(calculate_entropy(Q[i]))\n entropys_for_all_states = np.array(entropys_for_all_states)\n avg_entropy_per_episode.append(entropys_for_all_states.mean())\n for j in range(max_steps):\n state_visitation_count[s] += 1\n epsilon_use = epsilon\n if USE_ENTROPY:\n epsilon_use = calculate_entropy(Q[s])\n if np.random.rand() < epsilon_use:\n a = np.random.randint(env.n_actions) # random action\n else:\n mx = np.max(Q[s])\n a = np.random.choice(np.where(Q[s]==mx)[0]) # greedy action with random tie break\n if USE_BZ:\n # print(temperature)\n a = np.random.choice(env.n_actions, p = boltzmann_probs(Q[s]/temperature)) # boltzmann exploration\n if USE_UCB:\n qvalues_plus_visitation = Q + UCB_factor * np.sqrt(2*np.log(k+1) / action_visitation_counts)\n mx = np.max(qvalues_plus_visitation[s])\n # print(mx)\n a = np.random.choice(np.where(qvalues_plus_visitation[s]==mx)[0])\n\n action_visitation_counts[s,a] += 1\n\n sn = env.next_state[s,a] \n r = env.reward[s,a]\n sum_rewards[k] += r\n Q[s,a] = (1.-alpha)*Q[s,a]+alpha*(r+gamma*np.max(Q[sn]))\n if env.terminal[sn]:\n n_steps[k] = j+1 # number of steps taken\n break\n s = sn\n epsilon = max(epsilon - epsilon_profile.dec_step, epsilon_profile.final)\n # mse.append(np.square(correct_Q_tablle - Q).mean(axis=None))\n mse.append(np.sum(np.square(correct_Q_tablle - Q)))\n epsilon = max(epsilon - epsilon_profile.dec_episode, epsilon_profile.final)\n temperature = max(temperature - epsilon_profile.dec_episode, FINAL_TEMP)\n _, test_episode_reward, _,_,_,_,_ = Q_test(Q, env, 1, 10, 0.01)\n test_rewards.append(test_episode_reward)\n # print(epsilon)\n return Q, n_steps, sum_rewards, mse, entropys_for_all_states, test_rewards, state_visitation_count\n\n\n# run tests using action-value function table Q assuming epsilon greedy\ndef Q_test(Q, env, n_episodes, max_steps, epsilon):\n n_steps = np.zeros(n_episodes) + max_steps # number of steps taken for each episode\n sum_rewards = np.zeros(n_episodes) # total rewards obtained for each episode\n state = np.zeros([n_episodes, max_steps], dtype=np.int) \n 
action = np.zeros([n_episodes, max_steps], dtype=np.int) \n next_state = np.zeros([n_episodes, max_steps], dtype=np.int)\n reward = np.zeros([n_episodes, max_steps])\n\n avg_entropy_per_episode = []\n\n for k in range(n_episodes):\n entropys_for_all_states = []\n for i in range(TOTAL_STATES):\n entropys_for_all_states.append(calculate_entropy(Q[i]))\n entropys_for_all_states = np.array(entropys_for_all_states)\n avg_entropy_per_episode.append(entropys_for_all_states.mean())\n s = env.init_state\n for j in range(max_steps):\n state[k,j] = s\n if np.random.rand() < epsilon:\n a = np.random.randint(env.n_actions) # random action\n else:\n mx = np.max(Q[s])\n a = np.random.choice(np.where(Q[s]==mx)[0]) # greedy action with random tie break\n action[k,j] = a\n sn = env.next_state[s,a]\n r = env.reward[s,a]\n next_state[k,j] = sn\n reward[k,j] = r\n sum_rewards[k] += r\n if env.terminal[sn]:\n n_steps[k] = j+1\n break\n s = sn\n return n_steps, sum_rewards, state, action, next_state, reward, avg_entropy_per_episode\n\n","sub_path":"linear_environment/qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":5675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"16592420","text":"from threading import Thread\nimport time\n\ndef BigBox(color,x):\n while True:\n print(color,'BigBox is Open')\n time.sleep(5)\n print(color,'BigBox is Closed')\n time.sleep(5)\n\ndef SmallBox(color,x):\n while True:\n print(color,'SmallBox is Open')\n time.sleep(1)\n print(color,'SmallBox is Closed')\n time.sleep(1)\n\nSmallBoxThread=Thread(target=SmallBox,args=['red',4])\nBigBoxThread=Thread(target=BigBox,args=['blue',5])\n\nSmallBoxThread.daemon = True\nBigBoxThread.daemon = True\n\nSmallBoxThread.start()\nBigBoxThread.start()\n\ntime.sleep(20)\n","sub_path":"faceRecognizer/faceRecognizer-7-Thread1.py","file_name":"faceRecognizer-7-Thread1.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"354392897","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport logging\nimport hashlib\nimport re\nimport json\nimport datetime\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom kafka import KafkaProducer\nimport pyspark_cassandra.streaming\n\n# from utils import Utils\nfrom utils import Utils\n\n\n\ndef split_data_m(message):\n return message.split(\"\\t\")\n\n\ndef hash_id_with_sha256(id_value):\n \"\"\" hash passed value \"\"\"\n hash_id = hashlib.sha256(id_value)\n return hash_id.hexdigest()\n\ndef filter_messages(message):\n \"\"\"Filter the records from log source 'sdposbdomain' and urls containing 'peal/'\"\"\"\n try:\n if bool(message) and \"sdposbdomain\" in message and \"peal/\" in message:\n return message\n except TypeError:\n logging.exception('Error raised with the unavailable fields in the json')\n\ndef get_request_url(url):\n \"\"\"user id from request url is hashed\"\"\"\n hashed_url = ''\n peal_url = ''\n hashed_customer_id = ''\n\n try:\n if 'customers' in url:\n if 'NOT_AVAILABLE' in url:\n url_elements = url.split('/')\n usersid_pos = url_elements.index('NOT_AVAILABLE') + 1\n for element in url_elements:\n hashed_url = hashed_url+\"/\"+element\n hashed_url = hashed_url[1:]\n\n for element in url_elements[usersid_pos:]:\n peal_url = peal_url+\"/\"+element\n\n else:\n url_elements = url.split('/')\n usersid_pos = url_elements.index('customers') + 1\n 
pealid_pos = url_elements.index('customers') + 2\n                usersid = url_elements[usersid_pos]\n\n                if usersid == 'products':\n                    hashed_userid = usersid\n                else:\n                    hashed_userid = hash_id_with_sha256(usersid)\n                    hashed_customer_id = hashed_userid\n\n                url_elements[usersid_pos] = hashed_userid\n\n                for element in url_elements:\n                    hashed_url = hashed_url + \"/\" + element\n                hashed_url = hashed_url[1:]\n\n                for element in url_elements[pealid_pos:]:\n                    peal_url = peal_url+\"/\"+element\n\n        elif 'users' in url:\n            url_elements = url.split('/')\n            usersid_pos = url_elements.index('users') + 1\n            pealid_pos = url_elements.index('users') + 2\n            if len(url_elements) > usersid_pos:\n                usersid = url_elements[usersid_pos]\n\n                if usersid == 'user':\n                    hashed_userid = usersid\n                else:\n                    hashed_userid = hash_id_with_sha256(usersid)\n                    hashed_customer_id = hashed_userid\n\n                url_elements[usersid_pos] = hashed_userid\n\n                for element in url_elements:\n                    hashed_url = hashed_url+\"/\"+element\n                hashed_url = hashed_url[1:]\n\n                for element in url_elements[pealid_pos:]:\n                    peal_url = peal_url+\"/\"+element\n            else:\n                hashed_url = url\n\n        else:\n            hashed_url = url\n\n        return hashed_url,peal_url,hashed_customer_id\n    except TypeError:\n        logging.exception('Error raised with the unavailable fields in the json')\n    except Exception:\n        logging.exception('Error with the broad exception')\n\n\ndef hash_request_params(reqparams):\n    \"\"\"Each key from request param is extracted and user id is hashed\"\"\"\n    query_dict = {}\n    id_dict = {}\n\n    family = ''\n    chl = ''\n    cty = ''\n    sdprq_id = ''\n    mobile_cust_id = ''\n    fixed_cust_id = ''\n    customer_id = ''\n\n    for val in reqparams.split(\" \")[0].split('&'):\n        query_dict[val.split(\"=\")[0]] = val.split(\"=\")[1]\n\n    if \"family\" in query_dict:\n        family = query_dict.get(\"family\")\n\n    if \"chl\" in query_dict:\n        chl = query_dict.get(\"chl\")\n\n    if \"cty\" in query_dict:\n        cty = query_dict.get(\"cty\")\n\n    if \"sdprqId\" in query_dict:\n        sdprq_id = query_dict.get(\"sdprqId\")\n\n    if 'mobileCustId' in query_dict:\n        mobile_cust_id = hash_id_with_sha256(query_dict.get(\"mobileCustId\"))\n        id_dict['mobileCustId'] = mobile_cust_id\n\n    if 'fixedCustId' in query_dict:\n        fixed_cust_id = hash_id_with_sha256(query_dict.get(\"fixedCustId\"))\n        id_dict['fixedCustId'] = fixed_cust_id\n\n    if 'customerId' in query_dict:\n        customer_id = hash_id_with_sha256(query_dict.get(\"customerId\"))\n        id_dict['customerId'] = customer_id\n\n    if 'userId' in query_dict:\n        customer_id = hash_id_with_sha256(query_dict.get(\"userId\"))\n        id_dict['customerId'] = customer_id\n\n    if 'custId' in query_dict:\n        customer_id = hash_id_with_sha256(query_dict.get(\"custId\"))\n        id_dict['customerId'] = customer_id\n\n    return family, chl, cty, sdprq_id, mobile_cust_id, fixed_cust_id, customer_id\n\n\ndef parse_message(record):\n    \"\"\"source and message are extracted from json and split with a delimiter\"\"\"\n    try:\n        json_message = json.loads(record.encode('ascii', 'ignore'))\n\n        message = json_message.get(\"message\")\n        split_message = message.split(\"\\t\")\n        source = json_message.get(\"source\")\n        reg_number = re.compile(r'\\d+')\n\n        source_log = source\n        time_stamp = split_message[0] +' '+split_message[1]\n        day = split_message[0]\n        method = split_message[2]\n        raw_request = split_message[3]\n        response_code = split_message[4]\n        time_spent = split_message[5]\n\n        if not reg_number.match(time_spent):\n            time_spent = 0\n\n        source_ip = split_message[6]\n\n        if '?' 
in raw_request:\n split_request = raw_request.split(\"?\")\n request_url,peal_api,hashed_customer_id = get_request_url(split_request[0])\n request_param = split_request[1]\n api_family, channel_name, country_name, sdprq_id, mobile_cust_id, fixed_cust_id, customer_id = hash_request_params(\n request_param)\n if mobile_cust_id == '' and fixed_cust_id == '' and customer_id == '' and hashed_customer_id != '':\n customer_id = hashed_customer_id\n\n else:\n request_url = raw_request\n peal_api = ''\n api_family = ''\n channel_name = ''\n country_name = ''\n sdprq_id = ''\n mobile_cust_id = ''\n fixed_cust_id = ''\n customer_id = ''\n\n insert_time = str(datetime.datetime.now())\n\n final_dict = dict(\n {\"log_timestamp\": time_stamp,\n \"day\": day,\n \"source_log\": source_log,\n \"method\": method,\n \"request_url\": request_url,\n \"peal_api\": peal_api,\n \"response_code\": int(response_code),\n \"time_spent\": float(time_spent),\n \"source_ip\": source_ip,\n \"api_family\" : api_family,\n \"channel_name\": channel_name,\n \"country_name\": country_name,\n \"sdprq_id\": sdprq_id,\n \"mobile_cust_id\": mobile_cust_id,\n \"fixed_cust_id\": fixed_cust_id,\n \"customer_id\": customer_id,\n \"insert_time\": insert_time\n }\n )\n\n return final_dict\n\n except ValueError:\n logging.exception('Error with the message format.')\n except Exception:\n logging.exception('Error with the broad exception')\n\n\ndef push_to_cassandra_kafka(message):\n try:\n if None not in message.take(1):\n message.saveToCassandra(configuration.property(\"cassandra.keyspace\"),\n configuration.property(\"cassandra.table_name\"))\n options = configuration.property('kafka')\n producer = KafkaProducer(bootstrap_servers=options['bootstrap.servers'])\n\n # send each json element to output kafka topic\n for row in message.collect():\n producer.send(options['topic.output'], bytes(json.dumps(row)))\n\n producer.close()\n except ValueError as e:\n # logging.exception(\"ValueError when pushing the data!!!\", row)\n logging.exception(\"the data!!!\", e.message)\n\nif __name__ == \"__main__\":\n configuration = Utils.load_config(sys.argv[:])\n sc = SparkContext(appName=configuration.property(\"spark.appName\"))\n ssc = StreamingContext(sc, 2)\n kafka_params = {\"bootstrap.servers\": configuration.property(\"kafka\")[\"bootstrap.servers\"],\n \"startingOffsets\": configuration.property(\"kafka\")[\"startingOffsets\"]}\n messages = KafkaUtils.createDirectStream(ssc, [configuration.property(\"kafka\")[\"topic.input\"]], kafka_params)\n\n try:\n peal_messages = messages.filter(lambda message: filter_messages(message[1]))\n output = peal_messages.map(lambda record: parse_message(record[1]))\n output.foreachRDD(lambda x: push_to_cassandra_kafka(x))\n\n except ValueError:\n logging.exception('ValueError came up when parsing and pushing the data')\n\n ssc.start()\n ssc.awaitTermination()\n","sub_path":"src/applications/connect-app/peal_log_parser.py","file_name":"peal_log_parser.py","file_ext":"py","file_size_in_byte":9223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"643765698","text":"import os\nimport pygame\nimport argparse\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom random import randint\nfrom keras.utils import to_categorical\nimport random\nimport statistics\nfrom keras.optimizers import Adam\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout\nimport pandas as pd\nfrom operator import add\nimport 
collections\n\n#################################\n# Define parameters manually #\n#################################\ndef define_parameters():\n params = dict()\n # Neural Network\n params['epsilon_decay_linear'] = 1/75\n params['learning_rate'] = 0.0005\n params['first_layer_size'] = 50 # neurons in the first layer\n params['second_layer_size'] = 300 # neurons in the second layer\n params['third_layer_size'] = 50 # neurons in the third layer\n params['episodes'] = 150 \n params['memory_size'] = 2500\n params['batch_size'] = 1000\n # Settings\n params['weights_path'] = 'weights/weights3.hdf5'\n params['load_weights'] = True\n params['train'] = False\n params['plot_score'] = True\n return params\n\nclass Agent(object):\n def __init__(self, params):\n self.reward = 0\n self.gamma = 0.9\n self.dataframe = pd.DataFrame()\n self.short_memory = np.array([])\n self.agent_target = 1\n self.agent_predict = 0\n self.learning_rate = params['learning_rate'] \n self.epsilon = 1\n self.actual = []\n self.first_layer = params['first_layer_size']\n self.second_layer = params['second_layer_size']\n self.third_layer = params['third_layer_size']\n self.memory = collections.deque(maxlen=params['memory_size'])\n self.weights = params['weights_path']\n self.load_weights = params['load_weights']\n self.model = self.network()\n\n def network(self):\n model = Sequential()\n model.add(Dense(self.first_layer, activation='relu', input_dim=2400))\n model.add(Dense(self.second_layer, activation='relu'))\n model.add(Dense(self.third_layer, activation='relu'))\n model.add(Dense(4, activation='softmax'))\n opt = Adam(self.learning_rate)\n model.compile(loss='mse', optimizer=opt)\n\n return model\n \n def get_state(self, game, WIDTH, HEIGHT):\n state = []\n walls = game.get_walls()\n obstacles = game.get_obstacles()\n player = game.get_player()\n goal = game.get_goal()\n\n for i in range(WIDTH):\n for j in range(HEIGHT):\n val = 0\n if (i, j) == player:\n val = 1\n elif (i, j) == goal:\n val = 2\n elif (i, j) in walls:\n val = 3\n elif (i, j) in obstacles:\n val = 4\n\n state.append(val/4)\n \n return np.asarray(state)\n \n def set_reward(self, player, goal, dead):\n self.reward = 100 - (goal[0] - player[0]) + (goal[1] - player[1])\n if dead:\n self.reward = -200\n return self.reward\n return self.reward\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def replay_new(self, memory, batch_size):\n if len(memory) > batch_size:\n minibatch = random.sample(memory, batch_size)\n else:\n minibatch = memory\n for state, action, reward, next_state, done in minibatch:\n target = reward\n if not done:\n target = reward + self.gamma * np.amax(self.model.predict(np.array([next_state]))[0])\n target_f = self.model.predict(np.array([state]))\n target_f[0][np.argmax(action)] = target\n self.model.fit(np.array([state]), target_f, epochs=1, verbose=0)\n\n def train_short_memory(self, state, action, reward, next_state, done):\n target = reward\n if not done:\n target = reward + self.gamma * np.amax(self.model.predict(next_state.reshape((1, 2400)))[0])\n target_f = self.model.predict(state.reshape((1, 2400)))\n target_f[0][np.argmax(action)] = target\n self.model.fit(state.reshape((1, 2400)), target_f, epochs=1, verbose=0)\n","sub_path":"PrevTests/NN test/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"278947188","text":"class 
Solution:\n def removeDuplicates_slow(self, nums):\n i = 2\n while i < len(nums):\n if nums[i-2] == nums[i]: nums.remove(nums[i])\n else: i += 1\n return len(nums)\n\n def removeDuplicates(self, nums):\n i, j = 2, 2\n while j < len(nums):\n if nums[i-2] != nums[j]:\n nums[i] = nums[j]\n i+= 1\n j += 1\n return i\n\ns = Solution()\ns.removeDuplicates([1,1,1,2,2,3])\n","sub_path":"RemoveDuplicatesFromSortedArrayII.py","file_name":"RemoveDuplicatesFromSortedArrayII.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"407472020","text":"def crc_remainder(input_bitstring, polynomial_bitstring='101', initial_filler='0'):\n '''\n Calculates the CRC remainder of a string of bits using a chosen polynomial.\n initial_filler should be '1' or '0'.\n '''\n polynomial_bitstring = polynomial_bitstring.lstrip('0')\n len_input = len(input_bitstring)\n initial_padding = initial_filler * (len(polynomial_bitstring) - 1)\n input_padded_array = list(input_bitstring + initial_padding)\n while '1' in input_padded_array[:len_input]:\n cur_shift = input_padded_array.index('1')\n for i in range(len(polynomial_bitstring)):\n input_padded_array[cur_shift + i] = str(int(polynomial_bitstring[i] != input_padded_array[cur_shift + i]))\n return ''.join(input_padded_array)[len_input:]\n\n\ndef crc_check(input_bitstring, polynomial_bitstring='101',check_value=0):\n '''\n Calculates the CRC check of a string of bits using a chosen polynomial.\n '''\n try:\n polynomial_bitstring = polynomial_bitstring.lstrip('0')\n len_input = len(input_bitstring)\n # initial_padding = check_value\n input_padded_array = list(input_bitstring)\n while '1' in input_padded_array[:len_input]:\n cur_shift = input_padded_array.index('1')\n for i in range(len(polynomial_bitstring)):\n input_padded_array[cur_shift + i] = str(int(polynomial_bitstring[i] != input_padded_array[cur_shift + i]))\n return ('1' not in ''.join(input_padded_array)[len_input:])\n except Exception as e:\n return False\n\n\nif __name__ == '__main__':\n print(crc_check('11010011101100100','1011','100'))\n\n print(crc_check('1010101','101','01'))\n\n print(crc_remainder('11111','101','0'))\n","sub_path":"hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"599684081","text":"# Copyright (C) 2018-present prototyped.cn. 
All rights reserved.\n# Distributed under the terms and conditions of the Apache License.\n# See accompanying files LICENSE.\n\nimport os\nimport taksi.descriptor.types as types\nimport taksi.descriptor.predef as predef\nimport taksi.descriptor.lang as lang\nimport taksi.generator.genutil as genutil\nimport taksi.descriptor.strutil as strutil\nimport taksi.version as version\nfrom taksi.generator.go.gen_struct import GoStructGenerator\n\n\n# Go csv load generator\nclass GoCsvLoadGenerator(GoStructGenerator):\n    TAB_SPACE = '\\t'\n\n    @staticmethod\n    def name():\n        return \"go-csv\"\n\n    def get_const_key_name(self, name):\n        return 'Key%sName' % name\n\n    def gen_const_names(self, descriptors):\n        content = 'const (\\n'\n        for struct in descriptors:\n            name = strutil.camel_to_snake(struct['name'])\n            content += '\\t%s = \"%s\"\\n' % (self.get_const_key_name(struct['name']), name)\n        content += ')\\n\\n'\n        return content\n\n    # Generate a single-field assignment statement\n    def gen_field_assign_stmt(self, name, typename, valuetext, tabs, tips):\n        content = ''\n        space = self.TAB_SPACE * tabs\n        if typename == 'string':\n            return '%s%s = %s\\n' % (space, name, valuetext)\n        else:\n            content += '%svar value = MustParseTextValue(\"%s\", %s, %s)\\n' % (space, typename, valuetext, tips)\n            content += '%s%s = value.(%s)\\n' % (space, name, typename)\n        return content\n\n    # Generate an array-field assignment\n    def gen_field_array_assign_stmt(self, prefix, typename, name, row_name, array_delim, tabs):\n        assert len(array_delim) == 1\n        array_delim = array_delim.strip()\n        if array_delim == '\\\\':\n            array_delim = '\\\\\\\\'\n\n        space = self.TAB_SPACE * tabs\n        content = ''\n        elem_type = types.array_element_type(typename)\n        elem_type = lang.map_go_type(elem_type)\n\n        content += '%sfor _, item := range strings.Split(%s, \"%s\") {\\n' % (space, row_name, array_delim)\n        content += '%s    var value = MustParseTextValue(\"%s\", item, %s)\\n' % (space, elem_type, row_name)\n        content += '%s    %s%s = append(p.%s, value.(%s))\\n' % (space, prefix, name, name, elem_type)\n        content += '%s}\\n' % space\n        return content\n\n    # Generate a map-field assignment\n    def gen_field_map_assign_stmt(self, prefix, typename, name, row_name, map_delims, tabs):\n        assert len(map_delims) == 2, map_delims\n        delim1 = map_delims[0].strip()\n        if delim1 == '\\\\':\n            delim1 = '\\\\\\\\'\n        delim2 = map_delims[1].strip()\n        if delim2 == '\\\\':\n            delim2 = '\\\\\\\\'\n\n        space = self.TAB_SPACE * tabs\n        k, v = types.map_key_value_types(typename)\n        key_type = lang.map_go_type(k)\n        val_type = lang.map_go_type(v)\n\n        content = ''\n        content += '%s%s%s = map[%s]%s{}\\n' % (space, prefix, name, key_type, val_type)\n        content += '%sfor _, text := range strings.Split(%s, \"%s\") {\\n' % (space, row_name, delim1)\n        content += '%s    if text == \"\" {\\n' % space\n        content += '%s        continue\\n' % space\n        content += '%s    }\\n' % space\n        content += '%s    var items = strings.Split(text, \"%s\")\\n' % (space, delim2)\n        content += '%s    var value = MustParseTextValue(\"%s\", items[0], %s)\\n' % (space, key_type, row_name)\n        content += '%s    var key = value.(%s)\\n' % (space, key_type)\n        content += '%s    value = MustParseTextValue(\"%s\", items[1], %s)\\n' % (space, val_type, row_name)\n        content += '%s    var val = value.(%s)\\n' % (space, val_type)\n        content += '%s    %s%s[key] = val\\n' % (space, prefix, name)\n        content += '%s}\\n' % space\n        return content\n\n    # ParseFromRows method for KV (key-value) mode\n    def gen_kv_parse_method(self, struct):\n        content = ''\n        rows = struct['data_rows']\n        keycol = struct['options'][predef.PredefKeyColumn]\n        valcol = struct['options'][predef.PredefValueColumn]\n        typcol = 
int(struct['options'][predef.PredefValueTypeColumn])\n        assert keycol > 0 and valcol > 0 and typcol > 0\n\n        keyidx, keyfield = genutil.get_field_by_column_index(struct, keycol)\n        validx, valfield = genutil.get_field_by_column_index(struct, valcol)\n        typeidx, typefield = genutil.get_field_by_column_index(struct, typcol)\n\n        array_delim = struct['options'].get(predef.OptionArrayDelimeter, predef.DefaultArrayDelimiter)\n        map_delims = struct['options'].get(predef.OptionMapDelimeters, predef.DefaultMapDelimiters)\n\n        content += 'func (p *%s) ParseFromRows(rows [][]string) error {\\n' % struct['camel_case_name']\n        content += '\\tif len(rows) < %d {\\n' % len(rows)\n        content += '\\t\\tlog.Panicf(\"%s:row length out of index, %%d < %d\", len(rows))\\n' % (struct['name'], len(rows))\n        content += '\\t}\\n'\n\n        idx = 0\n        for row in rows:\n            content += '\\tif rows[%d][%d] != \"\" {\\n' % (idx, validx)\n            name = rows[idx][keyidx].strip()\n            name = strutil.camel_case(name)\n            origin_typename = rows[idx][typeidx].strip()\n            typename = lang.map_go_type(origin_typename)\n            valuetext = 'rows[%d][%d]' % (idx, validx)\n            # print('kv', name, origin_typename, valuetext)\n            if origin_typename.startswith('array'):\n                content += self.gen_field_array_assign_stmt('p.', origin_typename, name, valuetext, array_delim, 2)\n            elif origin_typename.startswith('map'):\n                content += self.gen_field_map_assign_stmt('p.', origin_typename, name, valuetext, map_delims, 2)\n            else:\n                content += self.gen_field_assign_stmt('p.'+name, typename, valuetext, 2, idx)\n            content += '%s}\\n' % self.TAB_SPACE\n            idx += 1\n        content += '%sreturn nil\\n' % self.TAB_SPACE\n        content += '}\\n\\n'\n        return content\n\n    # Generate the ParseFromRow method\n    def gen_parse_method(self, struct):\n        if struct['options'][predef.PredefParseKVMode]:\n            return self.gen_kv_parse_method(struct)\n\n        array_delim = struct['options'].get(predef.OptionArrayDelimeter, predef.DefaultArrayDelimiter)\n        map_delims = struct['options'].get(predef.OptionMapDelimeters, predef.DefaultMapDelimiters)\n\n        inner_class_done = False\n        inner_field_names, inner_fields = genutil.get_inner_class_mapped_fields(struct)\n\n        vec_idx = 0\n        vec_names, vec_name = genutil.get_vec_field_range(struct)\n\n        content = ''\n        content += 'func (p *%s) ParseFromRow(row []string) error {\\n' % struct['camel_case_name']\n        content += '\\tif len(row) < %d {\\n' % len(struct['fields'])\n        content += '\\t\\tlog.Panicf(\"%s: row length out of index %%d\", len(row))\\n' % struct['name']\n        content += '\\t}\\n'\n\n        idx = 0\n        for field in struct['fields']:\n            fname = field['name']\n            prefix = 'p.'\n            if fname in inner_field_names:\n                if not inner_class_done:\n                    inner_class_done = True\n                    content += self.gen_inner_class_parse(struct, prefix)\n            else:\n                content += '\\tif row[%d] != \"\" {\\n' % idx\n                origin_type_name = field['original_type_name']\n                typename = lang.map_go_type(origin_type_name)\n                field_name = field['camel_case_name']\n                valuetext = 'row[%d]' % idx\n                if origin_type_name.startswith('array'):\n                    content += self.gen_field_array_assign_stmt(prefix, field['original_type_name'], fname, valuetext, array_delim, 2)\n                elif origin_type_name.startswith('map'):\n                    content += self.gen_field_map_assign_stmt(prefix, field['original_type_name'], fname, valuetext, map_delims, 2)\n                else:\n                    if field_name in vec_names:\n                        name = '%s[%d]' % (vec_name, vec_idx)\n                        content += self.gen_field_assign_stmt(prefix+name, typename, valuetext, 2, 'row')\n                        vec_idx += 1\n                    else:\n                        content += self.gen_field_assign_stmt(prefix+field_name, typename, valuetext, 2, 'row')\n                content += 
'%s}\\n' % self.TAB_SPACE\n            idx += 1\n        content += '%sreturn nil\\n' % self.TAB_SPACE\n        content += '}\\n\\n'\n        return content\n\n    # Generate the assignment code for the inner class\n    def gen_inner_class_parse(self, struct, prefix):\n        content = ''\n        inner_class_type = struct[\"options\"][predef.PredefInnerTypeClass]\n        inner_var_name = struct[\"options\"][predef.PredefInnerTypeName]\n        inner_fields = genutil.get_inner_class_struct_fields(struct)\n        start, end, step = genutil.get_inner_class_range(struct)\n        assert start > 0 and end > 0 and step > 1\n        content += '    for i := %s; i < %s; i += %s {\\n' % (start, end, step)\n        content += '        var item %s;\\n' % inner_class_type\n        for n in range(step):\n            field = inner_fields[n]\n            origin_type = field['original_type_name']\n            typename = lang.map_go_type(origin_type)\n            field_name = field['camel_case_name']\n            valuetext = 'row[i + %d]' % n\n            content += '        if row[i + %d] != \"\" {\\n' % n\n            content += self.gen_field_assign_stmt('item.' + field_name, typename, valuetext, 2, 'row')\n            content += '        }\\n'\n        content += '        %s%s = append(%s%s, item);\\n' % (prefix, inner_var_name, prefix, inner_var_name)\n        content += '    }\\n'\n        return content\n\n    # Load method for KV (key-value) mode\n    def gen_load_method_kv(self, struct):\n        content = ''\n        content += 'func Load%s(loader DataSourceLoader) (*%s, error) {\\n' % (struct['name'], struct['name'])\n        content += '\\tbuf, err := loader.LoadDataByKey(%s)\\n' % self.get_const_key_name(struct['name'])\n        content += '\\tif err != nil {\\n'\n        content += '\\treturn nil, err\\n'\n        content += '\\t}\\n'\n        content += '\\tr := csv.NewReader(buf)\\n'\n        content += '\\trows, err := r.ReadAll()\\n'\n        content += '\\tif err != nil {\\n'\n        content += '\\t    log.Errorf(\"%s: csv read all, %%v\", err)\\n' % struct['name']\n        content += '\\t    return nil, err\\n'\n        content += '\\t}\\n'\n        content += '\\tvar item %s\\n' % struct['name']\n        content += '\\tif err := item.ParseFromRows(rows); err != nil {\\n'\n        content += '\\t    log.Errorf(\"%s: parse row %%d, %%v\", len(rows), err)\\n' % struct['name']\n        content += '\\t    return nil, err\\n'\n        content += '\\t}\\n'\n        content += '\\treturn &item, nil\\n'\n        content += '}\\n\\n'\n        return content\n\n    # Generate the Load method\n    def gen_load_method(self, struct):\n        content = ''\n        if struct['options']['parse-kv-mode']:\n            return self.gen_load_method_kv(struct)\n\n        content += 'func Load%sList(loader DataSourceLoader) ([]*%s, error) {\\n' % (struct['name'], struct['name'])\n        content += '\\tbuf, err := loader.LoadDataByKey(%s)\\n' % self.get_const_key_name(struct['name'])\n        content += '\\tif err != nil {\\n'\n        content += '\\t    return nil, err\\n'\n        content += '\\t}\\n'\n        content += '\\tvar list []*%s\\n' % struct['name']\n        content += '\\tvar r = csv.NewReader(buf)\\n'\n        content += '\\tfor i := 0; ; i++ {\\n'\n        content += '\\t    row, err := r.Read()\\n'\n        content += '\\t    if err == io.EOF {\\n'\n        content += '\\t        break\\n'\n        content += '\\t    }\\n'\n        content += '\\t    if err != nil {\\n'\n        content += '\\t        log.Errorf(\"%s: read csv %%v\", err)\\n' % struct['name']\n        content += '\\t        return nil, err\\n'\n        content += '\\t    }\\n'\n        content += '\\t    var item %s\\n' % struct['name']\n        content += '\\t    if err := item.ParseFromRow(row); err != nil {\\n'\n        content += '\\t        log.Errorf(\"%s: parse row %%d, %%s, %%v\", i+1, row, err)\\n' % struct['name']\n        content += '\\t        return nil, err\\n'\n        content += '\\t    }\\n'\n        content += '\\t    list = append(list, &item)\\n'\n        content += '\\t}\\n'\n        content += '\\treturn list, nil\\n'\n        content += '}\\n\\n'\n        return content\n\n    def generate(self, struct, params):\n        content = ''\n        content 
+= self.gen_struct_define(struct, params)\n content += self.gen_parse_method(struct)\n content += self.gen_load_method(struct)\n return content\n\n def run(self, descriptors, params):\n content = '// This file is auto-generated by taxi v%s, DO NOT EDIT!\\n\\n' % version.VER_STRING\n content += 'package %s\\n' % params['pkg']\n content += 'import (\\n'\n content += ' \"encoding/csv\"\\n'\n content += ' \"io\"\\n'\n content += ' \"strings\"\\n'\n content += ')\\n'\n content += '\\nvar (\\n'\n content += '\\t_ = io.EOF\\n'\n content += '\\t_ = strings.Compare\\n'\n content += ')\\n\\n'\n content += self.gen_const_names(descriptors)\n\n for struct in descriptors:\n genutil.setup_comment(struct)\n genutil.setup_key_value_mode(struct)\n\n for struct in descriptors:\n content += self.generate(struct, params)\n\n filename = params.get(predef.OptionOutSourceFile, 'config.go')\n filename = os.path.abspath(filename)\n strutil.compare_and_save_content(filename, content, 'utf-8')\n print('wrote source to %s' % filename)\n\n goroot = os.getenv('GOROOT')\n if goroot is not None:\n cmd = goroot + '/bin/go fmt ' + filename\n print(cmd)\n os.system(cmd)\n","sub_path":"taksi/generator/go/gen_csv_load.py","file_name":"gen_csv_load.py","file_ext":"py","file_size_in_byte":13668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"389333683","text":"import platform\n\nfrom bson.py3compat import string_type\n\nfrom . import errors\nfrom .base import BaseObject, ClientOptions\nfrom .configure import MontyConfigure\nfrom .database import MontyDatabase\n\n\nclass MontyClient(BaseObject):\n\n def __init__(self,\n repository=None,\n document_class=dict,\n tz_aware=None,\n **kwargs):\n \"\"\"\n \"\"\"\n with MontyConfigure(repository) as conf:\n self._storage = conf.load()._get_storage_engine()\n wconcern = self._storage.wconcern_parser(kwargs)\n\n options = kwargs\n options[\"document_class\"] = document_class\n options[\"tz_aware\"] = tz_aware or False\n self.__options = ClientOptions(options, wconcern)\n super(MontyClient, self).__init__(self.__options.codec_options,\n self.__options.write_concern)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.address == other.address\n return NotImplemented\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return (\"MontyClient({})\".format(\n \", \".join([\n \"repository={!r}\".format(\n self.address\n ),\n \"document_class={}.{}\".format(\n self.__options._options[\"document_class\"].__module__,\n self.__options._options[\"document_class\"].__name__\n ),\n \"storage_engine={}\".format(\n self._storage\n ),\n ]))\n )\n\n def __getattr__(self, name):\n if name.startswith('_'):\n raise AttributeError(\n \"MontyClient has no attribute {0!r}. 
To access the {0}\"\n \" database, use client[{0!r}].\".format(name))\n return self.get_database(name)\n\n def __getitem__(self, key):\n return self.get_database(key)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n if self._storage.is_open:\n self.close()\n\n @property\n def address(self):\n return self._storage.repository\n\n def close(self):\n self._storage.close()\n\n def database_names(self):\n \"\"\"\n Return a list of database names.\n \"\"\"\n return self._storage.database_list()\n\n def drop_database(self, name_or_database):\n \"\"\"\n Remove database.\n # Could raise OSError: Device or resource busy\n if db file is locked by other connection...\n \"\"\"\n name = name_or_database\n if isinstance(name_or_database, MontyDatabase):\n name = name_or_database.name\n elif not isinstance(name_or_database, string_type):\n raise TypeError(\"name_or_database must be an instance of \"\n \"basestring or a Database\")\n\n self._storage.database_drop(name)\n\n def get_database(self, name):\n \"\"\"\n Get a database, create one if not exists.\n \"\"\"\n # verify database name\n if platform.system() == \"Windows\":\n is_invaild = set('/\\. \"$*<>:|?').intersection(set(name))\n else:\n is_invaild = set('/\\. \"$').intersection(set(name))\n\n if is_invaild or not name:\n raise errors.OperationFailure(\"Invaild database name.\")\n else:\n return MontyDatabase(self, name)\n","sub_path":"montydb/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"78685574","text":"#!/usr/bin/env python3\n\nimport sys\n\nsys.setrecursionlimit(10**7)\n\n# Override `input` function because `stdin.readline()` is 10x faster than built-in `input()`\ninput = sys.stdin.readline\n\n\ndef read_h(typ=int):\n return list(map(typ, input().split()))\n\n\ndef read_v(n, m=1, typ=int):\n return [read_h(typ) if m > 1 else typ(input()) for _ in range(n)]\n\n\ndef main():\n n, = read_h()\n arr = read_v(n)\n\n max_asc = [0]\n for a in arr[:n - 1]:\n max_asc.append(max(max_asc[-1], a))\n # print(max_asc)\n\n max_desc = [0]\n for a in arr[::-1][:n - 1]:\n max_desc.append(max(max_desc[-1], a))\n max_desc = max_desc[::-1]\n # print(max_desc)\n\n # assert len(max_asc) == len(max_desc)\n # print(len(max_asc))\n\n for a, d in zip(max_asc, max_desc):\n print(max(a, d))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"abc134/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"605513127","text":"class mobile:\n def __init__(self,price,brand):\n self.price=price\n self.brand=brand\n\nmob1=mobile(1000,\"apple\")\nmob2=mobile(2000,\"samsung\")\nmob3=mobile(3000,\"mi\")\n\nlist_mobile=[mob1,mob2,mob3]\n\nfor i in list_mobile:\n print(i.price,i.brand)\n","sub_path":"program19.py","file_name":"program19.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71562438","text":"import random\n\n\nclass Die:\n def __init__(self):\n self.value = 0\n\n def roll(self):\n self.value = random.randrange(1, 7)\n\n\nclass Game:\n def __init__(self):\n self.score = {\"One\": 'None', \"Two\": 'None', \"Three\": 'None', \"Four\": 'None', \"Five\": 'None', \"Six\": 'None',\n \"3 of a kind\": 'None', \"4 of a kind\": 'None', \"Full House\": 'None', \"Short Straight\": 'None',\n \"Long Straight\": 'None', 
\"Yahtzee\": 'None'}\n self.projected_score = {\"One\": 0, \"Two\": 0, \"Three\": 0, \"Four\": 0, \"Five\": 0, \"Six\": 0,\n \"3 of a kind\": 0, \"4 of a kind\": 0, \"Full House\": 0, \"Short Straight\": 0,\n \"Long Straight\": 0, \"Yahtzee\": 0}\n self.dice = [Die(), Die(), Die(), Die(), Die()]\n self.rolls_left = 2\n self.category_ordering = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \"Upper Total\", \"3 of a kind\",\n \"4 of a kind\", \"Full House\", \"Short Straight\", \"Long Straight\", \"Yahtzee\",\n \"Lower Total\", \"Total Score\"]\n self.numbers = [\"zero\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\"]\n\n def reset_game(self):\n self.score = {\"One\": 'None', \"Two\": 'None', \"Three\": 'None', \"Four\": 'None', \"Five\": 'None', \"Six\": 'None',\n \"3 of a kind\": 'None', \"4 of a kind\": 'None', \"Full House\": 'None', \"Short Straight\": 'None',\n \"Long Straight\": 'None', \"Yahtzee\": 'None'}\n self.projected_score = {\"One\": 0, \"Two\": 0, \"Three\": 0, \"Four\": 0, \"Five\": 0, \"Six\": 0,\n \"3 of a kind\": 0, \"4 of a kind\": 0, \"Full House\": 0, \"Short Straight\": 0,\n \"Long Straight\": 0, \"Yahtzee\": 0}\n self.roll_dice()\n self.rolls_left = 2\n\n @property\n def upper_score(self):\n score = 0\n for i in xrange(0, 6):\n sc = self.score[self.category_ordering[i]]\n if sc is not 'None':\n score += sc\n return score\n\n @property\n def lower_score(self):\n score = 0\n for i in xrange(7, 12):\n sc = self.score[self.category_ordering[i]]\n if sc is not 'None':\n score += sc\n return score\n\n @property\n def total_score(self):\n return sum(i for i in self.score.values() if isinstance(i, int))\n\n @property\n def print_score(self):\n score_string = {}\n for category, score in self.score.iteritems():\n score_string[str(category)] = str(score)\n return score_string\n\n @property\n def print_dice(self):\n dice_string = ''\n for die in self.dice:\n dice_string += str(die.value) + \" \"\n return dice_string\n\n def restart_turn(self):\n self.roll_dice(\"1,2,3,4,5\")\n self.rolls_left = 2\n\n def roll_dice(self, selection=''):\n roll_selection = selection.split(',')\n\n for i in roll_selection:\n if i:\n self.dice[int(i) - 1].roll()\n self.rolls_left -= 1\n\n def get_projected_scores(self):\n for category in self.projected_score.iterkeys():\n self.projected_score[category] = self.calculate_score(category)\n\n def calculate_score(self, category):\n dice_list = [x.value for x in self.dice]\n\n if category in ['One', 'Two', 'Three', 'Four', 'Five', 'Six']:\n numeric_category = self.numbers.index(category)\n return dice_list.count(numeric_category) * numeric_category\n\n if category in ['3 of a kind']:\n if dice_list.count(max(set(dice_list), key=dice_list.count)) >= 3:\n return sum(dice_list)\n else:\n return 0\n\n if category in ['4 of a kind']:\n if dice_list.count(max(set(dice_list), key=dice_list.count)) >= 4:\n return sum(dice_list)\n else:\n return 0\n\n if category in ['Full House']:\n if dice_list.count(max(set(dice_list), key=dice_list.count)) is 3 \\\n and dice_list.count(min(set(dice_list), key=dice_list.count)) is 2:\n return 25\n else:\n return 0\n\n if category in ['Yahtzee']:\n if dice_list.count(max(set(dice_list), key=dice_list.count)) is 5:\n return 50\n else:\n return 0\n\n if category in ['Short Straight']:\n if set(dice_list).issuperset([1, 2, 3, 4]) or set(dice_list).issuperset([2, 3, 4, 5]) \\\n or set(dice_list).issuperset([3, 4, 5, 6]):\n return 30\n else:\n return 0\n\n if category in ['Long Straight']:\n if 
set(dice_list).issuperset([1, 2, 3, 4, 5]) or set(dice_list).issuperset([2, 3, 4, 5, 6]):\n return 40\n else:\n return 0\n","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"594293417","text":"\"\"\"\nLab: Lab 3\nInstructor: Olac Fuentes\nTeaching Assistant: Anindita Nath\nCode Author: Jose M Rodriguez\nCourse: CS 2302\n\nLast Modified: 10/04/2019\n\nPurpose:\nTests SortedList.Print()\nTests NormalList.Print()\n\"\"\"\n\nfrom lab3 import *\n\ndef main():\n printTitle('CS 2302 Lab 3 - Jose Rodriguez')\n printTitle('Print test')\n array_generators = [generateRandomIntList, generateAscendingIntList, generateDescendingIntList]\n \n while True:\n print('Please input the size of the list to enter: ', end = '')\n size = input()\n if not isInt(size):\n printError('Invalid size value: should be an integer!')\n continue\n size = int(size)\n if size < 0:\n printError('Invalid size value: list cannot have a negative size')\n continue\n print(f'Entered size: {size}')\n\n print('Would you like to view list before SortedList? (y/n): ', end='')\n answer = input()\n view_output = answer == 'Y' or answer == 'y'\n\n nums = array_generators[0](size)\n print('Generated numpy list:')\n if view_output:\n print(nums)\n sorted_list = SortedList()\n for i in nums:\n sorted_list.Insert(i)\n\n normal_list = NormalList()\n for i in nums:\n normal_list.Insert(i)\n\n print('SortedList list:')\n timer = Timer()\n timer.start()\n sorted_list.Print()\n time_taken = timer.stop()\n printTitle(f'Time taken: {time_taken}s')\n\n print('NormalList list:')\n timer.start()\n normal_list.Print()\n time_taken = timer.stop()\n printTitle(f'Time taken: {time_taken}s')\n\n print('Would you like to perform another test? 
(y/n): ', end='')\n answer = input()\n if answer == 'Y' or answer == 'y':\n continue\n break\n \n print()\n printTitle('Thanks for testing!')\n\n\nif __name__ == '__main__':\n main()","sub_path":"cs-2302/lab3/print_test.py","file_name":"print_test.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"155450609","text":"\"\"\"\nGiven a complete binary tree, count the number of nodes.\n\"\"\"\nfrom TreeNode import TreeNode\n\n\nclass Solution(object):\n def countNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root:\n return 0\n height = self.get_height(root)\n count = (1 << height) - 1\n while root:\n if self.get_height(root.right) == height - 1:\n root = root.right\n else:\n root = root.left\n count -= 1 << (height - 2)\n height -= 1\n return count\n \n def get_height(self, root):\n return 0 if not root else 1 + self.get_height(root.left)\n","sub_path":"Leetcode-Python/CountCompleteTreeNodes.py","file_name":"CountCompleteTreeNodes.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"164832546","text":"from app.ext.database import db\n\n\nclass CustomerModel(db.Model):\n\n __tablename__ = 'customers'\n\n customer_id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80))\n email = db.Column(db.String(80))\n phone = db.Column(db.String(9))\n doc = db.Column(db.String(11))\n\n def __init__(self, customer_id, name, email, phone, doc):\n self.customer_id = customer_id\n self.name = name\n self.email = email\n self.phone = phone\n self.doc = doc\n\n def json(self):\n return {\n 'customer_id': self.customer_id,\n 'name': self.name,\n 'email': self.email,\n 'phone': self.phone,\n 'doc': self.doc\n }\n\n @classmethod\n def find_customer(cls, customer_id):\n customer = cls.query.filter_by(customer_id=customer_id).first()\n if customer:\n return customer\n return None\n\n def save_customer(self):\n db.session.add(self)\n db.session.commit()\n","sub_path":"app/restapi/customer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"381816722","text":"\"\"\"\n462. 
Minimum Moves to Equal Array Elements II\n\nGiven a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.\n\nYou may assume the array's length is at most 10,000.\n\nExample:\n\nInput:\n[1,2,3]\n\nOutput:\n2\n\nExplanation:\nOnly two moves are needed (remember each move increments or decrements one element):\n\n[1,2,3] => [2,2,3] => [2,2,2]\n\n\n\"\"\"\n\n'''\n\n# try binary search but not work\n\nclass Solution:\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n l, r = nums[0], nums[-1]\n lCost, rCost = self.cost(nums, l), self.cost(nums, r)\n while l <= r:\n mid = (l + r) >> 1\n mCost = self.cost(nums, mid)\n if \n \n \n def cost(self, nums, target):\n res = 0\n for num in nums:\n if num < target:\n res += target - num\n else:\n res += num - target\n return res\n\n'''\n\n'''\n# seems correct, but memory error\n\nclass Solution:\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n freq = [1] * (nums[-1] + 1)\n for i in range(1, len(nums)):\n freq[nums[i]] = freq[nums[i - 1]] + 1\n \n extra = None\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]: continue\n incr = freq[nums[i - 1]] # (nums[i] - nums[i - 1])\n decr = len(nums) - freq[nums[i - 1]]\n if decr < incr:\n return self.cost(nums, nums[i - 1])\n \n return self.cost(nums, nums[-1])\n \n def cost(self, nums, target):\n res = 0\n for num in nums:\n if num < target:\n res += target - num\n else:\n res += num - target\n return res\n \n'''\n\n# math + hashtable, passed with ridiculous slow speed\n# change vector to hashtable\n\nclass Solution:\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n freq = {}\n freq[nums[0]] = 1\n for i in range(1, len(nums)):\n freq[nums[i]] = freq[nums[i - 1]] + 1\n\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]: continue\n incr = freq[nums[i - 1]] # (nums[i] - nums[i - 1])\n decr = len(nums) - freq[nums[i - 1]]\n if decr < incr:\n return self.cost(nums, nums[i - 1])\n\n return self.cost(nums, nums[-1])\n\n def cost(self, nums, target):\n res = 0\n for num in nums:\n if num < target:\n res += target - num\n else:\n res += num - target\n return res\n\n# modified math without hashtable\n# intuition: find the median\n\nclass Solution:\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort()\n\n for i in range(1, len(nums)):\n if nums[i] == nums[i - 1]: continue\n if i << 1 > len(nums):\n return self.cost(nums, nums[i - 1])\n return self.cost(nums, nums[-1])\n\n def cost(self, nums, target):\n res = 0\n for num in nums:\n if num < target:\n res += target - num\n else:\n res += num - target\n return res\n\n# quick selection without sort\n# time complexity O(n)\n\nclass Solution:\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n median = self.qSelect(nums, 0, n - 1, 1 + n >> 1) # median = 1 + n >> 1 not affect the even length\n return self.cost(nums, median)\n\n def qSelect(self, nums, low, high, k):\n left, right = 0, len(nums) - 1\n pivot = nums[(left + right) >> 1]\n i = left\n while i <= right:\n if nums[i] > pivot:\n nums[i], nums[right] = nums[right], nums[i]\n right -= 1\n elif nums[i] < pivot:\n nums[i], nums[left] = nums[left], nums[i]\n i += 1\n left += 1\n else:\n i += 1\n if k <= left:\n return 
self.qSelect(nums, low, left - 1, k)\n        elif k - 1 > right:\n            return self.qSelect(nums, right + 1, high, k)\n        else:\n            return pivot\n\n    def cost(self, nums, target):\n        res = 0\n        for num in nums:\n            if num < target:\n                res += target - num\n            else:\n                res += num - target\n        return res\n\n\n# 2021/05/19\n# Runtime: 224 ms, faster than 5.03% of Python3 online submissions for Minimum Moves to Equal Array Elements II.\n# Memory Usage: 15.3 MB, less than 75.16% of Python3 online submissions for Minimum Moves to Equal Array Elements II.\n\n# Binary search passes all the tests, but its correctness still needs to be verified. O( n * lg(max - min) )\n# If we know the optimal target value, the cost is easy to compute.\n# Intuitively, the optimal value lies somewhere in the middle: i.e., the cost is decreasing on [l, m] and increasing on [m, r].\n\nclass Solution:\n    def minMoves2(self, nums: List[int]) -> int:\n        l, r = min(nums), max(nums)\n\n        while l + 1 < r:\n            m = (l + r) >> 1\n            if self.cost(nums, m + 1) > self.cost(nums, m):\n                r = m\n            else:\n                l = m\n        if self.cost(nums, l) < self.cost(nums, r):\n            return self.cost(nums, l)\n\n        return self.cost(nums, r)\n\n    def cost(self, nums, k):\n        ans = 0\n        for num in nums:\n            ans += abs(num - k)\n        return ans\n\n\n\n\n\n","sub_path":"0462. Minimum Moves to Equal Array Elements II.py","file_name":"0462. Minimum Moves to Equal Array Elements II.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"119234845","text":"# Names scores\nimport time\n\n\ndef e22():\n    start = time.time()\n    f = open(\"p022_names.txt\")\n    names = sorted(f.readline().replace('\"', '').split(','))\n    f.close()\n    total = 0\n    for i in range(len(names)):\n        avalue = 0\n        for char in names[i]:  # determination of the alphabetical value\n            avalue += (ord(char) - 64)  # of the name\n        total += (avalue * (i + 1))\n    end = time.time() - start\n    print(\"Runtime =\", end)\n    return total\n\n\nprint('Total scores is:', e22())  # 871198282\n","sub_path":"euler22_Names_scores.py","file_name":"euler22_Names_scores.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"342555931","text":"\"\"\"\nPROBLEM 21\n\nLet d(n) be defined as the sum of proper divisors of n \n(numbers less than n which divide evenly into n).\n\nIf d(a) = b and d(b) = a, where a != b, then a and b are an \namicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are \n1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; \ntherefore d(220) = 284. 
\nThe proper divisors of 284 are 1, 2, 4, 71 and 142; \nso d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10000.\n\nhttps://projecteuler.net/problem=21\n\"\"\"\n\nfrom functions import sum_divisors, all_divisors\nimport time \n\ndef d(n):\n return sum_divisors(n, include_n=False)\n\nstart = time.time()\namicable_sets = set()\nlim = 10000\nd_map = {}\nfor i in range(1, lim+1):\n d_map[i] = d(i)\n\nfor i in range(1, lim+1):\n for j in range(1, lim+1):\n if i == j or (min(i,j), max(i,j)) in amicable_sets:\n continue\n \n di = d_map[i]\n dj = d_map[j]\n if i == dj and j == di:\n amicable_sets.add((min(i,j), max(i,j)))\nend = time.time()\nprint(sum([sum(pair) for pair in list(amicable_sets)]))\nprint(\"there are {} amicable pairs under {}\".format(len(amicable_sets), lim))\nprint(\"it took {}s\".format(round(end-start, 2)))","sub_path":"problem021.py","file_name":"problem021.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"415491433","text":"import pygame\nimport random\n\npygame.init()\nscreen = pygame.display.set_mode((1000,650))\npygame.display.set_caption(\"Money Grabber\")\nicon = pygame.image.load('dollar.png')\npygame.display.set_icon(icon)\n# scoring\nscore=0\nlives=5\nfont = pygame.font.Font('freesansbold.ttf', 32)\ndef show_score(x, y):\n scoreRender = font.render(\"Score : \" + str(score), True, (255, 255, 255))\n screen.blit(scoreRender, (x, y))\ndef show_lives(x,y):\n livesRender = font.render(\"lives : \" + str(lives), True, (255, 255, 255))\n screen.blit(livesRender, (x, y))\ndef gameOver(x,y):\n screen.fill((0,0, 0))\n gameOverRender = font.render(\"Game Over \", True, (255, 255, 255))\n screen.blit(gameOverRender, (x, y))\n scoreRender = font.render(\"press the cross symbol to close game\" , True, (255, 255, 255))\n screen.blit(scoreRender, (x, y+50))\n hintRender = font.render(\"Score : \" + str(score), True, (255, 255, 255))\n screen.blit(hintRender, (x, y+100))\n \n\n# bucket\nbucketImg = pygame.image.load('bucket.png')\nbucketImg = pygame.transform.scale(bucketImg,(64,64))\nbucketX=470\nbucketY=550\nbucketXchange=0.5\nbucketCurrentChange=0\ndef bucket(x, y):\n screen.blit(bucketImg, (x, y))\ndef bucketPositionUpdate(bucketCurrentChange):\n global bucketX\n bucketX+=bucketCurrentChange\n if bucketX<0:\n bucketX=0\n if bucketX>1000-64:\n bucketX=1000-64\n\n#coin\ncoinImg = pygame.image.load('dollar.png')\ncoinImg = pygame.transform.scale(coinImg,(32,32))\ncoinX=random.randint(0,1000-64)\ncoinY=0\ncoinYchange=0.2\ncoinSpeedIncrement=0.05\ndef coin(x, y):\n screen.blit(coinImg, (x, y))\ndef coinPositionUpdate():\n global coinY,coinX,lives,score,coinYchange\n coinY+=coinYchange\n if(coinY>=550-32):\n if(coinX>=bucketX-10 and coinX+32<=bucketX+72):\n score+=1\n if(score%5==0):\n coinYchange+=coinSpeedIncrement\n else:\n lives-=1\n coinX=random.randint(0,1000-64)\n coinY=0\n\n\nisRunning=True\nwhile isRunning:\n screen.fill((0,0, 0))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n isRunning = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n bucketCurrentChange = -bucketXchange\n if event.key == pygame.K_RIGHT:\n bucketCurrentChange = +bucketXchange\n\n if event.type == pygame.KEYUP:\n bucketCurrentChange=0\n\n bucketPositionUpdate(bucketCurrentChange) \n coinPositionUpdate()\n coin(coinX,coinY)\n bucket(bucketX,bucketY)\n show_score(800,0)\n show_lives(600,0)\n if(lives<0):\n gameOver(200,200)\n 
pygame.display.update()\n\npygame.quit()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"32947881","text":"import os\nimport torch\nimport glob\nimport datetime as dt\nimport pickle\nfrom threading import Thread\n\nimport time # for start stop calc\n\nimport numpy as np\n\nimport torch.utils.data as data\n\nfrom deeplio.common import utils\nfrom deeplio.common.laserscan import LaserScan\nfrom deeplio.common.logger import PyLogger\n\n\nclass KittiRawData:\n \"\"\" KiitiRawData\n more or less same as pykitti with some application specific changes\n \"\"\"\n MAX_DIST_HDL64 = 120.\n IMU_LENGTH = 10.25\n\n def __init__(self, base_path, date, drive, cfg=None, oxts_bin=False, oxts_txt=True, **kwargs):\n self.drive = drive\n self.date = date\n self.dataset = kwargs.get('dataset', 'extract')\n self.drive_full = date + '_drive_' + drive + '_' + self.dataset\n self.calib_path = os.path.join(base_path, date)\n self.data_path = os.path.join(base_path, date, self.drive_full)\n self.frames = kwargs.get('frames', None)\n\n if cfg is not None:\n ds_config = cfg['kitti']\n self.image_width = ds_config.get('image-width', 1024)\n self.image_height = ds_config.get('image-height', 64)\n self.fov_up = ds_config.get('fov-up', 3)\n self.fov_down = ds_config.get('fov-down', -25)\n self.seq_size = cfg.get('sequence-size', 2)\n self.max_depth = ds_config.get('max-depth', 80)\n self.min_depth = ds_config.get('min-depth', 2)\n self.inv_depth = ds_config.get('inverse-depth', False)\n\n # Find all the data files\n self._get_velo_files()\n\n #self._load_calib()\n self._load_timestamps()\n\n # Give priority to binary files, sicne they are laoded much faster\n if oxts_bin:\n self._load_oxts_bin()\n elif oxts_txt:\n self._get_oxt_files()\n self._load_oxts()\n\n self.imu_get_counter = 0\n\n def __len__(self):\n return len(self.velo_files)\n\n def get_velo(self, idx):\n \"\"\"Read velodyne [x,y,z,reflectance] scan at the specified index.\"\"\"\n return utils.load_velo_scan(self.velo_files[idx])\n\n def get_velo_image(self, idx):\n scan = LaserScan(H=self.image_height, W=self.image_width, fov_up=self.fov_up, fov_down=self.fov_down,\n min_depth=self.min_depth, max_depth=self.max_depth, inverse_depth=self.inv_depth)\n scan.open_scan(self.velo_files[idx])\n scan.do_range_projection()\n # collect projected data and adapt ranges\n\n proj_xyz = scan.proj_xyz\n proj_remission = scan.proj_remission\n proj_range = scan.proj_range\n proj_range_xy = scan.proj_range_xy\n\n image = np.dstack((proj_xyz, proj_remission, proj_range, proj_range_xy))\n return image\n\n def _get_velo_files(self):\n # first try to get binary files\n self.velo_files = sorted(glob.glob(\n os.path.join(self.data_path, 'velodyne_points',\n 'data', '*.npy')))\n # if there is no bin files for velo, so the velo file are in text format\n if self.velo_files is None:\n self.velo_files = sorted(glob.glob(\n os.path.join(self.data_path, 'velodyne_points',\n 'data', '*.txt')))\n\n # Subselect the chosen range of frames, if any\n if self.frames is not None:\n self.velo_files = utils.subselect_files(\n self.velo_files, self.frames)\n self.velo_files = np.asarray(self.velo_files)\n\n def _get_oxt_files(self):\n \"\"\"Find and list data files for each sensor.\"\"\"\n self.oxts_files = sorted(glob.glob(\n os.path.join(self.data_path, 'oxts', 'data', '*.txt')))\n\n if self.frames is not None:\n self.oxts_files = utils.subselect_files(\n 
self.oxts_files, self.frames)\n self.oxts_files = np.asarray(self.oxts_files)\n\n def _load_calib_rigid(self, filename):\n \"\"\"Read a rigid transform calibration file as a numpy.array.\"\"\"\n filepath = os.path.join(self.calib_path, filename)\n data = utils.read_calib_file(filepath)\n return utils.transform_from_rot_trans(data['R'], data['T'])\n\n def _load_calib(self):\n \"\"\"Load and compute intrinsic and extrinsic calibration parameters.\"\"\"\n # We'll build the calibration parameters as a dictionary, then\n # convert it to a namedtuple to prevent it from being modified later\n data = {}\n\n # Load the rigid transformation from IMU to velodyne\n data['T_velo_imu'] = self._load_calib_rigid('calib_imu_to_velo.txt')\n\n def _load_timestamps(self):\n \"\"\"Load timestamps from file.\"\"\"\n timestamp_file_imu = os.path.join(self.data_path, 'oxts', 'timestamps.txt')\n timestamp_file_velo = os.path.join(self.data_path, 'velodyne_points', 'timestamps.txt')\n\n # Read and parse the timestamps\n self.timestamps_imu = []\n with open(timestamp_file_imu, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = dt.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n self.timestamps_imu.append(t)\n self.timestamps_imu = np.array(self.timestamps_imu)\n\n # Read and parse the timestamps\n self.timestamps_velo = []\n with open(timestamp_file_velo, 'r') as f:\n for line in f.readlines():\n # NB: datetime only supports microseconds, but KITTI timestamps\n # give nanoseconds, so need to truncate last 4 characters to\n # get rid of \\n (counts as 1) and extra 3 digits\n t = dt.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n self.timestamps_velo.append(t)\n self.timestamps_velo = np.array(self.timestamps_velo)\n\n def _load_oxts(self):\n \"\"\"Load OXTS data from file.\"\"\"\n self.oxts = np.array(utils.load_oxts_packets_and_poses(self.oxts_files))\n\n def _load_oxts_bin(self):\n oxts_file = os.path.join(self.data_path, 'oxts', 'data.pkl')\n with open(oxts_file, 'rb') as f:\n self.oxts = pickle.load(f)\n\n def _load_oxts_lazy(self, indices):\n oxts = utils.load_oxts_packets_and_poses(self.oxts_files[indices])\n return oxts\n\n def calc_gt_from_oxts(self, oxts):\n transformations = [oxt.T_w_imu for oxt in oxts]\n\n T_w0 = transformations[0]\n R_w0 = T_w0[:3, :3]\n t_w0 = T_w0[:3, 3]\n T_w0_inv = np.identity(4)\n T_w0_inv[:3, :3] = R_w0.T\n T_w0_inv[:3, 3] = -np.matmul(R_w0.T, t_w0)\n\n gt_s = [np.matmul(T_w0_inv, T_0i) for T_0i in transformations]\n return gt_s\n\n\nclass Kitti(data.Dataset):\n # In unsynced KITTI raw dataset are some timestamp holes - i.g. 2011_10_03_27\n # e.g. there is no corresponding IMU/GPS measurment to some velodyne frames,\n # We set the min. no. 
so we can check and ignore these holes.\n MIN_NUM_OXT_SAMPLES = 8\n MAX_NUM_OXT_SAMPLES = 10\n\n def __init__(self, config, ds_type='train', transform=None):\n \"\"\"\n :param root_path:\n :param config: Configuration file including split settings\n :param transform:\n \"\"\"\n ds_config_common = config['datasets']\n ds_config = ds_config_common['kitti']\n root_path = ds_config['root-path']\n\n self.seq_size = ds_config_common['sequence-size']\n self.channels = config['channels']\n\n self.ds_type = ds_type\n self.transform = transform\n\n self.datasets = []\n self.length_each_drive = []\n self.bins = []\n self.images = [None] * self.seq_size\n\n # Since we are interested in sequences of lidar frames - e.g. multiple frames at each iteration,\n # depending on the sequence size and the current wanted index coming from the pytorch dataloader\n # we must switch between drives if not enough frames exist in the specific drive wanted by the dataloader,\n # therefore we separate the valid indices of each drive into bins.\n last_bin_end = -1\n for date, drives in ds_config[self.ds_type].items():\n for drive in drives:\n date = str(date).replace('-', '_')\n drive = '{0:04d}'.format(drive)\n ds = KittiRawData(root_path, date, drive, ds_config_common, oxts_bin=True)\n\n length = len(ds)\n\n bin_start = last_bin_end + 1\n bin_end = bin_start + length - 1\n self.bins.append([bin_start, bin_end])\n last_bin_end = bin_end\n\n self.length_each_drive.append(length)\n self.datasets.append(ds)\n\n self.bins = np.asarray(self.bins)\n self.length_each_drive = np.array(self.length_each_drive)\n\n self.length = self.bins.flatten()[-1] + 1\n\n self.logger = PyLogger(name="KittiDataset_{}".format(self.ds_type))\n\n def load_images(self, dataset, indices):\n threads = [None] * self.seq_size\n\n for i in range(self.seq_size):\n idx = indices[i]\n\n threads[i] = Thread(target=self.load_image, args=(dataset, indices[i], i))\n threads[i].start()\n\n for i in range(self.seq_size):\n threads[i].join()\n\n def load_image(self, dataset, ds_index, img_index):\n img = dataset.get_velo_image(ds_index)\n img = img[:, :, self.channels]\n self.images[img_index] = torch.from_numpy(img)\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n if torch.is_tensor(index):\n index = index.tolist()\n\n start = time.time()\n\n idx = -1\n num_drive = -1\n for i, bin in enumerate(self.bins):\n bin_start = bin[0]\n bin_end = bin[1]\n if bin_start <= index <= bin_end:\n idx = index - bin_start\n num_drive = i\n break\n\n if idx < 0 or num_drive < 0:\n print("Error: No bins and no drive number found!")\n return None\n\n dataset = self.datasets[num_drive]\n\n # get frame indices\n len_ds = len(dataset)\n if idx <= len_ds - self.seq_size:\n indices = list(range(idx, idx + self.seq_size))\n elif (len_ds - self.seq_size) < idx < len_ds:\n indices = list(range(len_ds - self.seq_size, len_ds))\n else:\n self.logger.error("Wrong index ({}) in {}_{}".format(idx, dataset.date, dataset.drive))\n raise Exception("Wrong index ({}) in {}_{}".format(idx, dataset.date, dataset.drive))\n\n # Get frame timestamps\n velo_timestamps = [dataset.timestamps_velo[idx] for idx in indices]\n\n self.load_images(dataset, indices)\n images = self.images\n\n imus = []\n gts = []\n for i in range(self.seq_size - 1):\n velo_start_ts = velo_timestamps[i]\n velo_stop_ts = velo_timestamps[i+1]\n\n mask = ((dataset.timestamps_imu >= velo_start_ts) & (dataset.timestamps_imu < velo_stop_ts))\n oxt_indices = np.argwhere(mask).flatten()\n len_oxt =
len(oxt_indices)\n\n if (len_oxt == 0) or (len_oxt < self.MIN_NUM_OXT_SAMPLES):\n self.logger.warning("Not enough OXT-samples: Index: {}, DS: {}_{}, len:{}, velo-timestamps: {}-{}".format(index, dataset.date, dataset.drive, len_oxt, velo_start_ts, velo_stop_ts))\n tmp_imu = np.zeros((self.seq_size - 1, self.MAX_NUM_OXT_SAMPLES, 6))\n tmp_gt = np.zeros((self.seq_size - 1, self.MAX_NUM_OXT_SAMPLES, 4, 4))\n items = [images, tmp_imu, tmp_gt]\n if self.transform:\n items = self.transform(items)\n data = {'images': items[0], 'imus': items[1], 'gts': items[2], 'valid': False}\n return data\n else:\n oxts = dataset.oxts[oxt_indices]\n imu_values = np.array([[oxt[0].ax, oxt[0].ay, oxt[0].az, oxt[0].wx, oxt[0].wy, oxt[0].wz] for oxt in oxts])\n gt = np.array([oxt[1] for oxt in oxts])\n\n # TODO we need a customized dataloader (maybe a collate_fn func) so we do not need to expand or crop here\n if len_oxt > self.MAX_NUM_OXT_SAMPLES:\n imu_values = imu_values[:self.MAX_NUM_OXT_SAMPLES]\n gt = gt[:self.MAX_NUM_OXT_SAMPLES]\n #self.logger.info("Cutting OXT-Samples: Index: {}, length:{}, velo-timestamps: {}-{})".format(index, len_oxt, velo_start_ts, velo_stop_ts))\n elif len_oxt < self.MAX_NUM_OXT_SAMPLES:\n imu_values = np.pad(imu_values, ((0, self.MAX_NUM_OXT_SAMPLES - len_oxt), (0, 0)), constant_values=0.)\n gt = np.pad(gt, ((0, self.MAX_NUM_OXT_SAMPLES - len_oxt), (0, 0), (0, 0)), mode='edge')\n #self.logger.info("Padding OXT-Samples: Index: {}, length:{}, velo-timestamps: {}-{}".format(index, len_oxt, velo_start_ts, velo_stop_ts))\n\n imus.append(imu_values)\n gts.append(gt)\n\n items = [images, imus, gts]\n\n\n if self.transform:\n items = self.transform(items)\n\n data = {'images': items[0], 'imus': items[1], 'gts': items[2], 'valid': True}\n\n end = time.time()\n #self.logger.debug("Idx:{}, dt: {}".format(index, end - start))\n\n return data\n\n def __repr__(self):\n # print dataset information\n rep = "Kitti-Dataset\n" \\n "Type: {}, Length: {}, Seq.length: {}\n" \\n "Date\tDrive\tlength\tstart-end\n".format(self.ds_type, self.length, self.seq_size)\n seqs = ""\n for i in range(len(self.length_each_drive)):\n date = self.datasets[i].date\n drive = self.datasets[i].drive\n length = self.length_each_drive[i]\n bins = self.bins[i]\n seqs = "".join("{}{}\t{}\t{}\t{}\n".format(seqs, date, drive, length, bins))\n rep = "{}{}".format(rep,seqs)\n return rep\n","sub_path":"deeplio/datasets/kitti.py","file_name":"kitti.py","file_ext":"py","file_size_in_byte":14147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"297588464","text":"from Tkinter import *\r\nfrom tkMessageBox import *\r\nfrom Map import *\r\nimport tkMessageBox, tkFileDialog\r\nfrom Commons import *\r\n\r\ndef notdone(): \r\n showerror('Not implemented', 'Not yet available')\r\n \r\ndef exitGIS():\r\n if tkMessageBox.askokcancel("Exit", "Wanna leave?"):\r\n root.destroy()\r\n\r\ndef importShp():\r\n dirname = tkFileDialog.askdirectory(parent=root,initialdir=".",title='Please select a directory') \r\n map.openMap(dirname)\r\n\r\ndef addShp():\r\n shpFile = tkFileDialog.askopenfilename(parent=root,initialdir=".",title='Please select a shpfile',\r\n defaultextension='.shp', filetypes=(("shp file", "*.shp"),("all files", "*.*")))\r\n layer = map.addLayer(os.path.splitext(shpFile)[0], randomColor())\r\n if len(map.layers) == 1:\r\n map.initVisParameter()\r\n # delete all, then redraw to make sure the layer visualization order
polygon->polyline->point\r\n can.delete('all')\r\n map.vis()\r\n \r\ndef close():\r\n if tkMessageBox.askokcancel("Exit", "Wanna close?"):\r\n can.delete('all')\r\n map.clean()\r\n \r\n# need to unbind all the button events in the canvas before operations\r\n# other operations are same\r\ndef zoomIn2Times():\r\n can.delete('all')\r\n map.zoomIn2Times()\r\n\r\ndef zoomOut2Times():\r\n can.delete('all')\r\n map.zoomOut2Times()\r\n\r\ndef zoom2Full():\r\n can.delete('all')\r\n map.zoom2Full()\r\n\r\ndef zoom2Extent():\r\n unbindCanvas(can)\r\n map.zoom2Extent()\r\n \r\ndef addPoint():\r\n unbindCanvas(can)\r\n map.addPoint() \r\n\r\ndef addPolyline():\r\n unbindCanvas(can)\r\n map.addPolyline()\r\n\r\ndef addCircle():\r\n unbindCanvas(can)\r\n map.addCircle()\r\n \r\ndef addPolygon():\r\n unbindCanvas(can)\r\n map.addPolygon()\r\n \r\ndef closeLayer():\r\n map.closeLayer()\r\n \r\n\r\ndef checkIntersect():\r\n map.checkIntersect()\r\n \r\ndef makemenu(win):\r\n top = Menu(win) \r\n win.config(menu=top)\r\n \r\n file = Menu(top, tearoff=0)\r\n file.add_command(label='Import Shp', command=importShp, underline=0)\r\n file.add_command(label='Add Shp Layer', command=addShp, underline=0)\r\n file.add_command(label='Close', command=close, underline=0)\r\n file.add_command(label='Exit', command=exitGIS, underline=0)\r\n top.add_cascade(label='File', menu=file, underline=0)\r\n\r\n view = Menu(top, tearoff=0)\r\n view.add_command(label='Zoom In', command=zoomIn2Times, underline=0)\r\n view.add_command(label='Zoom Out', command=zoomOut2Times, underline=0)\r\n view.add_command(label='Zoom Full', command=zoom2Full, underline=0)\r\n view.add_command(label='Zoom Extent', command=zoom2Extent, underline=0)\r\n view.add_command(label='Close Layer', command=closeLayer, underline=0)\r\n top.add_cascade(label='View', menu=view, underline=0)\r\n\r\n edit = Menu(top, tearoff=0)\r\n edit.add_command(label='Draw Point', command=addPoint, underline=0)\r\n edit.add_command(label='Draw Polyline', command=addPolyline, underline=0)\r\n edit.add_command(label='Draw Circle', command=addCircle, underline=0)\r\n edit.add_command(label='Draw Polygon', command=addPolygon, underline=0)\r\n top.add_cascade(label='Edit', menu=edit, underline=0)\r\n\r\n intersect = Menu(top, tearoff=0)\r\n intersect.add_command(label='Check Intersect', command=checkIntersect, underline=0)\r\n top.add_cascade(label='Intersect', menu=intersect, underline=0)\r\n \r\n\r\nif __name__ == '__main__':\r\n global root\r\n global can\r\n global map\r\n\r\n root = Tk() \r\n root.title('MiniGIS')\r\n can = Canvas(root, width = 800, height = 600)\r\n map = Map(can, 800, 600)\r\n can.pack()\r\n makemenu(root) \r\n\r\n root.mainloop()\r\n","sub_path":"MiniGIS.py","file_name":"MiniGIS.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"620645953","text":"def swapFileData():\r\n fileName1 = input("Enter the file name: ")\r\n fileName2 = input("Enter the file name: ")\r\n \r\n with open(fileName1,'r') as a:\r\n data_a = a.read()\r\n print ("File 1 has", data_a)\r\n\r\n with open(fileName2, 'r') as b:\r\n data_b = b.read()\r\n print("File 2 has", data_b)\r\n\r\n with open(fileName1,'w') as a:\r\n a.write(data_b)\r\n print("File 1 has", data_b)\r\n\r\n with open(fileName2,'w') as b:\r\n b.write(data_a)\r\n print("File 2 has",
data_a)\r\n\r\nswapFileData()","sub_path":"SwappingFiles.py","file_name":"SwappingFiles.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"480079860","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Jean\n#\n# Created: 09/02/2018\n# Copyright: (c) Jean 2018\n# Licence: \n#-------------------------------------------------------------------------------\nimport os\nimport fileinput\ncwd=os.getcwd()\nprint(cwd)\nfile_path = os.path.join(cwd, "e decimals 3.txt") # sorted by hundreds per line\nf = open(file_path, 'r')\nlines = [line.rstrip('\n') for line in tuple(f)]\nf.close()\nn=120\nhundreds=n//100+1\npointer=n%100-1\nprint (n,lines[hundreds][pointer])\n\nf = open(file_path, 'r')\ntext = f.read()\nprint(text[0:n+3+n//100])\nf.close()\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"e decimals.py","file_name":"e decimals.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"296477709","text":"import streamlit as st # type:ignore\nimport pandas as pd # type:ignore\nimport plotly.express as px\n\n\nst.sidebar.markdown(""\"# Hyperparameter tuning benchmark""\")\n\ndatasets = pd.read_csv("./data/openml_datasets.csv")\nresults = pd.read_csv("./data/openml_results.csv")\n\nst.write(datasets)\nst.write(results)\n\n\n# best_models["max_features"] = best_models["max_features"].apply(lambda x: round(x, 4))\n# best_models["max_samples"] = best_models["max_samples"].apply(lambda x: round(x, 4))\n\nresults = results.drop(["name", "improvement", "total_data_points", "samples", "features", "classes"], axis=1)\n\n\nfig = px.parallel_coordinates(\n results,\n color="test_score_tuned",\n # labels={\n # "": "",\n # "": "",\n # "": "",\n # "": "",\n # "": "",\n # },\n color_continuous_scale=px.colors.diverging.Tealrose,\n color_continuous_midpoint=0.5,\n)\nfig.show()\n","sub_path":"random_forest/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"25198063","text":"import importlib\nimport importlib.resources\n\nimport pytest\n\nimport climate_categories\nimport climate_categories.tests\nimport climate_categories.tests.data\n\n\ndef read_cat(fragment):\n return climate_categories.from_yaml(\n importlib.resources.files("climate_categories.tests.data")\n .joinpath(f"{fragment}_categorization.yaml")\n .open("r")\n )\n\n\n@pytest.fixture\ndef SimpleCat():\n return read_cat("simple")\n\n\n@pytest.fixture\ndef HierCat():\n return read_cat("hierarchical")\n\n\n@pytest.fixture(params=["hierarchical", "simple"])\ndef any_cat(request):\n """Test with all available valid example Categorizations."""\n return read_cat(request.param)\n\n\n@pytest.fixture\ndef spec_simple():\n return {\n "name": "SimpleCat",\n "title": "Simple Categorization",\n "comment": "A simple example categorization without relationships between"\n " categories",\n "references": "doi:00000/00000",\n "institution": "PIK",\n "hierarchical": False,\n "last_update": "2021-02-23",\n "version": "1",\n "categories": {\n "1": {\n "title": "Category 1",\n "comment": "The first
category\",\n \"alternative_codes\": [\"A\", \"CatA\"],\n \"info\": {\n \"important_data\": [\"A\", \"B\", \"C\"],\n \"other_important_thing\": \"ABC\",\n },\n },\n \"2\": {\n \"title\": \"Category 2\",\n \"comment\": \"The second category\",\n \"alternative_codes\": [\"B\", \"CatB\"],\n },\n \"3\": {\n \"title\": \"Category 3\",\n \"comment\": \"The third category\",\n \"alternative_codes\": [\"C\", \"CatC\"],\n },\n \"unnumbered\": {\"title\": \"The unnumbered category\"},\n },\n }\n\n\n@pytest.fixture\ndef spec_hier():\n return {\n \"name\": \"HierCat\",\n \"title\": \"Hierarchical Categorization\",\n \"comment\": \"A simple hierarchical categorization with categories with\"\n \" relationships\",\n \"references\": \"doi:00000/00000\",\n \"institution\": \"PIK\",\n \"hierarchical\": True,\n \"last_update\": \"2021-02-23\",\n \"version\": \"one\",\n \"total_sum\": False,\n \"canonical_top_level_category\": \"0\",\n \"categories\": {\n \"0\": {\n \"title\": \"Category 0\",\n \"comment\": \"Top-most category\",\n \"alternative_codes\": [\"TOTAL\"],\n \"children\": [[\"1\", \"2\", \"3\"], [\"0X3\", \"3\"], [\"1A\", \"1B\", \"2\", \"3\"]],\n },\n \"1\": {\n \"title\": \"Category 1\",\n \"comment\": \"The first category\",\n \"info\": {\"SomeInfo\": \"A\", \"OtherInfo\": [\"A\", \"B\", \"C\"]},\n \"children\": [[\"1A\", \"1B\"]],\n },\n \"2\": {\n \"title\": \"Category 2\",\n \"comment\": \"The second category\",\n \"children\": [[\"2A\", \"2B\"]],\n },\n \"3\": {\n \"title\": \"Category 3\",\n \"comment\": \"The third category\",\n \"children\": [[\"3A\"]],\n },\n \"1A\": {\"title\": \"Category 1A\", \"alternative_codes\": [\"1a\"]},\n \"1B\": {\"title\": \"Category 1B\", \"alternative_codes\": [\"1b\"]},\n \"2A\": {\"title\": \"Category 2A\", \"alternative_codes\": [\"2a\"]},\n \"2B\": {\"title\": \"Category 2B\", \"alternative_codes\": [\"2b\"]},\n \"3A\": {\"title\": \"Category 3A\", \"alternative_codes\": [\"3a\"]},\n \"0X3\": {\n \"title\": \"Total excluding category 3\",\n \"alternative_codes\": [\"0E3\"],\n \"children\": [[\"1\", \"2\"]],\n },\n \"OT\": {\"title\": \"Other top category\", \"children\": [[\"1B\", \"2B\"]]},\n },\n }\n","sub_path":"climate_categories/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"37118617","text":"import csv\nimport utils\nimport sys\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\n\ndef ChangeData(data):\n if data != \"timeout\" and data != \"failed\" and data != \"0.0\" and data != \"0\":\n changed_data = float(data)\n else:\n changed_data = 3600.00\n return changed_data\n\nif __name__ == '__main__':\n time_basic_data_path = utils.time_basic_data_path\n \n for method in utils.method_list:\n save_path = utils.time_result_path + \"2-depth_Encoding_Sort_\" + method + \".pdf\"\n plt.figure()\n predict_name_sort = utils.ReadJson(time_basic_data_path + method + \"_name_sort_2.json\")\n truth_name_sort = utils.ReadJson(time_basic_data_path + method + \"_name_sort_truth.json\")\n yaxis = list(range(len(truth_name_sort)))\n xaxis = []\n for name in truth_name_sort:\n xaxis.append(predict_name_sort.index(name))\n plt.plot([0, 824], [0, 824], color=\"k\", linewidth=2)\n plt.scatter(xaxis, yaxis, s=7, color=\"k\")\n # temp_ax.set_xscale('linear')\n # temp_ax.set_yscale('linear')\n plt.title(utils.NameMap(method), size=30)\n\n plt.xlim(0,824)\n plt.ylim(0,824)\n plt.xticks(range(0, 
len(yaxis), 200), [\"0 \", \"200\", \"400\", \"600\", \"800\"], size=30)\n plt.yticks(range(200, len(yaxis), 200), size=30)\n \n plt.subplots_adjust(left=0.14, right=0.95, top=0.91, bottom=0.1)\n plt.savefig(save_path)\n\n\n # predict_data_path = time_basic_data_path + \"time_predict_data.csv\"\n\n # with open(predict_data_path, newline='') as csvfile:\n # data = list(csv.reader(csvfile))\n\n # data = data[1:] \n # truth_dprove_data_list = []\n # truth_pdr_data_list = []\n # truth_iimc_data_list = []\n # truth_IC3_data_list = []\n # for line in data:\n # truth_dprove_data_list.append(ChangeData(line[1]))\n # truth_pdr_data_list.append(ChangeData(line[2]))\n # truth_iimc_data_list.append(ChangeData(line[3]))\n # truth_IC3_data_list.append(ChangeData(line[4]))\n\n # figure_label_list = [\"0-depth Encoding\", \"1-depth Encoding\", \"2-depth Encoding\"]\n # for i in range(len(figure_label_list)):\n # label = figure_label_list[i]\n # dprove_data_list = []\n # pdr_data_list = []\n # iimc_data_list = []\n # IC3_data_list = []\n # for line in data:\n # dprove_data_list.append(ChangeData(line[4 * i + 5]))\n # pdr_data_list.append(ChangeData(line[4 * i + 6]))\n # iimc_data_list.append(ChangeData(line[4 * i + 7]))\n # IC3_data_list.append(ChangeData(line[4 * i + 8]))\n \n # save_path = utils.time_result_path + label + \".pdf\"\n # fig, ax = plt.subplots(2, 2)\n # ax[0][0].set_title(utils.NameMap(\"dprove\"))\n # ax[0][0].scatter(dprove_data_list, truth_dprove_data_list, s=2)\n # # ax[0][0].set_xlabel('Predict Time (s)')\n # # ax[0][0].set_ylabel('Truth Time (s)')\n # ax[0][0].set_xscale('log')\n # ax[0][0].set_yscale('log')\n\n # ax[0][1].set_title(utils.NameMap(\"pdr\"))\n # ax[0][1].scatter(pdr_data_list, truth_pdr_data_list, s=2)\n # ax[0][1].set_xscale('log')\n # ax[0][1].set_yscale('log')\n\n # ax[1][0].set_title(utils.NameMap(\"iimc\"))\n # ax[1][0].scatter(iimc_data_list, truth_iimc_data_list, s=2)\n # ax[1][0].set_xscale('log')\n # ax[1][0].set_yscale('log')\n\n # ax[1][1].set_title(utils.NameMap(\"IC3\"))\n # ax[1][1].scatter(IC3_data_list, truth_IC3_data_list, s=2)\n # ax[1][1].set_xscale('log')\n # ax[1][1].set_yscale('log')\n\n # plt.savefig(save_path)","sub_path":"code/DrawTimeDotFigure.py","file_name":"DrawTimeDotFigure.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195316203","text":"import serial # you need to install the pySerial :pyserial.sourceforge.net\nimport time\n# your Serial port should be different!\narduino = serial.Serial('/dev/ttyACM0', 9600)\n#s = s.decode(\"utf-8\")\ndef onOffFunction():\n command = 'on';\n if command ==\"on\": \n time.sleep(1) \n arduino.write(b's')\n onOffFunction()\n elif command ==\"off\":\t\t\n time.sleep(1) \n arduino.write(b'L')\n onOffFunction()\n elif command ==\"bye\":\n time.sleep(1) \n arduino.close()\n else:\n onOffFunction()\n\ntime.sleep(2) #waiting the initialization...\n\nonOffFunction()","sub_path":"acenderled.py","file_name":"acenderled.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93051413","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 13:43:02 2020\n\n@author: Vladimir Sivak\n\"\"\"\n\n\nfrom init_script import *\nfrom scipy.optimize import curve_fit\n\nclass T1_vs_nbar(FPGAExperiment):\n\n amp = RangeParameter((0.0, 1.0, 21))\n length = RangeParameter((0.0, 4e5, 101))\n loop_delay = FloatParameter(1e6)\n channel 
= (0, 3)\n T1_guess = 100 # guess for the fit in us\n\n def sequence(self):\n with scan_amplitude(self.channel, *self.amp):\n with scan_length(*self.length) as dyn_len:\n readout(init_state='se')\n qubit.flip()\n sync()\n constant_pulse(self.channel, dyn_len, amp='dynamic')\n delay(1000)\n readout()\n delay(self.loop_delay)\n\n def process_data(self):\n # postselect on initial measurement\n init_state = self.results['init_state'].threshold()\n self.results['postselected'] = self.results['default'].postselect(\n init_state, [0])[0].thresh_mean().data\n self.results['postselected'].ax_data = self.results['init_state'].ax_data[1:]\n self.results['postselected'].labels = self.results['init_state'].labels[1:]\n \n # fit data to exponential decay\n def exp_decay(x, a, b, c):\n return a*np.exp(-x/b)+c\n \n T1 = np.zeros(self.amp[-1])\n for i in range(self.amp[-1]):\n xdata = self.results['default'].ax_data[2] #time in ns\n ydata = np.mean(self.results['default'].data[:,i,:].real, axis=0)\n popt, pcov = curve_fit(exp_decay, xdata, ydata, \n p0=[1, self.T1_guess*1e3, 0])\n T1[i] = popt[1]*1e-3 # convert to us\n self.results['T1'] = T1\n self.results['T1'].ax_data = [self.results['default'].ax_data[1]]\n self.results['T1'].labels = [self.results['default'].labels[1]]\n ","sub_path":"fpga_exp/T1_vs_nbar.py","file_name":"T1_vs_nbar.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"68397740","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template("home.html")\n\n@app.route('/', methods=["GET"])\ndef method():\n\tr = request.args\n\treturn '/'\n \nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"Flask/Scraper/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"303514553","text":"# GeneralTransaction.py\nimport simplejson\nimport com.ihsan.foundation.pobjecthelper as phelper\nimport sys\n\nstatus = 0\nmsg = ''\nFileKwitansi = ''\n\ndef DAFScriptMain(config,parameters,returnpacket):\n status = returnpacket.CreateValues(["Is_Err", 0], ["Err_Message", ""])\n dsListAccount = returnpacket.AddNewDatasetEx(\n 'ListBudgetOwner',\n ';'.join([\n 'OwnerCode: string',\n 'OwnerName: string',\n ])\n )\n\n try:\n BranchCode = config.SecurityContext.GetUserInfo()[4]\n sOQL = "select from BudgetOwner \\n [Is_Detail='T' ] \\n ( OwnerCode, \\n OwnerName, \\n self) then order by OwnerCode;"\n \n oql = config.OQLEngine.CreateOQL(sOQL)\n oql.ApplyParamValues()\n \n oql.active = 1\n ds = oql.rawresult\n \n while not ds.Eof:\n recAccount = dsListAccount.AddRecord()\n recAccount.OwnerCode = ds.OwnerCode\n recAccount.OwnerName = ds.OwnerName\n \n ds.Next()\n # end while\n\n except:\n status.Is_Err = 1\n status.Err_Message = str(sys.exc_info()[1]) \n","sub_path":"scripts/Tools/S_GetBudgetTemplate.py","file_name":"S_GetBudgetTemplate.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"204586471","text":"# Casablanca’s hippodrome is organizing a new type of horse racing: duals.\r\n# During a dual, only two horses will participate in the race.
In order for\r\n# the race to be interesting, it is necessary to try to select two horses with\r\n# similar strength.\r\n\r\n# Write a program which, using a given number of strengths, identifies the two\r\n# closest strengths and shows their difference with an integer (≥ 0).\r\n\r\n \r\nN = int(input())\r\nP = sorted(int(input()) for i in range(N))\r\nD = min(P[i+1] - P[i] for i in range(len(P) - 1))\r\nprint(D)","sub_path":"CG/easy_horse-racing-duals.py","file_name":"easy_horse-racing-duals.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"315088408","text":"from main.models import CustomUser,Hostel,Room\nimport names\nimport random\nimport datetime\nnow = datetime.datetime.now()\n\nhostel = Hostel.objects.get(number_hostel=1)\nfor x in range(200, 300, 100):\n for y in range(1, 33):\n now_quant = random.randint(a=0, b=3)\n max_quant = random.randint(a=2, b=3)\n number_room = x + y\n room = Room.objects.create(number=number_room, floor=x / 100,\n hostel=hostel, max_quantity=max_quant, now_quantity=now_quant)\n room.save()\n print(f\"create_user: {room.number},now_quznt: {room.now_quantity}\")\n for xx in range(now_quant):\n first_name = names.get_first_name()\n last_name = names.get_last_name()\n email = first_name + '.' + last_name\n course = now.year - random.randint(1, 6)\n year_of_start = str(course)[:2]\n specialization = random.choice(['ki','si','pi','kn','rt','ekt','sa','et','pm'])\n group = specialization + \"-\" + year_of_start + \"-\" + str(random.randint(1, 6))\n\n user = CustomUser.objects.create_user(email=email+'@nure.ua', password=\"admin\", first_name=first_name,hostel=hostel,\n room=room, group=group, course=course, last_name=last_name)\n user.save()\n\n print(f\"create_user: {user.email}\")\n","sub_path":"generate_names.py","file_name":"generate_names.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"317299971","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('addstation', views.add_station, name='add-station'),\n path(r'addurl//', views.add_url, name=\"add-url\"),\n path('apiv1/stations', views.stations_json, name='stations-json'),\n]\n","sub_path":"stations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"45771445","text":"#!/usr/bin/env python\n\n\"\"\"\nNotes:\n * Page is divide into 3 types of bands: top, bottom, and section(s)\n * If a column is CALCULATED it is column + (index into boatSection * 4)\n * If a row is OFFSET it is offset + row\n * These are done with sections[], boatSizes[] start[], end[]\n * sections[] the order the sections are to be found in on the sheet\n * boatSizes[] the boat lengths are integers found in Row 1\n * start[] of each section where \"QTY\" found in Column A (1)\n * end[] of each section where \"SUBTOTAL \" found in Column J (10)\n * To process a sheet the a dictionary entry is made with the key being\n the name of boat model and the value is another dictionary.\n * Examples use the Seahawk Outboard for Boat Model:\n * Top/Bottom directly addressed: ['Seahawk Outboard']['OVERHEAD BOAT']\n * Section top/bottom directly addressed: ['Seahawk Outboard']['TRAILER LABOR RATE']\n * Section parts list is an array found in: ['Seahawk Outboard']['TRAILER PARTS']\n * Section parts are referenced by index: ['Seahawk Outboard']['TRAILER PARTS'][0]\n * Parts details are addressed: ['Seahawk Outboard']['TRAILER PARTS'][0]['UOM']\n\"\"\"\n\nstart = 10\nend = 12\nwidth = 4\n\n# order of sections corresponds with order of starts and ends\nsections = [\n \"TRAILER\",\n \"ENGINE & JET\",\n \"FABRICATION\",\n \"CANVAS\",\n \"PAINT\",\n \"OUTFITTING\"\n]\n\npossibleSize = [\n 16,\n 17,\n 18,\n 19,\n 20,\n 21,\n 22,\n 23,\n 24,\n 25,\n 26,\n 27,\n 28,\n 29,\n 30,\n 31,\n 32,\n 33,\n 34,\n 35,\n 36,\n 37,\n 38,\n 39,\n 40,\n 41,\n 42,\n 43,\n 44,\n 45,\n 46,\n 47,\n 48,\n 49,\n 50,\n 51,\n 52,\n 53,\n 54,\n 55,\n 56,\n 57\n]\n\n## TOP BAND #########################################################\n\n# top of sheet, absolute row, absolute column - not by boat size\n# [0] title, [1] column, [2] row, [3] default\ntopSection = [\n [\"BOAT MODEL\", 1, 1, \"\"],\n [\"OVERHEAD BOAT\", 6, 5, \"\"],\n [\"RETAIL BOAT\", 5, 6, \"\"],\n [\"OVERHEAD MOTOR AND TRAILER\", 6, 7, \"0\"],\n [\"RETAIL MOTOR AND TRAILER\", 5, 8, \"0\"],\n] \n\n# top of sheet, calculated column, absolute row - by boat size\n# [0] title, [1] column, [2] row, [3] default\ncostSummary = [\n [\" LABOR TOTAL\", 13, 2, \"0\"],\n [\" MATERIAL TOTAL\", 13, 3, \"0\"],\n [\" TRAILER / ENGINE & JET\", 13, 4, \"0\"],\n [\" TOTAL COST\", 13, 5, \"0\"],\n\t[\" RETAIL BASE BOAT\", 13, 6, \"0\"],\n\t[\" RETAIL MOTOR / TRAILER\", 13, 7, \"0\"],\n\t[\" CALCULATED RETAIL TOTAL\", 13, 8, \"0\"],\n\t[\" CALCULATED DEALER INVOICE\", 13, 9, \"0\"],\n\t[\" CALCULATED CM\", 13, 10, \"0\"],\n\t[\" ADVERTISED RETAIL TOTAL\", 13, 12, \"0\"],\n\t[\" ADVERTISED DEALER INVOICE\", 13, 13, \"0\"],\n\t[\" ADVERTISED CM\", 13, 14, \"0\"],\n]\n\nboatLength = [\n\t[\"BOAT SIZE\", 10, 1, \"\"],\n]\n\n## BOTTOM BAND ##########################################################\n\n# bottom section - max(end) + 5, absolute column, offest row - not by boat size\n# [0] title, [1] column, [2] row, [3] default\nbottomSection = [\n]\n\n\n## SECTION BANDS 
##########################################################\n\n# top of section, absolute column, offset row - not by boat size\n# [0] title, [1] column, [2] row, [3] default\nstartSections = [\n [\" CONSUMABLES\", 4, -1, \"0\"],\n [\" LABOR RATE\", 6, -1, \"0\"],\n]\n\n# top of section, calculated column, offset row - by boat size\n# [0] title, [1] column, [2] row, [3] default\nstartSectionsSize = [\n [\" HOURS\", 12, -1, \"0\"]\n]\n\n# bottom of section, calculated column, offset row\n# [0] title, [1] column, [2] row, [3] default\nendSections = [\n [\" SUBTOTAL ALL\", 13, 0, \"0\"],\n [\" CONSUMABLES\", 13, 1, \"0\"],\n [\" TOTAL\", 13, 2, \"0\"],\n]\n\n# 1/2 body of section, absolute column, offset row\n# [0] title, [1] column, [2] row, [3] default\npartSection = [\n [\"PART NUMBER\", 4, 1, \"\"],\n [\"DESCRIPTION\", 5, 1, \"\"],\n [\"UOM\", 6, 1, \"\"],\n [\"PRICE\", 7, 1, \"0\"],\n [\"VENDOR\", 8, 1, \"\"],\n [\"VENDOR PART\", 10, 1, \"\"],\n]\n\n# 2/2 body of section, calculated column, offset row\n# [0] title, [1] column, [2] row, [3] default\npartSectionByModel = [\n [\" QTY\", 10, 1, \"0\"],\n [\" TOTAL\", 13, 1, \"0\"], \n]","sub_path":"fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":4113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"37270354","text":"#!/usr/bin/env python\n################################################################################\n# DATE: 2017/03/10\n#\n# MODULE: lasso_enet_var_select.py\n#\n# VERSION: 1.1\n#\n# AUTHOR: Miguel Ibarra (miguelib@ufl.edu), Matt Thoburn (mthoburn@ufl.edu).\n#\n# DESCRIPTION: This runs an Elastic Net or Lasso Test on wide data\n################################################################################\n\n# Import built-in libraries\nimport os\nimport sys\nimport logging\nimport argparse\nimport itertools as it\nfrom argparse import RawDescriptionHelpFormatter\n# Import add-on libraries\nimport rpy2\nimport rpy2.robjects.numpy2ri\nimport rpy2.robjects as robjects\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.packages import importr\nfrom rpy2.robjects.vectors import StrVector\nfrom rpy2.robjects.packages import SignatureTranslatedAnonymousPackage as STAP\nimport pandas\nimport numpy as np\nfrom numpy import genfromtxt\n# Import local data libraries\nfrom secimtools.dataManager import logger as sl\nfrom secimtools.dataManager.interface import wideToDesign\n\n\ndef getOptions(myOpts=None):\n description=\"\"\"\n The tool performs feature selection using LASSO/Elastic Net feature selection method.\n \"\"\"\n parser = argparse.ArgumentParser(description=description, \n formatter_class=RawDescriptionHelpFormatter)\n # Standard Input\n standard = parser.add_argument_group(title='Standard input', \n description='Standard input for SECIM tools.')\n standard.add_argument( \"-i\",\"--input\", dest=\"input\", action='store', \n required=True, help=\"Input dataset in wide format.\")\n standard.add_argument(\"-d\" ,\"--design\",dest=\"design\", action='store', \n required=True, help=\"Design file.\")\n standard.add_argument(\"-id\", \"--ID\",dest=\"uniqID\", action='store', \n required=True, help=\"Name of the column with unique\"\\\n \" identifiers.\")\n standard.add_argument(\"-g\", \"--group\", dest=\"group\", action='store', \n required=False, default=False, help=\"Name of the column\"\\\n \" with groups.\")\n # Tool Input\n tool = parser.add_argument_group(title='Tool Especific')\n tool.add_argument(\"-a\", \"--alpha\", dest=\"alpha\", 
action=\"store\",\n required=True, help=\"Alpha Value.\")\n # Tool Output\n output = parser.add_argument_group(title='Required output')\n output.add_argument(\"-c\", \"--coefficients\", dest=\"coefficients\", \n action=\"store\", required=False, help=\"Path of en\"\\\n \" coefficients file.\")\n output.add_argument(\"-f\", \"--flags\", dest=\"flags\", action=\"store\", \n required=False, help=\"Path of en flag file.\")\n output.add_argument(\"-p\", \"--plots\", dest=\"plots\", action=\"store\", \n required=False, help=\"Path of en coefficients file.\") \n args = parser.parse_args()\n\n # Standardize paths\n args.input = os.path.abspath(args.input)\n args.plots = os.path.abspath(args.plots)\n args.flags = os.path.abspath(args.flags)\n args.design = os.path.abspath(args.design)\n args.coefficients = os.path.abspath(args.coefficients)\n\n return(args)\n\n\ndef main(args):\n #Get R ready\n # Get current pathway\n myPath = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))\n\n # Stablish path for LASSO script\n my_r_script_path = os.path.join(myPath, \"lasso_enet.R\")\n logger.info(my_r_script_path)\n\n # Activate pandas2ri\n pandas2ri.activate()\n\n # Running LASSO R sctrip\n with open(my_r_script_path, 'r') as f:\n rFile = f.read()\n lassoEnetScript = STAP(rFile, \"lasso_enet\")\n\n # Importing data trought interface\n dat = wideToDesign(args.input, args.design, args.uniqID, group=args.group,\n logger=logger)\n\n # Cleaning from missing data\n dat.dropMissing()\n\n # Transpossing data\n dat.trans = dat.transpose()\n dat.trans.columns.name=\"\"\n\n # Dropping nan columns from design\n removed = dat.design[dat.design[dat.group]== \"nan\"]\n dat.design = dat.design[dat.design[dat.group] != \"nan\"]\n dat.trans.drop(removed.index.values, axis=0, inplace=True)\n\n logger.info(\"{0} removed from analysis\".format(removed.index.values))\n dat.design.rename(columns={dat.group:\"group\"}, inplace=True)\n dat.trans.rename(columns={dat.group:\"group\"}, inplace=True)\n\n #Generate a group List\n groupList = [title for title, group in dat.design.groupby(\"group\") \n if len(group.index) > 2]\n\n #Turn group list into pairwise combinations\n comboMatrix = np.array(list(it.combinations(groupList,2)))\n comboLength = len(comboMatrix)\n\n #Run R\n correct_list_of_names = np.array(dat.trans.columns.values.tolist())\n returns = lassoEnetScript.lassoEN(dat.trans, dat.design, args.uniqID, correct_list_of_names, comboMatrix, \n comboLength,args.alpha,args.plots)\n robjects.r['write.table'](returns[0],file=args.coefficients,sep='\\t',\n quote=False, row_names = False, col_names = True)\n robjects.r['write.table'](returns[1],file=args.flags,sep='\\t',\n quote=False, row_names = False, col_names = True)\n # Finishing\n logger.info(\"Script Complete!\")\n\n\nif __name__ == '__main__':\n args = getOptions()\n logger = logging.getLogger()\n sl.setLogger(logger)\n logger.info(u\"Importing data with the folowing parameters: \"\\\n \"\\n\\tWide: {0}\"\\\n \"\\n\\tDesign:{1}\"\\\n \"\\n\\tUniqID:{2}\"\\\n \"\\n\\tAlpha: {3}\".\\\n format(args.input,args.design,args.uniqID,args.alpha))\n main(args)\n","sub_path":"src/scripts/lasso_enet_var_select.py","file_name":"lasso_enet_var_select.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"534039985","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n# self.next = None\n\nclass 
Solution:\n # @param root, a tree node\n # @return nothing\n def connect(self, root):\n dummy = TreeNode(-1)\n current = root\n while current:\n dummy.next = None\n node = dummy\n while current:\n if current.left:\n node.next = current.left\n node = node.next\n if current.right:\n node.next = current.right\n node = node.next\n current = current.next\n current = dummy.next\n return root\n","sub_path":"connect2.py","file_name":"connect2.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"602457638","text":"from abc import ABCMeta, abstractmethod\nimport queue\n\nfrom bt.components.event.event import SignalEvent, MarketEvent, EventType\nfrom bt.components.data_handler.data import DataHandler\n\n\nclass Strategy(metaclass=ABCMeta):\n """\n 1. This is the parent class of all strategy classes; it defines the common interface\n 2. Its purpose is to generate SignalEvents for specific symbols based on market data (MarketEvent)\n 3. This approach can be applied to historical data as well as to a live real-time trading system\n 4. A strategy class receives MarketEvents and then generates SignalEvents\n """\n\n def __init__(self, data_handler: DataHandler, events: queue.Queue):\n """\n Used to initialize this strategy class\n :param data_handler: an instance of the DataHandler class\n :param events: the event queue\n """\n self.data_handler = data_handler\n self.symbol_list = self.data_handler.symbol_list\n self.events = events\n\n self.bought = {symbol: False for symbol in self.symbol_list}\n\n @abstractmethod\n def calculate_signals(self, event: MarketEvent):\n """\n Provides the prototype for computing signals; the result of the computation is a stream of SignalEvents\n :return:\n """\n raise NotImplementedError("Should implement calculate_signals()")\n\n\nclass BuyAndHoldStrategy(Strategy):\n """\n 1. This is the simplest possible strategy: buy a symbol and hold it forever\n 2. Purpose: the positions held by this strategy can serve as a benchmark against which other strategies are compared\n """\n\n def __init__(self, data_handler: DataHandler, events: queue.Queue):\n """\n Used to initialize the BuyAndHoldStrategy class\n :param data_handler: an instance of the DataHandler class\n :param events: the event queue\n """\n super(BuyAndHoldStrategy, self).__init__(data_handler, events)\n\n def calculate_signals(self, event: MarketEvent):\n """\n In BuyAndHoldStrategy, generate one buy signal per symbol; this means we hold every symbol in symbol_list for the long run\n :param event: MarketEvent\n :return:\n """\n if event.type_enum == EventType.MARKET:\n for s in self.symbol_list:\n bar = self.data_handler.get_latest_bars(s)\n if bar is not None and bar != []:\n if self.bought[s] is False:\n signal = SignalEvent(bar[0][0], bar[0][1], "LONG", 10)\n self.events.put(signal)\n self.bought[s] = True\n pass\n","sub_path":"bt/components/strategy/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"280800202","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the jimOrders function below.\ndef jimOrders(orders):\n s = sorted(enumerate(orders,1),key=lambda x:sum(x[1]))\n return [i[0] for i in s]\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n orders = []\n\n for _ in range(n):\n orders.append(list(map(int, input().rstrip().split())))\n\n result = jimOrders(orders)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\n')\n\n fptr.close()","sub_path":"Interview_Challenges/Arrays/jim_and_orders.py","file_name":"jim_and_orders.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"461659774","text":"#!/usr/bin/env python \n\nfrom __future__ import print_function \nfrom builtins import * \n\nimport pwd
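# pwd is a POSIX-only module; pwd.getpwnam() raises KeyError for a missing user, which the check below relies on.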
\n\nname = \"lilmac\"\n\ndef usr_check(): \n \"\"\"Check if username exists.\"\"\"\n\n try: \n pwd.getpwnam(name)\n print(\"User %s DOES EXIST. Try a different username.\" % (name)) \n\n except KeyError: \n print(\"User %s DOES NOT EXIST. Continuing.\" % (name)) \n \nusr_check()\n","sub_path":"functions/python/usr_check.py","file_name":"usr_check.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157465069","text":"import json, uuid, time\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom ..models import Notification, AddrEntity\n\nclass CreateUpdateEntity(APITestCase):\n \"\"\" Test module for Create Entity \"\"\"\n url = reverse(\"notification:create_or_update_entity\")\n fixed_uuid='7a85e49b-66dd-48df-a92f-1f9e84263d15'\n\n def test_create_entity_minimal(self):\n self.valid_payload={\n 'eid':str(uuid.uuid4()),\n 'e_type':1\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n #print(response.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_create_entity_complete(self):\n self.valid_payload={\n 'eid':str(uuid.UUID(self.fixed_uuid)),\n 'e_type':1,\n 'phones':['+917829862689'],\n 'emails':['ravinder@changepay.in'],\n 'fcm_tokens':['+917829862689'],\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n #print(response.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_update_entity(self):\n self.valid_payload={\n 'eid':str(uuid.UUID(self.fixed_uuid)),\n 'e_type':1,\n 'phones':['+917829862689'],\n 'emails':['ravinder@changepay.in'],\n 'fcm_tokens':['+917829862689'],\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n #print(response.data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n #Update\n self.valid_payload={\n 'eid':str(uuid.UUID(self.fixed_uuid)),\n 'e_type':1,\n 'emails':['ravindernitks@gmail.com', 'ravinder@changepay.in'],\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n #print(response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_neg_create_entity_minimal(self):\n self.valid_payload={\n 'eid':str(uuid.uuid4()),\n 'e_type':6\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\nclass NotificationSMSTestCases(APITestCase):\n \"\"\" Test module for Notification \"\"\"\n url = reverse(\"notification:sms-send\")\n fixed_uuid='7a85e49b-66dd-48df-a92f-1f9e84263d15'\n\n def setUp(self):\n self.eid=str(uuid.uuid4())\n self.e_type=AddrEntity.AddrEntityType.TYPE_CUSTOMER\n self.valid_payload={\n 'eid':self.eid,\n 'e_type':self.e_type\n }\n response = self.client.post(\n reverse(\"notification:create_or_update_entity\"), data=json.dumps(self.valid_payload),\n content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_create_sms_minimal(self):\n self.valid_payload={\n 'n_type':Notification.NotificationType.TYPE_SMS,\n 'to_entity':self.eid,\n 'to_entity_type':self.e_type,\n 'priority':1,\n 'sms_text':\"Hello SMS World!\",\n 'sms_type':1,\n }\n 
response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n print(response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_sms_complete(self):\n ni_d=str(uuid.uuid4())\n self.valid_payload={\n 'n_id':ni_d,\n 'to_entity':self.eid,\n 'to_entity_type':self.e_type,\n 'priority':1,\n 'cb_url':'svc://customer/cb/notif',\n 'cb_states':[Notification.NotificationState.STATE_SENT, Notification.NotificationState.STATE_FAILED],\n 'max_ts':int(time.time())+5000,\n 'from_phone':\"+917829876435\",\n 'from_code':\"FMCODE\",\n 'sms_text':\"Hello SMS World!\",\n 'sms_type':1,\n 'template_name':'template-otp-1'\n }\n response = self.client.post(\n self.url, data=json.dumps(self.valid_payload),\n content_type='application/json')\n print(response.data)\n self.assertEqual(response.data['nid'], ni_d)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n","sub_path":"mail_khaifa/notification/tests/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"565179280","text":"import cv2\r\nimport numpy as np\r\n\r\ndef stackImages(scale,imgArray):\r\n rows = len(imgArray)\r\n cols = len(imgArray[0])\r\n rowsAvailable = isinstance(imgArray[0], list)\r\n width = imgArray[0][0].shape[1]\r\n height = imgArray[0][0].shape[0]\r\n if rowsAvailable:\r\n for x in range ( 0, rows):\r\n for y in range(0, cols):\r\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\r\n if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\r\n imageBlank = np.zeros((height, width, 3), np.uint8)\r\n hor = [imageBlank]*rows\r\n hor_con = [imageBlank]*rows\r\n for x in range(0, rows):\r\n hor[x] = np.hstack(imgArray[x])\r\n ver = np.vstack(hor)\r\n else:\r\n for x in range(0, rows):\r\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\r\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\r\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\r\n hor= np.hstack(imgArray)\r\n ver = hor\r\n return ver\r\n\r\n\r\n\r\ndef empty(a):\r\n pass\r\n\r\n\r\n\r\ncv2.namedWindow(\"Track Bar\")\r\ncv2.resizeWindow(\"Track Bar\",640,200)\r\ncv2.createTrackbar(\"Hue Min\",\"Track Bar\",0,179,empty)\r\ncv2.createTrackbar(\"Hue Max\",\"Track Bar\",179,179,empty)\r\ncv2.createTrackbar(\"Sat Min\",\"Track Bar\",0,255,empty)\r\ncv2.createTrackbar(\"Sat Max\",\"Track Bar\",255,255,empty)\r\ncv2.createTrackbar(\"Val Min\",\"Track Bar\",0,255,empty)\r\ncv2.createTrackbar(\"Val Max\",\"Track Bar\",255,255,empty)\r\n\r\n\r\nvideo = cv2.VideoCapture(0)\r\nvideo.set(3,340)\r\nvideo.set(4,480)\r\n\r\n# for capturing whole video we use for loop\r\nwhile True:\r\n check, img = video.read()\r\n imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n h_min = cv2.getTrackbarPos(\"Hue Min\",\"Track Bar\")\r\n h_max = cv2.getTrackbarPos(\"Hue Max\",\"Track Bar\")\r\n s_min = cv2.getTrackbarPos(\"Sat Min\",\"Track Bar\")\r\n s_max = cv2.getTrackbarPos(\"Sat Max\",\"Track Bar\")\r\n v_min = cv2.getTrackbarPos(\"Val Min\",\"Track 
Bar\")\r\n v_max = cv2.getTrackbarPos(\"Val Max\",\"Track Bar\")\r\n\r\n lower = np.array([h_min,s_min,v_min])\r\n upper = np.array([h_max,s_max,v_max]) \r\n mask = cv2.inRange(imgHSV,lower,upper) # filter out image of that color (keep it white)\r\n\r\n imgResult = cv2.bitwise_and(img,img,mask=mask) #show our picked color range\r\n\r\n # cv2.imshow(\"jatin\", img)\r\n # cv2.imshow(\"HSV\", imgHSV)\r\n # cv2.imshow(\"Mask\", mask)\r\n # cv2.imshow(\"Final\", imgResult)\r\n\r\n # ======== Call stack fun to show images together\r\n imgStack = stackImages(1,([mask,imgResult],[img,imgHSV]))\r\n cv2.imshow(\"ImageStack\",imgStack)\r\n\r\n key = cv2.waitKey(1)\r\n\r\n if key == ord('q'): # on pressing q it quits the window\r\n break\r\n\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n\r\n","sub_path":"colorDetection_Video.py","file_name":"colorDetection_Video.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"92720967","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom flasgger import Swagger\nfrom flask import Flask, jsonify, redirect, render_template\n\nfrom datetime import datetime, timezone\nfrom dateutil import parser\nimport pytz\n\n\napp = Flask(__name__)\nSwagger(app)\n\n\n@app.route(\"/\")\ndef home():\n \"\"\"/ Route will redirect to API Docs: /apidocs\"\"\"\n\n return redirect(\"/apidocs\")\n\n@app.route(\"/now\", methods = ['GET'])\ndef now():\n \"\"\"Returns local time of cities.\n\n\n GET /api/now\n ---\n responses:\n 200:\n description: Returns local times.\n\n\n \"\"\"\n\n # note that timezone requires python3\n tz_utc = pytz.utc.localize(datetime.utcnow())\n tz_nyc = tz_utc.astimezone(pytz.timezone(\"America/New_York\"))\n tz_chicago = tz_utc.astimezone(pytz.timezone(\"America/Chicago\"))\n tz_berlin = tz_utc.astimezone(pytz.timezone(\"Europe/Berlin\"))\n tz_Sydney = tz_utc.astimezone(pytz.timezone(\"Australia/Sydney\"))\n\n return jsonify(UTC=tz_utc.isoformat(), Berlin=tz_berlin.isoformat(), New_York=tz_nyc.isoformat(), Chicago=tz_chicago.isoformat(), Sydney=tz_Sydney.isoformat())\n\n\n@app.route(\"/normalize/\", methods = ['GET'])\ndef normalize(input_date):\n \"\"\"Returns dates in different formats\n\n\n GET /api/normalize\n ---\n parameters:\n - in: path\n name: input_date\n type: string\n required: true\n description: Date that shall get normalized\n responses:\n 200:\n description: Returns dates in different formats.\n\n\n \"\"\"\n\n if input_date.isdigit():\n ts = input_date\n dt = datetime.fromtimestamp(float(input_date)).strftime(\"%Y-%m-%d\") \n \n return jsonify(timestamp=input_date, day=dt)\n\n else:\n d = parser.parse(input_date)\n dt = d.strftime(\"%Y-%m-%d\")\n # timezone requires python3\n ts = parser.parse(input_date).replace(tzinfo=timezone.utc).timestamp()\n return jsonify(input=input_date, day=dt, timestamp=ts)\n\n\n# Runs server\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host = \"0.0.0.0\", port = 80)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"481111081","text":"import sys\r\nimport csv\r\nimport time\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nfrom torchvision.models import resnet101\r\nimport torchvision.transforms as transform\r\nfrom torch.autograd.gradcheck import zero_gradients\r\nfrom torch.autograd import 
Variable\r\nfrom scipy.misc import imsave\r\nfrom PIL import Image\r\nimport pandas as pd \r\nimport os\r\n\r\nepsilon = 0.06\r\nstep_alpha = 0.01\r\neps = 2 * 8 / 225. \r\nsteps = 40\r\n\r\nmean=[0.485, 0.456, 0.406]\r\nstd=[0.229, 0.224, 0.225]\r\n\r\nfolder = sys.argv[1]\r\nlabelpath = \"labels.csv\"\r\noutput_path = sys.argv[2]\r\n\r\nmodel = resnet101(pretrained=True)\r\nmodel.eval()\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\ntransform1 = transform.Compose([\r\n\t\ttransform.ToTensor(), # range [0, 255] -> [0.0,1.0]\r\n\t\ttransform.Normalize(mean=[0.485, 0.456, 0.406],\r\n\t\t\t\t\t\tstd=[0.229, 0.224, 0.225])\r\n\t\t]\r\n)\r\ntrans_PIL = transform.ToPILImage()\r\n# invTrans = transform.Compose([transform.Normalize(mean = [0., 0., 0.],\r\n# \t\t\t\t\t\t\t\t\t\t\t\t\t std = [1/0.229, 1/0.224, 1/0.225]),\r\n# \t\t\t\t\t\t\t\ttransform.Normalize(mean = [-0.485, -0.456, -0.406],\r\n# \t\t\t\t\t\t\t\t\t\t\t\t\t std = [1., 1., 1.]),\r\n# \t\t\t\t\t\t\t ])\r\nclass UnNormalize(object):\r\n\tdef __init__(self, mean, std):\r\n\t\tself.mean = mean\r\n\t\tself.std = std\r\n\r\n\tdef __call__(self, tensor):\r\n\t\tfor t, m, s in zip(tensor, self.mean, self.std):\r\n\t\t\tt.mul_(s).add_(m)\r\n # The normalize code -> t.sub_(m).div_(s)\r\n\t\treturn tensor\r\n\r\nunorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\r\n\r\ndef load_data(folder_path=folder, labels_path=labelpath):\r\n\timg_list = []\r\n\tfor i in range(200):\r\n\t\timg_path = os.path.join(folder_path, str(i).zfill(3) + \".png\")\r\n\t\timg = np.asarray(Image.open(img_path))\r\n\t\timg_list.append(img)\r\n\r\n\timg_list = np.array(img_list)\r\n\t#np.save(img_list_path, img_list)\r\n\r\n\tlabeltx = pd.read_csv(labelpath, sep=',')\r\n\tlabels = labeltx.values[:,3:4]\r\n\tlabels = np.array(labels, dtype = 'int32')\r\n\t#print(labels)\r\n\r\n\treturn img_list, labels\r\n\r\ndef fgsm(x, label):\r\n\tx = transform1(x)\r\n\tx = x.unsqueeze(0)\r\n\tx.requires_grad = True\r\n\r\n\t#for step in range(steps):\r\n\tzero_gradients(x)\r\n\t\t\r\n\toutput = model(x)\r\n\tloss = criterion(output, label)\r\n\tloss.backward() \r\n\r\n\t#fgsm \r\n\tsign_data_grad = torch.sign(x.grad.data)\r\n\tperturbed_image = x + epsilon*sign_data_grad\r\n\r\n\t#perturbed_image = unorm(perturbed_image)\r\n\tperturbed_image = perturbed_image.mul(torch.FloatTensor(std).view(3, 1, 1)).add(torch.FloatTensor(mean).view(3, 1, 1))\r\n\tperturbed_image = torch.clamp(perturbed_image, 0, 1)\r\n\r\n\treturn perturbed_image\r\n\r\nif __name__ == '__main__':\r\n\tx,labels = load_data()\r\n\t# x = torch.FloatTensor(x)\r\n\tlabels = torch.LongTensor(labels)\r\n\t\t\t\r\n\tif not os.path.exists(output_path):\r\n \t\tos.makedirs(output_path)\r\n\tfor i in range(labels.shape[0]):\r\n\t\tprint('for index: ', str(i))\r\n\t\timg_path = os.path.join(output_path, str(i).zfill(3) + \".png\")\r\n\t\timg = fgsm(x[i],labels[i])\r\n\t\t#img_save = (img.detach().numpy()) * 255\r\n\t\timg = torch.squeeze(img)\r\n\t\t#print(img)\r\n\t\timg_save = trans_PIL(img)\r\n\t\timsave(img_path, img_save)\r\n","sub_path":"hw5/fgsm_res101_pytorch.py","file_name":"fgsm_res101_pytorch.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"129550893","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport articles.models\nimport django.utils.timezone\nfrom django.conf import settings\nimport ckeditor_uploader.fields\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('sites', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100)),\n ('slug', models.SlugField(unique_for_year=b'publish_date')),\n ('keywords', models.TextField(help_text='If omitted, the keywords will be the same as the article tags.', blank=True)),\n ('description', models.TextField(help_text=\"If omitted, the description will be determined by the first bit of the article's content.\", blank=True)),\n ('markup', models.CharField(default=b'h', help_text='Select the type of markup you are using in this article.\\n', max_length=1, choices=[(b'h', 'HTML/Plain Text'), (b'm', 'Markdown'), (b'r', 'ReStructured Text'), (b't', 'Textile')])),\n ('content', ckeditor_uploader.fields.RichTextUploadingField()),\n ('rendered_content', models.TextField()),\n ('auto_tag', models.BooleanField(default=True, help_text='Check this if you want to automatically assign any existing tags to this article based on its content.')),\n ('publish_date', models.DateTimeField(default=django.utils.timezone.now, help_text='The date and time this article shall appear online.')),\n ('expiration_date', models.DateTimeField(help_text='Leave blank if the article does not expire.', null=True, blank=True)),\n ('is_active', models.BooleanField(default=True)),\n ('login_required', models.BooleanField(help_text='Enable this if users must login before they can read this article.')),\n ('use_addthis_button', models.BooleanField(default=True, help_text='Check this to show an AddThis bookmark button when viewing an article.', verbose_name='Show AddThis button')),\n ('addthis_use_author', models.BooleanField(default=True, help_text=\"Check this if you want to use the article author's username for the AddThis button. 
Respected only if the username field is left empty.\", verbose_name=\"Use article author's username\")),\n ('addthis_username', models.CharField(default=None, help_text='The AddThis username to use for the button.', max_length=50, verbose_name='AddThis Username', blank=True)),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ('followup_for', models.ManyToManyField(help_text='Select any other articles that this article follows up on.', related_name='followups', to='articles.Article', blank=True)),\n ('related_articles', models.ManyToManyField(related_name='related_articles_rel_+', to='articles.Article', blank=True)),\n ('sites', models.ManyToManyField(to='sites.Site', blank=True)),\n ],\n options={\n 'ordering': ('-publish_date', 'title'),\n 'get_latest_by': 'publish_date',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ArticleStatus',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('ordering', models.IntegerField(default=0)),\n ('is_live', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('ordering', 'name'),\n 'verbose_name_plural': 'Article statuses',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Attachment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('attachment', models.FileField(upload_to=articles.models.upload_to)),\n ('caption', models.CharField(max_length=255, blank=True)),\n ('article', models.ForeignKey(related_name='attachments', to='articles.Article')),\n ],\n options={\n 'ordering': ('-article', 'id'),\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=64)),\n ('slug', models.CharField(max_length=64, unique=True, null=True, blank=True)),\n ],\n options={\n 'ordering': ('name',),\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='article',\n name='status',\n field=models.ForeignKey(to='articles.ArticleStatus'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='tags',\n field=models.ManyToManyField(help_text='Tags that describe this article, comma separated when input into admin.', to='articles.Tag', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"articles/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"584617842","text":"#Blackjack project\nimport math\nimport random\ndef getcard(n):\n if (n < 0 or n > 51): \n return 'error'\n \n suits = ['c', 'd', 'h', 's']\n rank = n % 13 + 1\n # n // 13 selects the suit; floor division keeps the index an int on Python 2 and 3\n return str(rank) + ' ' + suits[n // 13]\n \n\ndef countpoints(hand):\n total = 0\n acecount = 0\n for i in range(0, len(hand)): \n if (hand[i] == 1):\n acecount += 1\n total += 10\n total += min(hand[i], 10)\n while (total > 21 and acecount > 0): \n total -= 10\n acecount -= 1\n return total\n \n\ndef shuffle(cards):\n # Fisher-Yates: walk backwards, swapping each card with a random one at or before it\n length = len(cards)\n for i in range(length - 1, 0, -1): \n j = int(random.random() * (i + 1))\n swap(cards, i, j)\n return cards\n\ndef getstrategy(currenthand, n): \n return (countpoints(currenthand) < n)\n \ndef applystrategy(hand, n):\n return getstrategy(hand, n)\n\n\ndef
swap(a, i, j):\n temp = a[i]\n a[i] = a[j]\n a[j] = temp\n","sub_path":"Blackjack game/blackjack-python.py","file_name":"blackjack-python.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"318753585","text":"########################## A file just for testing syntax etc ########################\n\n\"\"\"\na = \"HEI\"\n\nprint(type(a))\n\na = \"3\"\n\ntry:\n a = float(a)\n print(a)\n print(type(a))\nexcept:\n print(\"did not work\")\n\n\"\"\"\n\"\"\"\nA = [[2, 6], [31, 3], [1,9]]\nprint(A)\nA = sorted(A,key=lambda l:l[0])\nprint(A)\n\ndel A[0]\nprint(A)\n\"\"\"\n\"\"\"\nA.append([3,2])\nprint(A[0][:])\n\nprint(A)\nif 2 in A[1]:\n print(\"Ja\")\nif not [3,4] in A:\n print(\"Nei\")\n\n\nlist1 = [item[0] for item in A]\nprint(list1)\n\n\nlist2 = [3,6,1,8,3]\nprint(list2)\nlist3 = sorted(list2)\nprint(list2)\nprint(list3)\n\n\"\"\"\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\n\nroot = tk.Tk()\nstyle = ttk.Style()\n\nstyle.theme_create( \"MyStyle\", parent=\"alt\", settings={\n \"TNotebook\": {\"configure\": {\"tabmargins\": [2, 5, 2, 0] } },\n \"TNotebook.Tab\": {\"configure\": {\"padding\": [100, 100] },}})\n\nstyle.theme_use(\"MyStyle\")\n\na_notebook = ttk.Notebook(root, width=200, height=200)\na_tab = ttk.Frame(a_notebook)\na_notebook.add(a_tab, text = 'This is the first tab')\nanother_tab = ttk.Frame(a_notebook)\na_notebook.add(another_tab, text = 'This is another tab')\na_notebook.pack(expand=True, fill=tk.BOTH)\nroot.configure(bg='#D68910')\ntk.Button(root, text='Some Text!').pack(fill=tk.X)\n\nroot.mainloop()\n\"\"\"\n\"\"\"\nfrom tkinter import *\n#from tkFileDialog import askopenfilename\nfrom PIL import Image, ImageTk\n\nroot = Tk()\n#root.overrideredirect(True) \n#setting up a tkinter canvas with scrollbars\nframe = Frame(root, bd=2, relief=SUNKEN)\nframe.grid_rowconfigure(0, weight=1)\nframe.grid_columnconfigure(0, weight=1)\n#xscroll = Scrollbar(frame, orient=HORIZONTAL)\n#xscroll.grid(row=1, column=0, sticky=E+W)\n#yscroll = Scrollbar(frame)\n#yscroll.grid(row=0, column=1, sticky=N+S)\ncanvas = Canvas(frame, bd=0)#, xscrollcommand=xscroll.set, yscrollcommand=yscroll.set)\ncanvas.grid(row=0, column=0, sticky=N+S+E+W)\n#xscroll.config(command=canvas.xview)\n#yscroll.config(command=canvas.yview)\nframe.pack(fill=BOTH,expand=1)\n\n#adding the image\n#File = askopenfilename(parent=root, initialdir=\"C:/\",title='Choose an image.')\nimg = Image.open(\"050_100_200_000_025_002.tif\")\nscaleh = img.width/408\nscalev = img.height/508\nprint(scaleh, scalev)\nimg = img.resize((408,508))\nimg = ImageTk.PhotoImage(image=img)\n#img = img.subsample(250) \nw = 10 + img.width()\nh = 10 + img.height()\nroot.geometry(\"%dx%d+0+0\" % (w, h))\ncanvas.create_image(0,0,image=img,anchor=\"nw\")\ncanvas.config(scrollregion=canvas.bbox(ALL), cursor='sb_up_arrow')\nx_coor = []\ny_coor = []\n #function to be called when mouse is clicked\n\n#check that x1-x2 is not 0\ndef line(x1, x2, y1, y2, x):\n return ((y1-y2)/(x1-x2))*(x-x1) + y1\n\n\ndef printcoords(event):\n #outputting x and y coords to console\n x_coor.append(event.x)\n y_coor.append(event.y)\n canvas.create_oval(event.x-2, event.y-2, event.x+2, event.y+2, fill='red')\n if (len(x_coor)==1):\n canvas.config(cursor='sb_down_arrow')\n elif(len(x_coor)==2):\n canvas.config(cursor='sb_right_arrow')\n elif(len(x_coor)==3):\n canvas.config(cursor='sb_left_arrow')\n else:\n root.destroy()\n print (event.x,event.y)\n #mouseclick
event\ncanvas.bind(\"'%cont\n cont+=1\n response['success']=True\n response['data']=program\n response['recordsTotal']=total['count']\n response['recordsFiltered']=total['count']\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo.'\n except:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo más tarde.'\n exc_info=sys.exc_info()\n app.logger.info(traceback.format_exc(exc_info))\n return json.dumps(response)\n\n@bp.route('/getDeprecFiscUsers', methods=['GET','POST'])\n@is_logged_in\ndef getDeprecFiscUsers():\n response={}\n try:\n if request.method=='POST':\n valid,data=GF.toDict(request.form,'post')\n if valid:\n users=db.query(\"\"\"\n select user_id,name\n from system.user\n where company_id=%s\n and enabled in (1,3)\n \"\"\"%data['company_id']).dictresult()\n users.append({'user_id':-1,'name':'Ninguno'})\n response['success']=True\n response['data']=users\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error al intentar obtener los datos, favor de intentarlo de nuevo.'\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo más tarde.'\n except:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo más tarde.'\n exc_info=sys.exc_info()\n app.logger.info(traceback.format_exc(exc_info))\n return json.dumps(response)\n\n@bp.route('/saveDeprecFiscProg',methods=['GET','POST'])\n@is_logged_in\ndef saveDeprecFiscProg():\n response={}\n try:\n if request.method=='POST':\n valid,data=GF.toDict(request.form,'post')\n if valid:\n check=db.query(\"\"\"\n select resolved_by,reviewed_by from tax.wp_content\n where wp_content_id=%s\n \"\"\"%data['wp_content_id']).dictresult()[0]\n resolved=\"\"\n if check['resolved_by']==-1 and data['resolved_by']!=-1:\n resolved=\" ,resolved_by=%s, resolved_date=now()\"%data['resolved_by']\n reviewed=\"\"\n if check['reviewed_by']==-1 and data['reviewed_by']!=-1:\n reviewed=\" ,reviewed_by=%s, reviewed_date=now()\"%data['reviewed_by']\n db.query(\"\"\"\n update tax.wp_content\n set results='%s'\n %s %s\n where wp_content_id=%s\n \"\"\"%(data['results'].encode('utf-8'),resolved,reviewed,data['wp_content_id']))\n response['success']=True\n response['msg_response']='El programa ha sido actualizado.'\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error al intentar obtener los datos, favor de intentarlo de nuevo.'\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo.'\n except:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo más tarde.'\n exc_info=sys.exc_info()\n app.logger.info(traceback.format_exc(exc_info))\n return json.dumps(response)\n\n@bp.route('/saveNewINPC', methods=['GET','POST'])\n@is_logged_in\ndef saveNewINPC():\n response={}\n try:\n if request.method=='POST':\n valid,data=GF.toDict(request.form,'post')\n if valid:\n if data['inpc_id']==-1:\n last_inpc=db.query(\"\"\"\n select year from deprec_fiscal.inpc\n order by year desc limit 1\n \"\"\").dictresult()[0]\n if int(data['year'])==last_inpc['year']+1:\n del data['inpc_id']\n db.insert(\"deprec_fiscal.inpc\",data)\n response['success']=True\n response['msg_response']='El INPC correspondiente al año %s ha sido agregado.'%data['year']\n else:\n response['success']=False\n response['msg_response']='Primero debe ingresar el INPC 
correspondiente al año %s.'%str(last_inpc['year']+1)\n else:\n db.query(\"\"\"\n update deprec_fiscal.inpc\n set ene='{ene}', feb='{feb}', mar='{mar}',abr='{abr}',may='{may}',jun='{jun}',\n jul='{jul}',ago='{ago}',sep='{sep}',oct='{oct}',nov='{nov}',dic='{dic}'\n where inpc_id={inpc_id}\n \"\"\".format(**data))\n response['success']=True\n response['msg_response']='EL INPC ha sido actualizado.'\n\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error al intentar obtener los datos, favor de intentarlo de nuevo.'\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo.'\n except:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo más tarde.'\n exc_info=sys.exc_info()\n app.logger.info(traceback.format_exc(exc_info))\n return json.dumps(response)\n\n@bp.route('/checkLastINPC', methods=['GET','POST'])\n@is_logged_in\ndef checkLastINPC():\n response={}\n try:\n if request.method=='POST':\n valid,data=GF.toDict(request.form,'post')\n if valid:\n inpc=db.query(\"\"\"\n select *\n from deprec_fiscal.inpc\n order by year desc limit 1\n \"\"\").dictresult()[0]\n response['success']=True\n response['data']=inpc\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error al intentar obtener la información, favor de intentarlo de nuevo.'\n else:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo.'\n except:\n response['success']=False\n response['msg_response']='Ocurrió un error, favor de intentarlo de nuevo más tarde.'\n exc_info=sys.exc_info()\n app.logger.info(traceback.format_exc(exc_info))\n return json.dumps(response)\n","sub_path":"views/taxes.py","file_name":"taxes.py","file_ext":"py","file_size_in_byte":49739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"326944720","text":"# want to play a game?\n\nfrom physics.config import MuZeroConfig\nfrom physics.game import AbstractGame\nfrom physics.networks import AbstractNetwork\nfrom physics.networks import SharedStorage\nfrom physics.self_play.mcts import run_mcts, select_action, expand_node, add_exploration_noise\nfrom physics.self_play import Node\nfrom physics.training import ReplayBuffer\n\ndef run_selfplay(config: MuZeroConfig, storage: SharedStorage, replay_buffer: ReplayBuffer, train_episodes: int):\n \"\"\"take the latest network, produces multiple games and save them in the shared replay buffer\"\"\"\n network = storage.latest_network()\n rewards = []\n for _ in range(train_episodes):\n game = play_game(config, network)\n replay_buffer.save_game(game)\n rewards.append(sum(game.rewards))\n\n return sum(rewards) / train_episodes\n\ndef run_eval(config: MuZeroConfig, storage: SharedStorage, eval_episodes: int):\n \"\"\"evaluate MuZero without noise added to the prior of the root and without softmax action selection\"\"\"\n network = storage.latest_network()\n rewards = []\n for _ in range(eval_episodes):\n game = play_game(config, network, train=False)\n rewards.append(sum(game.rewards))\n return sum(rewards) / eval_episodes if eval_episodes else 0\n\ndef play_game(config: MuZeroConfig, network: AbstractNetwork, train: bool=True) -> AbstractGame:\n \"\"\"\n each game is produced by starting at the initial board position, then repeatedly executing MCTS\n to generate moves until the game ends\n :param config:\n :param network:\n :param train:\n :return:\n \"\"\"\n game = 
config.new_game()\n mode_action_select = 'softmax' if train else 'max'\n\n while not game.terminal() and len(game.history) < config.max_moves :\n # at the root of the tree, use the representation function to obtain a hidden state\n # given the current observation\n root = Node(0)\n current_observation = game.make_image(-1)\n expand_node(root, game.to_play(), game.legal_actions(), network.initial_inference(current_observation))\n if train:\n add_exploration_noise(config, root)\n\n # then run a monte carlo tree search using only action sequences and\n # the model learned by the networks\n run_mcts(config, root, game.action_history(), network)\n action = select_action(config, len(game.history), root, network, mode=mode_action_select)\n game.apply(action)\n game.store_search_statistics(root)\n return game","sub_path":"physics/self_play/self_play.py","file_name":"self_play.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"201692958","text":"import os\nfrom threading import Lock\nimport traceback\nimport collections\n\nfrom math import sqrt, sin, cos, atan2\nfrom functools import partial\nfrom profilehooks import profile, coverage\n\nfrom qsrlib.qsrlib import QSRlib, QSRlib_Request_Message\nfrom qsrlib_io.world_trace import Object_State, World_Trace\n\nimport numpy as np\n\n\nfrom opencog.type_constructors import ConceptNode, ListLink, StateLink\nfrom opencog.utilities import initialize_opencog\n\nimport tf\nimport rospy\nfrom rasberry_hri.msg import Action\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Pose, PoseStamped\n\nfrom common.utils import wp2sym, atomspace, db, suppress\n\nfrom bdi_system import BDISystem\nfrom knowledge_base import KnowledgeBase\n\nfrom common.parameters import (\n NS, MINIMUM_DISTANCE, REASONING_LOOP_FREQUENCY,\n PICKER_WIDTH, PICKER_LENGTH, ROBOT_WIDTH, ROBOT_LENGTH, PICKER_SPEED,\n PICKER_UPDATE_FREQUENCY, DIRECTION_PERCEPTION, PICKERS,\n ROBOT_REACTION_SAFETY_MARGIN_MOVING, ROBOT_REACTION_SAFETY_MARGIN_STANDING)\nQUANTIZATION = PICKER_SPEED / PICKER_UPDATE_FREQUENCY * 0.9\n\n\n# from opencog.logger import log\n\n# log.use_stdout()\n# log.set_level(\"DEBUG\")\n\n\nclass Scheduler:\n\n def __init__(self, robot_id):\n rospy.loginfo(\"SCH: Initializing Scheduler\")\n self.shutting_down = False\n self.rate = rospy.Rate(REASONING_LOOP_FREQUENCY)\n self.atomspace = atomspace\n initialize_opencog(self.atomspace)\n self.kb = KnowledgeBase()\n self.sensory_lock1 = Lock()\n self.sensory_lock2 = Lock()\n self.has_reached_50cm = {}\n self.has_reached_100cm = {}\n self.has_reached_150cm = {}\n self.has_reached_200cm = {}\n self.latest_robot_msg = None\n self.latest_actual_robot_msg = None\n self.latest_people_msgs = {}\n self.latest_actual_people_msgs = {}\n self.robot_tracks = {}\n self.people_tracks = {}\n self.directions = {}\n self.latest_people_nodes = {}\n self.latest_distances = {}\n self.qsrlib = QSRlib()\n self.speed = [-1,-1,-1,-1]\n self.human_position_subs = []\n self.robot_id = robot_id\n self.latest_robot_node = None\n self.bdi = BDISystem(self.robot_id, self.kb)\n rospy.on_shutdown(self.shutdown)\n for name in PICKERS:\n self.has_reached_50cm[name] = False\n self.has_reached_100cm[name] = False\n self.has_reached_150cm[name] = False\n self.has_reached_200cm[name] = False\n rospy.loginfo(\"BDI: Subscribing to /{}/posestamped\".format(name))\n self.human_position_subs.append(rospy.Subscriber(\n \"/{}/posestamped\".format(name),\n PoseStamped,\n 
partial(self.human_position_callback, name=name)\n ))\n self.robot_pose_sub = rospy.Subscriber(\n \"/{:}/robot_pose\".format(self.robot_id),\n Pose,\n self.robot_position_coordinate_callback,\n )\n self.robot_sub = rospy.Subscriber(\n \"/{:}/closest_node\".format(self.robot_id),\n String,\n self.robot_position_node_callback,\n )\n self.human_action_sub = rospy.Subscriber(\n \"{}/human_actions\".format(NS),\n Action, self.human_action_callback\n )\n rospy.loginfo(\"SCH: Initialization finished\")\n\n def spin(self):\n \"\"\"\n Blocks until ROS node is shutdown. Yields activity to other threads.\n @raise ROSInitException: if node is not in a properly initialized state\n \"\"\"\n if not rospy.core.is_initialized():\n raise rospy.exceptions.ROSInitException(\n \"client code must call rospy.init_node() first\"\n )\n rospy.logdebug(\n \"node[%s, %s] entering spin(), pid[%s]\",\n rospy.core.get_caller_id(),\n rospy.core.get_node_uri(),\n os.getpid(),\n )\n try:\n while not rospy.core.is_shutdown():\n self.bdi.loop()\n self.rate.sleep()\n rospy.loginfo(\"SCH: at end of spin\")\n except KeyboardInterrupt:\n rospy.logwarn(\"SCH: keyboard interrupt\")\n except rospy.ROSInterruptException:\n rospy.logwarn(\"SCH: remote interrupt\")\n finally:\n self.shutdown()\n\n def shutdown(self):\n if not self.shutting_down:\n rospy.loginfo(\"SCH: Shutting down\")\n self.shutting_down = True\n self.bdi.shutdown()\n self.robot_pose_sub.unregister()\n self.robot_sub.unregister()\n self.human_action_sub.unregister()\n for sub in self.human_position_subs:\n sub.unregister()\n\n def add_position_noise(self, pose, old_pose):\n if old_pose is not None:\n dx = pose.position.x - old_pose.position.x\n dy = pose.position.y - old_pose.position.y\n translation = sqrt(dx*dx + dy*dy)\n q = [old_pose.orientation.x,\n old_pose.orientation.y,\n old_pose.orientation.z,\n old_pose.orientation.w]\n (r, p, old_theta) = tf.transformations.euler_from_quaternion(q)\n q = [pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w]\n (r, p, theta) = tf.transformations.euler_from_quaternion(q)\n pre_movement_rotation = atan2(dy, dx) - old_theta\n post_movement_rotation = theta - (pre_movement_rotation\n + old_theta)\n # calculate standard deviations\n sd_pre_movement_rotation = (\n MOVEMENT_NOISE[0] * abs(pre_movement_rotation)\n + MOVEMENT_NOISE[1] * translation)\n sd_post_movement_rotation = (\n MOVEMENT_NOISE[0] * abs(post_movement_rotation)\n + MOVEMENT_NOISE[1] * translation)\n sd_translation = (\n MOVEMENT_NOISE[2] * translation\n + MOVEMENT_NOISE[3] * (abs(pre_movement_rotation)\n + abs(post_movement_rotation)))\n\n translation += np.random.normal(\n 0, sd_translation * sd_translation)\n pre_movement_rotation += np.random.normal(\n 0, sd_pre_movement_rotation * sd_pre_movement_rotation)\n post_movement_rotation += np.random.normal(\n 0, sd_post_movement_rotation * sd_post_movement_rotation)\n\n pose.position.x = old_pose.position.x + \\\n translation * cos(old_theta + pre_movement_rotation)\n pose.position.y = old_pose.position.y + \\\n translation * sin(old_theta + pre_movement_rotation)\n pose.orientation.x, pose.orientation.y, pose.orientation.z, \\\n pose.orientation.w = tf.transformations.quaternion_from_euler(\n 0, 0, old_theta + pre_movement_rotation\n + post_movement_rotation)\n return pose\n\n def get_speed(self, positions):\n # for position in positions:\n # rospy.loginfo(position)\n # simple speed estimation from last two log entries\n pos_1 = positions[-1].to_list()\n x1 = pos_1[0]\n y1 = 
pos_1[1]\n t1 = pos_1[2]\n pos_2 = positions[-2].to_list()\n x2 = pos_2[0]\n y2 = pos_2[1]\n t2 = pos_2[2]\n v = sqrt((x1-x2)**2 + (y1-y2)**2)/(t1-t2)\n return v\n\n def robot_position_coordinate_callback(self, pose):\n # rospy.loginfo(\"SCH: Robot position coordinate callback\")\n self.latest_actual_robot_msg = pose\n self.latest_robot_msg = pose\n self.bdi.world_state.set_position(\n self.bdi.me, pose.position.x, pose.position.y, rospy.get_time())\n\n def robot_position_node_callback(self, msg):\n # rospy.loginfo(\"SCH: Robot position node callback\")\n if msg.data != \"none\":\n initialize_opencog(self.atomspace)\n self.latest_robot_node = wp2sym(msg.data)\n self.bdi.world_state.update_position(\n ConceptNode(self.robot_id),\n ConceptNode(self.latest_robot_node),\n )\n else:\n self.latest_robot_node = None\n\n def human_position_callback(self, msg, name):\n timestamp = rospy.get_time()\n initialize_opencog(self.atomspace)\n if not self.sensory_lock2.locked():\n self.sensory_lock2.acquire()\n self._handle_position_msgs(name, msg, timestamp)\n self.sensory_lock2.release()\n if not self.sensory_lock1.locked():\n self.sensory_lock1.acquire()\n person = ConceptNode(name)\n self.bdi.world_state.set_position(\n person, msg.pose.position.x, msg.pose.position.y, timestamp)\n self._update_picker_node(person, msg)\n self.latest_people_msgs[name] = msg\n self._react_to_distance_events(person)\n self.sensory_lock1.release()\n\n def human_action_callback(self, msg):\n if msg.action != \"\":\n initialize_opencog(self.atomspace)\n # msg.person = self.get_closest_human()\n self.bdi.world_state.update_action(msg.person, msg.action)\n\n def get_closest_human(self):\n closest_human = None\n shortest_distance = float(\"inf\")\n for id in self.latest_people_msgs.keys():\n distance = self.bdi.world_state.get_distance(\n ConceptNode(self.robot_id), ConceptNode(id), True)\n if distance < shortest_distance:\n distance = shortest_distance\n closest_human = id\n # rospy.logwarn(\"Closest picker is: {}\".format(closest_human))\n return closest_human\n\n def _update_picker_node(self, person, msg):\n try:\n (current_node, closest_node) = self.bdi.locator.localise_pose(msg)\n # rospy.logwarn(\"\\ncurrent: {:}\\nclosest: {:}\"\n # .format(current_node, closest_node))\n if current_node == \"WayPoint104\":\n self.kb.debug += 1\n if current_node != \"none\":\n latest_node = None\n with suppress(KeyError):\n latest_node = self.latest_people_nodes[person.name][1]\n self.latest_people_nodes[person.name] = (\"is_at\", current_node)\n if current_node != latest_node:\n self.bdi.world_state.update_position(\n person, ConceptNode(current_node)\n )\n elif closest_node is None:\n rospy.logwarn(\n (\"BDI: We have no idea \" \"where {} is currently\").format(\n person.name\n )\n )\n else: # we know the closest but not the current node\n pass\n except TypeError as err:\n rospy.logerr(\n (\"BDI - Couldn't update picker node. 
\" \"Error: {:}\").format(\n err\n )\n )\n\n def _react_to_distance_events(self, person):\n try:\n distance = self.bdi.world_state.get_distance(self.bdi.me, person)\n self.latest_distances[person.name] = distance\n minimum_distance = max(\n MINIMUM_DISTANCE,\n self.bdi.world_state.get_optimum_distance(person)\n )\n try:\n if self.directions[person.name][0] == \"-\":\n minimum_distance += ROBOT_REACTION_SAFETY_MARGIN_MOVING\n else:\n minimum_distance += ROBOT_REACTION_SAFETY_MARGIN_STANDING\n except KeyError:\n minimum_distance += ROBOT_REACTION_SAFETY_MARGIN_STANDING\n if distance <= minimum_distance:\n if not self.bdi.world_state.too_close:\n rospy.loginfo(\n \"BDI: Robot has met picker. Halting at {:.2f}.\"\n .format(distance))\n self.bdi.world_state.too_close = True\n self.bdi.robco.cancel_movement()\n x, y, _ = self.bdi.world_state.get_position(\n self.bdi.me)[-1].to_list()\n db.add_meet_entry(distance, self.speed)\n elif self.bdi.world_state.too_close:\n rospy.loginfo(\"BDI: Robot has left picker.\")\n self.bdi.world_state.too_close = False\n if distance <= 0.5 \\\n and self.has_reached_100cm[person.name] \\\n and not self.has_reached_50cm[person.name]:\n self.has_reached_50cm[person.name] = True\n rospy.loginfo(\"SCH: Distance to {:} is 50cm: {:.2f}\"\n .format(person.name, distance))\n # save half meter distance speed\n speed = self.get_speed(\n self.bdi.world_state.get_position(self.bdi.me))\n if self.speed[3] == -1:\n self.speed[3] = speed\n elif (distance <= 1.0\n and self.has_reached_150cm[person.name]\n and not self.has_reached_100cm[person.name]):\n self.has_reached_100cm[person.name] = True\n rospy.loginfo(\"SCH: Distance to {:} is 100cm: {:.2f}\"\n .format(person.name, distance))\n # save one meter distance speed\n speed = self.get_speed(\n self.bdi.world_state.get_position(self.bdi.me))\n if self.speed[2] == -1:\n self.speed[2] = speed\n elif (distance <= 1.5\n and self.has_reached_200cm[person.name]\n and not self.has_reached_150cm[person.name]):\n self.has_reached_150cm[person.name] = True\n rospy.loginfo(\"SCH: Distance to {:} is 150cm: {:.2f}\"\n .format(person.name, distance))\n # save two meter distance spe.ed\n speed = self.get_speed(\n self.bdi.world_state.get_position(self.bdi.me))\n if self.speed[1] == -1:\n self.speed[1] = speed\n elif (distance <= 2.0\n and not self.has_reached_200cm[person.name]):\n self.has_reached_200cm[person.name] = True\n rospy.loginfo(\"SCH: Distance to {:} is 200cm: {:.2f}\"\n .format(person.name, distance))\n # save two meter distance speed\n speed = self.get_speed(\n self.bdi.world_state.get_position(self.bdi.me))\n if self.speed[0] == -1:\n self.speed[0] = speed\n elif (distance > 2.1\n and self.has_reached_200cm[person.name]):\n self.has_reached_50cm[person.name] = False\n self.has_reached_100cm[person.name] = False\n self.has_reached_150cm[person.name] = False\n self.has_reached_200cm[person.name] = False\n self.speed = [-1,-1,-1,-1]\n # self.bdi.last_distance = distance\n except Exception as err:\n rospy.logerr(\n \"SCH: 275 - Couldn't react to distance events. 
Error: {:}\"\n .format(err)\n )\n\n def _calculate_directions(self, world, pairs):\n dynamic_args = {\n \"qtcbs\": {\n \"quantisation_factor\": QUANTIZATION,\n \"validate\": False,\n \"no_collapse\": False,\n \"qsrs_for\": pairs,\n }\n }\n qsrlib_request_message = QSRlib_Request_Message(\n which_qsr=\"qtcbs\", input_data=world, dynamic_args=dynamic_args\n )\n try:\n qsrlib_response_message = self.qsrlib.request_qsrs(\n qsrlib_request_message\n )\n t = qsrlib_response_message.qsrs.get_sorted_timestamps()[-1]\n for k, v in qsrlib_response_message.qsrs.trace[t].qsrs.items():\n picker = ConceptNode(k.split(\",\")[1])\n directions = v.qsr.get(\"qtcbs\").split(\",\")\n picker_direction = directions[1]\n try:\n self.directions[picker.name].appendleft(picker_direction)\n if self.directions[picker.name].count(picker_direction) == 5 and not picker_direction == \"0\":\n self._handle_direction_change(picker, picker_direction)\n elif abs(self.directions[picker.name].count(\"+\") - self.directions[picker.name].count(\"-\")) < 2:\n self._handle_direction_change(picker, \"0\")\n except KeyError:\n self.directions[picker.name] = collections.deque([picker_direction], maxlen=7)\n except (ValueError) as err:\n pass\n except IndexError as err:\n rospy.logwarn(\"SCH: 414 - Index error: {}\".format(err))\n except KeyError as err:\n rospy.logerr(\"SCH: 416 - Timestamp mismatch error: {}\".format(err))\n\n\n def _handle_direction_change(self, picker, direction):\n if direction == \"+\":\n if not (\n self.bdi.world_state.is_leaving(picker)\n ):\n self.bdi.world_state.leaving(picker).tv = self.kb.TRUE\n rospy.loginfo(\"BDI: Observation: {} is leaving\"\n .format(picker.name))\n elif direction == \"-\":\n if not (\n self.bdi.world_state.is_approaching(picker)\n ):\n self.bdi.world_state.approaching(\n picker).tv = self.kb.TRUE\n rospy.loginfo(\n \"BDI: Observation: {} is approaching\"\n .format(picker.name)\n )\n else:\n if not (\n self.bdi.world_state.is_standing(picker)\n ):\n if (\n self.bdi.world_state.is_approaching(picker)\n ):\n self.bdi.world_state.set_latest_distance(\n picker, self.latest_distances[picker.name]\n )\n rospy.loginfo(\n \"BDI: Observation: {} is stopping at {:.2f}m distance\"\n .format(picker.name, self.latest_distances[picker.name])\n )\n self.bdi.world_state.standing(picker).tv = self.kb.TRUE\n else:\n if not (\n self.bdi.world_state.is_standing(picker)\n ):\n rospy.loginfo(\n \"BDI: Observation: {} is standing\"\n .format(picker.name)\n )\n self.bdi.world_state.standing(picker).tv = self.kb.TRUE\n\n def _handle_position_msgs(self, name, msg, timestamp):\n \"\"\"Abstracts received position messages to the Knowledge Base\"\"\"\n if self.latest_robot_msg is not None:\n world = World_Trace()\n position = Object_State(\n name=self.robot_id,\n timestamp=timestamp,\n x=self.latest_robot_msg.position.x,\n y=self.latest_robot_msg.position.y,\n xsize=ROBOT_WIDTH,\n ysize=ROBOT_LENGTH,\n object_type=\"Person\"\n )\n try:\n self.robot_tracks[name].append(position)\n except KeyError:\n self.robot_tracks[name] = collections.deque([position], maxlen=2)\n world.add_object_state_series(self.robot_tracks[name])\n position = Object_State(\n name=name,\n timestamp=timestamp,\n x=msg.pose.position.x,\n y=msg.pose.position.y,\n xsize=PICKER_WIDTH,\n ysize=PICKER_LENGTH,\n object_type=\"Person\"\n )\n try:\n self.people_tracks[name].append(position)\n except KeyError:\n self.people_tracks[name] = collections.deque([position], maxlen=2)\n world.add_object_state_series(self.people_tracks[name])\n pairs = 
[(self.robot_id, name)]\n if DIRECTION_PERCEPTION:\n self._calculate_directions(world, pairs)\n # if direction changed and picker close and robot stationary, consider saving new preferred distance\n\n def save(self):\n self.bdi.save()\n","sub_path":"src/bdi/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":20618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"296832801","text":"from Linked_List import *\n\"\"\"\nPoint each node's next pointer at the previous element\nto print the linked list elements from tail to head\n\nNote: this approach changes the original structure of the singly linked list\n\"\"\"\n\n\ndef reverse_linked_list(head):\n p_pre = None\n p_cur = head\n i = 0\n while p_cur:\n p_next = p_cur.next\n p_cur.next = p_pre\n p_pre = p_cur\n p_cur = p_next\n i += 1\n head.next = p_pre\n print_list(head, i)\n return True\n\n\ndef print_list(p, length):\n arr = []\n i = 0\n while i < length:\n arr.append(p.next.elem)\n p = p.next\n i += 1\n print(arr)\n return True\n\n\nif __name__ == '__main__':\n linklist = LinkList()\n linklist.create_list_head(10)\n linklist.travel()\n reverse_linked_list(linklist.get_head())\n\n","sub_path":"05_reverse_linked_list.py","file_name":"05_reverse_linked_list.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"456150333","text":"#!/usr/bin/env python2.7\n# coding: utf-8\n\n\nimport os\nfrom os.path import join, exists\nimport time\nfrom collections import defaultdict\nimport cv2\nimport numpy as np\nimport h5py\nfrom common import logger, createDir, getDataFromTxt, getPatch, processImage\nfrom common import shuffle_in_unison_scary\nfrom utils import randomShift, randomShiftWithArgument\n\n\ntypes = [(0, 'LE1', 0.11),\n (0, 'LE2', 0.12),\n (1, 'RE1', 0.11),\n (1, 'RE2', 0.12),\n (2, 'N1', 0.11),\n (2, 'N2', 0.12),\n (3, 'LM1', 0.11),\n (3, 'LM2', 0.12),\n (4, 'RM1', 0.11),\n (4, 'RM2', 0.12),]\nfor t in types:\n d = '/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s' % t[1]\n createDir(d)\n\ndef generate(ftxt, mode, argument=False):\n \"\"\"\n Generate Training Data for LEVEL-3\n mode = train or test\n \"\"\"\n data = getDataFromTxt(ftxt)\n\n trainData = defaultdict(lambda: dict(patches=[], landmarks=[]))\n for (imgPath, bbox, landmarkGt) in data:\n img = cv2.imread(imgPath, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n assert(img is not None)\n logger(\"process %s\" % imgPath)\n\n landmarkPs = randomShiftWithArgument(landmarkGt, 0.01)\n if not argument:\n landmarkPs = [landmarkPs[0]]\n\n for landmarkP in landmarkPs:\n for idx, name, padding in types:\n patch, patch_bbox = getPatch(img, bbox, landmarkP[idx], padding)\n patch = cv2.resize(patch, (15, 15))\n patch = patch.reshape((1, 15, 15))\n trainData[name]['patches'].append(patch)\n _ = patch_bbox.project(bbox.reproject(landmarkGt[idx]))\n trainData[name]['landmarks'].append(_)\n\n for idx, name, padding in types:\n logger('writing training data of %s'%name)\n patches = np.asarray(trainData[name]['patches'])\n landmarks = np.asarray(trainData[name]['landmarks'])\n patches = processImage(patches)\n\n shuffle_in_unison_scary(patches, landmarks)\n\n with h5py.File('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5'%(name, mode), 'w') as h5:\n h5['data'] = patches.astype(np.float32)\n h5['landmark'] = landmarks.astype(np.float32)\n with open('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.txt'%(name, mode), 'w') as fd:\n fd.write('/home/tyd/下载/deep_landmark/mydataset/mytrain/3_%s/%s.h5'%(name, mode))\n\n\nif __name__ == '__main__':\n
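# seed NumPy's global RNG from the current time so NumPy-based randomness (e.g. the random landmark shifts) differs between runs\n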
np.random.seed(int(time.time()))\n # trainImageList.txt\n generate('/home/tyd/下载/deep_landmark/cnn-face-data/trainImageList.txt', 'train', argument=True)\n # testImageList.txt\n generate('/home/tyd/下载/deep_landmark/cnn-face-data/testImageList.txt', 'test')\n # Done\n","sub_path":"04-deeplearn_project/06-人脸关键点/00-课上代码/src/data/level3.py","file_name":"level3.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"632089338","text":"import json\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect, JsonResponse, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.db import transaction\n\nfrom alipay.aop.api.domain.AlipayTradePagePayModel import AlipayTradePagePayModel\nfrom alipay.aop.api.request.AlipayTradePagePayRequest import AlipayTradePagePayRequest\n\nfrom ttsx.sx_shopping.models import CartInfo\nfrom ttsx.sx_user.models import UserModel\nfrom ttsx.sx_store.models import GoodsValue\nfrom ttsx.sx_order.models import OrderModel, OrderDetailModel\nfrom ttsx.utils.functions import get_aliapy_client\n\n\n# Submit an order\ndef place_order(request):\n \"\"\"\n After goods are chosen in the cart, create an order and return the order details\n Request the redirect to payment\n Cancel the order\n \"\"\"\n if request.method == 'GET':\n user = request.user\n\n # Get the order items from the shopping cart\n goods_id_list = request.GET.get('goods_id_list', '')\n goods_id_list = json.loads(goods_id_list).get('goods_id', [])\n carts = CartInfo.objects.filter(user=user).select_related('goods')\n carts_list = []\n\n # Create the order; roll back if anything fails\n try:\n with transaction.atomic():\n # Create the order\n order = OrderModel.objects.create(o_user=user)\n\n # Create the order details\n order_details = []\n for good_id in goods_id_list:\n good_cart = carts.get(goods__id=good_id)\n order_detail = OrderDetailModel.objects.create(\n good_id=good_id, \n order=order, \n price=good_cart.goods.g_price, \n count =good_cart.count\n )\n order_details.append(order_detail)\n\n # Update the order\n order.o_freight = 10\n order.o_total = sum([good.get_total for good in order_details])\n order.save()\n except Exception as e:\n return HttpResponse(\"出现错误....\")\n\n # Create the payment link\n alipay_client = get_aliapy_client()\n pay_model = AlipayTradePagePayModel()\n pay_model.out_trade_no = order.o_id\n pay_model.total_amount = order.o_total + order.o_freight\n pay_model.subject = \"测试\"\n pay_model.body = \"支付宝测试\"\n pay_model.product_code = \"FAST_INSTANT_TRADE_PAY\"\n alipay_request = AlipayTradePagePayRequest(biz_model=pay_model)\n aipay_url = alipay_client.page_execute(alipay_request, http_method=\"GET\")\n\n cancel_url = \"\"\n data = {'carts': order_details,\n 'aipay_url': aipay_url,\n \"cancel_url\": cancel_url\n }\n return render(request, 'place_order.html', data)\n\n\n\n# Personal information\ndef user_center_info(request):\n if request.method == 'GET':\n return render(request, 'user_center_info.html')\n\n\n# All orders\ndef user_center_order(request):\n if request.method == 'GET':\n return render(request, 'user_center_order.html')\n\n\n# Shipping address\ndef user_center_site(request):\n # Get the id of the logged-in user\n id = request.user.id\n user_info = UserModel.objects.filter(id=id).first()\n if request.method == 'GET':\n data = {'user_info': user_info}\n return render(request, 'user_center_site.html', data)\n\n if request.method == 'POST':\n recipients = request.POST.get('recipients')\n direction = request.POST.get('direction')\n addressee_p = request.POST.get('addressee_p')\n phone = request.POST.get('phone')\n # Check that the shipping info is complete\n if not all([recipients, direction, addressee_p, phone]):\n data = {'msg': '请填写完整的收货信息!',\n
'user_info': user_info} # keep showing the current address when the submitted form is incomplete\n return render(request, 'user_center_site.html', data)\n user_info.recipients=recipients\n user_info.direction=direction\n user_info.addressee_p=addressee_p\n user_info.phone=phone\n user_info.save()\n data = {'msg': '收货地址添加成功'}\n return HttpResponseRedirect(reverse('order:user_center_site'), data)\n","sub_path":"ttsx/sx_order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"237603759","text":"#!/usr/bin/python\nimport sys, os\n\ndef main(ip_start, ip_end):\n start = ip_start.split(\".\")\n end = ip_end.split(\".\")\n\n for lastGroup in range( int(start[3]), int(end[3])):\n ip = \"%s.%s.%s.%s\" % (start[0],start[1],start[2],lastGroup)\n os.system('host %s' % (ip))\n \n # print start, end\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2])","sub_path":"reversedns.py","file_name":"reversedns.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"229031091","text":"import Errors\n\nclass TemplateDict:\n def __init__(self,templateDict,**items):\n #Template should always have the following format:\n #field_name(str) : default value, acceptable form, length, content type, convert type\n\n #ToDo: check template format\n\n self._template = templateDict\n for key,tmpl in templateDict.iteritems():\n if key not in items:\n items[key] = tmpl[0]\n self.__dict__[key] = None\n self.setItems(**items)\n def __getitem__(self,key):\n return self.__dict__[key]\n def __setitem__(self,key,val):\n self.__dict__[key]=val\n\n def getItems(self):\n return [ v for v in self.__dict__.keys() if not str.startswith(v,'_') ]\n def setItems(self,**items):\n # modify an old dictionary\n dummyNew = dict.fromkeys(items)\n selfKeys = self.getItems() \n for key,val in items.iteritems():\n if key not in selfKeys:\n raise Errors.TemplUndefinedFieldError(key)\n tmpl = self._template[key]\n if tmpl[1] is not None:\n if not isinstance(val,tmpl[1]):\n raise Errors.TemplFieldTypeError(key)\n if tmpl[2] is not None:\n if len(val)!=tmpl[2]:\n raise Errors.TemplFieldLengthError(key,tmpl[2])\n for v in val:\n if not isinstance(v,tmpl[3]):\n raise Errors.TemplFieldContentError(key)\n del dummyNew[key]\n if tmpl[4] is None:\n self.__dict__[key]=val\n else:\n if tmpl[2] is None:\n self.__dict__[key]=tmpl[4](val)\n else:\n self.__dict__[key]=[ tmpl[4](v) for v in val ]\n\n def genDict(self):\n # Generate a regular dictionary\n ret = dict()\n for key in self.getItems():\n ret[key] = self.__dict__[key]\n return ret\n\n def genNestDict(self):\n # Generate a nest compatible dictionary.\n ret = dict()\n for key in self.getItems():\n if isinstance(self.__dict__[key],Distribution):\n ret[key] = self.__dict__[key].genNestDict()\n else:\n ret[key] = self.__dict__[key]\n return ret\n\nclass Distribution:\n _DistTypes = {\n 'const' : ( 'val', ),\n 'linear' : ( 'start', 'end' ),\n 'normal' : ( 'mu', 'sigma' ),\n 'normal_clipped' : ( 'mu', 'sigma', 'low', 'high' ),\n 'normal_clipped_to_boundary' : ( 'mu', 'sigma', 'low', 'high' ),\n 'lognormal' : ( 'mu', 'sigma' ),\n 'lognormal_clipped' : ( 'mu', 'sigma', 'low', 'high' ),\n 'lognormal_clipped_to_boundary' : ( 'mu', 'sigma', 'low', 'high' ),\n 'uniform' : ( 'low', 'high' ),\n 'uniform_int' : ( 'low', 'high' ),\n 'binomial' : ( 'n', 'p' ),\n 'binomial_clipped' : ( 'n', 'p', 'low', 'high' ),\n 'binomial_clipped_to_boundary' : (
'n', 'p', 'low', 'high' ),\n 'gsl_binomial' : ( 'n', 'p' ),\n 'exponential' : ( 'lambda', ),\n 'exponential_clipped' : ( 'lambda', 'low', 'high' ),\n 'exponential_clipped_to_boundary' : ( 'lambda', 'low', 'high' ),\n 'gamma' : ( 'order', 'scale' ),\n 'gamma_clipped' : ( 'order', 'scale', 'low', 'high' ),\n 'gamma_clipped_to_boundary' : ( 'order', 'scale', 'low', 'high' ),\n 'poisson' : ( 'lambda', ),\n 'poisson_clipped' : ( 'lambda', 'low', 'high' ),\n 'poisson_clipped_to_boundary' : ( 'lambda', 'low', 'high' )\n }\n\n def __init__(self,args):\n # __init__(Distribution object)\n # __init__(distType,param1, param2,...):\n self.dist = None\n self.params = None\n self.data = None\n\n from copy import deepcopy\n\n if isinstance(args,Distribution):\n # copy constructor\n self.dist = deepcopy(args.dist)\n self.params = deepcopy(args.params)\n self.data = deepcopy(args.data)\n else:\n if not isinstance(args,(list,tuple)):\n raise Errors.ParamTypeError('args','Distribution, list or tuple')\n if len(args)<2:\n raise Errors.ParamSizeError('args','>= 2')\n dist = args[0]\n params = args[1:]\n\n if not isinstance(dist,str):\n raise Errors.ParamTypeError(\"dist\",\"string\")\n\n distTypes = Distribution._DistTypes\n\n if dist not in distTypes.keys():\n raise Errors.ParamError(\"dist\",\"value\", \\\n \"Undefined distribution type {}\".format(dist))\n self.dist = dist\n\n if len(distTypes[dist])!=len(params):\n raise Errors.ParamSizeError(\"params\",len(distTypes[dist]))\n for v in params:\n if not isinstance(v,(int,float)):\n raise Errors.ParamTypeError(\"params content\",\"int or float\")\n self.params = [ float(v) for v in params ]\n\n def genNestDict(self):\n return self.params[0]\n #if self.dist == 'const' or self.dist == 'linear':\n #return self.params[0]\n #else:\n #ret = {'distribution' : self.dist}\n #for i,p in enumerate(Distribution._DistTypes[self.dist]):\n #ret[p]=self.params[i]\n #return ret\n\n def rand(self,N):\n if self.dist == 'const':\n self.data = [ self.params[0] for i in range(N) ]\n if self.dist == 'linear':\n import numpy as np\n self.data = np.linspace(self.params[0],self.params[1],N).tolist()\n if self.dist == 'uniform':\n from numpy.random import uniform\n self.data = uniform(low=self.params[0],high=self.params[1],size=N).tolist()\n\n\ndef DataSegment(data,t0i=None,t1i=None):\n import numpy as np\n if not isinstance(data,np.ndarray):\n data = np.array(data)\n\n if t0i is None:\n t0i = 0\n if t1i is None:\n t1i = data.shape[-1]\n\n return np.take(data,range(t0i,t1i),data.ndim-1)\n\nclass Time:\n def __init__(self,seconds=0.0, msec=None, minutes=None, hours=None, days=None):\n if isinstance(seconds,str):\n #convert time string to Time object\n res=seconds\n timekeys = ('d','h','\\'','\"')\n timelist = [ 0.0 , 0.0 , 0.0 , 0.0 ]\n for i,k in enumerate(timekeys):\n if k in res:\n splits = res.split(k)\n if len(splits)!=2:\n raise Errors.TimeStrError\n keystr=splits[0].strip()\n if not keystr:\n raise Errors.TimeStrError\n timelist[i] = float(keystr)\n res=splits[1]\n days, hours, minutes, seconds = timelist\n if res.strip():\n msec = float(res)\n self.sec = float(seconds)\n if msec is not None:\n self.sec = self.sec + float(msec) / 1000.0\n if minutes is not None:\n self.sec = self.sec + float(minutes) * 60.0\n if hours is not None:\n self.sec = self.sec + float(hours) * 3600.0\n if days is not None:\n self.sec = self.sec + float(days) * 3600.0 * 24.0\n\n def as_msec(self):\n return self.sec * 1000.0\n\n def __float__(self):\n # return the msec value\n return self.sec\n\n def 
__add__(self,x):\n return Time(seconds = self.sec+float(x))\n\n def __sub__(self,x):\n return Time(seconds = self.sec-float(x))\n\n def __cmp__(self,x):\n return cmp(float(self),float(x))\n\n def __repr__(self):\n import numpy as np\n seconds = self.sec\n sign = \"\" if seconds>=0 else \"-\"\n seconds = abs(seconds)\n\n msec = int(seconds % 1.0 * 100) # hundredths of a second, shown after the decimal point\n sec = int(seconds % 60)\n minutes = int(seconds / 60)\n\n timestr = \"\\\"\"\n if msec > 0:\n timestr = \".{:02d}\".format(msec)+timestr\n if minutes>0:\n timestr = \"{:02d}\".format(sec)+timestr\n else:\n timestr = \"{:d}\".format(sec)+timestr\n if minutes > 0:\n timestr = \"{:d}'\".format(minutes)+timestr\n timestr = sign+timestr\n return timestr\n\nclass fgcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n","sub_path":"Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":9180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"419796290","text":"# Homework: Lesson 5. Task 2\n\n\"\"\"\nCreate a text file (not programmatically), save several lines in it, then\ncount the number of lines and the number of words in each line.\n\"\"\"\n\ncontent = []\nwith open('hw_5_2_in.txt', 'r') as f:\n content = f.readlines()\n print(f\"В файле {len(content)} строк.\")\n\nfor i, s in enumerate(content, 1):\n n = 0\n if len(s) > 0:\n n = 1 + s.count(' ') # Assume there are no repeated spaces and none at the line ends\n print(f\"В {i} строке {n} слов\")\n","sub_path":"Lesson_5/hw_5_2.py","file_name":"hw_5_2.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"44875254","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:lichengbing\n\nimport time\nimport os\nimport json\n\nfrom conf import setting\nfrom src import log\n\n\ndef main():\n \"\"\"\n If today is the 11th and the card's available balance is below the credit limit, create a debt record\n If last month has a debt record that is still unpaid, accrue interest daily and deduct it from the available balance\n Bills are generated under each user's folder, in record\n :return:\n \"\"\"\n struct_time = time.localtime()\n user_list = os.listdir(setting.USER_DIR)\n\n for user in user_list:\n user_dic = json.load(open(os.path.join(setting.USER_DIR, user, 'user_base.json'), 'r'))\n for item in user_dic['debt']:\n if item['left_debt'] != 0:\n interest = item['total_debt'] * 0.0005\n user_dic['save'] -= (interest + item['left_debt'])\n item['left_debt'] = user_dic['save']\n logger_obj = log.get_logger(user, struct_time)\n logger_obj.info(\"欠款利息:%.2f (账单日期:%s 总欠款:%.2f 未还款:%.2f)\" %\n (interest, item['date'], item['total_debt'], item['left_debt']))\n\n json.dump(user_dic, open(os.path.join(setting.USER_DIR, user, 'user_base.json'), 'w'))\n\n if struct_time.tm_mday == 11 and user_dic['balance'] < user_dic['card_limit']:\n date = time.strftime('%Y-%m-%d')\n dic = {\n 'date': date,\n 'total_debt': user_dic['card_limit'] - user_dic['balance'],\n 'left_debt': user_dic['card_limit'] - user_dic['balance']\n }\n user_dic['debt'].append(dic)\n user_dic['balance'] = user_dic['card_limit']\n json.dump(user_dic, open(os.path.join(setting.USER_DIR, user, 'user_base.json'), 'w'))","sub_path":"day5/atm/src/atm_bank.py","file_name":"atm_bank.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"338330031","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom openpyxl import Workbook\nfrom
openpyxl.worksheet.datavalidation import DataValidation\n\nwb = Workbook()\nws = wb.active\n\ndv = DataValidation(type=\"list\", formula1='\"Dog,Cat,Bat\"', allow_blank=True)\ndv.error ='Your entry is not in the list'\ndv.errorTitle = 'Invalid Entry'\ndv.prompt = 'Please select from the list'\ndv.promptTitle = 'List Selection'\nws.add_data_validation(dv)\nc1 = ws[\"A1\"]\nc1.value = \"Dog\"\ndv.add(c1)\nc2 = ws[\"A2\"]\nc2.value = \"An invalid value\"\ndv.add(c2)\ndv.ranges.append('B1:B1048576')\nwb.save(\"test3.xlsx\")","sub_path":"ML_by_zhouzhihua/openpyxl_validation.py","file_name":"openpyxl_validation.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"280444039","text":"\"\"\"\n171. Excel Sheet Column Number\nGiven a column title as it appears in an Excel sheet, return its corresponding column number.\nFor example,\n A -> 1\n B -> 2\n C -> 3\n ...\n Z -> 26\n AA -> 27\n AB -> 28\n ...\nExample 1:\nInput: \"A\"\nOutput: 1\nExample 2:\n\nInput: \"AB\"\nOutput: 28\nExample 3:\n\nInput: \"ZY\"\nOutput: 701\nCredits:\nSpecial thanks to @ts for adding this problem and creating all test cases.\n\"\"\"\n\n\nclass Solution:\n def titleToNumber(self, s: str) -> int:\n if not s:\n return\n total = 0\n for idx, c in enumerate(reversed(s)):\n total += (ord(c) - ord('A') + 1) * (26 ** idx)\n return total\n","sub_path":"titleToNumber_171.py","file_name":"titleToNumber_171.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"135935822","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nfrom django.conf import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'scraper.views.home'),\n url(r'^scraper/', include('scraper.urls')),\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT,\n }),\n )\n","sub_path":"helpout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"274843846","text":"\"\"\"OpenAQ Air Quality Dashboard with Flask.\"\"\"\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nimport requests\nimport json\nfrom datetime import datetime\nimport openaq\nimport sqlite3\n\nAPP = Flask(__name__)\n\nAPP.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'\nDB = SQLAlchemy(APP)\n\ndef retrieve_data():\n \"\"\"gets data from OpenAQ api and returns as a string of tuples\"\"\"\n api = openaq.OpenAQ()\n test = api.measurements(city='Los Angeles', parameter='pm25')\n number, dict = [i for i in test]\n results_dict = dict['results']\n date_dict = [i['date'] for i in results_dict]\n utc_list = [i['utc'] for i in date_dict]\n value_list = [i['value'] for i in results_dict]\n tuples_list = list(zip(utc_list, value_list))\n return str(tuples_list)\n\ndef retrieve_data2():\n \"\"\"gets data from OpenAQ api and returns as a list of tuples\n used to insert into the database\"\"\"\n api = openaq.OpenAQ()\n test = api.measurements(city='Los Angeles', parameter='pm25')\n number, dict = [i for i in test]\n results_dict = dict['results']\n date_dict = [i['date'] for i in results_dict]\n utc_list = [i['utc'] for i in date_dict]\n value_list = [i['value'] for i in results_dict]\n tuples_list = list(zip(utc_list, value_list))\n return tuples_list\n\n\nclass Record(DB.Model):\n id = DB.Column(DB.Integer, primary_key=True)\n datetime =
DB.Column(DB.String(25))\n value = DB.Column(DB.Float, nullable=False)\n\n # def __repr__(self, id=id, datetime=datetime, value=value):\n # # write a nice representation of Records'\n # self.id = id\n # self.datetime = str(datetime)\n # self.value = value\n # return '{}, {}, {}'.format(self.id, self.datetime, self.value)\n # # i tried returning in a nice format\n # # this didn't work\n\n\n@APP.route('/refresh')\ndef refresh():\n \"\"\"Pull fresh data from Open AQ and replace existing data.\"\"\"\n DB.drop_all()\n DB.create_all()\n api_data = retrieve_data2()\n\n # enter the data into the sqlite3 database\n conn = sqlite3.connect('db.sqlite3')\n c = conn.cursor()\n c.executemany('INSERT INTO Record (datetime,value) VALUES (?,?)', api_data)\n conn.commit()\n conn.close()\n DB.session.commit()\n\n return 'Data refreshed!'\n\n@APP.route('/')\ndef root():\n \"\"\"Base view.\"\"\"\n data = retrieve_data()\n\n # Get Records greater than or equal to 10\n def greater_than_10():\n conn = sqlite3.connect('db.sqlite3')\n c = conn.cursor()\n query = 'SELECT * FROM Record WHERE value >=10;'\n answer = c.execute(query).fetchall()\n return str(answer)\n\n filtered_records = greater_than_10()\n string = \"\"\" The data is: \\n {}\n \\n\\n\n The records with values greater than or equal to 10 are: \\n {}\"\"\".format(data, filtered_records)\n return string\n\nif __name__ == '__main__':\n APP.run(port = 5000, debug=True)","sub_path":"SC/aq_dashboard.py","file_name":"aq_dashboard.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"121592070","text":"from PIL import Image, ImageDraw\n\ndata = []\nwith open('q22.01.txt') as f:\n for l in f:\n data.append(list(l.replace('\\n', '')))\n\nim = Image.new('RGB', (len(data)*10, len(data)*10), (255, 255, 255))\ndraw = ImageDraw.Draw(im)\nfor i in range(len(data)):\n for j in range(len(data[i])):\n if int(data[i][j]) == 1:\n draw.rectangle((i*10, j*10, i*10+10, j*10+10), fill=(0, 0, 0))\nim.save('QR.jpg', quality=95)","sub_path":"q22.py","file_name":"q22.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"451737978","text":"from __future__ import print_function\nimport sys\nimport datetime\nfrom operator import add\nfrom pyspark import SparkContext\n\n# set up the spark context for the whole module\nsc = SparkContext(appName=\"PythonTaxiTask2\")\n\n# define the main function and its inputs\ndef main(inputTaxis, inputPoi, output): \n # make sure we are getting the needed arguments\n if len(sys.argv) != 4:\n print(\"Please supply taxi input, poi input, and output arguments\", file=sys.stderr)\n exit(-1) \n # error \n # UnicodeEncodeError: 'cp949' codec can't encode character '\\xe9' in position 1586: illegal multibyte sequence\n # UnicodeEncodeError: 'charmap' codec can't encode character '\\u0302' in position 952: character maps to \n \n # to fix the encode error,\n # use_unicode=False)\n # x.decode(\"iso-8859-1\").split('||')) \\\n \n # keep only records whose dropoff time falls within 8 ~ 11 am\n def morningTime(value):\n date = value.split(' ')\n time = date[1].split(':')\n hour = int(time[0])\n if hour >= 8 and hour < 11:\n return True\n return False\n \n # listline [3] = dropoff_datetime\n # listline [8] = dropoff_longitude\n # listline [9] = dropoff_latitude\n \n # remove lines if they don't have 17 values\n # also if values are empty.\n def correctFormat(listline):\n if(len(listline) == 17):\n
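# try to parse the dropoff time and coordinates; rows whose fields fail to parse are dropped by the except below\n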
try:\n time = listline[3]\n longi = float(listline[8])\n lati = float(listline[9])\n except Exception:\n return\n \n if longi and lati and time: #is not empty and\n if longi !=0.0 and lati != 0.0: #if value is not 0.\n if morningTime(time):\n return listline\n \n def isfloat(value):\n try:\n float(value)\n return True\n except:\n return False\n\n # listplace[0] = latitude\n # listplace[1] = longitude\n # listplace[2] = name of POI\n \n def correctPoint(listplace):\n lati = listplace[0]\n longi = listplace[1]\n nameplace = listplace[2]\n \n # if lati and longi are floats and nameplace is not empty.\n if isfloat(lati) and isfloat(longi) and nameplace:\n return listplace\n \n def getCellID(lat, lon):\n return (str(round(lat, 2)) + \" & \"+str(round(lon, 2)))\n \n #daytime.\n #weekday() Return the day of the week as an integer, where Monday is 0 and Sunday is 6.\n def getDay(value):\n dateform = value.split(' ')\n date = dateform[0].split('-')\n day = datetime.date(int(date[0]), int(date[1]), int(date[2]))\n return day.weekday()\n \n # value[1] is a day; weekday() == 6 is Sunday (see note above). \n def isSunday(value):\n if value[1] == 6:\n return value\n \n # 1. filter out\n # 2. get only Cell ID and day\n filteredTaxi = inputTaxis.map(lambda x: x.decode(\"iso-8859-1\").split(',')) \\\n .filter(correctFormat) \\\n .map(lambda x: (getCellID(float(x[9]), float(x[8])), getDay(x[3]) ) ) \\\n \n # map point of interest with location and name\n #1. map the input and get the cell ID\n #2. reduce function that add up the list of place if the location are the same\n #3. sort for fast lookup.\n placelist = inputPoi.map(lambda x: x.decode(\"iso-8859-1\").split('||')) \\\n .filter(correctPoint) \\\n .map(lambda x: (getCellID(float(x[0]), float(x[1])), x[2]) ) \\\n .reduceByKey(lambda a,b : a + ', ' +b) \\\n .sortByKey() \\\n .collectAsMap()\n\n # get top 20 Sunday taxi\n sundayTaxi = filteredTaxi.filter(isSunday) \\\n .map(lambda x: (x[0], 1)) \\\n .reduceByKey(add) \\\n .map(lambda x: (x[1], x[0])) \\\n .top(20)\n \n # get top 20 week taxi (every day except Sunday)\n # different way to filter\n weekTaxi = filteredTaxi.filter(lambda x: x[1] != 6) \\\n .map(lambda x: (x[0], 1)) \\\n .reduceByKey(add) \\\n .map(lambda x: (x[1],x[0])) \\\n .top(20)\n \n def lookforplaces(value):\n lookupv = placelist.get(value)\n if lookupv:\n return str(lookupv.encode('utf-8'))\n return ''\n \n # generate the sunday sets\n sundaySet = set() \n for taxilist in sundayTaxi:\n # make a tuple of 3 items;
get the name of the point of interest\n        temptuple = (taxilist[1], taxilist[0], lookforplaces(taxilist[1]))\n        # add to set\n        sundaySet.add(temptuple)\n\n    # generate the weekday sets\n    weekSet = set()\n    print('Week Location top 20')\n    for taxilist in weekTaxi:\n        temptuple = (taxilist[1], taxilist[0], lookforplaces(taxilist[1]))\n        weekSet.add(temptuple)\n\n    # Write the results of the processing back to S3; using parallelize here, so we don't have to use Boto3 or another py library to write to s3\n    sc.parallelize(weekSet).saveAsTextFile(output+\"weekDay\")\n    sc.parallelize(sundaySet).saveAsTextFile(output+\"sunday\")\n    \nif __name__ == \"__main__\":\n\n    inputTaxis = sc.textFile(sys.argv[1], 1, use_unicode=False)\n    inputPoi = sc.textFile(sys.argv[2], 1, use_unicode=False)\n    output = sys.argv[3]\n\n    main(inputTaxis, inputPoi, output)\n    \n    ","sub_path":"Assignment3/SPARK-Examples-CS755/Tasks/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"528712473","text":"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\n\nSite = 'Projects . Elkhorn.io'\n\nTimezone = 'Pacific/Honolulu'\n\n\n # - System\nimport os\nimport cgi\nimport urllib\nimport wsgiref.handlers\nimport datetime\nimport json, ast\nimport sys,imp\nimport pytz  # used below for timezone-aware timestamps\n # - Appengine\nfrom google.appengine.api import users\nfrom google.appengine.api import mail\nfrom google.appengine.api import images\nfrom urlparse import urlparse\n # -\nfrom google.appengine.ext import ndb\nimport webapp2\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp import blobstore_handlers\nfrom google.appengine.ext.webapp.util import run_wsgi_app\n\n\nimport _html as _html\n\n\n\n\n\n#----------------------------------------------#\n#          Completed Data Structure            #\n#----------------------------------------------#\nclass People_db(ndb.Model):\n    data_id = ndb.StringProperty()\n#\n    user_id = ndb.StringProperty()\n    user_email = ndb.StringProperty()\n#\n    item_id = ndb.StringProperty()\n    item_name = ndb.StringProperty()\n    item_kind = ndb.StringProperty()\n#\n    item_status = ndb.StringProperty()\n    status_date = ndb.StringProperty()\n\n    @classmethod\n    def _get_my_status(cls):\n        client_email = users.get_current_user().email()\n        q = cls.query(cls.user_email == client_email)\n        db_data = []\n        for item in q.iter():\n            db_data.append(item.to_dict(exclude=['user_id','user_email']))\n        return json.dumps(db_data)\n\n\nclass updatePeople_db(webapp2.RequestHandler):\n    def post(self):\n        page_address = self.request.uri\n        base = os.path.basename(page_address)\n        \n        user = users.get_current_user()\n        if user:\n            item_id = self.request.get('item_id')\n            client_email = user.email()\n            key_name = item_id + '_' + client_email\n            item = People_db.get_by_id(key_name)\n            \n            if not item:\n                item = People_db(id=key_name)\n            \n            item.user_id = user.user_id()\n            item.user_email = user.email()\n            item.status_date = datetime.datetime.now(pytz.timezone(Timezone)).strftime(\"%Y/%m/%d %H:%M:%S\")\n            \n\n            item.item_id = self.request.get('item_id')\n            item.item_name = self.request.get('item_name')\n            item.item_kind = self.request.get('item_kind')\n            item.item_status = self.request.get('item_status')\n\n            item.put()\n        \n        self.redirect('/my_info')\n\n\n\n\n#----------------------------------------------#\n#          Completed Data Structure            #\n#----------------------------------------------#\nclass Progress_db(ndb.Model):\n    data_id = ndb.StringProperty()\n#\n    user_id = ndb.StringProperty()\n    user_email = ndb.StringProperty()\n#\n    item_id = ndb.StringProperty()\n    item_name = ndb.StringProperty()\n    item_kind = ndb.StringProperty()\n#\n    item_status = ndb.StringProperty()\n    status_date = ndb.StringProperty()\n\n    @classmethod\n    def _get_my_status(cls):\n        client_email = users.get_current_user().email()\n        q = cls.query(cls.user_email == client_email)\n        db_data = []\n        for item in q.iter():\n            db_data.append(item.to_dict(exclude=['user_id','user_email']))\n        return json.dumps(db_data)\n\n\nclass updateProgress_db(webapp2.RequestHandler):\n    def post(self):\n        page_address = self.request.uri\n        base = os.path.basename(page_address)\n        \n        user = users.get_current_user()\n        if user:\n            item_id = self.request.get('item_id')\n            client_email = user.email()\n            key_name = item_id + '_' + client_email\n            item = Progress_db.get_by_id(key_name)\n            \n            if not item:\n                item = Progress_db(id=key_name)\n            \n            item.user_id = user.user_id()\n            item.user_email = user.email()\n            item.status_date = datetime.datetime.now(pytz.timezone(Timezone)).strftime(\"%Y/%m/%d %H:%M:%S\")\n            \n\n            item.item_id = self.request.get('item_id')\n            item.item_name = self.request.get('item_name')\n            item.item_kind = self.request.get('item_kind')\n            item.item_status = self.request.get('item_status')\n\n            item.put()\n        \n        self.redirect('/my_progress')\n\n\n\n\n\n\nclass publicSite(webapp2.RequestHandler):\n    def get(self):\n        # - URL Parse\n        page_address = self.request.uri\n        uri = urlparse(page_address)\n        path = uri[2] # - uri.path\n        layers = path.split('/')\n        path_layer = layers[1]\n        base = os.path.basename(page_address)\n        # - user\n        user = users.get_current_user()\n        if users.get_current_user(): # - logged in\n            login_key = users.create_logout_url(self.request.uri)\n            gate = 'Sign out'\n            user_name = user.nickname()\n        else: # - logged out\n            login_key = users.create_login_url(self.request.uri)\n            gate = 'Sign in'\n            user_name = 'No User'\n        # - app data\n        \n        html_file = 'main_layout.html'\n\n        page_html = _html.front_page\n        page_id = ''\n        page_name = 'Front Page'\n        nav_select = ''\n        \n        \n        # -\n        if path_layer == 'my_info':\n            page_html = _html.user_page + _html.account_page\n            page_id = 'my_info'\n            page_name = 'My Info'\n            nav_select = 'my_info'\n            user_header = 'on'\n\n\n        # - template\n        objects = {\n\n            'login_key': login_key,\n            'gate': gate,\n            'user_name': user_name,\n            \n            'page_id': page_id,\n            'page_name': page_name,\n            'nav_select': nav_select,\n            \n            'page_html': page_html,\n            \n            \n            \n        }\n        # - render\n        path = os.path.join(os.path.dirname(__file__), 'html/%s' %html_file)\n        self.response.out.write(template.render(path, objects))\n\n\n\n\napp = webapp2.WSGIApplication([ # - Pages\n    ('/', publicSite),\n    \n    ('/my_info', publicSite),\n    ('/my_progress', publicSite),\n    \n    ('/add_people', updatePeople_db),\n    \n    ('/add_progress', updateProgress_db),\n    \n    \n    ('/site_people', publicSite),\n    \n\n], debug=True)\n","sub_path":"webapp_folder/projects-server.py","file_name":"projects-server.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"654300609","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug  3 08:43:33 2020\n\n@author: matthew\n\"\"\"\n\n# input Clarity and BAM dataframes and combine into one dataframe so we can cut based on the blank measurement threshold\n# then calc slope using the same code as in the Augusta comparison script\n# then use the sigma_blank from Mark Rowe and the slope to calculate LOD\n\n# LOD = 3 * sigma_blank / k\n\n# sigma_blank = 
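Before the lod() implementation that follows, a worked numeric sketch of the formula LOD = 3 * sigma_blank / k; the numbers are illustrative only, not measurements:

sigma_blank = 1.8   # std. dev. of the sensor at blank conditions, ug/m3
k = 0.9             # slope of the sensor-vs-FEM linear fit
lod = (3 * sigma_blank) / k
print(lod)  # 6.0 ug/m3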
standard deviation of reference at blank conditions ( blank conditions = < 1 ug/m3 according to Sayahi paper or use the LOD from Mark Rowe at SRCAA for their BAM at )\n# k = the slope of the linear relationship for each PMS sensor versus FEM concentrations\n\nimport numpy as np\nfrom scipy import stats\nimport scipy\nimport copy \n\ndef lod(clarity, bam, threshold):\n \n \n df = copy.deepcopy(clarity)\n df['bam'] = bam['PM2_5']\n \n \n # Calculate slope of clarity node vs reference BAM\n\n #the data\n x=np.array(df.bam)\n y=np.array(df.PM2_5) \n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y) \n print('slope = ' , slope)\n r_squared1 = r_value**2\n print('r^2 = ' , r_squared1)\n\n # determine best fit line\n par = np.polyfit(x, y, 1, full=True)\n slope1=par[0][0]\n \n print('slope1 = ' , slope1)\n \n # intercept1=par[0][1]\n # y1_predicted = [slope1*i + intercept1 for i in x]\n \n \n df = df[df['bam'] < threshold]\n print('Number of Measurements = ' , len(df.index))\n sigma_blank = np.std(df['PM2_5'])\n print('sigma_blank = ' , sigma_blank)\n \n lod = (3*sigma_blank)/slope\n print('Limit of Detection = ' , lod)\n \n return lod","sub_path":"python/analysis/limit_of_detection.py","file_name":"limit_of_detection.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512705914","text":"from .bike24_spider import Bike24Spider\nfrom .bike_components_spider import BikeComponentsSpider\nfrom .bike_discount_spider import BikeDiscountSpider\n\n\nspiders = {\n 'bike24': Bike24Spider,\n 'bike_components': BikeComponentsSpider,\n 'hibike': BikeDiscountSpider,\n}\n\n\ndef get_result_as_text(stores, category, item_name):\n result_list = []\n for store in stores:\n res = spiders[store](category, item_name).run()\n result_list.extend(res)\n\n result_list.sort(key=lambda x: list(x.values())[0]['price'])\n return repr_in_text(result_list)\n\n\ndef repr_in_text(result_list):\n text = ''\n for ind, item in enumerate(result_list, 1):\n name = list(item.keys())[0]\n price = item[name]['price']\n url = item[name]['link']\n text += f'{ind}. 
{price}€ {name} \\n{url[:35]}...\\n'\n\n return text\n","sub_path":"spiders/starting_spiders.py","file_name":"starting_spiders.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"159414023","text":"from referential_array import build_array\r\n\r\nclass arrayStack:\r\n def __init__(self,size):\r\n self.array = build_array(size)\r\n self.maxSize = size\r\n self.top = 0\r\n\r\n def __str__(self):\r\n string = \"Stack:\"\r\n for index in range(self.top):\r\n string += \" \"+str(self.array[index])\r\n return string + \"<- top\"\r\n\r\n def push(self,item):\r\n if self.top >= self.maxSize:\r\n raise StopIteration(\"the stack is full\")\r\n self.array[self.top] = item\r\n self.top+=1\r\n\r\n def pop(self):\r\n if self.top==0:\r\n raise StopIteration(\"Stack is empty\")\r\n self.top -= 1\r\n item = self.array[self.top]\r\n return item\r\n\"\"\"\r\nS = arrayStack(5)\r\n#S.pop() #test passed triggered stop iteration\r\nS.push(20)\r\nprint(S)\r\nS.push(4)\r\nprint(S)\r\nprint(S.pop(),\"and stack is now\",str(S))\r\nS.push(5)\r\nS.push(6)\r\nS.push(7)\r\nS.push(8)\r\nS.push(10000)\r\n\"\"\"\r\n","sub_path":"Data_Structures/arrayStack.py","file_name":"arrayStack.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"35072881","text":"# -*- coding: utf-8 -*-\nfrom shop.settings import redirect_uri\n\n# https://api.weibo.com/oauth2/authorize?client_id=123050457758183&redirect_uri=http://www.example.com/response&response_type=code\n\nclass Weibo:\n def __init__(self, client_id, client_secret=None, code=None):\n self.client_id = client_id\n self.client_secret = client_secret\n self.code = code\n\n def get_weibo_login_code(self):\n weibo_oauth2_url = 'https://api.weibo.com/oauth2/authorize?client_id={}'.format(self.client_id)\n redirect_url = 'http://139.199.123.96:8000/weibo/'\n send_url = weibo_oauth2_url + '&redirect_uri={}'.format(redirect_url)\n return send_url\n\n def get_access_token(self):\n import requests\n api = 'https://api.weibo.com/oauth2/access_token'\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'authorization_code',\n 'code': self.code,\n 'redirect_uri': redirect_uri\n }\n response = requests.post(api, data=data)\n return response.json()\n\n\nif __name__ == '__main__':\n w = Weibo('569009949')\n print(w.get_weibo_login_code())\n","sub_path":"apps/utils/weibo.py","file_name":"weibo.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"450992146","text":"\"\"\"\n476 / 476 test cases passed.\nStatus: Accepted\nRuntime: 78 ms\nYou are here!\nYour runtime beats 15.26% of python submissions.\n\"\"\"\nclass Solution(object):\n def reverseString(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n result = list(s)\n result.reverse()\n return \"\".join(result)\nprint(Solution().reverseString(\"hello\"))","sub_path":"E_344_ReverseString.py","file_name":"E_344_ReverseString.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"86915032","text":"def is_prime(x):\n if x == 1:\n return False\n\n c = 2\n while (c < x):\n if x % c == 0:\n return False \n c += 1\n\n return True\n\nx = int(input())\nprint('S' if is_prime(x) else 
'N')\n","sub_path":"se01/01-2019/day04/nepsacademy-python-bootcamp/primo-simples.py","file_name":"primo-simples.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"554282993","text":"# given a list of numbers a and a multiper b multiply each element of a by b and print a\n# forexample given:\n# a = [11, 14, 17, 20]\n# b = 10\n# output: [110,140,170,200]\n\na = [11, 14, 17, 20]\nb = 10\n\nfor i in range(0, len(a)):\n a[i] = a[i] * b\n\nprint(a)\n","sub_path":"Part 1 CE-CS essentials/source/p11v2.py","file_name":"p11v2.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"295178534","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\nimport tensorflow as tf\nimport math\nimport datetime\nimport utils\nfrom os.path import join as join_path\nfrom termcolor import colored\nfrom tensorflow.python import pywrap_tensorflow\n\ndef convert_to_coverage_model():\n \"\"\"Load non-coverage checkpoint, add initialized extra variables for\n coverage, and save as new checkpoint\"\"\"\n print(\"converting non-coverage model to coverage model..\")\n\n # initialize an entire coverage model from scratch\n sess = tf.Session(config=utils.get_config())\n print(\"initializing everything...\")\n sess.run(tf.global_variables_initializer())\n\n # load all non-coverage weights from checkpoint\n saver = tf.train.Saver([v for v in tf.global_variables() if \"coverage\" not in v.name and \"Adagrad\" not in v.name])\n print(\"restoring non-coverage variables...\")\n curr_ckpt = utils.load_ckpt(saver, sess)\n print(\"restored.\")\n\n # save this model and quit\n new_fname = curr_ckpt + '_cov_init'\n print(\"saving model to %s...\" % (new_fname))\n new_saver = tf.train.Saver()\n # this one will save all variables that now exist\n new_saver.save(sess, new_fname)\n print(\"saved.\")\n exit()\n\n\ndef calc_running_avg_loss(loss, running_avg_loss, step, decay=0.9):\n \"\"\"Calculate the running average loss via exponential decay.\n This is used to implement early stopping w.r.t. a more smooth loss curve than the raw loss curve.\n\n Args:\n loss: loss on the most recent eval step\n running_avg_loss: running_avg_loss so far\n step: training iteration step\n decay: rate of exponential decay, a float between 0 and 1. 
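A framework-free illustration of the exponential decay used by calc_running_avg_loss: each step keeps `decay` of the old average and `(1 - decay)` of the newest loss (the loss values below are made up):

def running_avg(losses, decay=0.9):
    avg = 0
    for loss in losses:
        # first iteration just takes the loss, as in the function above
        avg = loss if avg == 0 else avg * decay + (1 - decay) * loss
    return avg

print(running_avg([4.0, 2.0, 1.0]))  # 4.0 -> 3.8 -> 3.52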
Larger is smoother.\n\n Returns:\n running_avg_loss: new running average loss\n \"\"\"\n if running_avg_loss == 0: # on the first iteration just take the loss\n running_avg_loss = loss\n else:\n running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\n running_avg_loss = min(running_avg_loss, 12) # clip\n loss_sum = tf.Summary()\n tag_name = 'running_avg_loss/decay=%f' % (decay)\n loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\n tf.logging.info('running_avg_loss: %f', running_avg_loss)\n return running_avg_loss\n\n\ndef get_best_loss_from_chpt(val_dir):\n ckpt = tf.train.get_checkpoint_state(val_dir)\n best_loss = None\n if ckpt:\n reader = pywrap_tensorflow.NewCheckpointReader(ckpt.model_checkpoint_path)\n var_to_shape_map = reader.get_variable_to_shape_map()\n best_loss = reader.get_tensor(\n [key for key in var_to_shape_map if \"least_val_loss\" in key][0]).item()\n print(colored(\"the stored best loss is %s\" % best_loss, \"green\"))\n else:\n print(colored(\"check point not found in %s\" % val_dir, \"red\"))\n return best_loss\n\n\ndef save_ckpt(sess, model, best_loss, model_dir, model_saver,\n val_batcher, val_dir, val_saver, global_step):\n \"\"\"\n save model to model dir or evaluation directory\n \"\"\"\n if not val_batcher:\n return None, best_loss\n\n saved = False\n val_save_path = join_path(val_dir, \"best_model\")\n model_save_path = join_path(model_dir, \"model\")\n\n losses = []\n while True:\n val_batch = val_batcher.next_batch()\n if not val_batch:\n break\n results_val = model.run_one_batch(sess, val_batch, update=False, gan_eval=True)\n loss_eval = results_val[\"loss\"]\n # why there exists nan?\n if not math.isnan(loss_eval):\n losses.append(loss_eval)\n else:\n print(colored(\"Encountered a NAN.\", 'red'))\n eval_loss = sum(losses) / len(losses)\n if best_loss is None or eval_loss < best_loss:\n sess.run(model.least_val_loss.assign(eval_loss))\n print(\n 'Found new best model with %.3f evaluation loss. 
Saving to %s %s' %\n (eval_loss, val_save_path,\n datetime.datetime.now().strftime(\"on %m-%d at %H:%M\")))\n val_saver.save(sess, val_save_path, global_step=global_step)\n print(\"Model is saved to\" + colored(\" %s\", 'green') % val_save_path)\n saved = True\n best_loss = eval_loss\n\n if not saved:\n model_saver.save(sess, model_save_path, global_step=global_step)\n print(\"Model is saved to\" + colored(\" %s\", 'yellow') % model_save_path)\n\n return eval_loss, best_loss\n","sub_path":"gen_utils.py","file_name":"gen_utils.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"170335628","text":"from __future__ import print_function\nimport torch\nimport cv2\nimport time\nfrom pylab import plt\nfrom collections import OrderedDict\n\n# weights = \"../weights/ssd300_mAP_77.43_v2.pth\"\n# weights = \"../weights/VOC.pth\"\nweights = \"../models/shufflenetv2_ssd_detach_20181126_iter_60000.pkl\"\nuse_cuda = True\n\ndevice = torch.device('cuda:0' if use_cuda else 'cpu')\n\nCOLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]\nFONT = cv2.FONT_HERSHEY_SIMPLEX\n\nimport sys\nfrom os import path\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nfrom data import BaseTransform, VOC_CLASSES as labelmap\nfrom shufflenetv2_ssd_detach import *\n\n\n# ssd base net\nnet = ShuffleNetV2SSD_Detach(\"test\", 300, 21, 1)\nw = torch.load(weights)\nnw = OrderedDict()\nfor k, v in w.items():\n name = k[7:]\n nw[name] = v\nnet.load_state_dict(nw)\ntransform = BaseTransform(net.size, (104/256.0, 117/256.0, 123/256.0))\n\nnet.to(device)\nnet.eval()\n\n# priorbox\nfrom layers.functions import prior_box, detection\nfrom data.config import *\ncfg = voc_shufflenetv2\n# priorbox\nnet_priorbox = PriorBox(cfg)\nwith torch.no_grad():\n priorboxes = net_priorbox.forward()\npriorboxes = priorboxes.to(device)\n\n# criterion\nout_layer = Detect(21, 0, 200, 0.01, 0.45)\nout_layer.to(device)\nout_layer.eval()\n\n\n\n\ndef predict(frame):\n height, width = frame.shape[:2]\n x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)\n x = x.unsqueeze(0).to(device)\n begin = time.time()\n with torch.no_grad():\n y = net(x) # forward pass\n loc, conf = y\n y = out_layer(loc, conf, priorboxes)\n end = time.time()\n print(\"preidct time: {} ms\".format((end-begin) * 1000))\n detections = y.data\n # scale each detection back up to the image\n scale = torch.Tensor([width, height, width, height])\n for i in range(detections.size(1)):\n j = 0\n while detections[0, i, j, 0] >= 0.6:\n pt = (detections[0, i, j, 1:] * scale).cpu().numpy()\n cv2.rectangle(frame,\n (int(pt[0]), int(pt[1])),\n (int(pt[2]), int(pt[3])),\n COLORS[i % 3], 2)\n cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),\n FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)\n j += 1\n return frame\n\nimg_path = \"/home/beichen2012/dataset/VOCdevkit/VOC2012/JPEGImages/2012_003937.jpg\"\nimg = cv2.imread(img_path, 1)\n\n# for i in range(0,5):\n# frame = predict(img)\n\n\nbegin = time.time()\nimg = predict(img)\nend = time.time()\nprint(\"time cost: {} ms\".format((end-begin) * 1000.0))\nplt.figure()\nplt.imshow(img[:,:,::-1])\nplt.show()\n\n","sub_path":"demo/test_pic_shufflenetv2_ssd_detach.py","file_name":"test_pic_shufflenetv2_ssd_detach.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"369422693","text":"from util.ObjectMap import * #查找元素的模块\nfrom 
util.ParseConfigurationFile import ParseConfigFile\n\nclass HomePage(object):\n\n    def __init__(self, driver):\n        self.driver = driver\n        self.cf = ParseConfigFile()\n\n    def addressLink(self):\n        '''\n        The address book menu element\n        :return:\n        '''\n        by, locator = self.cf.getElementValue('126mail_homePage','homePage.addressbook')\n\n        elementObj = getElement(self.driver, by, locator)\n        return elementObj\n\nif __name__=='__main__':\n    from selenium import webdriver\n    from pageObjects.LoginPage import LoginPage\n    import time\n    driver = webdriver.Firefox()\n    driver.get('https://mail.126.com')\n    login = LoginPage(driver)\n    homePage = HomePage(driver)\n    time.sleep(5)\n    login.switchToFrame()\n    login.userNameObj().send_keys('linux')\n    login.passwordObj().send_keys('chao')\n    login.loginBtnObj().click()\n    login.switchToDefaultFrame()\n    time.sleep(3)\n    homePage.addressLink().click()\n    time.sleep(10)\n    driver.quit()","sub_path":"pageObjects/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"251315964","text":"# import os\n# import csv\nimport numpy as np\n# import pandas as pd\n\nfrom sklearn.metrics import *\n\nimport torch\nfrom torch.utils.data import DataLoader\n\n\ndef compute_metrics(y_true, y_pred):\n    \"\"\"\n    Computes prediction quality metrics.\n\n    Parameters:\n    ----------\n    y_true : 1d array-like, or label indicator array / sparse matrix\n        Ground truth (correct) labels.\n\n    y_pred : 1d array-like, or label indicator array / sparse matrix\n        Predicted labels, as returned by a classifier.\n\n    Returns:\n    --------\n    accuracy : accuracy\n    conf_mat : confusion matrix\n    precision : weighted precision score\n    recall : weighted recall score\n    f1 : weighted f1 score\n    \"\"\"\n    accuracy = accuracy_score(y_true, y_pred)\n    conf_mat = confusion_matrix(y_true, y_pred)\n    precision = precision_score(y_true, y_pred, average='weighted')\n    recall = recall_score(y_true, y_pred, average='weighted')\n    f1 = f1_score(y_true, y_pred, average='weighted')\n    return accuracy, conf_mat, precision, recall, f1\n\n\n\ndef get_dataloader(dataset, batch_size=1):\n    \"\"\"\n    Converts a dataset to a dataloader.\n    \n    Parameters:\n    ----------\n    \n    dataset : a pytorch Dataset\n        Input dataset with columns as features and rows as observations.\n\n    batch_size: int, default=1\n        The batch size.\n\n    \n    Returns:\n    --------\n    dataloader : a pytorch dataloader. \n    \"\"\"\n    \n    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n    \n    return dataloader\n\n\n\ndef sample_vec(vec, n):\n    \"\"\"\n    Subsample a vector uniformly from each level. 
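The sample_vec helper defined here draws n indices per class with replacement; a small self-contained sketch of the same balanced-sampling idea on made-up labels:

import numpy as np

labels = np.array([0, 0, 0, 1, 1, 1, 1])
picked = np.array([], dtype='int')
for val in set(labels.tolist()):
    idx = np.where(labels == val)[0]
    picked = np.append(picked, np.random.choice(idx, 2))  # 2 draws per class
print(labels[picked])  # two entries of each class, e.g. [0 0 1 1]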
Used to subsample datasets with several classes in a balanced manner.\n \n Parameters:\n ----------\n vec : numpy ndarray\n The vector to sample from.\n\n n : int\n Number of samples per level.\n\n Returns:\n --------\n to_ret : a numpy array including indices of the selected subset.\n \"\"\"\n vec_list = vec.tolist()\n vec_list = set(vec_list)\n to_ret = np.array([], dtype='int')\n for val in vec_list:\n ii = np.where(vec == val)[0] \n index = np.random.choice(ii, n)\n to_ret = np.append(to_ret, index)\n return to_ret","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"141714253","text":"class Calculator(object):\r\n def __init__(self, w=None, q=None):\r\n self.w = w\r\n self.q = q\r\n self.object = object\r\n\r\n def power(self, q, w):\r\n if self.q < 0 or self.w < 0:\r\n return 'n and p should be non-negative'\r\n else:\r\n return q ** w\r\n\r\n\r\nmyCalculator = Calculator()\r\nn, p = map(int, input().split())\r\ntry:\r\n ans = myCalculator.power(n, p)\r\n print(ans)\r\nexcept Exception as e:\r\n print(e)\r\n","sub_path":"power_calc.py","file_name":"power_calc.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"626965","text":"import random\n\nrandom.seed(1)\n\ndef make_cars():\n f = open(\"Python_Programming/machine_learning_examples/supervised/Decision_Trees/cars.data\", \"r\")\n cars = []\n for line in f:\n cars.append(line.rstrip().split(\",\"))\n return cars\n\n\ndef change_data(data):\n dicts = [{'vhigh': 1.0, 'high': 2.0, 'med': 3.0, 'low': 4.0},\n {'vhigh': 1.0, 'high': 2.0, 'med': 3.0, 'low': 4.0},\n {'2': 1.0, '3': 2.0, '4': 3.0, '5more': 4.0},\n {'2': 1.0, '4': 2.0, 'more': 3.0},\n {'small': 1.0, 'med': 2.0, 'big': 3.0},\n {'low': 1.0, 'med': 2.0, 'high': 3.0}]\n\n for row in data:\n for i in range(len(dicts)):\n row[i] = dicts[i][row[i]]\n\n return data\n\n\ncars = change_data(make_cars())\nrandom.shuffle(cars)\ncar_data = [x[:-1] for x in cars]\ncar_labels = [x[-1] for x in cars]\n\ntraining_points = car_data[:int(len(car_data) * 0.9)]\ntraining_labels = car_labels[:int(len(car_labels) * 0.9)]\n\ntesting_points = car_data[int(len(car_data) * 0.9):]\ntesting_labels = car_labels[int(len(car_labels) * 0.9):]","sub_path":"machine_learning_examples/supervised/Decision_Trees/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"440023829","text":"from django.conf import settings as django_settings\n\n\ndef get_override_settings(attributes=None):\n \"\"\"\n Overrides settings by attribute keys\n :param attributes: attribute = keys of dictionary\n :return: built override settings\n \"\"\"\n # runtime parameters should be const\n if not attributes:\n attributes = []\n\n # trying to get real settings\n if not django_settings._wrapped:\n # if not real settings calling settings setup\n django_settings._setup()\n\n # getting real settings\n settings = django_settings._wrapped\n\n # building settings\n return build_str({key: val for key, val in settings.__dict__.items() if key in attributes})\n\n\ndef build_str(dictionary_to_build_from):\n \"\"\"\n Building override string\n :param dictionary_to_build_from: what to build from\n :return: built string\n \"\"\"\n # the sting it built\n built = ''\n # iterating over dictionary\n for key, 
val in dictionary_to_build_from.items():\n        # building current setting\n        built += \"{key}={val},\".format(key=key, val=val)\n    # return built without the trailing comma\n    return built[:-1]\n","sub_path":"easy_rest/test_framework/resolvers/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"614162228","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.conf import settings\n\nadmin.autodiscover()\n\nurlpatterns = [\n    url(r'^', include(admin.site.urls)),\n    url(r'^report/', include('report.urls')),\n    url(r'^api/', include('raw_statistics.urls', namespace='raw_statistics')),\n]\n\n# check that the url names match the settings\nfor report_choice in settings.TYPE_REPORT_CHOICES:\n    try:\n        reverse(report_choice[0])\n    except NoReverseMatch as err:\n        raise ImproperlyConfigured(\n            'Error in configuration of url-name[{0}]. Please check '\n            'settings.TYPE_REPORT_CHOICES (the first elem must be equal to the '\n            'url-name)'.format(report_choice[0])\n        )","sub_path":"conf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"15852118","text":"from flask import Flask, jsonify, request\nfrom flask_restful import reqparse\nimport flask_restful\nimport json\nimport pymysql\nfrom kafka import KafkaProducer  # assumes the kafka-python package; used by Order.post()\n\napp = Flask(__name__)\n# app.config[\"DEBUG\"] = True\napi = flask_restful.Api(app)\n\nconfig = {\n    'host': '172.0.0.1',\n    'port': 13306,\n    'user': 'root',\n    'database': 'mydb'\n}\n\n@app.route('/')\ndef index():\n    return \"Welcome to ORDER Microservice!\"\n\n\nclass Order(flask_restful.Resource):\n    def __init__(self):\n        self.conn = pymysql.connect(**config)\n        self.cursor = self.conn.cursor()\n        # the broker address is an assumption for illustration\n        self.producer = KafkaProducer(bootstrap_servers='localhost:9092')\n\n    \n    def get(self, order_id):\n        # run the two lookups separately: pymysql's execute() takes one statement\n        sql_totals = '''SELECT total_qty, total_price FROM orders a \n        INNER JOIN order_detail b ON a.order_id = b.order_id WHERE a.order_id=%s'''\n        sql_menu = '''SELECT menu_name, menu_price FROM order_detail a \n        INNER JOIN menu b ON a.menu_id = b.menu_id WHERE a.order_id=%s'''\n\n        json_data = []\n        for sql in (sql_totals, sql_menu):\n            self.cursor.execute(sql, [order_id])\n            result_set = self.cursor.fetchall()\n            row_headers = [x[0] for x in self.cursor.description]\n            for result in result_set:\n                json_data.append(dict(zip(row_headers, result)))\n\n        return jsonify(json_data)\n\n    def post(self, order_id):\n        json_data = request.get_json()\n        json_data['order_id'] = order_id\n\n        # DB INSERT, one statement per execute\n        self.cursor.execute('INSERT INTO orders(requests) VALUES(%s)', [json_data['requests']])\n        self.cursor.execute('INSERT INTO users(phone_number) VALUES(%s)', [json_data['phone_number']])\n        self.conn.commit()\n\n        # send the json data through the producer instance's send() method\n        self.producer.send('new_orders', value=json.dumps(json_data).encode())\n        self.producer.flush()\n\n\n        response = jsonify(json_data)\n        response.status_code = 201\n\n        return response\n\n\n# <order_id> is passed through to Order.get()/post()\napi.add_resource(Order, '/order-ms/<order_id>/orders')\n\n\nif __name__ == '__main__':\n    app.run()","sub_path":"project_order_test.py","file_name":"project_order_test.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"98996659","text":"from django.contrib.auth.models import User\r\nfrom django.test import TestCase\r\nfrom django.urls 
import reverse\r\n\r\nfrom . import constants\r\nfrom .forms import TradeCoinsForm\r\nfrom .models import MarketData, Wallet\r\n\r\n\r\nclass RegisterUserViewTests(TestCase):\r\n \"\"\" Test if user account and it's wallet are created correcly through the view. \"\"\"\r\n\r\n def setUp(self):\r\n \"\"\" Create user by POST request to the register view. \"\"\"\r\n user_dict = {\r\n 'username': 'test_user',\r\n 'password1': 'assert123',\r\n 'password2': 'assert123',\r\n }\r\n self.client.post(reverse('users:register'), data=user_dict)\r\n\r\n def test_if_user_was_created(self):\r\n \"\"\" Check if user has been created and can log in.\"\"\"\r\n self.assertTrue(self.client.login(username='test_user', password='assert123'))\r\n\r\n def test_if_user_has_wallet(self):\r\n \"\"\" Check if wallet was created along with user account. \"\"\"\r\n wallets = Wallet.objects.all()\r\n self.assertEqual(len(wallets), 1)\r\n\r\n def test_if_wallet_has_cash(self):\r\n \"\"\" Check if created wallet has 100000 cash value. \"\"\"\r\n user = User.objects.get(username='test_user')\r\n wallet = Wallet.objects.get(owner=user)\r\n self.assertEqual(wallet.cash, 100000)\r\n\r\n def test_if_wallet_has_0_coins(self):\r\n \"\"\" Check if created wallet has 0 of each coin. \"\"\"\r\n user = User.objects.get(username='test_user')\r\n wallet = Wallet.objects.get(owner=user)\r\n COINS = constants.COINS\r\n self.assertIsNot(len(COINS), 0)\r\n for coin in COINS:\r\n self.assertEqual(getattr(wallet, coin), 0)\r\n\r\n\r\nclass WalletMethodsTests(TestCase):\r\n \"\"\" Test estimated_value, buy_coins and sell_coins methods. \"\"\"\r\n\r\n def setUp(self):\r\n \"\"\" Create User, Wallet instance with 100000 cash and 10 BTC along with MarketData instance with BTC prices data. \"\"\"\r\n user = User.objects.create_user('user', 'A@example.com', 'test')\r\n wallet = Wallet(owner=user, BTC=10)\r\n wallet.save()\r\n self.wallet = Wallet.objects.get(owner=user)\r\n BTC_data = MarketData(\r\n currency_symbol='BTC',\r\n currency_name='Bitcoin',\r\n buy_price=20000,\r\n sell_price=10000,\r\n )\r\n BTC_data.save()\r\n\r\n def test_wallet_estimated_value(self):\r\n \"\"\" Check if method returns correct value. \"\"\"\r\n est_value = self.wallet.estimated_value()\r\n self.assertEqual(est_value, (100000 + (10 * 10000)))\r\n\r\n def test_buy_coins_method_valid_input(self):\r\n \"\"\" Check if Wallet.buy_coins() does all expected changes in database when user's input is correct. \"\"\"\r\n return_value = self.wallet.buy_coins(\r\n coin_type='BTC',\r\n coin_amount=1,\r\n query_data_price=float('20000.000000'),\r\n )\r\n self.assertEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 80000)\r\n self.assertEqual(self.wallet.BTC, 11)\r\n\r\n def test_sell_coins_method_valid_input(self):\r\n \"\"\" Check if Wallet.sell_coins() does all expected changes in database when user's input is correct. \"\"\"\r\n return_value = self.wallet.sell_coins(\r\n coin_type='BTC',\r\n coin_amount=1,\r\n query_data_price=float('10000.000000'),\r\n )\r\n self.assertEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 110000)\r\n self.assertEqual(self.wallet.BTC, 9)\r\n\r\n def test_buy_coins_method_invalid_input(self):\r\n \"\"\" Check if Wallet.buy_coins() doesn't change the database when user's input is invalid. 
\"\"\"\r\n return_value = self.wallet.buy_coins(\r\n coin_type='BTC',\r\n coin_amount=1000,\r\n query_data_price=float('20000.000000'),\r\n )\r\n self.assertNotEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 100000)\r\n self.assertEqual(self.wallet.BTC, 10)\r\n\r\n def test_sell_coins_method_invalid_input(self):\r\n \"\"\" Check if Wallet.sell_coins() doesn't change the database when user's input is invalid. \"\"\"\r\n return_value = self.wallet.buy_coins(\r\n coin_type='BTC',\r\n coin_amount=1000,\r\n query_data_price=float('10000.000000'),\r\n )\r\n self.assertNotEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 100000)\r\n self.assertEqual(self.wallet.BTC, 10)\r\n\r\n def test_buy_coins_method_old_data(self):\r\n \"\"\" Check if Wallet.buy_coins() doesn't change the database when user submitted form after prices have changed. \"\"\"\r\n return_value = self.wallet.buy_coins(\r\n coin_type='BTC',\r\n coin_amount=1000,\r\n query_data_price=float('123.000000'),\r\n )\r\n self.assertNotEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 100000)\r\n self.assertEqual(self.wallet.BTC, 10)\r\n\r\n def test_sell_coins_method_old_data(self):\r\n \"\"\" Check if Wallet.sell_coins() doesn't change the database when user submitted form after prices have changed. \"\"\"\r\n return_value = self.wallet.buy_coins(\r\n coin_type='BTC',\r\n coin_amount=1000,\r\n query_data_price=float('123.000000'),\r\n )\r\n self.assertNotEqual(return_value, '')\r\n self.assertEqual(self.wallet.cash, 100000)\r\n self.assertEqual(self.wallet.BTC, 10)\r\n\r\n\r\nclass TradeCoinsFormTests(TestCase):\r\n \"\"\" Test if TradeCoinsForm includes all coins type from constants.COINS . \"\"\"\r\n\r\n def test_form_choices(self):\r\n types = TradeCoinsForm.COIN_TYPE_CHOICES\r\n coin_choice_list = []\r\n for type in types:\r\n coin_choice_list.append(type[0])\r\n self.assertEqual(coin_choice_list, constants.COINS)\r\n\r\n\r\nclass WalletAssetsTests(TestCase):\r\n \"\"\" Check if Wallet model has field for every coin type from constants.COINS . \"\"\"\r\n\r\n def test_includes_all_coins(self):\r\n for coin in constants.COINS:\r\n self.assertTrue(hasattr(Wallet, coin))\r\n\r\n\r\nclass NotLoggedViewsTests(TestCase):\r\n \"\"\" Tests for views access for anonymous users. 
\"\"\"\r\n\r\n def setUp(self):\r\n \"\"\"Create user_A with wallet_A\"\"\"\r\n self.user_A = User.objects.create_user('user_A', 'A@example.com', 'test')\r\n wallet_A = Wallet(owner=self.user_A)\r\n wallet_A.save()\r\n\r\n def test_index_view(self):\r\n \"\"\" Tests access to homepage.\"\"\"\r\n response = self.client.get(reverse('wallet:index'))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_login_view(self):\r\n \"\"\" Tests access to login page.\"\"\"\r\n response = self.client.get(reverse('users:login'))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_register_view(self):\r\n \"\"\" Tests access to login page.\"\"\"\r\n response = self.client.get(reverse('users:register'))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_access_to_someones_wallet(self):\r\n \"\"\" Check if anonymous user has an access to someone's wallet.\"\"\"\r\n wallet_A = Wallet.objects.get(owner=self.user_A)\r\n response = self.client.get(reverse('wallet:detail', args=(wallet_A.id,)))\r\n self.assertEqual(response.status_code, 302)\r\n\r\n def test_access_to_someones_trade(self):\r\n \"\"\" Check if anonymous user has an access to someone's trade page.\"\"\"\r\n wallet_A = Wallet.objects.get(owner=self.user_A)\r\n response = self.client.get(reverse('wallet:trade', args=(wallet_A.id,)))\r\n self.assertEqual(response.status_code, 302)\r\n\r\n\r\nclass LoggedViewsAccessTests(TestCase):\r\n \"\"\" Tests for views access for logged users. \"\"\"\r\n\r\n def setUp(self):\r\n \"\"\"\r\n Create user_A and user_B, both with wallets,\r\n accordingly wallet_A and wallet_B. Log in user_A.\r\n \"\"\"\r\n self.user_A = User.objects.create_user('user_A', 'A@example.com', 'test')\r\n wallet_A = Wallet(owner=self.user_A)\r\n wallet_A.save()\r\n self.user_B = User.objects.create_user('user_B', 'B@example.com', 'test')\r\n wallet_B = Wallet(owner=self.user_B)\r\n wallet_B.save()\r\n self.client.login(username='user_A', password='test')\r\n\r\n def test_index_view(self):\r\n \"\"\" Tests access to homepage.\"\"\"\r\n response = self.client.get(reverse('wallet:index'))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_access_to_own_wallet(self):\r\n \"\"\" Check if logged user has an access to his wallet.\"\"\"\r\n wallet_A = Wallet.objects.get(owner=self.user_A)\r\n response = self.client.get(reverse('wallet:detail', args=(wallet_A.id,)))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_access_to_own_trade(self):\r\n \"\"\" Check if logged user has an access to his trade page.\"\"\"\r\n wallet_A = Wallet.objects.get(owner=self.user_A)\r\n response = self.client.get(reverse('wallet:trade', args=(wallet_A.id,)))\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def test_access_to_other_wallet(self):\r\n \"\"\" Check if logged user has an access to other's wallet.\"\"\"\r\n wallet_B = Wallet.objects.get(owner=self.user_B)\r\n response = self.client.get(reverse('wallet:detail', args=(wallet_B.id,)))\r\n self.assertEqual(response.status_code, 404)\r\n\r\n def test_access_to_other_trade(self):\r\n \"\"\" Check if logged user has an access to other's trade page.\"\"\"\r\n wallet_B = Wallet.objects.get(owner=self.user_B)\r\n response = self.client.get(reverse('wallet:trade', args=(wallet_B.id,)))\r\n self.assertEqual(response.status_code, 404)\r\n","sub_path":"src/wallet/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"19745582","text":"__all__ = [\n 'issue_to_dict'\n]\n\n\ndef issue_to_dict(issue):\n result = {}\n\n if issue.closed_by:\n closed_by = issue.closed_by.name\n else:\n closed_by = None\n\n result['id'] = issue.id\n result['repository'] = issue.repository.name\n result['title'] = issue.title\n result['body'] = issue.body\n result['url'] = issue.html_url\n result['state'] = issue.state\n result['labels'] = issue.labels\n result['created_at'] = issue.created_at\n result['closed_at'] = issue.closed_at\n result['closed_by'] = closed_by\n return result\n","sub_path":"packs/github/actions/lib/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"444931412","text":"\n\nclass Node(object):\n\n def __init__(self, key):\n self.left = None\n self.right = None\n self.val = key \n\n\n\n# To traverse the tree with a recursive function:\ndef search(root, key):\n \n # base case of if the root node is the data we are looking for\n if root is None or root.val is key:\n return root\n\n if root.val < key:\n return search(root.right, key)\n \n return search(root.left, key)\n\n\ndef insert(root,node):\n \n #base case if the root node is none, insert here\n if root is None: \n root = node\n \n if root.val < node.val:\n if root.right == None:\n root.right = node\n else: \n insert(root.right, node)\n\n elif root.val > node.val:\n if root.left == None:\n root.left = node\n else:\n insert(root.left, node) \n \n \n\ndef inorder(root):\n # function that takes a node object, traverses each node\n # and prints the value of each node.\n\n if root:\n inorder(root.left)\n print(root.val)\n inorder(root.right)\n\n","sub_path":"harder/bintree.py","file_name":"bintree.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"42924712","text":"\"\"\"\nThis module contains the default config parameters for the container\npipeline service. 
However, other modules don't consume this module\ndirectly, but container_pipeline.lib.settings.py which extends this module\nand provides a layer of abstraction around config loading.\n\"\"\"\nimport os\n\nLOG_LEVEL = os.environ.get('LOG_LEVEL') or 'DEBUG'\nLOGS_BASE_DIR = '/srv/pipeline-logs'\nLOG_PATH = os.path.join(LOGS_BASE_DIR, 'cccp.log')\n\nSERVICE_LOGFILE = \"service_debug_log.txt\"\n\n# Django specific configuration\nDEBUG = True\nTIME_ZONE = 'UTC'\nUSE_TZ = True\nBASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nSECRET_KEY = 'xxxxxxxxxxxxxx'\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'container_pipeline',\n 'rest_framework',\n 'rest_framework_docs',\n)\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'container_pipeline.wsgi.application'\n\nSTATIC_URL = '/static/'\nROOT_URLCONF = 'container_pipeline.urls'\n\nALLOWED_HOSTS = ['127.0.0.1']\n\nLOGS_URL_BASE = \"https://registry.centos.org/pipeline-logs/\"\nLOGS_DIR = LOGS_DIR_BASE = \"/srv/pipeline-logs/\"\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ],\n 'DEFAULT_PAGINATION_CLASS':\n 'container_pipeline.pagination.ModelAPIPagination'\n}\n\nLOGGING = dict(\n version=1,\n level=LOG_LEVEL,\n formatters=dict(\n bare={\n \"format\":\n (\n '[%(asctime)s] %(name)s p%(process)s %(lineno)d '\n '%(levelname)s - %(message)s'\n )\n },\n ),\n handlers=dict(\n console={\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"bare\",\n \"level\": LOG_LEVEL,\n },\n log_to_file={\n 'level': LOG_LEVEL,\n 'class': 'logging.FileHandler',\n 'filename': LOG_PATH,\n 'mode': 'a+',\n 'formatter': 'bare',\n }\n ),\n loggers={\n 'dockerfile-linter': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'build-worker': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'test-worker': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'delivery-worker': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'scan-worker': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'dispatcher': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"],\n },\n 'mail-service': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": 
[\"console\", \"log_to_file\"],\n },\n 'console': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\"]\n\n },\n 'tracking': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"]\n\n },\n 'jenkins': {\n \"level\": \"DEBUG\",\n \"propagate\": False,\n \"handlers\": [\"console\", \"log_to_file\"]\n\n },\n 'cccp-index-reader': {\n \"level\": \"DEBUG\",\n \"propogate\": False,\n \"handlers\": [\"console\", \"log_to_file\"]\n }\n },\n)\n\nBEANSTALKD_HOST = os.environ.get('BEANSTALKD_HOST') or '127.0.0.1'\nBEANSTALKD_PORT = int(os.environ.get('BEANSTALKD_PORT') or '11300')\nOPENSHIFT_ENDPOINT = os.environ.get('OPENSHIFT_ENDPOINT') or \\\n 'https://localhost:8443'\nOPENSHIFT_USER = os.environ.get('OPENSHIFT_USER') or 'test-admin'\nOPENSHIFT_PASSWORD = os.environ.get('OPENSHIFT_PASSWORD') or 'admin'\nOC_CONFIG = os.environ.get('OC_CONFIG') or \\\n '/opt/cccp-service/client/node.kubeconfig'\nOC_CERT = os.environ.get('OC_CERT') or '/opt/cccp-service/client/ca.crt'\n\nSCANNERS_STATUS_FILE = \"scanners_status.json\"\nLINTER_RESULT_FILE = \"linter_results.txt\"\nLINTER_STATUS_FILE = \"linter_status.json\"\n\n# tracking\nREGISTRY_ENDPOINT = ('registry.centos.org', 'https://registry.centos.org')\nJENKINS_ENDPOINT = 'http://127.0.0.1:8080/'\nJENKINS_USERNAME = ''\nJENKINS_PASSWORD = ''\nJENKINS_CLI = '/opt/jenkins-cli.jar'\nCONTAINER_BUILD_TRIGGER_DELAY = 10\nUPSTREAM_PACKAGE_CACHE = os.path.join(BASE_DIR, 'tracking/data')\nBEANSTALK_SERVER = 'localhost'\n\n# Build worker\nBUILD_RETRY_DELAY = os.environ.get('BUILD_RETRY_DELAY') or 120 # in seconds\n","sub_path":"container_pipeline/lib/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"140072876","text":"import os\nfrom carte import carte\nfrom carte import display_mountain\nfrom carte import display_plain\nfrom mountain import mountain\nfrom tresor import tresor\nfrom aventurier import aventurier\nfrom tour import tour\n\ndef init_input(dirname_input):\n carte_created=False\n\n cwd,inputdir,dir=verify_input_path(dirname_input)\n\n if not dir:\n os.makedirs(inputdir,exist_ok=True)\n print(f\"Le dossier d'entrée vient d'être créé, veuillez ajouter votre fichier d'entrée ici. {inputdir}\")\n else:\n getfile=get_files_in_dir(inputdir)\n if getfile:\n \n if len(getfile) > 1:\n print(f\"Plusieurs fichiers d'entrée sont dans le directory{inputdir}:\\n{getfile}, seul le premier est pris en compte.\")\n # selectedfile=raw_input(f\"Plusieurs fichiers d'entrée sont présents dans {inputdir} lequel voulez vous utilisez ? \\n {getfile}:\")\n # print(selectedfile)\n input = open(inputdir+\"/\"+getfile[0], \"r\")\n\n\n for l in input:\n if l[0]==\"C\" and carte_created:\n print(\"une carte a déjà été instancié, cette ligne ne sera pas prise en compte seul une carte peut-etre créé à la fois.\")\n elif l[0]==\"C\" and not carte_created:\n str=l.replace(\" \",\"\")\n values=str.split(\"-\")\n try:\n largeur=int(values[1])\n longueur=int(values[2])\n except ValueError:\n print(\" Les valeurs attendues sont des entiers. veuillez modifier le fichier.\")\n c=carte(largeur,longueur)\n carte_created=True\n print(f\"La carte a ete cree. 
[{largeur} unites en largeur, {longueur} unites en longueur]\")\n\n if l[0]==\"M\":\n str=l.replace(\" \",\"\")\n values=str.split(\"-\")\n try:\n axis_h=int(values[1])\n axis_v=int(values[2][0])\n except ValueError:\n print(\" Les valeurs attendues sont des entiers. veuillez modifier le fichier.\")\n\n if not 0 <= axis_h < c.largeur or not 0 <= axis_v < c.longueur:\n print(\"La case n'est pas dans la carte, la ligne est ignoré.\")\n else:\n if c.board[axis_h][axis_v] in display_plain():\n c.mountains.append(mountain(axis_h,axis_v))\n print(f\"On aperçoit une montagne en [{axis_h},{axis_v}]\")\n c.board[axis_h][axis_v]=display_mountain()\n else:\n print(\"Cette case n'est pas une plaine, cette montagne ne peut etre créé ici. La ligne est donc ignoré\")\n\n if l[0]==\"T\":\n str=l.replace(\" \",\"\")\n values=str.split(\"-\")\n try:\n axis_h=int(values[1])\n axis_v=int(values[2])\n nb_tresors=int(values[3][0])\n except ValueError:\n print(\" Les valeurs attendues sont des entiers. veuillez modifier le fichier.\")\n\n\n if not nb_tresors > 0:\n print(\"le nombre de tresor doit être un entier positif non nul.\")\n else:\n if not 0 <= axis_h < c.largeur or not 0 <= axis_v < c.longueur:\n print(\"La case n'est pas dans la carte, la ligne est ignoré.\")\n else:\n if c.board[axis_v][axis_h] in display_plain():\n c.tresors.append(tresor(axis_h,axis_v,nb_tresors))\n print(f\"{nb_tresors} trésors ont été caché ici. [{axis_h},{axis_v}]\")\n\n else:\n print(\"Cette case n'est pas une plaine, le trésor ne peut pas être créé ici. La ligne est donc ignoré.\")\n\n if l[0]==\"A\":\n str=l.replace(\" \",\"\")\n values=str.split(\"-\")\n\n try:\n name=values[1]\n orientation=values[4]\n parcours=values[5]\n axis_h=int(values[2])\n axis_v=int(values[3])\n nb_tresors=int(values[6])\n except ValueError:\n print(\" Les valeurs attendues sont des entiers. veuillez modifier le fichier.\")\n\n if not 0 <= axis_h < c.largeur or not 0 <= axis_v < c.longueur:\n print(\"La case n'est pas dans la carte, la ligne est ignoré.\")\n else:\n if not orientation in \"NSOE\":\n print(\"L'orientation renseigné n'est pas reconnu, toute triche est sanctionné, la ligne est ignoré.\")\n else:\n c.aventuriers.append(aventurier(name,axis_h,axis_v,orientation,parcours,nb_tresors))\n print(f\"l'aventurier {name} est inscrit. 
il partira de [{axis_h},{axis_v}] et s'orientera vers {orientation}\")\n\n for t in c.tresors:\n c.board[t.axis_v][t.axis_h]=t.display_tresor()\n for a in c.aventuriers:\n c.board[a.axis_v][a.axis_h]=a.display_aventurier()\n c.design_map()\n input.close()\n\n c.tour_de_jeu=tour(c.aventuriers)\n return c\n else:\n print(\"Pas de fichier d'entrée présent dans :\"+inputdir+\"\\n\\n\\t Veuillez créer un fichier dans ce repertoire\")\n return\n\ndef verify_input_path(dirname_input):\n print(\"La carte au trésor !\")\n cwd=os.getcwd()\n inputdir=cwd+dirname_input\n dir=os.path.exists(inputdir)\n return cwd,inputdir,dir\n\n# list of files\ndef get_files_in_dir(dir):\n return os.listdir(dir)\n","sub_path":"read_input.py","file_name":"read_input.py","file_ext":"py","file_size_in_byte":6064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"61931182","text":"#!/usr/bin/python3\n# Imports:\n\nimport torch\nimport torch.nn as nn\nimport sys\n\nsys.path.append('../')\nfrom utils import normal_init\n\nclass EditAndGen(nn.Module):\n def __init__(self, base_gen_object, editor_object_list, num_edit):\n super(EditAndGen, self).__init__()\n \n # @yuchen commenting these lines out saves no significant memory.\n # these modules are truly tiny.\n \n self.mods = nn.ModuleList([base_gen_object])\n self.mods.extend(editor_object_list)\n self.param_groups = [list(m.parameters()) for m in self.mods]\n\n def weight_init(self, mean, std):\n for m in self.mods:\n normal_init(m, mean, std)\n \n def forward(self, z):\n curr_inp = z\n all_images = []\n for i in range(len(self.mods)):\n curr_inp = self.mods[i](curr_inp).detach()\n all_images.append(curr_inp.clone())\n return all_images\n \n def generate_upto(self, z, upto):\n \"\"\" Generates upto but not including the var upto\"\"\"\n curr_inp = z\n i = -1\n for i in range(upto - 1):\n curr_inp = self.mods[i](curr_inp).clone().detach()\n curr_inp = self.mods[i+1](curr_inp.clone())\n return curr_inp\n \nclass EditAndGenLabels(nn.Module):\n \n def __init__(self, base_gen_class, base_edit_class, num_edit):\n super(EditAndGenLabels, self).__init__()\n \n base_gen = base_gen_class()\n self.add_module('base', base_gen)\n self.param_groups = [list(base_gen.parameters())]\n for i in range(num_edit):\n base_edit = base_edit_class()\n self.add_module('edit%d'%i, base_edit)\n self.param_groups.append(list(base_edit.parameters()))\n\n def weight_init(self, mean, std):\n for m in self._modules:\n normal_init(self._modules[m], mean, std)\n \n def forward(self, x, labels):\n generated = []\n for layer in self.named_children():\n if len(generated) == 0:\n image = layer[1](x, labels)\n generated.append(image)\n else:\n image = layer[1](generated[-1], labels)\n generated.append(image)\n return generated\n\ndef test():\n from generator import TinyGenerator\n from edit_generator import edit_generator_tiny\n from utils import print_network\n test_edit_gen = EditAndGen(TinyGenerator, edit_generator_tiny, 50)\n \n assert(len(test_edit_gen.param_groups) == 51)\n \n z = torch.randn((10, 64))\n gen = test_edit_gen(z)\n \n for i, pic in enumerate(gen):\n print('At step %d'%i, pic.shape)\n \n print_network(test_edit_gen)\n \nif __name__ == '__main__':\n test()\n","sub_path":"trainers/edit_and_gen.py","file_name":"edit_and_gen.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"127227113","text":"#Define a list, set\n\n#Output of Function listBasics: 
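The EditAndGen module above detaches each stage's output before handing it to the next stage, so gradients never flow backwards across stage boundaries. A condensed, self-contained sketch of that pattern using small hypothetical Linear stages:

import torch
import torch.nn as nn

stages = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)])
x = torch.randn(4, 8)
outputs = []
for stage in stages:
    x = stage(x).detach()   # cut the graph: earlier stages get no gradient
    outputs.append(x.clone())
print([tuple(o.shape) for o in outputs])  # three (4, 8) tensors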
\n# \n# [['Punit', 79], ['Vineet', 66]]\n# ['Punit', 'Vineet']\n# [79, 66]\ndef listBasics():\n \n #l1=list() Can define like this as well\n l1=[]\n \n print(type(l1))\n \n l1.insert(0,[\"Vineet\",66])\n l1.insert(0,[\"Punit\",79])\n \n #l1=[[\"Vineet\",66],[\"Punit\",79]]\n \n print(l1)\n print([name for name,marks in l1])\n print([marks for name,marks in l1])\n\n#Output of Function setBasics: \n# \n# \n# \n# {44, 22}\n\ndef setBasics():\n\n #Empty curly braces {} will make an empty dictionary in Python. \n s={}\n print(type(s))\n\n s1={ 13 , 12 }\n print(type(s1))\n \n #To make a set without any elements, we use the set() function without any argument.\n s2=set()\n print(type(s2))\n \n #Sets are mutable. However, since they are unordered, indexing has no meaning.\n s2.add(22)\n s2.add(44)\n s2.add(22)\n \n print(s2)\n\nif __name__ == \"__main__\":\n #listBasics()\n setBasics()","sub_path":"ListAndSetBasics.py","file_name":"ListAndSetBasics.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"446661048","text":"import os\nimport sys\nimport cv2\nfrom scanner_camera import Camera as Scanner\nimport base64\nimport numpy as np\n\nCWD_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\nclass ImageSource:\n def __init__(self):\n self.conf_section = None\n self.conf_file = None\n self.icon = None\n self.capture = None\n self.factor = 1\n self.width=320\n self.camera = None\n self.uploaded_image = None\n\n def meta(self):\n return {\n \"filter\": self.conf_section,\n \"name\":\"Camera\",\n \"description\":\"Place the image in front of your camera and zoom in to focus\",\n \"parameters\": [],\n \"icon\": self.icon\n }\n\n def configure(self, global_conf, conf_section, conf_file):\n self.conf_section = conf_section\n self.conf_file = conf_file\n\n self.factor = self.conf_file.get_int(\"zoom\", self.conf_section)\n self.width = self.conf_file.get_int(\"width\", self.conf_section)\n print(\"CREATE CAMERA\")\n self.camera = Scanner()\n print(self.camera)\n\n\n def set_image(self, val):\n if val == None:\n self.uploaded_image = None\n else:\n bytearray = base64.b64decode(val)\n png_as_np = np.frombuffer(bytearray, dtype=np.uint8)\n self.uploaded_image = self.__resize(cv2.imdecode(png_as_np, flags=cv2.IMREAD_COLOR), width=self.width)\n\n\n def get_image(self):\n try:\n if self.uploaded_image is None :\n readed = self.camera.capture.read()\n if readed is None:\n return None\n return self.__resize(readed, width=self.width)\n else:\n return self.uploaded_image\n except Exception as exc:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print(self.conf_section, exc)\n return None\n\n\n def __resize(self, image, width = None, height = None, inter = cv2.INTER_AREA):\n # initialize the dimensions of the image to be resized and\n # grab the image size\n dim = None\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the\n # original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the\n # dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n else:\n # calculate the ratio of the width and construct the\n # dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = 
cv2.resize(image, dim, interpolation = inter)\n\n # return the resized image\n return resized\n\n\n def __zoom(self, image, zoom_size):\n height, width, channels = image.shape\n new_dim = (int(zoom_size * width), int(zoom_size * height))\n image = cv2.resize(image, new_dim, interpolation = cv2.INTER_AREA)\n from_width = int(new_dim[0]/2)- int(width/2)\n to_width =from_width+width\n\n from_height = int(new_dim[1]/2)- int(height/2)\n to_height =from_height+height\n\n return image[from_height:to_height,from_width:to_width]\n\n\n def stop(self):\n Scanner.stop()\n pass\n\n","sub_path":"src/processing/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"526538619","text":"import pandas as pd\nimport os\nimport math\nfrom pandas.api.types import is_numeric_dtype, is_string_dtype\nfrom gluonts_forecasts.model import Model\nfrom dku_constants import (\n METRICS_DATASET,\n METRICS_COLUMNS_DESCRIPTIONS,\n TIMESERIES_KEYS,\n EVALUATION_METRICS_DESCRIPTIONS,\n ROW_ORIGIN,\n)\nfrom gluonts_forecasts.gluon_dataset import GluonDataset\nfrom gluonts_forecasts.model_config_registry import ModelConfigRegistry\nfrom gluonts_forecasts.utils import add_row_origin\nfrom safe_logger import SafeLogger\n\n\nlogger = SafeLogger(\"Forecast plugin\")\n\n\nclass TrainingSession:\n \"\"\"\n Class to train and evaluate multiple GluonTS estimators on a training dataframe, and to retrieve an aggregated metrics dataframe\n\n Attributes:\n target_columns_names (list): List of column names to predict\n time_column_name (str)\n frequency (str): Pandas timeseries frequency (e.g. '3M')\n epoch (int): Number of epochs used by the GluonTS Trainer class\n models_parameters (dict): Dictionary of model names (key) and their parameters (value)\n prediction_length (int): Number of time steps to predict\n training_df (DataFrame): Training dataframe\n make_forecasts (bool): True to output the evaluation predictions of the last prediction_length time steps\n external_features_columns_names (list): List of columns with dynamic real features over time\n timeseries_identifiers_names (list): Columns to identify multiple time series when data is in long format\n batch_size (int): Size of batch used by the GluonTS Trainer class\n user_num_batches_per_epoch (int): Number of batches per epoch selected by user. 
-1 means to compute scaled number\n num_batches_per_epoch (int): Number of batches per epoch\n season_length (int): Length of the seasonality parameter.\n mxnet_context (mxnet.context.Context): MXNet context to use for Deep Learning models training.\n \"\"\"\n\n def __init__(\n self,\n target_columns_names,\n time_column_name,\n frequency,\n epoch,\n models_parameters,\n prediction_length,\n training_df,\n make_forecasts,\n external_features_columns_names=None,\n timeseries_identifiers_names=None,\n batch_size=None,\n user_num_batches_per_epoch=None,\n season_length=None,\n mxnet_context=None,\n ):\n self.models_parameters = models_parameters\n self.models = []\n self.glutonts_dataset = None\n self.training_df = training_df\n self.prediction_length = prediction_length\n self.target_columns_names = target_columns_names\n self.time_column_name = time_column_name\n self.frequency = frequency\n self.epoch = epoch\n self.make_forecasts = make_forecasts\n self.external_features_columns_names = external_features_columns_names\n self.use_external_features = len(external_features_columns_names) > 0\n self.timeseries_identifiers_names = timeseries_identifiers_names\n self.session_name = None\n self.session_path = None\n if self.make_forecasts:\n self.forecasts_df = pd.DataFrame()\n self.evaluation_forecasts_df = None\n self.evaluation_train_list_dataset = None\n self.full_list_dataset = None\n self.metrics_df = None\n self.batch_size = batch_size\n self.user_num_batches_per_epoch = user_num_batches_per_epoch\n self.num_batches_per_epoch = None\n self.season_length = season_length\n self.mxnet_context = mxnet_context\n\n def init(self, session_name, partition_root=None):\n \"\"\"Create the session_path. Check types of target, external features and timeseries identifiers columns.\n\n Args:\n session_name (Timestamp)\n partition_root (str, optional): Partition root path, concatenated to session_name to create the session_path. 
Defaults to None.\n \"\"\"\n self.session_name = session_name\n if partition_root is None:\n self.session_path = session_name\n else:\n self.session_path = os.path.join(partition_root, session_name)\n\n self._check_target_columns_types()\n self._check_external_features_columns_types()\n\n def create_gluon_datasets(self):\n \"\"\"Create train and test gluon list datasets.\n The last prediction_length time steps are removed from each timeseries of the train dataset.\n Compute optimal num_batches_per_epoch value based on the train dataset size._check_target_columns_types\n \"\"\"\n\n gluon_dataset = GluonDataset(\n dataframe=self.training_df,\n time_column_name=self.time_column_name,\n frequency=self.frequency,\n target_columns_names=self.target_columns_names,\n timeseries_identifiers_names=self.timeseries_identifiers_names,\n external_features_columns_names=self.external_features_columns_names,\n min_length=2 * self.prediction_length, # Assuming that context_length = prediction_length\n )\n\n gluon_list_datasets = gluon_dataset.create_list_datasets(cut_lengths=[self.prediction_length, 0])\n self.evaluation_train_list_dataset = gluon_list_datasets[0]\n self.full_list_dataset = gluon_list_datasets[1]\n\n if self.user_num_batches_per_epoch == -1:\n self.num_batches_per_epoch = self._compute_optimal_num_batches_per_epoch()\n else:\n self.num_batches_per_epoch = self.user_num_batches_per_epoch\n\n def instantiate_models(self):\n \"\"\"Instantiate all the selected models.\"\"\"\n for model_name, model_parameters in self.models_parameters.items():\n self.models.append(\n Model(\n model_name,\n model_parameters=model_parameters,\n frequency=self.frequency,\n prediction_length=self.prediction_length,\n epoch=self.epoch,\n use_external_features=self.use_external_features,\n batch_size=self.batch_size,\n num_batches_per_epoch=self.num_batches_per_epoch,\n season_length=self.season_length,\n mxnet_context=self.mxnet_context,\n )\n )\n\n def train_evaluate(self, retrain=False):\n \"\"\"Call the right train and evaluate function depending on the need to make forecasts.\"\"\"\n\n if self.make_forecasts:\n self._train_evaluate_make_forecast(retrain)\n else:\n self._train_evaluate(retrain)\n\n def _train_evaluate(self, retrain):\n \"\"\"Evaluate all the selected models (then retrain on complete data if specified) and get the metrics dataframe.\"\"\"\n metrics_df = pd.DataFrame()\n for model in self.models:\n item_metrics = model.train_evaluate(\n self.evaluation_train_list_dataset, self.full_list_dataset, retrain=retrain\n )[0]\n metrics_df = metrics_df.append(item_metrics)\n metrics_df[METRICS_DATASET.SESSION] = self.session_name\n self.metrics_df = self._reorder_metrics_df(metrics_df)\n\n def _train_evaluate_make_forecast(self, retrain):\n \"\"\"Evaluate all the selected models (then retrain on complete data if specified), get the metrics dataframe and create the forecasts dataframe.\"\"\"\n metrics_df = pd.DataFrame()\n for model in self.models:\n (item_metrics, identifiers_columns, forecasts_df) = model.train_evaluate(\n self.evaluation_train_list_dataset, self.full_list_dataset, make_forecasts=True, retrain=retrain\n )\n forecasts_df = forecasts_df.rename(columns={\"index\": self.time_column_name})\n if self.forecasts_df.empty:\n self.forecasts_df = forecasts_df\n else:\n self.forecasts_df = self.forecasts_df.merge(\n forecasts_df, on=[self.time_column_name] + identifiers_columns\n )\n metrics_df = metrics_df.append(item_metrics)\n metrics_df[METRICS_DATASET.SESSION] = self.session_name\n 
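        # Illustrative aside (not part of the original code): the merge a few lines
        # below uses pandas' indicator column to tag each row's origin. With
        # hypothetical frames:
        #   train = pd.DataFrame({"date": [1, 2, 3], "y": [10.0, 11.0, 12.0]})
        #   fcst = pd.DataFrame({"date": [3], "model_median": [11.5]})
        #   train.merge(fcst, on=["date"], how="left", indicator=True)
        # _merge == "both" marks evaluated rows and "left_only" pure training
        # rows, which add_row_origin then maps to the row-origin labels.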
self.metrics_df = self._reorder_metrics_df(metrics_df)\n\n        self.evaluation_forecasts_df = self.training_df.merge(\n            self.forecasts_df, on=[self.time_column_name] + identifiers_columns, how=\"left\", indicator=True\n        )\n\n        self.evaluation_forecasts_df = add_row_origin(\n            self.evaluation_forecasts_df, both=ROW_ORIGIN.EVALUATION, left_only=ROW_ORIGIN.TRAIN\n        )\n\n        # sort forecasts dataframe by timeseries identifiers (ascending) and time column (descending)\n        self.evaluation_forecasts_df = self.evaluation_forecasts_df.sort_values(\n            by=identifiers_columns + [self.time_column_name], ascending=[True] * len(identifiers_columns) + [False]\n        )\n        self.evaluation_forecasts_df[METRICS_DATASET.SESSION] = self.session_name\n\n    def _reorder_metrics_df(self, metrics_df):\n        \"\"\"Sort rows by target column and put aggregated rows on top.\n\n        Args:\n            metrics_df (DataFrame): Dataframe of metrics of all timeseries.\n\n        Returns:\n            Ordered metrics DataFrame.\n        \"\"\"\n        metrics_df = metrics_df.sort_values(by=[METRICS_DATASET.TARGET_COLUMN], ascending=True)\n        ordered_metrics_df = pd.concat(\n            [\n                metrics_df[metrics_df[METRICS_DATASET.TARGET_COLUMN] == METRICS_DATASET.AGGREGATED_ROW],\n                metrics_df[metrics_df[METRICS_DATASET.TARGET_COLUMN] != METRICS_DATASET.AGGREGATED_ROW],\n            ],\n            axis=0,\n        ).reset_index(drop=True)\n        return ordered_metrics_df\n\n    def get_evaluation_forecasts_df(self):\n        return self.evaluation_forecasts_df\n\n    def create_evaluation_forecasts_column_description(self):\n        \"\"\"Explain the meaning of the forecasts columns.\n\n        Returns:\n            Dictionary of description (value) by column (key).\n        \"\"\"\n        column_descriptions = METRICS_COLUMNS_DESCRIPTIONS.copy()\n        available_models = ModelConfigRegistry().list_available_models()\n        for column in self.evaluation_forecasts_df.columns:\n            model = next((model for model in available_models if model in column), None)\n            if model:\n                column_split = column.split(f\"{model}_\")\n                if len(column_split) > 1:\n                    target_name = column_split[1]\n                    column_descriptions[column] = f\"Median forecasts of {target_name} using {model} model\"\n        return column_descriptions\n\n    def get_metrics_df(self):\n        return self.metrics_df\n\n    def get_evaluation_metrics_df(self):\n        \"\"\"Replace __aggregated__ by target column name and remove other rows when only one target\n        and no timeseries identifiers.\n\n        Returns:\n            Dataframe of metrics to display to users.\n        \"\"\"\n        evaluation_metrics_df = self.metrics_df.copy()\n        evaluation_metrics_df.columns = [\n            column.lower() if column in EVALUATION_METRICS_DESCRIPTIONS else column\n            for column in evaluation_metrics_df.columns\n        ]\n        if len(self.target_columns_names) == 1 and len(self.timeseries_identifiers_names) == 0:\n            evaluation_metrics_df = self.metrics_df.copy()\n            evaluation_metrics_df = evaluation_metrics_df[\n                evaluation_metrics_df[METRICS_DATASET.TARGET_COLUMN] == METRICS_DATASET.AGGREGATED_ROW\n            ]\n            evaluation_metrics_df[METRICS_DATASET.TARGET_COLUMN] = self.target_columns_names[0]\n        return evaluation_metrics_df\n\n    def create_evaluation_results_columns_descriptions(self):\n        \"\"\"Explain the meaning of the metrics dataset columns.\n\n        Returns:\n            Dictionary of description (value) by column (key).\n        \"\"\"\n        column_descriptions = METRICS_COLUMNS_DESCRIPTIONS.copy()\n        for column in EVALUATION_METRICS_DESCRIPTIONS:\n            column_descriptions[column.lower()] = EVALUATION_METRICS_DESCRIPTIONS[column]\n        return column_descriptions\n\n    def _check_target_columns_types(self):\n        \"\"\"Raises ValueError if a target column is not numerical\"\"\"\n        for column_name in 
self.target_columns_names:\n            if not is_numeric_dtype(self.training_df[column_name]):\n                raise ValueError(f\"Target column '{column_name}' must be of numeric type\")\n\n    def _check_external_features_columns_types(self):\n        \"\"\"Raises ValueError if an external feature column is not numerical\"\"\"\n        for column_name in self.external_features_columns_names:\n            if not is_numeric_dtype(self.training_df[column_name]):\n                raise ValueError(f\"External feature '{column_name}' must be of numeric type\")\n\n    def _compute_optimal_num_batches_per_epoch(self):\n        \"\"\"Compute the optimal value of num batches per epoch to scale to the training data size.\n        With this formula, each timestep will on average be in 2 samples, once in the context part and once in the prediction part.\n        \"\"\"\n        num_samples_total = 0\n        for timeseries in self.evaluation_train_list_dataset.list_data:\n            timeseries_length = len(timeseries[TIMESERIES_KEYS.TARGET])\n            num_samples = math.ceil(timeseries_length / self.prediction_length)\n            num_samples_total += num_samples\n        optimal_num_batches_per_epoch = max(math.ceil(num_samples_total / self.batch_size), 50)\n        logger.info(\n            f\"Number of batches per epoch automatically scaled to training data size: {optimal_num_batches_per_epoch}\"\n        )\n        return optimal_num_batches_per_epoch\n","sub_path":"python-lib/gluonts_forecasts/training_session.py","file_name":"training_session.py","file_ext":"py","file_size_in_byte":13665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"470104274","text":"# -*- coding: utf-8 -*-\n\n# Generators and iterators\n# A generator can be iterated over (usually with a for loop), and each item of the generator is produced lazily, only at the moment it is needed.\n# The way to build a generator is to use a function together with yield.\n\ndef lazy_range(n):\n\ti = 0\n\twhile i < n:\n\t\tyield i\n\t\ti += 1\n\n\t\t\n","sub_path":"chap2/sample10.py","file_name":"sample10.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"554649712","text":"#!/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPython matplotlib\nPlot density/latitude standard deviation of salinity for each basin, each model\nChoose between :\n- HistoricalNat (ensemble mean, max std)\n- PiControl\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset as open_ncfile\nfrom maps_matplot_lib import zonal_2D, defVarmme, custom_div_cmap\nimport glob, os\nimport datetime\nimport colormaps as cmaps\n\n# ===== Define Work ======\n\nwork = 'histNat'\n# work = 'piC'\n\nindir_histNat = '/data/ericglod/Density_binning/Prod_density_april15/mme_histNat/'\nindir_piC = '/data/ericglod/Density_binning/Prod_density_april15/mme_piControl/'\nfig_dir = '/home/ysilvy/figures/models/zonal_ys/'\n\n# varname = defVarmme('salinity'); v = 'S'\n# varname = defVarmme('temp'); v = 'T'\nvarname = defVarmme('depth'); v = 'Z'\n\nvar = varname['var_zonal_w/bowl']\nlegVar = varname['legVar']\nunit = varname['unit']\n\n# Density domain\nrhomin = 21\nrhomid = 26\nrhomax = 28\ndomrho = [rhomin, rhomid, rhomax]\n\nif work == 'histNat':\n    indir = indir_histNat\n    longName = 'historicalNat'\n    dir = 'histNat_std/'\nelse:\n    indir = indir_piC\n    longName = 'piControl'\n    dir = 'piControl_std/'\n\n# ==== Read std and plot, for each model in list ====\n\n# -- Read files in directory\nlistfiles = sorted(glob.glob(indir + '/*2D.nc'))\nnmodels = len(listfiles)\n\n# -- Loop on models\nfor i in range(nmodels):\n#i=8\n    file = os.path.basename(listfiles[i])\n    f = 
open_ncfile(indir+file,'r')\n name = file.split('.')[1] # Read model name\n\n print('Reading '+work+' for '+name)\n\n if work == 'histNat':\n if name != 'multimodel_Nat':\n # Read varstd of histNat (max std of all runs for each model)\n stdvar_a = f.variables[var+'Std'][1,:,:].squeeze()\n stdvar_i = f.variables[var+'Std'][3,:,:].squeeze()\n stdvar_p = f.variables[var+'Std'][2,:,:].squeeze()\n else:\n # Read var of histNat for the multimodel\n varmme = f.variables[var][:]\n stdvar_a = np.ma.std(varmme[:,1,:,:],axis=0)\n stdvar_p = np.ma.std(varmme[:,2,:,:],axis=0)\n stdvar_i = np.ma.std(varmme[:,3,:,:],axis=0)\n\n else:\n # Read PiControl over 240 years + compute std of PiControl\n varpiC = f.variables[var][-240:,:,:,:]\n stdvar_a = np.ma.std(varpiC[:,1,:,:], axis=0)\n stdvar_p = np.ma.std(varpiC[:,2,:,:], axis=0)\n stdvar_i = np.ma.std(varpiC[:,3,:,:], axis=0)\n\n density = f.variables['lev'][:]\n lat = f.variables['latitude'][:]\n\n # Read bowl and take the average\n file2 = glob.glob(indir+'/*'+name+'*1D.nc')[0]\n f2 = open_ncfile(file2,'r')\n if work == 'histNat':\n bowl = f2.variables['ptopsigma'][:]\n else:\n bowl = f2.variables['ptopsigma'][-240:,:,:]\n bowl = np.ma.average(bowl,axis=0)\n\n # == Plot ==\n\n # Create variable bundles\n varPac = {'name': 'Pacific', 'var_std': stdvar_p, 'bowl':bowl[2,:]}\n varAtl = {'name': 'Atlantic','var_std': stdvar_a, 'bowl':bowl[1,:]}\n varInd = {'name': 'Indian', 'var_std': stdvar_i, 'bowl':bowl[3,:]}\n\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(17,5))\n\n cmap = cmaps.viridis\n #levels = np.arange(0,0.1201,0.01) # Salinity\n levels = np.arange(0,40,2) # Depth\n\n cnplot = zonal_2D(plt, 'var_std', axes[0,0], axes[1,0], 'left', lat, density, varAtl, domrho, cmap, levels)\n cnplot = zonal_2D(plt, 'var_std', axes[0,1], axes[1,1], 'mid', lat, density, varPac, domrho, cmap, levels)\n cnplot = zonal_2D(plt, 'var_std', axes[0,2], axes[1,2], 'right', lat, density, varInd, domrho, cmap, levels)\n\n\n plt.subplots_adjust(hspace=.0001, wspace=0.05, left=0.04, right=0.86)\n\n cb = plt.colorbar(cnplot[0], ax=axes.ravel().tolist(), ticks=levels[::2], fraction=0.015, shrink=2.0, pad=0.05)\n cb.set_label('Standard deviation (%s)' % (unit,), fontweight='bold')\n\n plotTitle = 'Standard deviation of '+legVar+', '+name+' '+longName\n plotName = name+'_'+legVar+'_std_'+work\n\n # Date\n now = datetime.datetime.now()\n date = now.strftime(\"%Y-%m-%d\")\n\n plt.suptitle(plotTitle, fontweight='bold', fontsize=14, verticalalignment='top')\n plt.figtext(.5,.015,'Computed by : zonal_std.py '+date,fontsize=8,ha='center')\n\n plt.savefig(fig_dir+dir+plotName+'.pdf', bbox_inches='tight')\n","sub_path":"Yona_analysis/programs/zonal_std.py","file_name":"zonal_std.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"147754610","text":"from constants import SPOTIFY_API_KEY, SPOTIFY_API_SECRET, SPOTIFY_USERNAME, SPOTIFY_REDIRECT_URL\nimport os\nimport binascii\nimport requests\nimport pprint\nfrom spotipy import Spotify\nimport spotipy.util as util\n\nSCOPES = [\n 'user-read-email',\n 'playlist-read-private',\n 'playlist-modify-private',\n 'playlist-modify-public',\n]\n\nCLEAR_CACHE_FOR_TEST = True\n\nif CLEAR_CACHE_FOR_TEST:\n os.system('rm .cache-{}'.format(SPOTIFY_USERNAME))\n\ntoken = util.prompt_for_user_token(SPOTIFY_USERNAME,\n scope=' '.join(SCOPES),\n client_id=SPOTIFY_API_KEY,\n client_secret=SPOTIFY_API_SECRET,\n redirect_uri=SPOTIFY_REDIRECT_URL)\n\nif token:\n 
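    # Illustrative next step (not in the original snippet): once
    # user_playlist_create returns below, tracks could be appended through
    # spotipy's user_playlist_add_tracks, e.g. (hypothetical track URI):
    #   sp.user_playlist_add_tracks(SPOTIFY_USERNAME, playlists['id'],
    #                               ['spotify:track:...'])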
sp = Spotify(auth=token)\n sp.trace = False\n playlist_name = 'test'\n playlists = sp.user_playlist_create(SPOTIFY_USERNAME,\n playlist_name,\n public=False)\n pprint.pprint(playlists)\nelse:\n print(\"Can't get token for\", SPOTIFY_USERNAME)\n","sub_path":"src/spotify-sandbox.py","file_name":"spotify-sandbox.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"22341927","text":"import requests\nimport re\n\ndef links_list(url):\n try:\n req = requests.get(url)\n text = req.text\n pattern = r' 0, map(lambda eh: eh.fd, readers)))\n wl = list(filter(lambda fd: fd.fileno() > 0, map(lambda eh: eh.fd, writers)))\n\n (r_rl, r_wl, _) = select.select(rl, wl, [], timeout or None)\n handled = False\n for fd in r_rl:\n (h, r) = self.handle(Event.READABLE, fd=fd)\n if isinstance(r, GeneratorType):\n yield from r\n elif r is not None:\n yield r\n handled = handled or h\n for fd in r_wl:\n (h, r) = self.handle(Event.WRITEABLE, fd=fd)\n if isinstance(r, GeneratorType):\n yield from r\n elif r is not None:\n yield r\n handled = handled or h\n if not handled:\n (h, r) = self.handle(Event.IDLE)\n if isinstance(r, GeneratorType):\n yield from r\n elif r is not None:\n yield r\n","sub_path":"keybender/keybender/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"429442270","text":"import os\nimport getpass\nimport logging\nimport json\n\n\nclass Configuration():\n\n def __init__(self,):\n\n self.params = {}\n\n # centinel user\n user_info = {}\n user_info['current_user'] = getpass.getuser()\n user_home = os.path.expanduser('~' + user_info['current_user'])\n user_info['centinel_home'] = os.path.join(user_home, '.centinel')\n user_info['is_vpn'] = False\n self.params['user'] = user_info\n\n # directory structure\n dirs = {}\n dirs['experiments_dir'] = os.path.join(os.path.dirname(__file__),\n \"experiments\")\n dirs['data_dir'] = os.path.join(os.path.dirname(__file__),\n \"data\")\n dirs['results_dir'] = os.path.join(self.params['user']['centinel_home'],\n 'results')\n self.params['dirs'] = dirs\n\n # logging\n self.params['log'] = {}\n self.params['log']['log_level'] = logging.INFO\n self.params['log']['log_file'] = None\n # an alternative is os.path.join(centinel_home,\n # \"centinel.log\")\n self.params['log']['log_format'] = '%(asctime)s: %(levelname)s: %(message)s'\n\n # server\n servers = {}\n servers['server_url'] = \"https://server.iclab.org:8082\"\n servers['login_file'] = os.path.join(self.params['user']['centinel_home'],\n 'login')\n # the entire transaction should take less than 5 min\n servers['total_timeout'] = 60*5\n # set a socket timeout of 15 seconds (no way to do per request\n # platform independently)\n servers['req_timeout'] = 15\n self.params['server'] = servers\n\n # proxy\n proxy = {}\n proxy['proxy_type'] = None # \"socks\" or \"http\"\n proxy['proxy_url'] = None # \"http://127.0.0.1:9050\"\n proxy['proxy'] = None\n if proxy['proxy_type']:\n proxy['proxy'] = {proxy['proxy_type']: proxy['proxy_url']}\n self.params['proxy'] = proxy\n\n def parse_config(self, config_file):\n \"\"\"Given a configuration file, read in and interpret the results\"\"\"\n\n with open(config_file, 'r') as f:\n config = json.load(f)\n self.params = config\n if self.params['proxy']['proxy_type']:\n self.params['proxy'] = {self.params['proxy']['proxy_type']:\n 
self.params['proxy']['proxy_url']}\n\n def write_out_config(self, config_file):\n \"\"\"Write out the configuration file\n\n Note: this will erase all comments from the config file\n\n \"\"\"\n with open(config_file, 'w') as f:\n json.dump(self.params, f)\n","sub_path":"centinel/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"285583647","text":"import json\nimport os\n\nfrom django.shortcuts import render_to_response, render\nfrom django.template import RequestContext\nfrom django.http import HttpResponse, JsonResponse\nfrom django.core.servers.basehttp import FileWrapper\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom controls import *\nfrom models import PreloadedData, UploadedData\nfrom apps.commons.models import CaseCoefficient, CaseParameter\nfrom aquampro.settings.base import MEDIA_ROOT\n\n\ndef render_water_quality_analyzer(request):\n context = {\"page_title\": \"AQUAM | Water Quality Analyzer\"}\n return render(request, \"tools/water-quality-analyzer.html\", context)\n\n\ndef render_water_quality_table(request):\n model = None\n ip = \"127.0.0.1\"\n context = {\"records\": \"\", \"rows_num\": \"\", \"table_status\": \"\"}\n data_source = None\n if request.method == \"GET\":\n model = PreloadedData\n context[\"table_status\"] = \"success\"\n data_source = \"PRELOADED-DATA\"\n if request.method == \"POST\":\n ip = get_client_ip(request)\n model = UploadedData\n is_uploaded = upload_data(request)\n if is_uploaded:\n context[\"table_status\"] = \"success\"\n data_source = \"UPLOADED-DATA\"\n else:\n context[\"table_status\"] = \"failed\"\n records = model.objects.filter(client_ip=ip)\n for record in records:\n record.h2o_pred = None\n record.tds = None\n record.chloride = None\n record.sodium = None\n record.calcium = None\n record.iron = None\n context[\"records\"] = records\n context[\"rows_num\"] = records.count()\n template = \"tools/partial/water-quality-table.html\"\n response = render_to_response(template, context, context_instance=RequestContext(request))\n response.set_cookie(\"WATER-QUALITY-DATA-SOURCE\", data_source)\n return response\n\n\ndef download_water_quality_file(request):\n file_name = MEDIA_ROOT + \"/downloads/water_quality_sample.csv\"\n wrapper = FileWrapper(open(file_name))\n response = HttpResponse(wrapper, content_type=\"text/csv\")\n response[\"Content-Length\"] = os.path.getsize(file_name)\n response[\"Content-Disposition\"] = \"attachment; filename=water_quality_sample.csv\"\n return response\n\n\ndef render_water_quality_file_upload_instructions(request):\n context = {\"page_title\": \"AQUAM | Water Quality Analyzer\"}\n template = \"tools/partial/water-quality-file-upload-instructions.html\"\n return render(request, template, context)\n\n\ndef jsonify_water_quality_coefficients(request):\n location = request.GET[\"location\"]\n coeffs = CaseCoefficient.objects.filter(location=location)\n result = {}\n for coeff in coeffs:\n result[coeff.const_name] = {\"alpha\": coeff.alpha, \"beta\": coeff.beta}\n params = CaseParameter.objects.filter(location=location)\n for param in params:\n result[param.period] = {\"Q0\": param.init_production, \"D\": param.decline_rate, \"b\": param.curve_degree}\n return JsonResponse(result)\n\n\n@ensure_csrf_cookie\ndef update_water_quality_table(request):\n ip = \"127.0.0.1\"\n model = get_client_model(request)\n if model == UploadedData:\n ip = get_client_ip(request)\n 
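    # For context (hypothetical sketch; the real helper lives in controls.py):
    # a get_client_ip of this kind usually honours proxy headers first, e.g.
    #   def get_client_ip(request):
    #       forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    #       return forwarded.split(',')[0] if forwarded else request.META.get('REMOTE_ADDR')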
is_updated = update_water_quality_database(request, model, ip)\n    template = \"tools/partial/water-quality-table.html\"\n    if is_updated:\n        records = model.objects.filter(client_ip=ip)\n        context = {\"records\": records, \"rows_num\": records.count(), \"table_status\": \"success\"}\n        return render_to_response(template, context, context_instance=RequestContext(request))\n    else:\n        context = {\"records\": \"\", \"rows_num\": \"\", \"table_status\": \"failed\"}\n        return render_to_response(template, context, context_instance=RequestContext(request))\n\n\ndef jsonify_water_quality_data(request):\n    result = get_water_quality_data(request)\n    return JsonResponse(result)\n","sub_path":"aquampro/apps/waterquality/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"39926634","text":"from sklearn import preprocessing\n\nlabel_encoder = preprocessing.LabelEncoder()\n\ninput_classes = ['audi', 'ford', 'audi', 'toyota', 'ford', 'bmw']\n\nlabel_encoder.fit(input_classes)\n\n# word is marked with index\nprint(\"Class mapping :\\n\")\nfor i, item in enumerate(label_encoder.classes_):\n    print(item, '-->', i)\n\n# find index from word\nlabels = ['toyota', 'ford', 'audi']\nencoded_label = label_encoder.transform(labels)\nprint(\"Labels :\", labels)\nprint(\"Encoded Labels:\", encoded_label)\n\n# find word by index\nindices = [2, 1, 0, 3, 1]\ndecoded_words = label_encoder.inverse_transform(indices)\nprint(\"Indices :\", indices)\nprint(\"Decoded words:\", decoded_words)\n\n\n","sub_path":"src/MarkEncoderFun.py","file_name":"MarkEncoderFun.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"597054684","text":"import csv\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import abort\nimport math\napp = Flask(__name__)\n \ndef get_csv():\n    csv_path = 'VegasRestaurants.csv'\n    csv_file = open(csv_path, 'r')\n    csv_obj = csv.DictReader(csv_file)\n    csv_list = list(csv_obj)\n    # filter into a new list: removing rows while iterating over the list skips entries\n    csv_list = [row for row in csv_list if row['current_grade'] in (\"A\", \"B\", \"C\")]\n    return csv_list\n \n@app.route(\"/\")\ndef index():\n    template = 'front-end.html'\n    object_list = get_csv()\n    return render_template(template, object_list=object_list)\n \n@app.route('/search', methods=['GET','POST'])\ndef res():\n    template = 'front-end.html'\n    object_list = get_csv()\n    result_list = []\n    grade = request.form['current_grade']\n    demerits = request.form['current_demerits']\n    stars = request.form['yelp_stars']\n    for row in object_list:\n        add = True\n        if row['current_grade'] != grade and grade != \"any\":\n            add = False\n        # form values arrive as strings; apply the star filter only when numeric\n        if stars.isdigit() and math.ceil(float(row['yelp_stars'])) != int(stars):\n            add = False\n        dems = int(row['current_demerits'])\n        if dems != 0 and demerits == \"0\":\n            add = False\n        if (dems < 1 or dems > 5) and demerits == \"1-5\":\n            add = False\n        if (dems < 6 or dems > 10) and demerits == \"6-10\":\n            add = False\n        if (dems < 11 or dems > 15) and demerits == \"11-15\":\n            add = False\n        if (dems < 16 or dems > 20) and demerits == \"16-20\":\n            add = False\n        if dems < 21 and demerits == \"21+\":\n            add = False\n        if add:\n            result_list.append(row)\n    return render_template(template, object_list=result_list)\n \n@app.route('/<row_id>/')\ndef detail(row_id):\n    template = 'detail.html'\n    object_list = get_csv()\n    for row in object_list:\n        if 
row['id'] == row_id:\n print(row['inspection_history'])\n return render_template(template, object=row)\n abort(404)\n\n\n","sub_path":"final-project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"437556692","text":"# -*- coding: utf-8 -*-\nfrom collections import defaultdict\nimport json\nfrom flask.helpers import url_for\nimport os\nfrom flask import abort, render_template, request, redirect, current_app\nfrom app.api_client.data import DataAPIClient\nfrom app.main.utils import get_page_list\n\nfrom ...main import main\nfrom app import cache\n\n\n# For prototyping\nfrom collections import namedtuple\n\nFilter = namedtuple('Filter', 'name options')\nOption = namedtuple('Option', 'name value label checked')\nBadge = namedtuple('Badge', 'css_class label')\nRole = namedtuple('Role', 'label')\nExtraDetail = namedtuple('ExtraDetail', 'key value')\nResult = namedtuple('Result', 'title description badges roles url')\n\nSUPPLIER_RESULTS_PER_PAGE = 50\n\n\ndef normalise_role(role_name):\n return role_name.replace('Senior ', '').replace('Junior ', '') # Mind the white space after Junior\n\n\n@cache.cached(timeout=300, key_prefix='get_all_roles')\ndef get_all_roles(data_api_client):\n \"\"\"\n Returns a two-valued tuple:\n 1. A set containing all role strings\n 2. A map from role strings to original role data\n\n The original role data is actually a list of dicts because of folding Senior/Junior roles into one role.\n \"\"\"\n response = data_api_client.get_roles()\n\n roles = set()\n raw_role_data = defaultdict(list)\n for role_data in response['roles']:\n role = normalise_role(role_data['role'])\n raw_role_data[role].append(role_data)\n roles.add(role)\n return roles, raw_role_data\n\n\n@main.route('/search/sellers')\ndef supplier_search():\n sort_order = request.args.get('sort_order', 'asc') # asc or desc\n sort_terms = request.args.getlist('sort_term')\n keyword = request.args.get('keyword', None)\n\n if not sort_terms: # Sort by A-Z for default\n sort_terms = ['name']\n\n data_api_client = DataAPIClient()\n\n selected_roles = set(request.args.getlist('role'))\n roles, raw_role_data = get_all_roles(data_api_client)\n\n sidepanel_roles = [Option('role', role, role, role in selected_roles) for role in roles]\n sidepanel_filters = [\n Filter('Capabilities', sidepanel_roles),\n ]\n\n sort_queries = []\n allowed_sort_terms = set(('name',)) # Limit what can be sorted\n for sort_term in sort_terms:\n if sort_term in allowed_sort_terms:\n if sort_term == 'name': # Use 'name' in url to keep it clean but query needs to search on not analyzed.\n sort_term = 'name.not_analyzed'\n\n sort_queries.append({\n sort_term: {\"order\": sort_order, \"mode\": \"min\"}\n })\n\n if selected_roles:\n filters = []\n for role in selected_roles:\n filters.extend(raw_role_data[role])\n query = {\n \"query\": {\n \"filtered\": {\n \"query\": {\n \"match_all\": {}\n },\n \"filter\": {\n \"terms\": {\"prices.serviceRole.role\": filters},\n }\n }\n },\n \"sort\": sort_queries,\n }\n elif keyword:\n query = {\n \"query\": {\n \"match_phrase_prefix\": {\n \"name\": keyword\n }\n },\n \"sort\": sort_queries\n }\n else:\n query = {\n \"query\": {\n \"match_all\": {\n }\n },\n \"sort\": sort_queries\n }\n\n page = int(request.args.get('page', 1))\n results_from = (page - 1) * SUPPLIER_RESULTS_PER_PAGE\n\n find_suppliers_params = {\n 'from': results_from,\n 'size': SUPPLIER_RESULTS_PER_PAGE\n }\n response = 
data_api_client.find_suppliers(data=query, params=find_suppliers_params)\n\n results = []\n for supplier in response['hits']['hits']:\n details = supplier['_source']\n\n supplier_roles = []\n seen_supplier_roles = set()\n for price in details['prices']:\n role = normalise_role(price['serviceRole']['role'])\n if role not in seen_supplier_roles:\n supplier_roles.append(Role(role))\n seen_supplier_roles.add(role)\n\n result = Result(\n details['name'],\n details['summary'],\n [],\n sorted(supplier_roles),\n url_for('.get_supplier', code=details['code']))\n\n results.append(result)\n\n num_results = response['hits']['total']\n results_to = min(num_results, page * SUPPLIER_RESULTS_PER_PAGE)\n\n pages = get_page_list(SUPPLIER_RESULTS_PER_PAGE, num_results, page)\n\n return render_template(\n 'search_sellers.html',\n title='Supplier Catalogue',\n search_url=url_for('.supplier_search'),\n search_keywords='',\n sidepanel_filters=sidepanel_filters,\n num_results=num_results,\n results=results,\n results_from=results_from + 1,\n results_to=results_to,\n pages=pages,\n page=page,\n num_pages=pages[-1],\n selected_roles=selected_roles,\n sort_order=sort_order,\n sort_terms=sort_terms,\n sort_term_name_label='A to Z' if sort_order == 'asc' else 'Z to A',\n keyword=keyword,\n )\n","sub_path":"app/main/views/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"537072109","text":"from math import sqrt\r\n\r\nnum = int(input('Digite um numero: '))\r\nraiz = sqrt(num)\r\nprint('A raiz de {} é igual a {:.2f}'.format(num, raiz))\r\n\r\nimport random\r\n\r\nnum1 = random.random()\r\nprint(num1)\r\nteste = random.randrange(1,4)\r\nprint(teste)\r\n\r\nimport emoji\r\nprint(emoji.emojize(\"Tirando onda :sunglasses:\", use_aliases=True))\r\n","sub_path":"aula8.py","file_name":"aula8.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"220250574","text":"import time\n\nfrom selenium import webdriver\nimport xlsxwriter\n\n\n\n# web driver open source tool for automated testing of webapps for chrome browser\ndriver = webdriver.Chrome(r\"C:\\Users\\DekelRaz\\PycharmProjects\\webScraping\\drivers\\chromedriver.exe\")\n\n# set the amount of time by seconds for hte page to load before it throws an exception\ndriver.set_page_load_timeout(10)\n\n# declare list of chosen companies from the website\nlistCompanies = [\"470\", \"435\", \"3113\", \"15\", \"2356\"]\ncompany1 = \"Mercaz Haformaika Averbuch INC\"\ncompany2 = \"Adama Pitronot Lehaklaut INC\"\ncompany3 = \"Overseas Commerce INC\"\ncompany4 = \"Edgar Hashkaot Vepituach INC\"\ncompany5 = \"Adri-El Israel Nechasim INC\"\n\ncompaniesNames = [company1, company2, company3, company4, company5]\n\n# method that adding company's id to the url\ndef AddCompany(id):\n listCompanies.append(id)\n\n# opens url in browser\ndriver.get(\"https://www.magna.isa.gov.il/Report.aspx?rid=2&eid=\" + ', '.join(str(x) for x in listCompanies) + \"&ifrs=2&p=42017&lang=1\")\n\n# gets basically all the numbers from the table by class name\n# *************************************************GROSS MARGIN************************************\nrevah_golmis = []\n\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to int\nfor x in range(len(listCompanies)):\n index = x + 3\n url = 
\"//*[@id=\\\"gvReportData\\\"]/tbody/tr[25]/td[\" + str(index) + \"]\"\n\n    text = str(driver.find_element_by_xpath(url).text)\n    if text != ' ':\n        revah_golmis.append(int(text.replace(',', '')))\n    else:\n        revah_golmis.append(0)\n\n\n# ***************************************EBITDA*****************************************\nebitdas = []\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to int\nfor x in range(len(listCompanies)):\n    index = x + 3\n    url = \"//*[@id=\\\"gvReportData\\\"]/tbody/tr[28]/td[\" + str(index) + \"]\"\n\n    text = str(driver.find_element_by_xpath(url).text)\n    if text != ' ':\n        ebitdas.append(int(text.replace(',', '')))\n    else:\n        ebitdas.append(0)\n\n# **********************************************EPS************************************\n\nepss = []\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to float\nfor x in range(len(listCompanies)):\n    index = x + 3\n    url = \"//*[@id=\\\"gvReportData\\\"]/tbody/tr[38]/td[\" + str(index) + \"]\"\n\n    text = str(driver.find_element_by_xpath(url).text)\n    if text != ' ':\n        epss.append(float(text.replace(',', '')))\n    else:\n        epss.append(0)\n\n# *******************************************PROPERTIES'S SUMMARY*****************************************\nproperties = []\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to float\nfor x in range(len(listCompanies)):\n    index = x + 3\n    url = \"//*[@id=\\\"gvReportData\\\"]/tbody/tr[7]/td[\" + str(index) + \"]\"\n\n    text = str(driver.find_element_by_xpath(url).text)\n    if text != ' ':\n        properties.append(float(text.replace(',', '')))\n    else:\n        properties.append(-1)\n\n# ***********************************************EARNINGS**************************************\nearnings = []\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to float\nfor x in range(len(listCompanies)):\n    index = x + 3\n    url = \"//*[@id=\\\"gvReportData\\\"]/tbody/tr[33]/td[\" + str(index) + \"]\"\n\n    text = str(driver.find_element_by_xpath(url).text)\n    if text != ' ':\n        earnings.append(float(text.replace(',', '')))\n    else:\n        earnings.append(-1)\n# **********************************************FORTUNES******************************************\nfortunes = []\n# loop running on td number and increment to get the value of the wanted data, gets the data as string and convert it to float\nfor y in range(len(listCompanies)):\n    index2 = y + 3\n    url2 = \"//*[@id=\\\"gvReportData\\\"]/tbody/tr[19]/td[\" + str(index2) + \"]\"\n\n    text2 = str(driver.find_element_by_xpath(url2).text)\n    if text2 != ' ':\n        fortunes.append(float(text2.replace(',', '')))\n    else:\n        fortunes.append(-1)\n# ******************************************ROE*******************************************\n# calculating the values of earnings and fortunes to get the value of ROE\nroe = []\nfor z in range(len(listCompanies)):\n    roe.append(float(earnings[z]/fortunes[z]))\n\n# **************************************************Basic Earnings Power Ratio********************************\nbasic_earnings_power_ratio = []\n# calculating the value of EBITDA and properties to get the value of BEPR\nfor x in range(len(listCompanies)):\n    basic_earnings_power_ratio.append(float(ebitdas[x]/properties[x]))\n\n# ******************************************Open The Project As EXCEL File***********************************\n\n# 
declaring the financial ratios column\nfinancial_ratios = []\nfinancial_ratios.append(\"Gross Margin\")\nfinancial_ratios.append(\"ROE\")\nfinancial_ratios.append(\"EBITDA\")\nfinancial_ratios.append(\"BEPR\")\nfinancial_ratios.append(\"EPS\")\n\n# create file (workbook) and worksheet\noutWorkbook = xlsxwriter.Workbook(\"magnaWebScraping.xlsx\")\noutsheet = outWorkbook.add_worksheet()\n\n# write headers\noutsheet.write(\"A1\", \"Financial Ratios\")\n\n# write data to file\nfor i in range(len(companiesNames)):\n outsheet.write(0, i + 1, companiesNames[i])\n outsheet.write(i + 1, 0, financial_ratios[i])\n outsheet.write(1, i + 1, revah_golmis[i])\n outsheet.write(2, i + 1, roe[i])\n outsheet.write(3, i + 1, ebitdas[i])\n outsheet.write(4, i + 1, basic_earnings_power_ratio[i])\n outsheet.write(5, i + 1, epss[i])\n\n\noutWorkbook.close()\n\ntime.sleep(4)\ndriver.quit()\n","sub_path":"webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"573964559","text":"'''\naction in this game can be divide into 2 parts:\n 1. do notion,\n 2. long tap for n seconds,where n: 0 max , game would be over.\nFor long tap, divide n seconds into m actions, the stride of time is n/m. So the final layer would have\nm units standing for m action.\n'''\nimport tensorflow as tf\nimport numpy as np\nimport random\nfrom collections import deque\n\n# Hyper Parameters\nFRAME_PER_ACTION = 1\nOBSERBR = 1 # timesteps to observr before training\nEXPLORE = 200000 # frame over which to anneal epsion\nFINAL_EPSILON = 0\nINITIAL_EPSILON = 0\nREPLAY_MEMORY = 50000\nBATCH_SIZE = 32\n\nclass BrainDQN:\n def __init__(self,actions):\n # init replay memory\n self.replayMemory = deque()\n # init some paprameter\n self.timeStep = 0\n self.actions = actions\n # init Q network\n self.createQNetwork()\n\n def createQNetwork(self):\n # network weights\n #input: batch_size x \n W_conv1 = self.weight_variable([8,8,4,32])\n b_conv1 = self.bias_variable([32])\n\n W_conv2 = self.weight_variable([4,4,32,64])\n b_conv2 = self.weight_variable([64])\n\n W_conv3 = self.weight_variable([3,3,64,64])\n b_conv3 = self.bias_variable([64])\n\n W_fc1 = self.weight_variable([])\n\n def weight_variable(self,shape):\n initial = tf.truncated_normal(shape,stddev=0.01)\n return tf.Variable(initial,dtype=tf.float32)\n\n def bias_variable(self,shape):\n initial = tf.truncated_normal(shape,stddev=0.01)\n return tf.Variable(initial)\n\n def conv2d(self,x,W,stride):\n return tf.nn.conv2d(x,W,strides=[1,stride,stride,1],padding=\"SAME\")\n\n def max_pool_2x2(self,x):\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=\"SAME\")\n","sub_path":"JumpDeepQLearning.py","file_name":"JumpDeepQLearning.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"150651123","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('competition', '0019_auto_20150510_1353'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='equip',\n name='correoe',\n field=models.EmailField(default=1, unique=True, max_length=75),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='jornada',\n name='date',\n field=models.DateTimeField(default=datetime.datetime(2015, 5, 11, 
14, 6, 40, 206523, tzinfo=utc)),\n preserve_default=True,\n ),\n ]\n","sub_path":"LoL/competition/migrations/0020_auto_20150511_1406.py","file_name":"0020_auto_20150511_1406.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"3007014","text":"import numpy as np\nfrom sklearn.datasets import fetch_mldata\n\nclass MNISTLoader:\n def __init__(self, dataset_dir='dataset', shuffle=True):\n mnist = fetch_mldata('MNIST original', data_home=dataset_dir)\n self.image, self.label = mnist.data, mnist.target\n self.size = mnist.data.shape[0]\n self.shuffle = shuffle\n\n self._cur = 0\n\n def next_batch(self, batch_size):\n if self._cur + batch_size > self.size:\n if self.shuffle:\n self._shuffle()\n self._cur = 0\n\n image_batch = self.image[self._cur:self._cur + batch_size]\n label_batch = self.label[self._cur:self._cur + batch_size]\n self._cur += batch_size\n return image_batch, label_batch\n\n def _shuffle(self):\n perm = np.random.permutation(self.size)\n self.image, self.label = self.image[perm], self.label[perm]\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505845576","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n# Importing the dataset\r\ndataset = pd.read_csv(\"final_data.csv\")\r\nX = dataset.drop(['output', 'DISTRICT', 'Unnamed: 0'], axis=1)\r\ny = dataset['output']\r\n\r\nimport xgboost\r\nxgb = xgboost.XGBRegressor(\r\n max_depth=2,\r\n gamma=2,\r\n eta=0.8,\r\n reg_alpha=0.5,\r\n reg_lambda=0.5\r\n)\r\nres = xgb.fit(X,y)\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nlog_reg = LogisticRegression(\r\n penalty='l2',\r\n C=0.1\r\n)\r\nreg = log_reg.fit(X, y)\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrf = RandomForestClassifier()\r\nforest = rf.fit(X,y)\r\n\r\nimport joblib\r\njoblib.dump(res, \"model_xgb.pkl\")\r\njoblib.dump(reg, \"model_reg.pkl\")\r\njoblib.dump(forest, \"model_forest.pkl\")\r\ncols=X.columns\r\njoblib.dump(cols, \"model_cols.pkl\")\r\n","sub_path":"landslide-api/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"469650861","text":"# Puzzle 1 Day 1\nimport os\nimport io\n# Variables\nris = 0\n\n# Open File\nfileOpen = open(os.path.expanduser(\"~/Desktop/AdventCode/puzzleInput.txt\"))\n # Read File until EOF\nfor line in fileOpen:\n if line.strip(): \n n = int(line)\n ris=ris+n\nprint(ris)","sub_path":"PythonScripts/Day1/Puzzle1.py","file_name":"Puzzle1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"260732399","text":"from django import forms\n\nfrom elearning.apps.student_groups.models import StudentGroup\n\n\nclass CreateStudentGroupForm(forms.ModelForm):\n\n class Meta(object):\n\n model = StudentGroup\n fields= ('name','description')\n\n name = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control '}))\n description = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control '}))\n\n def __init__(self, *args, **kwargs):\n super(CreateStudentGroupForm, self).__init__(*args, **kwargs)\n instance = kwargs.get('instance')\n\n if instance:\n self.fields['name'].initial = instance.name\n\n def 
save(self, commit=True):\n        student_group = super(CreateStudentGroupForm, self).save(commit=False)\n\n        if commit:\n            student_group.save()\n        return student_group\n\n    def clean_name(self):\n        if self.instance:\n            if self.instance.name != self.cleaned_data['name']:\n                try:\n                    StudentGroup.objects.get(name=self.cleaned_data['name'])\n                except StudentGroup.DoesNotExist:\n                    return self.cleaned_data['name']\n                raise forms.ValidationError(\"This student group already exists\")\n            return self.cleaned_data['name']\n        else:\n            try:\n                StudentGroup.objects.get(name=self.cleaned_data['name'])\n            except StudentGroup.DoesNotExist:\n                return self.cleaned_data['name']\n            raise forms.ValidationError(\"This student group already exists\")","sub_path":"elearning/apps/student_groups/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"539818281","text":"#-*- coding: UTF-8 -*-\nimport os\nfrom flask import Flask, request, g\nimport json\nimport leveldb\n\n# create our little application\napp = Flask(__name__)\napp.config.from_object(__name__)\n\ndef get_db():\n    \"\"\"Opens a new database connection if there is none yet for the\n    current application context.\"\"\"\n    if not hasattr(g, 'leveldb'):\n        g.leveldb = leveldb.LevelDB('./usercarts_db')\n    return g.leveldb\n\n@app.route('/<user>', methods=['GET'])\ndef show_cart(user):\n    \"\"\"Return the user cart json-encoded.\"\"\"\n    cart = get_cart(user) # not JSON encoded\n    return json.dumps(cart) # encoding the cart in JSON\n\ndef get_cart(user):\n    \"\"\"Fetch the user's cart from the DB or create one if there's none yet.\n    Return a json-decoded cart looking like: {\"items\": [{\"name\": \"x\", \"qty\": y}, ]}\"\"\"\n    try: # looks for the user's cart in the database\n        cart = get_db().Get(user) # JSON encoded\n        cart = json.loads(cart) # JSON decoding\n    except KeyError: # create a cart if needed\n        cart = {'items': []}\n    return cart\n\n@app.route('/<user>/product/<item>', methods=['PUT'])\ndef put_in_cart(user, item):\n    \"\"\"Add a product or change its quantity. 
Return the cart json-encoded.\"\"\"\n    cart = get_cart(user) # not JSON encoded\n    for elt in cart[\"items\"]:\n        if elt[\"name\"] == item:\n            elt[\"qty\"] = request.json[\"qty\"]\n            get_db().Put(user, json.dumps(cart))\n            return json.dumps(cart)\n\n    cart[\"items\"].append({\"name\": item, \"qty\": request.json[\"qty\"]})\n    get_db().Put(user, json.dumps(cart))\n    return json.dumps(cart)\n\n@app.route('/<user>/product/<item>', methods=['DELETE'])\ndef remove_from_cart(user, item):\n    \"\"\"Delete a product from a given cart, and return the cart json-encoded.\"\"\"\n    cart = get_cart(user) # not JSON encoded\n    # rebuild the list: deleting entries while iterating skips elements\n    cart[\"items\"] = [elt for elt in cart[\"items\"] if elt[\"name\"] != item]\n\n    get_db().Put(user, json.dumps(cart))\n    return json.dumps(cart)\n","sub_path":"inflaskart_server.py","file_name":"inflaskart_server.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"514128345","text":"# -*- coding: utf-8 -*-\n\nimport pprint\nimport logging\nimport werkzeug\nfrom werkzeug import urls, utils\n\nfrom odoo import http, _\nfrom odoo.http import request\nfrom odoo.exceptions import ValidationError\n\n_logger = logging.getLogger(__name__)\n\nclass EbizchargeController(http.Controller):\n    # _approved_url = '/payment/ebizcharge/approved/'\n    _approved_url = '/shop/confirmation/'\n    _cancel_url = '/payment/ebizcharge/cancel/'\n    _error_url = '/payment/ebizcharge/error'\n\n    ''' Rizwan implementation '''\n    # @http.route([\n    #     '/shop/confirmation/',\n    #     '/payment/ebizcharge/cancel/',\n    #     '/payment/ebizcharge/error',\n    # ], type='http', auth='public', csrf=False)\n    # def ebizcharge_form_feedback(self, **post):\n    #     #_logger.info('Ebizcharge: entering form_feedback with post data %s', pprint.pformat(post))\n    #     return_url = '/'\n    #\n    #     if post:\n    #         request.env['payment.transaction'].sudo().form_feedback(post, 'ebizcharge')\n    #         # return werkzeug.utils.redirect('/payment/process')\n    #\n    #\n    #     return_url = post.pop('return_url', '/shop/payment/validate')\n    #     base_url = request.env['ir.config_parameter'].sudo().get_param('web.base.url')\n    #     _logger.info('Ebizcharge: entering form_feedback with post data %s', pprint.pformat(return_url))\n    #     return request.render('payment_ebizcharge.payment_ebizcharge_redirect', {\n    #         'return_url': urls.url_join(base_url, return_url)\n    #     })\n    #     #return werkzeug.utils.redirect(return_url)\n\n    ''' Attiq Implementation '''\n\n\n    def ebizcharge_validate_data(self, **post):\n        res = False\n\n        if post['TranResult'] == 'Approved':\n            ''' Add customer security token by write in res.partner '''\n\n            _logger.info('Ebizcharge: validated data')\n            reference = post['TransactionLookupKey']\n            tx = request.env['payment.transaction'].sudo().search([('reference', '=', reference)])\n            tx._set_transaction_done()\n            # so = request.env['sale.order'].sudo().search([('name', '=', reference)])\n            # tx.write({\n            #     'state': 'done',\n            # })\n            # so.write({\n            #     'state': 'done',\n            # })\n            res = request.env['payment.transaction'].sudo().form_feedback(post, 'ebizcharge')\n            # self.env.cr.commit()\n\n        return res\n\n    ''' Rizwan implementation '''\n    # @http.route([\n    #     #'/payment/ebizcharge/approved/',\n    #     # Error here\n\t# '/shop/confirmation/',\n    #     '/payment/ebizcharge/cancel/',\n    #     '/payment/ebizcharge/error',\n    #     ], type='http', auth='public', csrf=False)\n    # def ebizcharge_form_return(self, **post):\n    #     _logger.info(\n    #         'ebizcharge: entering form_feedback with post data %s', pprint.pformat(post))\n    #     if post:\n    # 
request.env['payment.transaction'].sudo().form_feedback(post, 'ebizcharge')\n # if post['TranResult'] == 'Approved':\n # reference = post['TransactionLookupKey']\n # tx = request.env['payment.transaction'].sudo().search([('reference', '=', reference)])\n # tx.write({\n # 'state': 'done',\n # })\n # # self.env.cr.commit()\n # return werkzeug.utils.redirect('/payment/process')\n\n\n @http.route('/shop/confirmation/', type='http', auth=\"public\", methods=['POST', 'GET'], csrf=False)\n def ebizcharge_form_feedback(self, **post):\n \"\"\" Ebizcharge \"\"\"\n\n _logger.info('Beginning Ebizcharge form_feedback with post data %s', pprint.pformat(post)) # debug\n if post:\n try:\n self.ebizcharge_validate_data(**post)\n except ValidationError:\n _logger.exception('Unable to validate the Ebizcharge payment')\n\n return werkzeug.utils.redirect('/payment/process')\n\n @http.route(['/payment/ebizcharge/s2s/create_json'], type='json', auth='public')\n def ebizcharge_s2s_create_json(self, **kwargs):\n acquirer_id = int(kwargs.get('acquirer_id'))\n acquirer = request.env['payment.acquirer'].browse(acquirer_id)\n if not kwargs.get('partner_id'):\n kwargs = dict(kwargs, partner_id=request.env.user.partner_id.id)\n return acquirer.s2s_process(kwargs).id\n\n @http.route(['/payment/authorize/s2s/create_json_3ds'], type='json', auth='public', csrf=False)\n def authorize_s2s_create_json_3ds(self, verify_validity=False, **kwargs):\n token = False\n acquirer = request.env['payment.acquirer'].browse(int(kwargs.get('acquirer_id')))\n\n try:\n if not kwargs.get('partner_id'):\n kwargs = dict(kwargs, partner_id=request.env.user.partner_id.id)\n token = acquirer.s2s_process(kwargs)\n except ValidationError as e:\n message = e.args[0]\n if isinstance(message, dict) and 'missing_fields' in message:\n msg = _(\"The transaction cannot be processed because some contact details are missing or invalid: \")\n message = msg + ', '.join(message['missing_fields']) + '. '\n if request.env.user._is_public():\n message += _(\"Please sign in to complete your profile.\")\n # update message if portal mode = b2b\n if request.env['ir.config_parameter'].sudo().get_param('auth_signup.allow_uninvited', 'False').lower() == 'false':\n message += _(\"If you don't have any account, please ask your salesperson to update your profile. \")\n else:\n message += _(\"Please complete your profile. 
\")\n\n return {\n 'error': message\n }\n\n if not token:\n res = {\n 'result': False,\n }\n return res\n\n res = {\n 'result': True,\n 'id': token.id,\n 'short_name': token.short_name,\n '3d_secure': False,\n 'verified': False,\n }\n\n if verify_validity != False:\n token.validate()\n res['verified'] = token.verified\n\n return res\n\n @http.route(['/payment/ebizcharge/s2s/create'], type='http', auth='public')\n def ebizcharge_s2s_create(self, **post):\n acquirer_id = int(post.get('acquirer_id'))\n acquirer = request.env['payment.acquirer'].browse(acquirer_id)\n acquirer.s2s_process(post)\n return utils.redirect(post.get('return_url', '/'))\n","sub_path":"controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"511542805","text":"#!/usr/bin/env python3\n# coding=utf-8\n# Function: 将训练语料Corpus合并到text8中\n\n\nimport sys\nimport math\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nimport logging\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef add_two_dim_dict(thedict, key1, key2, val):\n if (key1 in thedict):\n thedict[key1].update({key2: val})\n else:\n thedict.update({key1: {key2: val}})\n\n\ndef purity(label_pt, btm_pzd_pt, M=22,\n N=20): # filename2中的参数是可以调整的\n file1 = open(label_pt)\n file2 = open(btm_pzd_pt)\n i = 0\n n = 0\n dict_doc_cluster = {}\n list_type = []\n for line1 in file1:\n i += 1\n split = line1.replace(\"\\n\", \"\")\n dict_doc_cluster[i] = split\n i = 0\n N = 0\n split_pro = {}\n split_index = {}\n for line2 in file2:\n # split = line2.replace(\"\\n\", \"\").split(\" \")\n split = line2.strip().split()\n i += 1\n split_pro[i] = 0\n split_index[i] = 0\n for j in range(len(split)):\n # print float(split[j])\n # if ((split[j] == '') or (split[j] == '\\n')):\n # continue\n if (split_pro[i] < float(split[j])):\n split_pro[i] = float(split[j])\n split_index[i] = j\n n += 1\n # print n\n dict_cluster = {}\n for i in range(1, n + 1):\n a = split_index[i] # a是每个文档对应的最大概率的主题\n b = dict_doc_cluster[i] # b是每个文档实际的类标\n # print a,b\n if (a in dict_cluster):\n if (b in dict_cluster[a]):\n dict_cluster[a][b] += 1\n N += 1\n else:\n add_two_dim_dict(dict_cluster, a, b, 1)\n N += 1\n else:\n add_two_dim_dict(dict_cluster, a, b, 1)\n N += 1\n # print N\n dict_sum = 0\n # print len(dict_cluster)\n for i in dict_cluster:\n # print i\n dict_max = 0\n s = 0\n for j in dict_cluster[i]:\n if (dict_max == 0):\n dict_max = dict_cluster[i][j]\n s += dict_cluster[i][j]\n else:\n if (dict_max < dict_cluster[i][j]):\n dict_max = dict_cluster[i][j]\n s += dict_cluster[i][j]\n # print i,dict_max,s\n dict_sum += dict_max\n # print((dict_sum + 0.0) / N)\n return (dict_sum + 0.0) / N\n\n\ndef coherence(btm_pwz_pt, dwid_pt, coherence_M, e=pow(10, -6),\n K_num=20):\n file1 = open(btm_pwz_pt)\n file2 = open(dwid_pt)\n list_doc_wids = []\n average = 0\n count = 1\n for line2 in file2:\n split = line2.replace(\"\\n\", \"\").split(\" \")\n count = count + 1\n for i in range(len(split)):\n if (split[i] != ''):\n # print split[i]\n split[i] = int(split[i])\n list_doc_wids.append(split)\n # print list_doc_wids\n count = 0\n for line1 in file1:\n count += 1\n 
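        # The block below scores one topic with a UMass-style coherence:
        #   C(topic) = sum over top-M word pairs (i, j), j < i, of
        #              log10( (D(w_i, w_j) + e) / D(w_j) )
        # where D(.) counts documents containing the word(s). Worked example
        # with hypothetical counts D(w_i, w_j) = 3, D(w_j) = 10, e = 1e-6:
        #   log10(3.000001 / 10) ≈ -0.523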
split = line1.replace(\"\\n\", \"\").split(\" \")\n length = len(split)\n list_M_index = []\n list_M_pro = []\n for i in range(length - 1):\n if (len(list_M_pro) < coherence_M):\n list_M_index.append(i)\n list_M_pro.append(float(split[i]))\n else:\n if (min(list_M_pro) < float(split[i])):\n j = list_M_pro.index(min(list_M_pro))\n list_M_index[j] = i\n list_M_pro[j] = float(split[i])\n for i in range(len(list_M_pro)):\n for j in range(i + 1, len(list_M_pro)):\n if (list_M_pro[i] < list_M_pro[j]):\n temp = list_M_pro[i]\n list_M_pro[i] = list_M_pro[j]\n list_M_pro[j] = temp\n temp = list_M_index[i]\n list_M_index[i] = list_M_index[j]\n list_M_index[j] = temp\n '''\n for i in range(M):\n\t\t\tprint list_M_index[i],list_M_pro[i] \n break \n '''\n # print list_M_index\n result = 0\n for i in range(coherence_M):\n for j in range(i):\n # print i,j\n\n index_i = list_M_index[i]\n index_j = list_M_index[j]\n num_i_and_j = 0\n num_j = 0\n for l in list_doc_wids:\n # print index_i,index_j,l\n if (index_j in l):\n num_j += 1\n # print\"j\"\n if ((index_i in l) and (index_j in l)):\n num_i_and_j += 1\n # print\"i\"\n # print num_i_and_j,num_j,(num_i_and_j+e)/num_j\n result += math.log10((num_i_and_j + e) / num_j)\n '''\n if (num_i_and_j == 0):\n print count,':',index_i, ',', index_j, ' ', num_i_and_j, ' ', math.log10((num_i_and_j+e)/num_j)\n '''\n # print result\n average += result\n average = average / K_num\n # print (average)\n file1.close()\n file2.close()\n return average\n\n\ndef scores(btm_pzd_pt, btm_pwz_pt, dwid_pt, label_pt, btm_K, output_dir):\n feature_file = open(btm_pzd_pt, 'r')\n label_file = open(label_pt, 'r')\n feature_matrix = [[float(feature) for feature in doc.split()] for doc in feature_file.readlines()]\n label_list = []\n dict_label = {}\n\n for line in label_file.readlines():\n label = line.split('&')[0].strip()\n\n if label not in dict_label:\n dict_label[label] = len(dict_label)\n\n label_list.append(dict_label[label])\n\n feature_file.close()\n label_file.close()\n\n X_train, X_test, y_train, y_test = train_test_split(np.array(feature_matrix), np.array(label_list), test_size=0.2,\n random_state=0)\n\n clf = RandomForestClassifier(n_estimators=10)\n clf = clf.fit(X_train, y_train)\n rfa = str(clf.score(X_test, y_test))\n logger.info(\"RandomForest Accuracy : \" + rfa)\n\n gnb = GaussianNB()\n gnb = gnb.fit(X_train, y_train)\n nba = str(gnb.score(X_test, y_test))\n logger.info(\"NB Accuracy : \" + nba)\n\n mnb = MultinomialNB(alpha=0.01)\n mnb = mnb.fit(X_train, y_train)\n mnba = str(mnb.score(X_test, y_test))\n logger.info(\"MultiNB Accuracy : \" + mnba)\n\n km = KMeans(n_clusters=30, init='k-means++', max_iter=100, n_init=1,\n verbose=False)\n\n km.fit(feature_matrix)\n kh = str(metrics.homogeneity_score(label_list, km.labels_))\n kv = str(metrics.v_measure_score(label_list, km.labels_))\n logger.info(\"K-means Homogeneity : \" + kh)\n logger.info(\"K-means V-measure : \" + kv)\n\n c5 = str(coherence(btm_pwz_pt, dwid_pt, 5, e=pow(10, -6), K_num=btm_K))\n c10 = str(coherence(btm_pwz_pt, dwid_pt, 10, e=pow(10, -6), K_num=btm_K))\n c20 = str(coherence(btm_pwz_pt, dwid_pt, 20, e=pow(10, -6), K_num=btm_K))\n p_m = str(purity(label_pt, btm_pzd_pt))\n logger.info(\"Coherence : \" + c5)\n logger.info(\"Coherence : \" + c10)\n logger.info(\"Coherence : \" + c20)\n logger.info(\"Purity : \" + p_m)\n rfa_file = open(file=output_dir + \"RandomForest_Accuracies.txt\", mode='a', encoding='utf-8')\n rfa_file.write(\"\\n\")\n rfa_file.write(rfa)\n rfa_file.close()\n nba_file = 
open(file=output_dir + \"NB_Accuracies.txt\", mode='a', encoding='utf-8')\n nba_file.write(\"\\n\")\n nba_file.write(nba)\n nba_file.close()\n kh_file = open(file=output_dir + \"K-means_Homogeneities.txt\", mode='a', encoding='utf-8')\n kh_file.write(\"\\n\")\n kh_file.write(kh)\n kh_file.close()\n kv_file = open(file=output_dir + \"K-means_V-measures.txt\", mode='a', encoding='utf-8')\n kv_file.write(\"\\n\")\n kv_file.write(kv)\n kv_file.close()\n c5_file = open(file=output_dir + \"Coherence5.txt\", mode='a', encoding='utf-8')\n c5_file.write(\"\\n\")\n c5_file.write(c5)\n c5_file.close()\n c10_file = open(file=output_dir + \"Coherence10.txt\", mode='a', encoding='utf-8')\n c10_file.write(\"\\n\")\n c10_file.write(c10)\n c10_file.close()\n c20_file = open(file=output_dir + \"Coherence20.txt\", mode='a', encoding='utf-8')\n c20_file.write(\"\\n\")\n c20_file.write(c20)\n c20_file.close()\n p_file = open(file=output_dir + \"Purityies.txt\", mode='a', encoding='utf-8')\n p_file.write(\"\\n\")\n p_file.write(p_m)\n p_file.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 7:\n print('Usage: python3 %s ' % sys.argv[0])\n exit(1)\n\n btm_pzd_pt = sys.argv[1]\n btm_pwz_pt = sys.argv[2]\n dwid_pt = sys.argv[3]\n label_pt = sys.argv[4]\n btm_K = int(sys.argv[5])\n output_dir = sys.argv[6]\n\n scores(btm_pzd_pt, btm_pwz_pt, dwid_pt, label_pt, btm_K, output_dir)\n","sub_path":"script/scores.py","file_name":"scores.py","file_ext":"py","file_size_in_byte":9112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"54790123","text":"import pygame\r\nimport background\r\n\r\nclass Score:\r\n def __init__(self):\r\n self.screen = background.Background().screen\r\n self.score = 0\r\n self.high_score = 0\r\n self.game_font = pygame.font.Font(\"04B_19.TTF\", 40)\r\n \r\n\r\n def score_display(self, game_state):\r\n if game_state == \"main_game\":\r\n score_surface = self.game_font.render(\"red ginger collected: \" + str(int(self.score)), True, (30, 30, 30))\r\n score_rect = score_surface.get_rect(topleft = (50, 50))\r\n self.screen.blit(score_surface, score_rect)\r\n\r\n if game_state == \"game_over\":\r\n score_surface = self.game_font.render(\"collected: \" + str(int(self.score)), True, (30, 30, 30))\r\n score_rect = score_surface.get_rect(center = (512, 150))\r\n self.screen.blit(score_surface, score_rect)\r\n\r\n highscore_surface = self.game_font.render(\"high score: \" + str(int(self.high_score)), True, (30, 30, 30))\r\n highscore_rect = score_surface.get_rect(center = (512, 200))\r\n self.screen.blit(highscore_surface, highscore_rect)\r\n\r\n instruction_surface = self.game_font.render(\"\"\"Collect 30 red gingers to get the flavor!\"\"\", True, (30, 30, 30))\r\n instruction_rect = instruction_surface.get_rect(center = (512, 390))\r\n self.screen.blit(instruction_surface, instruction_rect)\r\n\r\n control_surface = self.game_font.render(\"\"\"Press SPACE BAR to fly!\"\"\", True, (30, 30, 30))\r\n control_rect = control_surface.get_rect(center = (512, 440))\r\n self.screen.blit(control_surface, control_rect)\r\n\r\n\r\n\r\n","sub_path":"minigame-1/scoring.py","file_name":"scoring.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"272799463","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Post, Comment\n\n\n@admin.register(Post)\nclass PostAdmin(admin.ModelAdmin):\n list_display 
= ['id', 'title', 'slug', 'category', 'get_user', 'published_at']\n list_display_links = ['id']\n list_editable = ['title', 'category']\n list_filter = ['published_at', 'category']\n search_fields = ['title', 'content']\n\n def get_user(self, obj):\n return obj.user.get_full_name()\n\n get_user.admin_order_field = 'user' # Allows column order sorting\n get_user.short_description = _('Author') # Renames column head\n\n class Meta:\n model = Post\n\n\n@admin.register(Comment)\nclass CommentAdmin(admin.ModelAdmin):\n list_display = ['id', 'full_name', 'email_address', 'get_post', 'published_at', 'is_confirmed']\n list_editable = ['is_confirmed']\n list_filter = ['is_confirmed', 'post']\n search_fields = ['full_name', 'email_address', 'content']\n\n def get_post(self, obj):\n return obj.post.title\n\n get_post.admin_order_field = 'post'\n get_post.short_description = _('Post')\n\n class Meta:\n model = Comment\n","sub_path":"post/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"193804749","text":"from setuptools import setup\n\nVERSION = \"\"\n\nwith open(\"VERSION\", \"r+\") as f:\n content = f.read()\n ver = str(content).strip().split(\".\")\n ver[2] = str(int(ver[2]) + 1)\n VERSION = \".\".join(ver)\n f.seek(0)\n f.write(VERSION)\n\n\nsetup(\n name=\"petpy\",\n version=VERSION,\n description=\"Petrophysics utilities\",\n url=\"https://example.com/\",\n author=\"Agile Scientific\",\n author_email=\"matt@agilescientific.com\",\n license=\"Apache 2\",\n packages=[\"petpy\"],\n install_requires=[\"numpy\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"495087837","text":"from zope.interface import Interface\nfrom zope.interface import implements\n\nfrom plone.app.portlets.portlets import base\nfrom plone.portlets.interfaces import IPortletDataProvider\n\nfrom zope import schema\nfrom zope.formlib import form\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\nfrom plone.memoize.instance import memoize\n\nfrom Acquisition import aq_parent, aq_inner, aq_base\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom plone.app.layout.navigation.root import getNavigationRoot\nfrom plone.app.vocabularies.catalog import SearchableTextSourceBinder\nfrom plone.app.form.widgets.uberselectionwidget import UberSelectionWidget\n\nfrom vwcollective.simplecontact.interfaces import IPreviewTagProvider\nfrom vwcollective.simplecontact import simplecontactMessageFactory as _\n\nclass IContactsPortlet(IPortletDataProvider):\n \"\"\"A portlet\n\n It inherits from IPortletDataProvider because for this portlet, the\n data that is being rendered and the portlet assignment itself are the\n same.\n \"\"\"\n\n name = schema.TextLine (title=_(u\"Title\"),\n description=_(u\"The title of the selection portlet.\"),\n default=u\"\",\n required=True)\n \n root = schema.Choice(title=_(u\"Selection\"),\n description=_(u\"You may search for and choose a folder \"\n \"to act as the root of the selection.\"),\n required=True,\n source=SearchableTextSourceBinder({'is_folderish' : True},\n default_query='path:'))\n\n\nclass Assignment(base.Assignment):\n \"\"\"Portlet assignment.\n\n This is what is actually managed through the portlets UI and associated\n with columns.\n \"\"\"\n\n 
implements(IContactsPortlet)\n\n title = _(u'Contacts Portlet')\n name = u\"\"\n root = None\n\n def __init__(self, name=u\"\", root=None):\n self.name = name\n self.root = root\n\n\nclass Renderer(base.Renderer):\n \"\"\"Portlet renderer.\n\n This is registered in configure.zcml. The referenced page template is\n rendered, and the implicit variable 'view' will refer to an instance\n of this class. Other methods can be added and referenced in the template.\n \"\"\"\n\n render = ViewPageTemplateFile('contactsportlet.pt')\n\n def Title(self):\n return self.data.name\n \n def has_contactfolders(self):\n return len(self.contactfolders()) > 0\n \n def root_data(self):\n return self.data.root\n \n def selection_root(self):\n return self.getSelectionRoot()\n \n def selection_root_url(self):\n context = aq_inner(self.context)\n portal = getToolByName(context, 'portal_url')\n selection_path = self.data.root\n root_url = portal.getPortalPath() + selection_path\n return root_url\n \n def contactfolders(self):\n \"\"\"Contact Folders in the selected section of the site\"\"\"\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n portal = getToolByName(context, 'portal_url')\n pathfilter = self.data.root\n path = portal.getPortalPath() + pathfilter\n portaltypes = 'ContactFolder'\n wfstate = 'published'\n query = {}\n query['path'] = path\n query['portal_type'] = portaltypes\n query['review_state'] = wfstate\n results = [dict(url=cf.getURL(),\n title=cf.Title,\n preview_tag=IPreviewTagProvider(cf.getObject()).tag,\n )\n for cf in catalog.searchResults(query)\n ]\n \n folders = list(results)\n #folders.sort(lambda x,y: cmp(random.randint(0,200),100))\n folders = folders[:3]\n return folders\n\n# NOTE: If this portlet does not have any configurable parameters, you can\n# inherit from NullAddForm and remove the form_fields variable.\n\nclass AddForm(base.AddForm):\n \"\"\"Portlet add form.\n \"\"\"\n form_fields = form.Fields(IContactsPortlet)\n form_fields['root'].custom_widget = UberSelectionWidget\n label = _(u\"Add Contacts Portlet\")\n description = _(u\"This portlet displays contact folders from a selected section.\")\n\n def create(self, data):\n return Assignment(name=data.get('name', u\"\"),\n root=data.get('root', u\"\"))\n\n\nclass EditForm(base.EditForm):\n \"\"\"Portlet edit form.\n \"\"\"\n form_fields = form.Fields(IContactsPortlet)\n form_fields['root'].custom_widget = UberSelectionWidget\n label = _(u\"Edit Contacts Portlet\")\n description = _(u\"This portlet displays contact folders from a selected section.\")\n\ndef getRootPath(context, root):\n \"\"\"helper function to get the root path\"\"\"\n context = aq_inner(context)\n rootPath = getNavigationRoot(context, relativeRoot=root)\n return rootPath\n","sub_path":"src/vwcollective.simplecontact/vwcollective/simplecontact/portlets/contactsportlet.py","file_name":"contactsportlet.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"350663358","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/rohan/Django/django-admin-kit/.venv/lib/python3.6/site-packages/admin_kit/sites.py\n# Compiled at: 2017-12-12 06:19:46\n# Size of source mod 2**32: 2474 bytes\n\"\"\"\n Admin Kit Sites module\n\n\"\"\"\nfrom weakref import WeakSet\nall_sites = WeakSet()\n__all__ = [\n 'AdminKitSite', 
'site']\n\nclass AdminKitSite:\n __doc__ = '\\n The main AdminKitSite that routes and process url requests.\\n\\n '\n\n def __init__(self, name='admin_kit'):\n self._registry = {}\n self.name = name\n all_sites.add(self)\n\n def ping(self, request):\n \"\"\"\n Ping method is used to ping admin_kit ajax\n\n \"\"\"\n from django.shortcuts import render\n return render(request, 'admin_kit/ping.html')\n\n def js_config(self, request):\n \"\"\"\n Renders the config.js file which configures global variables\n\n \"\"\"\n from django.shortcuts import render\n base_index = request.path.rfind('js_config')\n app_url = request.path[:base_index - 1]\n enable_dup = True\n try:\n from django.conf import settings\n enable_dup = not settings.KIT_DISABLE_DUPLICATE\n except AttributeError:\n enable_dup = True\n\n return render(request, 'admin_kit/config.js', context={'app':app_url, \n 'duplicate':enable_dup},\n content_type='text/javascript')\n\n def register(self, key, ajax_class):\n \"\"\"\n Registers the ``ajax_class`` for ajax behaviour\n\n key :: str\n This is the *key* that will be used in models for binding\n ajax_class :: class\n The ajax class that inherits :class:`admin_kit.ajax.Ajax`\n\n \"\"\"\n key = ajax_class.generate_key(key)\n self._registry[key] = ajax_class()\n\n def ajax(self, request, key):\n \"\"\"\n Calls route method\n\n \"\"\"\n response = self._registry[key].route(request)\n return response\n\n def get_urls(self):\n \"\"\"\n Returns the list of urls of admin_kit\n\n \"\"\"\n from django.conf.urls import url\n from django.conf import settings\n urlpatterns = [\n url('^ajax/(?P.*)/', (self.ajax), name='ajax'),\n url('js_config/', (self.js_config), name='js_config')]\n if settings.DEBUG or hasattr(settings, 'TEST_MODE'):\n urlpatterns += [url('^ping', (self.ping), name='ping')]\n return urlpatterns\n\n @property\n def urls(self):\n \"\"\"\n The actual property used by django for routing requests\n\n \"\"\"\n return (\n self.get_urls(), 'admin_kit', self.name)\n\n\nsite = AdminKitSite()","sub_path":"pycfiles/django-admin-kit-0.0.2.linux-x86_64.tar/sites.cpython-36.py","file_name":"sites.cpython-36.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"198926209","text":"# a=60\n# b=50\n#\n# while True:\n# q=a//b\n# r=a%b\n# if r==0:\n# print(b)\n# break\n# else:\n# a=b\n# b=r\n\nyear=int(input(\"enter the year\"))\nif ((year%4==0) and (not (year%100==0))) or (year%400==0):\n print(\"Leap Year\")\n\n\n","sub_path":"others/rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"469582786","text":"from django.conf.urls import url\r\nfrom . 
import views\r\n\r\n\r\nurlpatterns = [\r\n    url(r'^$', views.index, name='index'),\r\n    url(r'^result/(?P\d+\_\w+)/$', views.query_result, name='result'),\r\n    url(r'^download_rf/(?P\d+\_\w+)/$', views.download_rf, name='download_rf'),\r\n    url(r'^download_k/(?P\d+\_\w+)/$', views.download_k, name='download_k'),\r\n]\r\n","sub_path":"intern_proj/adtsite/tools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"497491040","text":"import argparse\nimport os\n\n\nclass ArgParser:\n    config = \"\"\n\n    def __init__(self):\n        parser = argparse.ArgumentParser(description=\"benchmark options\")\n        parser.add_argument(\"--config\", help=\"set specific json file (default: conf.json)\")\n        args = parser.parse_args()\n        curr_dir = os.path.dirname(os.path.abspath(__file__))\n        # strip the trailing 'test/system/benchmark/lib' (25 characters) to get the repository root\n        root_dir = curr_dir[0:len(curr_dir) - 25]\n        if args.config is None:\n            ArgParser.config = root_dir + \"test/system/benchmark/conf.json\"\n        else:\n            ArgParser.config = root_dir + \"test/system/benchmark/\" + args.config\n\n    @classmethod\n    def GetConfig(cls):\n        return cls.config\n","sub_path":"test/system/benchmark/lib/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"485828633","text":"import matplotlib.pyplot as plt  # needed by pivot_and_plot_argmax_ksize_scores below\nimport seaborn as sns  # needed by pivot_and_plot_argmax_ksize_scores below\nfrom sklearn.metrics import (\n    adjusted_rand_score,\n    confusion_matrix,\n    f1_score,\n    precision_score,\n    recall_score,\n)\nimport pandas as pd\n\n\ndef dataframeize(series, score_name):\n    df = series.reset_index()\n    df = df.rename(columns={0: \"score_value\"})\n    df[\"score_name\"] = score_name\n    return df\n\n\ndef get_adjusted_rand_scores(\n    predicted_cells,\n    scoring_groupby,\n    ground_truth_celltype_col,\n    predicted_celltype_col,\n    # Take labels as an input, but don't do anything with it\n    # This is to be comparable with the inputs to get_f1_scores without adding special cases\n    labels=None\n):\n    adjusted_rand_scores = predicted_cells.groupby(scoring_groupby).apply(\n        lambda x: adjusted_rand_score(\n            x[ground_truth_celltype_col],\n            x[predicted_celltype_col],\n        )\n    )\n    adjusted_rand_scores_df = dataframeize(adjusted_rand_scores, \"adjusted_rand_score\")\n    return adjusted_rand_scores_df\n\n\ndef get_f1_scores(\n    predicted_cells,\n    scoring_groupby,\n    ground_truth_celltype_col,\n    predicted_celltype_col,\n    labels=None\n):\n    f1_scores = predicted_cells.groupby(scoring_groupby).apply(\n        lambda x: f1_score(\n            x[ground_truth_celltype_col],\n            x[predicted_celltype_col],\n            sample_weight=x.similarity,\n            average=\"weighted\",\n            labels=labels\n        )\n    )\n    f1_scores_df = dataframeize(f1_scores, \"f1_score\")\n    return f1_scores_df\n\n\nscorers = (get_f1_scores, get_adjusted_rand_scores)\n\n\ndef get_f1_ari_scores(\n    predicted_cells,\n    ground_truth_celltype_col,\n    predicted_celltype_col,\n    scoring_groupby,\n    scorers=scorers,\n):\n\n    dfs = []\n    for scorer in scorers:\n        df = scorer(\n            predicted_cells,\n            scoring_groupby=scoring_groupby,\n            ground_truth_celltype_col=ground_truth_celltype_col,\n            predicted_celltype_col=predicted_celltype_col,\n        )\n        dfs.append(df)\n    concated_scores_df = pd.concat(dfs, ignore_index=True)\n    return concated_scores_df\n\n\ndef get_ksize_maximizing_mean(classification_metrics):\n    score_value_means = classification_metrics.groupby(\n        [\"alphabet\", \"score_name\", \"ksize\"]\n    ).score_value.mean()\n    score_value_means_ksize_argmax = score_value_means.groupby(\n        level=[0, 1], group_keys=False\n    
).apply(lambda x: x.nlargest(1))\n score_value_means_ksize_argmax = score_value_means_ksize_argmax.reset_index()\n score_value_means_ksize_argmax[\n \"mean_score\"\n ] = score_value_means_ksize_argmax.score_name.str.split(\"_score\").str[0]\n score_value_means_ksize_argmax[\n \"alphabet_ksize\"\n ] = score_value_means_ksize_argmax.apply(\n lambda x: f\"{x.alphabet}, ksize: {x.ksize}\", axis=1\n )\n return score_value_means_ksize_argmax\n\n\ndef pivot_and_plot_argmax_ksize_scores(score_value_means_ksize_argmax):\n score_value_means_ksize_argmax_2d = score_value_means_ksize_argmax.pivot(\n index=\"alphabet_ksize\", columns=\"mean_score\", values=\"score_value\"\n )\n fig, ax = plt.subplots(figsize=(3, 2))\n sns.heatmap(\n score_value_means_ksize_argmax_2d,\n annot=True,\n vmin=0.5,\n vmax=1,\n cmap=\"viridis_r\",\n fmt=\".5g\",\n )\n ax.set(ylabel=None)\n\n\ndef subsample_and_score(\n predicted_cells_subset,\n predicted_celltype_col,\n ground_truth_celltype_col,\n scoring_groupby,\n n_iterations=1000,\n n_cells_per_sample=3,\n labels=None,\n scorers=(get_f1_scores, get_adjusted_rand_scores),\n keys=None,\n):\n score_dfs = []\n for i in range(n_iterations):\n subsampled = predicted_cells_subset.groupby(predicted_celltype_col).apply(\n lambda x: x.sample(n_cells_per_sample)\n if len(x) >= n_cells_per_sample\n else None\n )\n for scorer in scorers:\n score_df = scorer(\n subsampled,\n predicted_celltype_col=predicted_celltype_col,\n ground_truth_celltype_col=ground_truth_celltype_col,\n scoring_groupby=scoring_groupby,\n labels=labels\n )\n score_df[\"iteration\"] = i\n\n for name, key in zip(scoring_groupby, keys):\n score_df[name] = key\n\n score_dfs.append(score_df)\n\n concat_score_df = pd.concat(score_dfs, ignore_index=True)\n return concat_score_df\n","sub_path":"notebooks/classification_metrics.py","file_name":"classification_metrics.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"398940216","text":"# importing required modules for the tracker\r\nimport tkinter \r\nfrom tkinter import *\r\nimport requests\r\nimport webbrowser\r\n############################################## FIRST PART OF THE TK FOR view\r\nGUIWorkspace = Tk()\r\nGUIWorkspace.geometry('1100x550')\r\n\r\nGUIWorkspace.title(\"COVID-19 Case Tracker\")\r\nfont_tuple = (\"Helvetica\", 20)\r\nfont_button_tuple = (\"Helvetica\", 12)\r\n# functions for displaying the data\r\nlabel_1 = Label(GUIWorkspace, font = font_tuple)\r\nlabel_1.pack(pady=10)\r\nlabel_countryData = Label(GUIWorkspace, font = font_tuple)\r\nlabel_countryData.pack(pady=10)\r\n############################################## FIRST PART OF THE TK FOR view\r\ndef getWorld_Data():\r\n pass\r\n covid_api = 'https://corona.lmao.ninja/v2/all?yesterday='\r\n covid_json = requests.get(covid_api).json()\r\n total = str(covid_json['cases'])\r\n totalDeaths = str(covid_json['deaths'])\r\n recovered = str(covid_json['recovered'])\r\n active = str(covid_json['active'])\r\n casesPer10to6 = str(covid_json['casesPerOneMillion'])\r\n countriesAffected = str(covid_json['affectedCountries'])\r\n label_1.config(text = \"Total Number of COVID-19 cases = \" +total+'\\n'+\"Total Number of deaths = \" +totalDeaths+'\\n'+\r\n 'Total number of recoveries = '+recovered+'\\n'+\"Currently active cases of COVID-19 = \"+active+'\\n'+\"Cases per a million = \" \r\n +casesPer10to6+'\\n'+\"Total number of countries affected by the COVID-19 Pandemic = \"+countriesAffected)\r\n 
label_1.configure(foreground=\"blue\")\r\ngetWorld_Data()\r\n\r\n\r\ndef countryData(countryN):\r\n    api = f'https://corona.lmao.ninja/v2/countries/{countryN}?yesterday&strict&query'\r\n    json = requests.get(api).json()\r\n    casesConf = str(json['cases'])\r\n    country_fullName = str(json['country'])\r\n    deathConf = str(json['deaths'])\r\n    recoveredConf = str(json['recovered'])\r\n    activeConf = str(json['active'])\r\n    criticalConf = str(json['critical'])\r\n    casesPer10to6 = str(json['casesPerOneMillion'])\r\n    testsConf = str(json['tests'])\r\n    label_countryData.config(text = f\"Total Number of COVID-19 cases in {country_fullName} = \" +casesConf+'\\n'+f\"Total Number of deaths in {country_fullName} = \" +deathConf+'\\n'+\r\n    f\"Total number of recoveries in {country_fullName} = \"+recoveredConf+'\\n'+f\"Currently active cases of COVID-19 in {country_fullName} = \"+activeConf+'\\n'+f\"Cases per a million in {country_fullName} = \" \r\n    +casesPer10to6+'\\n'+f\"Critical cases in {country_fullName} = \"+criticalConf+'\\n'+f\"Number of COVID-19 tests done in {country_fullName} = \"+testsConf)\r\n\r\n\r\ndef search_by_country(event=None):  # event stays None when called as a button command; Tk passes it when used as a key binding\r\n    countryName = country_searchbox.get().upper()\r\n    if countryName:\r\n        try :\r\n            countryData(countryName)\r\n            print(f\"{countryName} Information Generated.\")\r\n        except :\r\n            print(f\"{countryName} is not defined.\")\r\n            error_text.config(text=f\"Error: {countryName} is not defined or not available.\\nPlease double check your spelling.\")\r\n            error_text.configure(foreground=\"red\")\r\n\r\n\r\n# twitterButton = Button(GUIWorkspace, font = font_button_tuple, text = \"My Twitter!\", height=1, command = openTwitter)\r\n# twitterButton.pack(side='bottom')\r\n# ytButton = Button(GUIWorkspace, font = font_button_tuple, text = \"My YouTube!\", height=1, command = openYouTube)\r\n# ytButton.pack(side='bottom')\r\n\r\n############################################## SECOND PART OF THE TK FOR SEARCH\r\ncountry_searchbox = Entry(GUIWorkspace, font = font_tuple, text = \"Search by Country:\", width = 20)\r\ncountry_searchbox.pack()\r\nsearch_button = Button(GUIWorkspace, font = font_tuple, width = 15, text = \"Search\", command = search_by_country)\r\nsearch_button.pack()\r\nerror_text = Label(GUIWorkspace, font = font_tuple)\r\nerror_text.pack()\r\nGUIWorkspace.bind('', search_by_country)\r\nGUIWorkspace.mainloop()\r\n############################################## SECOND PART OF THE TK FOR SEARCH\r\n\r\n\r\n\r\n\r\n\r\n\r\n# functions to open the web browser when clicking the button\r\n\r\ndef openTwitter():\r\n    webbrowser.open(urlTwt)\r\n\r\ndef openYouTube():\r\n    webbrowser.open(urlYt)\r\n\r\n\r\n# writing ui code\r\n\r\nurlTwt = 'https://twitter.com/Neon_DEVFN'\r\nurlYt = 'https://www.youtube.com/channel/UCdqFdOJpMbyXGYnSAZdCX_Q'\r\n\r\n\r\n# function to search individual country data\r\n\r\n# COVID19-Tracker made with love and Python by Neon (a.k.a --> Neonツ, Neon__DEV, Ne10-Neon).s\r\n","sub_path":"covidtracker.py","file_name":"covidtracker.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"441793984","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nimport logging;logging.basicConfig(level=logging.INFO)\nfrom wechat_sender import Sender\nfrom SendEmail import SendEmail\nfrom ods_check import *\nimport cx_Oracle\nimport threading\nimport pymysql\nimport requests\nimport json\nimport os\n\n# os.environ['NLS_LANG'] = 
'AMERICAN_AMERICA.ZHS16GBK'\n# 连接oracle,为了保证select和insert的不乱码,要先设置好环境变量NLS_LANG='AMERICAN_AMERICA.ZHS16GBK'\n\n\nserver_msg = [\n ['192.168.95.73','ods_cms_sms','ods','dfcwbi'],\n ['192.168.95.64','ods_cms_sms','ods','dfcwbi'],\n ['10.8.11.4','ods_cms_sms','ods','dfcwbi']\n]\nsql_etl = '''\nSelect case Result when 1 then '失败' when 0 then '成功' end Result\n\t --,Caption\n\t ,To_Char(Starttime, 'yyyy-mm-dd hh24:mi:ss') Start_Time\n\t ,To_Char(Endtime, 'yyyy-mm-dd hh24:mi:ss') End_Time\n\t ,Rcaption\n\t -- ,nvl(to_char(substr(Des,1,100)),'null') Des\n From dfcw.Szsys_1_Log\n Where Oper = 'schedule task'\n And Trunc(Endtime) = Trunc(Sysdate)\n order by starttime desc\n'''\n\nglo_text = []\n\ndef GetOraAllData(host,user,passwd,instance,exec_sql):\n conn = cx_Oracle.connect('%s/%s@%s/%s' % (user,passwd,host,instance))\n cursor = conn.cursor()\n cursor.execute(exec_sql)\n ora_list = cursor.fetchall()\n conn.close()\n return ora_list\n\ndef GenText(res_list,host):\n text = host+'\\n'\n res_str = '\\n'.join([' '.join(rl1) for rl1 in res_list])\n if not res_str.strip():\n text = text + '未查询到ETL执行情况,ETL未执行完毕,异常!!'\n return text+res_str\n\n\ndef SendWx(text):\n Sender().send(text)\n\ndef SendYj(text):\n sendmail = SendEmail()\n sendmail.SetDefaultParam()\n sendmail.SetParam('EmailSubject', '东财ETL执行情况')\n sendmail.SetParam('EmailContent', text)\n # sendmail.add_toaddr('jinhc@succez.com')\n # sendmail.add_toaddr('wangjia@succez.com')\n # sendmail.add_toaddr('cheny@succez.com')\n # sendmail.add_toaddr('wuf@succez.com')\n sendmail.msg_add_text()\n sendmail.send_mail()\n\ndef get_ods_check_text():\n sftp_info = {\n 'ip': '10.8.11.4',\n 'port': '22',\n 'username': 'kettle',\n 'password': 'kettle'\n }\n\n logname = time.strftime('%Y%m%d', time.localtime(time.time())) + '.log'\n remotepath = '/home/kettle/ETLTASK/bin/log' + '/' + logname\n\n localpath = 'kettle.log'\n SFTP = SFTPClient(**sftp_info)\n SFTP.getFile(remotepath, localpath)\n\n ods = odsCheck()\n db_info1 = {\n 'ip': '10.8.11.4',\n 'username': 'ods_rfc20_m',\n 'password': 'ods_rfc20_m',\n 'instance': 'dfcwbi'\n }\n ora1 = dbOracle(**db_info1)\n text1 = ods.odsMysqlSynExceptionInfo(localpath, ora1)\n text2 = ods.odsOracleSynExceptionInfo(ora1)\n return text1+text2\n\n\ndef get_tables_count():\n table_list = ['tcc_contract_info','tcf_contract_info','tcc_file_apply','tcf_file_apply']\n conn = cx_Oracle.connect('%s/%s@%s/%s' % ('ods_rfc20_m','ods_rfc20_m','10.8.11.4','dfcwbi'))\n exec_sql = 'select count(*) from {0}'\n cursor = conn.cursor()\n ora_str = ''\n for table in table_list:\n cursor.execute(exec_sql.format(table))\n ora_tuple = cursor.fetchone()\n ora_str = ora_str + table + ': ' + str(ora_tuple[0]) + '\\n'\n # print(ora_str)\n conn.close()\n\n mconn = pymysql.connect(host='10.8.11.4',user='DFF_READ',password='DFF_READ',database='dff',port=3300)\n mcursor = mconn.cursor()\n my_str = ''\n for table in table_list:\n mcursor.execute(exec_sql.format(table))\n my_tuple = mcursor.fetchone()\n my_str = my_str + table + ': ' + str(my_tuple[0]) + '\\n'\n # print(my_str)\n mconn.close()\n ret_str = '------kettle今天同步的数据量: \\n'+ora_str+'----发送此消息时重查联友12mysql库的数据: \\n'+my_str\n # print(ret_str)\n return ret_str\n\ndef sendfiletoqywx(filename):\n corpid = \"wwcc772268cf665691\"\n secret = \"JGd0ce720fav7OYYIArtKA0aM2bx1hyzdbgDPWSIa7s\"\n agentid = \"1000002\"\n # --- 创建连接 --- #\n login_url = r\"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={}&corpsecret={}\".format(corpid, secret)\n response = requests.get(login_url)\n access_token = 
response.json()['access_token']\n\n    # --- send the file message --- #\n    upload_url = r\"https://qyapi.weixin.qq.com/cgi-bin/media/upload\"\n\n    # upload the attachment and get its media_id\n    params = {\"access_token\": access_token, \"type\": \"file\"}\n    files = {\"file\": open(filename, 'rb')}\n    response = requests.post(upload_url, files=files, params=params)\n    response_json = response.json()\n    media_id = response_json['media_id']\n\n    msg = {\n        \"touser\": \"@all\",\n        \"msgtype\": \"file\",\n        \"agentid\": agentid,\n        \"file\": {\n            \"media_id\": media_id\n        },\n        \"safe\": 0\n    }\n    msg_json = json.dumps(msg).encode('utf-8')\n    send_url = r'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={}'.format(access_token)\n    response = requests.post(url=send_url, data=msg_json)\n\ndef Dispatch():\n    th = []\n    filename = 'dfcw_alarm.txt'\n    for server in server_msg:\n        ora_list = GetOraAllData(server[0],server[1],server[2],server[3],sql_etl)\n        text = GenText(ora_list,server[0])\n        glo_text.append(text)\n    str_text = '\\n\\n'.join(glo_text)\n    str_text = str_text + '\\n\\n' + get_ods_check_text() + '\\n' + get_tables_count()\n    with open(filename, 'wb') as f:\n        f.write(str_text.encode('utf-8'))\n    # switched to multiple threads: wechat_sender's listen blocks for long stretches and drops the connection, so the email and WeChat senders run as independent threads\n    # thw = threading.Thread(target=SendWx,name='Send Wexin Threading',args=(str_text,))\n    # thw.start()\n    # th.append(thw)\n    thy = threading.Thread(target=SendYj,name='Send Email Threading',args=(str_text,))\n    thy.start()\n    th.append(thy)\n    thx = threading.Thread(target=sendfiletoqywx,name='Send alarm to qywx',args=(filename,))\n    thx.start()\n    th.append(thx)\n    for tht in th:\n        tht.join()\n\nDispatch()\n","sub_path":"dfcw_alarm1020/SendMsg.py","file_name":"SendMsg.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"620441269","text":"from django.forms import modelformset_factory\nfrom django.db.models import Q\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.db.models import Avg\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .forms import *\nfrom booking.forms import *\nfrom booking.models import *\nfrom django.core.mail import EmailMessage\n\ndef hotels(request):\n    hotels_list = None\n    if ('q' in request.GET) and request.GET['q'].strip():\n        query = request.GET['q']\n        hotels_list = Hotel.objects.filter(Q(name__icontains=query) |\n                                           Q(city__icontains=query) |\n                                           Q(country_code__icontains=query)\n                                           )\n    else:\n        hotels_list = Hotel.objects.all()\n\n    paginator = Paginator(hotels_list, 5)\n    page_var = 'page'\n    page = request.GET.get(page_var)\n    try:\n        hotels = paginator.page(page)\n    except PageNotAnInteger:\n        hotels = paginator.page(1)\n    except EmptyPage:\n        hotels = paginator.page(paginator.num_pages)\n\n    context = {'hotels': hotels}\n    return render(request, 'booking/hotels.html', context)\n\n\ndef index(request):\n    return render(request, 'booking/index.html', {})\n\ndef send_email(recipient):\n    # helper: build and send a simple notification email to the given address\n    email = EmailMessage('title', 'body', to=[recipient])\n    email.send()\n\n\n\ndef hotel_detail(request, hotel_id):\n    hotel = get_object_or_404(Hotel, id=hotel_id)\n    rooms_list = Room.objects.filter(hotel=hotel_id)\n    paginator = Paginator(rooms_list, 5)\n    page_var = 'page'\n    page = request.GET.get(page_var)\n    try:\n        rooms = paginator.page(page)\n    except PageNotAnInteger:\n        rooms = paginator.page(1)\n    except EmptyPage:\n        rooms = paginator.page(paginator.num_pages)\n\n    context = {\n        'hotel' : hotel,\n        'rooms' : rooms,\n    }\n    return render(request, 'booking/hotel_detail.html', context)\n\n\n\ndef 
room_detail(request, room_id):\n room = get_object_or_404(Room, id=room_id)\n images = Image.objects.filter(room=room_id)\n image = images[0]\n comments_list = Comment.objects.filter(room=room_id).order_by('-date')\n paginator = Paginator(comments_list, 5)\n page_var = 'page'\n page = request.GET.get(page_var)\n try:\n comments = paginator.page(page)\n except PageNotAnInteger:\n comments = paginator.page(1)\n except EmptyPage:\n comments = paginator.page(paginator.num_pages)\n\n context = {\n 'room': room,\n 'image': image,\n 'comments': comments,\n 'page_var': page_var,\n }\n if request.method == 'POST':\n return comment_add(request, room_id)\n\n return render(request, 'booking/room_detail.html', context)\n\n\ndef comment_add(request, room_id):\n room = get_object_or_404(Room, id=room_id)\n hotel = get_object_or_404(Hotel, id=room.hotel.id)\n if request.method == 'POST':\n commentForm = CommentForm()\n comment_form = commentForm.save(commit=False)\n comment_form.user = request.user\n comment_form.room = room\n comment_form.context = request.POST.get('content')\n\n ratingForm = RatingForm()\n rating_form = ratingForm.save(commit=False)\n rating_form.rate = request.POST.get('rating')\n rating_form.hotel = hotel\n rating_form.save()\n rate_avg = Rating.objects.filter(hotel_id=rating_form.hotel.id).aggregate(Avg('rate'))\n hotel.star_rating = rate_avg.get('rate__avg')\n hotel.save()\n comment_form.rating = rating_form\n comment_form.save()\n\n\n return redirect('booking:room_detail', room.id)\n\n\n\ndef room_type_add(request):\n if request.method == 'POST':\n room_type_form = RoomTypeForm(request.POST)\n if room_type_form.is_valid():\n room_type_form.save(commit=False)\n room_type_form.save()\n\n return render(request, 'booking/index.html', {})\n\n else:\n room_type_form = RoomTypeForm()\n return render(request, 'booking/room_type_add.html', {'roomTypeForm' : room_type_form})\n\n\ndef hotel_add(request):\n ImageFormSet = modelformset_factory(Image, form=ImageForm, extra=3)\n\n if request.method == 'POST':\n hotelForm = HotelForm(request.POST)\n formset = ImageFormSet(request.POST, request.FILES, queryset=Image.objects.none())\n\n if hotelForm.is_valid() and formset.is_valid():\n hotel_form = hotelForm.save(commit=False)\n hotel_form.owner = request.user\n hotel_form.save()\n\n for form in formset.cleaned_data:\n try:\n image = form['image']\n photo = Image(hotel=hotel_form, image=image)\n photo.save()\n except KeyError:\n pass\n\n try:\n image = formset.cleaned_data[0]['image']\n hotel_form.avatar = image\n hotel_form.save()\n except KeyError:\n hotel_form.delete()\n return clear_hotel_add_form(request, ImageFormSet, 'Select at least 1 Image!')\n\n hotels = Hotel.objects.all()\n return render(request, 'booking/hotels.html', {'hotels' : hotels})\n\n else:\n return clear_hotel_add_form(request, ImageFormSet, 'Invalid Form!')\n\n else:\n return clear_hotel_add_form(request, ImageFormSet, '')\n\n\ndef clear_hotel_add_form(request, formSet, title):\n title = title\n ImageFormSet = formSet\n hotelForm = HotelForm()\n formset = ImageFormSet(queryset=Image.objects.none())\n context = {'hotelForm': hotelForm, 'formset': formset, 'title': title}\n return render(request, 'booking/hotel_add.html', context)\n\n\ndef hotel_delete(request, hotel_id):\n hotel = get_object_or_404(Hotel, id=hotel_id)\n hotel.delete()\n return redirect('booking:hotels')\n\n\ndef room_delete(request, room_id):\n room = get_object_or_404(Room, id=room_id)\n room.delete()\n return redirect('booking:index')\n\n\ndef 
room_add(request):\n ImageFormSet = modelformset_factory(Image, form=ImageForm, extra=3)\n\n if request.method == 'POST':\n roomForm = RoomForm(request.POST)\n formset = ImageFormSet(request.POST, request.FILES, queryset=Image.objects.none())\n\n if roomForm.is_valid() and formset.is_valid():\n room_form = roomForm.save(commit=False)\n room_form.save()\n\n for form in formset.cleaned_data:\n try:\n image = form['image']\n photo = Image(room=room_form, image=image)\n photo.save()\n except KeyError:\n pass\n\n try:\n image = formset.cleaned_data[0]['image']\n room_form.avatar = image\n room_form.save()\n except KeyError:\n room_form.delete()\n return clear_room_add_form(request, ImageFormSet, 'Select at least 1 Image!')\n\n return redirect('booking:index')\n\n else:\n return clear_room_add_form(request, ImageFormSet, 'Invalid form!')\n\n else:\n return clear_room_add_form(request, ImageFormSet, '')\n\n\ndef clear_room_add_form(request, formSet, title):\n title = title\n ImageFormSet = formSet\n roomForm = RoomForm()\n formset = ImageFormSet(queryset=Image.objects.none())\n context = {'roomForm': roomForm, 'formset': formset, 'title': title}\n return render(request, 'booking/room_add.html', context)\n\ndef user_profile(request, username):\n user = User.objects.get(username=username)\n return render(request, 'booking/user_profile.html', {'user' : user})\n\n\ndef room_bookit(request, room_id):\n room = get_object_or_404(Room, id=room_id)\n hotel = get_object_or_404(Hotel, id=room.hotel.id)\n\n if request.user.is_authenticated():\n if request.method == 'POST':\n bookingForm = BookingForm(request.POST)\n if bookingForm.is_valid():\n booking_form = bookingForm.save(commit=False)\n if booking_form.start_date >= booking_form.end_date:\n return clear_booking_form(request, 'Wrong dates!', hotel, room)\n\n bookings = Booking.objects.filter(room=room)\n\n for booking in bookings:\n if booking.start_date <= booking_form.start_date < booking.end_date or booking.start_date < booking_form.end_date <= booking.end_date:\n return clear_booking_form(request, 'Room is already booked for this dates!', hotel, room)\n\n booking_form.guest = request.user\n booking_form.room = room\n booking_form.hotel = hotel\n booking_form.save()\n return redirect('booking:booking_confirm', booking_id=booking_form.id)\n\n else:\n return clear_booking_form(request, 'Invalid form!', hotel, room)\n\n else:\n return clear_booking_form(request, 'Almost done!', hotel, room)\n\n else:\n return redirect('auth_login')\n\n\ndef clear_booking_form(request, title, hotel, room):\n title = title\n bookingForm = BookingForm(initial={'hotel': hotel.id})\n context = {\n 'bookingForm': bookingForm,\n 'title': title,\n 'hotel': hotel,\n 'room': room,\n }\n return render(request, 'booking/room_bookit.html', context)\n\n\n\ndef booking_confirm(request, booking_id):\n booking = get_object_or_404(Booking, id=booking_id)\n\n if request.user.is_authenticated() and request.user.id == booking.guest.id:\n if booking:\n days = booking.end_date.day - booking.start_date.day\n price = days * booking.room.price\n if request.method == 'POST':\n paymentForm = PaymentForm()\n payment_form = paymentForm.save(commit=False)\n payment_form.guest = booking.guest\n payment_form.booking = booking\n payment_form.amount = price\n payment_form.status = False\n payment_form.save()\n return redirect('booking:user_bookings', username=request.user)\n else:\n return render(request, 'booking/room_bookit_confirm.html', {'booking': booking, 'price': price})\n\n else:\n return 
redirect('auth_login')\n\n\ndef booking_cancel(request, booking_id):\n booking = get_object_or_404(Booking, id=booking_id)\n booking.delete()\n return redirect('booking:index')\n\n\n\ndef user_bookings(request, username):\n if request.user.is_superuser:\n bookings = Booking.objects.filter(hotel__owner_id=request.user.id).order_by('-booked_date')\n else:\n bookings = Booking.objects.filter(guest_id=request.user.id).order_by('-booked_date')\n return render(request, 'booking/user_bookings.html', {'bookings': bookings})\n\n\ndef user_payments(request, username):\n if request.user.is_superuser:\n payments = Payment.objects.filter(booking__hotel__owner_id=request.user.id).order_by('-booking__booked_date')\n else:\n payments = Payment.objects.filter(guest_id=request.user.id).order_by('-booking__booked_date')\n return render(request, 'booking/user_payments.html', {'payments': payments})\n\n\ndef pay_bill(request, payment_id):\n payment = get_object_or_404(Payment, id=payment_id)\n\n if payment:\n payment.status = True\n print('STATUS CHANGED!')\n payment.save()\n\n return redirect('booking:user_payments', username=request.user)\n\n\ndef hotel_chain_add(request):\n if request.method == 'POST':\n hotelChainForm = HotelChainForm(request.POST)\n if hotelChainForm.is_valid():\n hotel_chain_form = hotelChainForm.save(commit=False)\n hotel_chain_form.save()\n return redirect('booking:index')\n else:\n hotelChainForm = HotelChainForm()\n return render(request, 'booking/hotel_chain_add.html', {'hotelChainForm': hotelChainForm, 'title': 'Invalid Name'})\n\n else:\n hotelChainForm = HotelChainForm()\n return render(request, 'booking/hotel_chain_add.html', {'hotelChainForm': hotelChainForm})\n\n\n\n\n\n","sub_path":"booking/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"621155237","text":"from django.http import HttpResponse, Http404\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom django.template import Context , loader\nfrom django.template import RequestContext\nfrom django.core.paginator import Paginator\nfrom django.template.loader import render_to_string\nfrom django.contrib.syndication.feeds import Feed\nfrom accounts.models import User\nfrom django.contrib.auth.models import Group\nfrom django.conf import settings\nfrom django.views.generic.list_detail import object_list\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom datetime import datetime, timedelta\nimport simplejson\n\nfrom dinette.models import Ftopics , SuperCategory ,Category ,Reply\nfrom dinette.forms import FtopicForm , ReplyForm\n\njson_mimetype = 'application/javascript'\ndef index_page(request):\n forums = SuperCategory.objects.all()\n accesslist = \"\"\n jumpflag = False\n \n \n #groups which this user has access\n if request.user.is_authenticated() :\n \n groups = [group for group in request.user.groups.all()] + [group for group in Group.objects.filter(name=\"general\")]\n else:\n #we are treating user who have not loggedin belongs to general group\n groups = Group.objects.filter(name=\"general\")\n \n \n #logic which decide which forum does this user have access to\n for forum in forums :\n jumpflag = False\n for group in groups : \n for gforum in group.can_access_forums.all() :\n \n if gforum.id == forum.id :\n #the 
respective user can access the forum\n #accesslist.append(True)\n accesslist = \"1\"+accesslist\n jumpflag = True\n break\n \n #already one group has acces to forum no need to check whether other groups have access to it or not \n if jumpflag:\n break\n \n if jumpflag == False:\n accesslist = \"0\"+accesslist\n \n totaltopics = Ftopics.objects.count()\n totalposts = totaltopics + Reply.objects.count()\n totalusers = User.objects.count()\n now = datetime.now()\n users_online = User.objects.filter(last_activity__gte = now - timedelta(seconds = 900)).count() + 1#The current user is always online. :)\n last_registered_user = User.objects.order_by('-date_joined')[0]\n try:\n user_access_list = int(accesslist)\n except ValueError:\n user_access_list = 0\n payload = {'users_online':users_online, 'forums_list':forums,'totaltopics':totaltopics,\n 'totalposts':totalposts,'totalusers':totalusers,'user_access_list':user_access_list,\n 'last_registered_user':last_registered_user}\n return render_to_response(\"dinette/mainindex.html\", payload,RequestContext(request))\n\ndef category_details(request, categoryslug, pageno=1) :\n #build a form for posting topics\n topicform = FtopicForm()\n category = get_object_or_404(Category, slug=categoryslug)\n queryset = Ftopics.objects.filter(category__id__exact = category.id)\n topiclist = queryset \n topic_page_size = getattr(settings , \"TOPIC_PAGE_SIZE\", 10)\n payload = {'topicform': topicform,'category':category,'authenticated':request.user.is_authenticated(),'topic_list':topiclist, \"topic_page_size\": topic_page_size}\n return render_to_response(\"dinette/category_details.html\", payload, RequestContext(request))\n \ndef topic_detail(request, categoryslug, topic_slug , pageno = 1):\n topic = get_object_or_404(Ftopics, slug = topic_slug)\n show_moderation_items = False\n if request.user in topic.category.moderated_by.all():\n show_moderation_items = True\n #some body has viewed this topic\n topic.viewcount = topic.viewcount + 1\n topic.save()\n #we also need to display the reply form\n replylist = topic.reply_set.all()\n reply_page_size = getattr(settings , \"REPLY_PAGE_SIZE\", 10)\n replyform = ReplyForm()\n payload = {'topic': topic, 'replyform':replyform,'reply_list':replylist, 'show_moderation_items':show_moderation_items, \"reply_page_size\": reply_page_size}\n return render_to_response(\"dinette/topic_detail.html\", payload, RequestContext(request))\n\n@login_required\ndef postTopic(request) :\n \n topic = FtopicForm(request.POST,request.FILES)\n \n if topic.is_valid() == False :\n d = {\"is_valid\":\"false\",\"response_html\":topic.as_table()}\n json = simplejson.dumps(d)\n if request.FILES :\n json = \"\"\n else:\n json = simplejson.dumps(d)\n return HttpResponse(json, mimetype = json_mimetype) \n \n #code which checks for flood control\n last_posttime = request.user.last_posttime\n if last_posttime and (datetime.now() - last_posttime).seconds <= settings.FLOOD_TIME:\n #oh....... 
user trying to flood us Stop him\n d2 = {\"is_valid\":\"flood\",\"errormessage\":\"Flood control..................\"}\n if request.FILES : \n json = \"\"\n else :\n json = simplejson.dumps(d2) \n return HttpResponse(json, mimetype = json_mimetype)\n \n ftopic = topic.save(commit=False) \n #only if there is any file\n if request.FILES :\n if(request.FILES['file'].content_type.find(\"image\") >= 0 ) :\n ftopic.attachment_type = \"image\"\n else :\n ftopic.attachment_type = \"text\"\n ftopic.filename = request.FILES['file'].name\n \n ftopic.posted_by = request.user\n ftopic.category = Category.objects.get(pk = request.POST['categoryid'])\n #Assigning user rank\n assignUserElements(request.user)\n ftopic.save()\n payload = {'topic':ftopic}\n response_html = render_to_string('dinette/topic_detail_frag.html', payload,RequestContext(request))\n \n d2 = {\"is_valid\":\"true\",\"response_html\":response_html}\n #this the required for ajax file uploads\n if request.FILES : \n json = \"\"\n else :\n json = simplejson.dumps(d2) \n return HttpResponse(json, mimetype = json_mimetype)\n \n@login_required \ndef postReply(request) :\n freply = ReplyForm(request.POST,request.FILES)\n \n if freply.is_valid() == False :\n d = {\"is_valid\":\"false\",\"response_html\":freply.as_table()}\n json = simplejson.dumps(d)\n if request.FILES :\n json = \"\"\n else:\n json = simplejson.dumps(d)\n return HttpResponse(json, mimetype = json_mimetype)\n \n \n \n #code which checks for flood control\n last_posttime = request.user.last_posttime\n if last_posttime and (datetime.now() - last_posttime).seconds <= settings.FLOOD_TIME:\n #oh....... user trying to flood us Stop him\n d2 = {\"is_valid\":\"flood\",\"errormessage\": _(\"You have posted message too recently. Please wait a while before trying again.\")}\n if request.FILES : \n json = \"\"\n else :\n json = simplejson.dumps(d2) \n return HttpResponse(json, mimetype = json_mimetype) \n \n \n reply = freply.save(commit=False) \n #only if there is any file\n if len(request.FILES.keys()) == 1 :\n if(request.FILES['file'].content_type.find(\"image\") >= 0 ) :\n reply.attachment_type = \"image\"\n else :\n reply.attachment_type = \"text\"\n \n reply.filename = request.FILES['file'].name\n \n reply.posted_by = request.user\n reply.topic = Ftopics.objects.get(pk = request.POST['topicid'])\n #Assigning user rank\n assignUserElements(request.user) \n reply.save()\n payload = {'reply':reply} \n response_html = render_to_string('dinette/replydetail_frag.html', payload ,RequestContext(request))\n \n d2 = {\"is_valid\":\"true\",\"response_html\":response_html}\n \n if request.FILES :\n #this the required for ajax file uploads\n json = \"\"\n else:\n json = simplejson.dumps(d2)\n \n return HttpResponse(json, mimetype = json_mimetype) \n \n \n \nclass LatestTopicsByCategory(Feed):\n title_template = 'dinette/feeds/title.html'\n description_template = 'dinette/feeds/description.html'\n \n def get_object(self, whichcategory):\n return get_object_or_404(Category, slug=whichcategory[0])\n \n def title(self, obj):\n return _(\"Latest topics in category %(name)s\") % dict(name=obj.name)\n \n def link(self, obj):\n return settings.SITE_URL\n \n def items(self, obj):\n return obj.ftopics_set.all()[:10]\n \n #construct these links by means of reverse lookup by\n #using permalink decorator\n def item_link(self,obj):\n return obj.get_absolute_url()\n \n def item_pubdate(self,obj):\n return obj.created_on\n \n \nclass LatestRepliesOfTopic(Feed):\n title_template = 'dinette/feeds/title.html'\n 
description_template = 'dinette/feeds/description.html'\n\n def get_object(self, whichtopic):\n return get_object_or_404(Ftopics, slug=whichtopic[0])\n \n def title(self, obj):\n return \"Latest replies in topic %(subject)s\" % dict(subject=obj.subject)\n \n def link(self, obj):\n return settings.SITE_URL\n\n def items(self, obj):\n list = []\n list.insert(0,obj)\n for obj in obj.reply_set.all()[:10] :\n list.append(obj) \n return list\n \n #construct these links by means of reverse lookup by\n #using permalink decorator\n def item_link(self,obj): \n return obj.get_absolute_url()\n \n def item_pubdate(self,obj):\n return obj.created_on\n \n \n \ndef assignUserElements(user):\n totalposts = user.ftopics_set.count() + user.reply_set.count()\n user.posts_count = totalposts\n user.last_posttime = datetime.now()\n user.save() \n \n###Moderation views###\n@login_required\ndef moderate_topic(request, topic_id, action):\n topic = get_object_or_404(Ftopics, pk = topic_id)\n if not request.user in topic.category.moderated_by.all():\n raise Http404\n if request.method == 'POST':\n if action == 'close':\n if topic.is_closed:\n message = _('You have reopened topic %(subject)s') % dict(subject=topic.subject)\n else:\n message = _('You have closed topic %(subject)s') % dict(subject=topic.subject)\n topic.is_closed = not topic.is_closed\n elif action == 'announce':\n if topic.announcement_flag:\n message = _('%(subject)s is no longer an announcement.') % dict(subject=topic.subject)\n else:\n message = _('%(subject)s is now an announcement.') % dict(subject=topic.subject)\n topic.announcement_flag = not topic.announcement_flag\n elif action == 'sticky':\n if topic.is_sticky:\n message = _('%(subject)s has been unstickied.') % dict(subject=topic.subject)\n else:\n message = _('%(subject)s has been stickied.') % dict(subject=topic.subject)\n topic.is_sticky = not topic.is_sticky\n elif action == 'hide':\n if topic.is_hidden:\n message = _('%(subject)s has been unhidden.') % dict(subject=topic.subject)\n else:\n message = _(\"%(subject)s has been hidden and won't show up any further.\") % dict(subject=topic.subject)\n topic.is_hidden = not topic.is_hidden\n topic.save()\n payload = {'topic_id':topic.pk, 'message':message}\n resp = simplejson.dumps(payload)\n return HttpResponse(resp, mimetype = json_mimetype)\n else:\n return HttpResponse(_('This view must be called via post'))\n \ndef login(request):\n if getattr(settings, 'DINETTE_LOGIN_TEMPLATE', None):\n return render_to_response(settings.DINETTE_LOGIN_TEMPLATE, {}, RequestContext(request, {'fb_api_key':settings.FACEBOOK_API_KEY,}))\n else:\n from django.contrib.auth.views import login\n return login(request)\n \ndef user_profile(request, user_name):\n user_profile =get_object_or_404(User, username = user_name)\n return render_to_response('dinette/user_profile.html', {}, RequestContext(request, {'user_profile': user_profile}))\n\n@login_required\ndef new_topics(request):\n user= request.user\n new_topic_list = user.get_since_last_visit()\n return topic_list(request, new_topic_list, page_message = _(\"Topics since your last visit\"))\n \ndef active(request):\n #Time filter = 48 hours\n days_ago_2 = datetime.now() - timedelta(days = 2)\n topics = Ftopics.objects.filter(last_reply_on__gt = days_ago_2)\n active_topics = topics.extra(select= {\"activity\":\"viewcount+100*num_replies\"}).order_by(\"-activity\")\n return topic_list(request, active_topics, page_message = _(\"Most active Topics\"))\n \ndef topic_list(request, queryset, page_message):\n payload = 
{\"new_topic_list\": queryset, \"page_message\": page_message}\n return render_to_response(\"dinette/new_topics.html\", payload, RequestContext(request))\n\ndef search(request):\n return HttpResponse('TODO')\n #from haystack.views import SearchView\n #search_view = SearchView(template = \"dinette/search.html\")\n #return search_view(request)\n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"src/dinette/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"611809930","text":"sciezka = \"tekst1.txt\"\n\ntekst = \"\"\"To jest mój tekst. To\njest kolejna linijka tekstu, \n a to kolejna.\"\"\"\n\nwith open(sciezka, 'r+') as plik:\n print(plik.tell())\n # plik.write(tekst)\n plik.seek(0)\n plik.read()\n\n\n\n","sub_path":"Day_6/ex_file_5.py","file_name":"ex_file_5.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"584645904","text":"#!/usr/bin/python\n\n'''\nCreated on 21/set/2017\n\n@author: a.airaghi\n\n>p9300 per cambiare tempo rilevamento posizione\n\n'''\n\nfrom tkinter import Tk\nimport threading\nimport config as cfg\nimport time\nimport arianna_db \nimport socket\nimport arianna_utility\nimport subprocess\nimport arianna_web\nimport math\nimport arianna_gui\n#import navigazione as navi\n\n\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#ip locale\nip=([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\nUDP_IP = ip\nUDP_PORT = 8888\nsoudp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \nsoudp.bind((UDP_IP, UDP_PORT))\n\n\ndatipostxt=open(\"pos.csv\",\"w\")\ninviocmd=open(\"cmd.txt\",\"w\")\ndatipostxt.write(\"dati\\n\")\ndatipostxt.close()\n\n#ipclient= '192.168.88.129'\ndef ricerca_arianna(sock):\n data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n print(\"ip\",data,addr)\n return addr[0]\n\n\n\nsimu=0\n\nif simu==0:\n \n file=\"C:\\\\Python34\\\\progetti\\\\arianna_cli_socket\\\\simulatore.py\"\n command1 = subprocess.Popen(['c:\\python34\\python.exe ',file], shell=True)\n ipclient='127.0.0.1'\n TCP_PORT = 8181\n BUFFER_SIZE = 256\nelif simu==2:\n #ipclient='192.168.43.235'\n attesa_arianna=1\n while attesa_arianna==1:\n #ipclient='192.168.1.102'\n ipclient=''\n print(\"cerco arianna\")\n a=ricerca_arianna(soudp)\n if len(a)>7:\n ipclient=a\n attesa_arianna=0\n TCP_PORT = 81\n BUFFER_SIZE = 256\nelse:\n ipclient='192.168.1.102'\n TCP_PORT = 81\n BUFFER_SIZE = 256\n\n\n \nclass comunicazione (threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n \n def run(self):\n messaggio=\"\"\n while 1:\n try:\n s.settimeout(3) \n s.connect((ipclient, TCP_PORT)) #aggiungere verifica se funziona\n except socket.error as msg:\n if str(msg)[:45]==\"[WinError 10056] Richiesta di connessione ino\":\n pass;\n else:\n print(\"errore \",msg)\n messaggio=self.risp_ari(messaggio)\n self.com_ari_mov()\n self.com_ari_altro()\n time.sleep(0.1)\n\n\n \n def risp_ari(self,messaggio): #acquisizione risposte da arianna, da mettere nella coda giusta\n s.settimeout(0)\n try:\n raw_bytestream = s.recv(BUFFER_SIZE) #metto nella coda di ricezione le info di arduino\n 
messaggio+=str(raw_bytestream)\n                arianna_utility.prt(messaggio,1,my_gui)\n                mex=arianna_utility.gestiscirisp(messaggio)\n\n                for m in mex[0]:\n\n                    if m[0:3]=='mis':\n                        cfg.dist_libera=int(m.split(\";\")[1])\n                        #print(m)\n                    elif m[0:3]=='pos':\n                        cfg.messaggiesppos.put(m)\n                        arianna_utility.prt(m, 2, my_gui)\n                    elif m[0:4]=='echo':\n                        \n                        cfg.messaggiesprx.put(m)\n                    elif m[0:2]=='ir':\n                        \n                        cfg.messaggiesprx.put(m)\n                        print(m)\n                        \n                    elif m[0:2]=='r:':\n                        if m[0:4]=='r: 0':\n                            chiave=m.split(\";\")[1]\n                            if chiave in cfg.richieste_fermo:\n                                cfg.stato[0]=0 #unblock the next movements\n                                cfg.richieste_fermo=[]\n                                print(\"sblocco\")\n                            else:\n                                if cfg.stato[0]==1:\n                                    print(\"chiave non riconosciuta\")\n                            cfg.messaggiesprx.put(m)\n\n                    else:\n                        cfg.messaggiesprx.put(m)\n                messaggio=\"\"\n                for m in mex[1]: #mex[1] holds any trailing incomplete fragment: carry it over to the next read\n                    messaggio+=m\n                return messaggio\n            except Exception as msg:\n                if str(msg).find(\"[WinError 10035]\")<0:\n                    print(msg)\n                    messaggio=''\n                return messaggio\n    \n    \n    def com_ari_mov(self): #send movement-queue commands to arianna\n        '''Wait-for-stop handling: after an \"R\" command the busy flag is set, then an \"r\" carrying a unique\n        key is polled repeatedly; only when an \"r: 0\" reply with the matching key comes back is it reset to 0.\n        '''\n        destinatario='M' #M = arduino MEGA, E = ESP\n        if cfg.messaggiesptx.empty()==False and cfg.stato[0]==0 and time.time()>cfg.timer_sleep:\n            time.sleep(0.3)\n            #mystring=\"!\"+cfg.messaggiesptx.get()+\"?\"\n            mystring=cfg.messaggiesptx.get()\n            if mystring.find(\"sleep\")>=0: #put this send loop to sleep for n seconds\n                mio_sl=int(mystring.split(\";\")[1])\n                mio_sl=time.time()+mio_sl\n                cfg.timer_sleep=mio_sl\n                return\n\n            if mystring.find(\"1q\")>=0:\n                cfg.time_radar=1\n\n            mystring=arianna_utility.cmdradar(mystring) #work out the radar direction\n            if mystring.find(\"3A\")>=0: #movement command: pending wait requests would be cleared here\n                pass;\n                #cfg.richieste_fermo=[]\n            if mystring.find(\"3R\")>=0: #movement command: clear pending wait requests\n                cfg.richieste_fermo=[]\n            if mystring.find(\"1r\")>=0: #wait request\n                cfg.stato[0]=1\n                idunivoco=arianna_utility.idmsg()\n                cfg.richieste_fermo.append(idunivoco)\n                mystring=mystring+idunivoco\n            \n            if mystring.find(\"3A\")>=0: #rotation: take the shortest angle\n                angnew=arianna_utility.minimoangolo(float(cfg.posatt[3]), float(mystring[2:]))\n                mystring=\"3A\"+str(angnew)\n            mystring=\"!\"+mystring+\"?\"\n            print(\"mystring\",mystring)\n            t=bytes(mystring, 'utf-8')\n            try:\n                inviocmd=open(\"cmd.txt\",\"a\")\n                inviocmd.write(mystring+\"\\n\")\n                inviocmd.close()\n                s.send(t)\n            except:\n                print(\"errore coda mov\")\n                pass\n            mystring=\"\"\n        if cfg.stato[0]!=0:\n            time.sleep(0.3)\n            idunivoco=arianna_utility.idmsg()\n            cfg.richieste_fermo.append(idunivoco)\n            mystring=\"!1r\"+idunivoco+\"?\"\n            t=bytes(mystring, 'utf-8')\n            try:\n                inviocmd=open(\"cmd.txt\",\"a\")\n                inviocmd.write(mystring+\"\\n\")\n                inviocmd.close()\n                s.send(t)\n            except:\n                print(\"errore coda mov\")\n                pass\n            mystring=\"\" \n    \n    def com_ari_altro(self): #send non-movement commands to arianna\n        if cfg.messaggiesptx_altro.empty()==False:\n            mystring=\"!\"+cfg.messaggiesptx_altro.get()+\"?\"\n            t=bytes(mystring, 'utf-8')\n            time.sleep(0.2)\n            try:\n                s.send(t)\n            except:\n                print(\"errore coda altro\")\n                pass\n        \n\n    \nclass elabora (threading.Thread):\n    def __init__(self, threadID, name):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name        \n    \n    def run(self):\n        while 1:\n            \n            if cfg.messaggiesprx.empty()==False:\n                msgxx=cfg.messaggiesprx.get()\n                print(\"msg generico\",msgxx)\n                if msgxx[0:4]=='echo' :\n                    print(msgxx) \n                    
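# each 'echo' message carries one radar/sonar sample; trovadistanza() below converts it into a distance reading\n                    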
#arianna_utility.crea_mappa(msgxx,cfg.mappa,\"assoluta\",cfg.versoradar,cfg.posatt)\n arianna_utility.trovadistanza(msgxx) \n \n if msgxx[0:4]=='echf' :\n print('----------echf------------') \n arianna_utility.inizializza_mappa()\n cfg.time_radar=2\n print(\"finito radar\")\n\n \n if cfg.messaggiesppos.empty()==False:\n posxyt=cfg.messaggiesppos.get()\n newpos=arianna_utility.deco_pos(str(posxyt))\n if arianna_utility.controlla_new_pos(cfg.posatt, newpos):\n cfg.posatt=newpos\n cfg.posatt[2]=str(float(cfg.posatt[2])*-1)\n datipostxt=open(\"pos.csv\",\"a\")\n posxyt=posxyt.replace(\":\",\";\")\n datipostxt.write(posxyt+\"\\n\")\n datipostxt.close()\n \n if len(cfg.percorso)!=0:\n d,a=arianna_utility.calcola_movimento()\n cfg.messaggirx.put((time.time(),'3A'+str(a)))\n time.sleep(0.2)\n cfg.messaggirx.put((time.time(),'3D'+str(d)))\n time.sleep(0.2)\n cfg.messaggirx.put((time.time(),'3R4'))\n time.sleep(0.2)\n cfg.messaggirx.put((time.time(),'1r'))\n time.sleep(0.2)\n \n self.elaborarich()\n \n \n def elaborarich(self):\n\n if cfg.messaggirx.empty()==False:\n lavoro=cfg.messaggirx.get()\n if lavoro[1][:2]!='4J': #muove a caso\n \n cfg.messaggiesptx.put(lavoro[1])\n else:\n if lavoro[1][2]=='0':\n print(\"cerco di bloccare il lavoro\")\n arianna_utility.svuota_coda('cfg.messaggiesptx')\n arianna_utility.svuota_coda('cfg.messaggirx')\n cfg.time_radar=0\n elif lavoro[1][2]=='1':\n while cfg.stato[0]!=0 or cfg.time_radar==1:\n print(\"ATTENDO\",cfg.stato[0],cfg.time_radar)\n time.sleep(0.5)\n \n cfg.messaggirx.put((time.time(),lavoro[1]))\n return\n time.sleep(3)\n arianna_utility.svuota_coda('cfg.messaggiesptx')\n a=arianna_utility.muovefun()\n time.sleep(0.5)\n cfg.messaggirx.put((time.time(),lavoro[1]))\n elif lavoro[1][2]=='2':\n arianna_utility.movehome() \n \n elif lavoro[1][2]=='3':\n time.sleep(0.2)\n cfg.messaggirx.put((time.time(),'sleep;10'))\n elif lavoro[1][2]=='4':\n a=arianna_utility.muovefun()\n time.sleep(0.2)\n elif lavoro[1][2]=='5' and cfg.messaggiesptx.empty()==True:\n a=arianna_utility.muove4()\n time.sleep(0.5)\n cfg.messaggirx.put((time.time(),lavoro[1]))\n time.sleep(0.2)\n elif lavoro[1][2]=='5' and cfg.messaggiesptx.empty()==False:\n cfg.messaggirx.put((time.time(),lavoro[1]))\n time.sleep(0.2)\n \n\n\n \n \n else:\n cfg.messaggicli.put(\"abilita\")\n\n\nclass mappa (threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name \n \n def run(self):\n while 1:\n if cfg.time_radar==2 :\n x=arianna_db.leggosql([], 'mappe_elaborare')\n if len(x)>0:\n for i in x:\n arianna_db.leggosql(['',cfg.id_radar],'deletec')\n punti=arianna_db.leggosql(i,'mappa')\n for i in punti:\n arianna_utility.crea_mappa('',cfg.mappa,\"assoluta\",\"dx\",[i[0],i[1],math.radians(i[4])],1,i[2],i[3])\n cfg.time_radar=0 \n else:\n time.sleep(2)\n \n# **********param default****\ncfg.messaggirx.put((time.time(),'>p9600'))\n#****************************\ncfg.id_radar=arianna_utility.idmap()\nprint(\"id mappa\", cfg.id_radar)\n#nuova mappa avvio \n\nthread1 = arianna_web.serverweb(1, \"Thread-1\")\nthread2 = comunicazione(2, \"Thread-com1\")\nthread3 = elabora(3, \"Thread-ela1\")\nthread4 = mappa(4, \"Thread-map1\")\n\n# Start new Threads\nthread1.start()\nthread2.start()\nthread3.start()\nthread4.start()\n\nroot = Tk()\nmy_gui = arianna_gui.MyFirstGUI(root)\nroot.mainloop()\n\nprint (\"Exiting Main 
Thread\")","sub_path":"arianna_cli_socket/arianna_clisocket.py","file_name":"arianna_clisocket.py","file_ext":"py","file_size_in_byte":12520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"64574349","text":"\"\"\"PytSite Facebook Content Export Driver.\n\"\"\"\n__author__ = 'Alexander Shepetko'\n__email__ = 'a@shepetko.com'\n__license__ = 'MIT'\n\nimport requests as _requests, re as _re\nfrom frozendict import frozendict as _frozendict\nfrom pytsite import logger as _logger, util as _util\nfrom plugins import content_export as _content_export, facebook as _facebook, content as _content, widget as _widget\n\n_tag_cleanup_re = _re.compile('[\\-_\\s]+')\n\n\nclass Driver(_content_export.AbstractDriver):\n def get_name(self) -> str:\n \"\"\"Get system name of the driver.\n \"\"\"\n return 'facebook'\n\n def get_description(self):\n \"\"\"Get human readable description of the driver.\n \"\"\"\n return 'facebook@facebook'\n\n def get_settings_widget(self, driver_options: _frozendict, form_url: str) -> _widget.Abstract:\n \"\"\"Get settings widget.\n \"\"\"\n return _facebook.widget.Auth(\n uid='driver_opts',\n scope='public_profile,email,user_friends,publish_actions,manage_pages,publish_pages',\n access_token=driver_options.get('access_token'),\n access_token_type=driver_options.get('access_token_type'),\n access_token_expires=driver_options.get('access_token_expires'),\n user_id=driver_options.get('user_id'),\n page_id=driver_options.get('page_id'),\n screen_name=driver_options.get('screen_name'),\n redirect_url=form_url,\n )\n\n def get_options_description(self, driver_options: _frozendict) -> str:\n \"\"\"Get driver options as a string.\n \"\"\"\n r = driver_options.get('screen_name')\n if 'page_id' in driver_options and driver_options['page_id']:\n r += ' (page {})'.format(driver_options.get('page_id'))\n\n return r\n\n def export(self, entity: _content.model.Content, exporter=_content_export.model.ContentExport):\n \"\"\"Export data.\n \"\"\"\n _logger.info(\"Export started. '{}'\".format(entity.title))\n\n try:\n opts = exporter.driver_opts # type: _frozendict\n user_session = _facebook.session.Session(opts.get('access_token'))\n\n # Tags\n tags = ['#' + _tag_cleanup_re.sub('', t) for t in exporter.add_tags]\n\n if entity.has_field('tags'):\n tags += ['#' + _tag_cleanup_re.sub('', t.title) for t in entity.f_get('tags')]\n\n message = _util.strip_html_tags(entity.body)[:600] + ' ' + ' '.join(tags) + ' ' + entity.url\n\n # Pre-generating image for OpenGraph story\n if entity.has_field('images') and entity.images:\n _requests.get(entity.images[0].get_url(width=900, height=500))\n\n # Notify OpenGraph about sharing\n scrape_r = user_session.request('', 'POST', id=entity.url, scrape='true')\n if 'updated_time' not in scrape_r:\n raise _facebook.error.OpenGraphError(\n \"Error while updating OG story '{}'. Response from Facebook: {}.\".format(entity.title, scrape_r))\n\n if 'page_id' in opts and opts['page_id']:\n page_session = _facebook.session.Session(self._get_page_access_token(opts['page_id'], user_session))\n page_session.feed_message(message, entity.url)\n else:\n user_session.feed_message(message, entity.url)\n\n except Exception as e:\n raise _content_export.error.ExportError(e)\n\n _logger.info(\"Export finished. 
'{}'\".format(entity.title))\n\n def _get_page_access_token(self, page_id: str, user_session: _facebook.session.Session) -> str:\n \"\"\"Get page access token.\n \"\"\"\n for acc in user_session.accounts():\n if 'id' in acc and acc['id'] == page_id:\n return acc['access_token']\n\n raise RuntimeError('Cannot get access token for page with id == {}'.format(page_id))\n","sub_path":"_driver.py","file_name":"_driver.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"379186648","text":"from tkinter import *\n\nroot = Tk()\n\n# Disposicion automatica de cuadricula\n# Arriba a la izquierda - LBL_NOMBRE\nlabel = Label(root, text = 'Nombre muy largo:')\nlabel.grid(row = 0, column = 0)\nlabel.grid(sticky = \"e\", padx=5, pady=5) # Nos permite justificar la etiqueta,identar, a la derecha\n\n# padx = nos permite ajustar 5 pixeles horizontal\n# pady = nos permite ajustar 5 pixeles vertical\n\n# Arriba a la derecha - ENTRY_NOMBRE\n# justify - Justificar a la derecha el texto del entry\n# state - Nos permite habilitar o deshabilitar un estado\nentry = Entry(root)\nentry.grid(row = 0, column = 1, padx=5, pady=5)\nentry.config(justify = 'right', state = 'disabled') \n\n\n# Abajo a la izquierda - LBL_APELLIDO\nlabel2 = Label(root, text = 'Apellido:')\nlabel2.grid(row = 1, column = 0)\nlabel2.grid(sticky = \"e\", padx=5, pady=5) # Nos permite justificar la etiqueta,identar, a la derecha\n\n# Abajo a la derecha - ENTRY_APELLIDO\nentry2 = Entry(root)\nentry2.grid(row = 1, column = 1, padx=5, pady=5)\nentry2.config(justify = 'left') # Justificar a la derecha\n\n# Que pasa si queremos escribir una contraseña\n# Debemos usar el parametro >>show<< y el caracter \nlabel3 = Label(root, text = 'Password')\nlabel3.grid(row = 2, column = 0)\nlabel3.grid(sticky = \"e\", padx = 5, pady = 5)\n\nentry3 = Entry(root)\nentry3.grid(row = 2, column = 1, padx = 5, pady = 5)\nentry3.config(justify = 'center', show = \"?\")\n\nroot.mainloop()\n","sub_path":"CursoPython/Fase 4 - Temas avanzados/Tema 13 - Interfaces graficas con tkinter/Clase 04 - Entry/entry2.py","file_name":"entry2.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"458337565","text":"import unittest\n\nfrom programy.security.linking.accountlinker import BasicAccountLinkerService\n\n\nclass MockUser(object):\n pass\n\nclass MockUserStore(object):\n\n def __init__(self):\n self.users = {}\n\n def add_user(self, userid, clientid):\n if userid not in self.users:\n self.users[userid] = []\n if clientid not in self.users[userid]:\n self.users[userid].append(clientid)\n return MockUser()\n\n def exists(self, userid, clientid):\n if userid in self.users:\n links = self.users[userid]\n return clientid in links\n return False\n\n def get_links(self, userid):\n if userid in self.users:\n return self.users[userid]\n return []\n\n def remove_user(self, userid, clientid):\n if userid in self.users:\n if clientid in self.users[userid]:\n self.users[userid].remove(clientid)\n if len(self.users[userid]) == 0:\n del self.users[userid]\n return True\n return False\n\nclass MockLink(object):\n\n def __init__(self, data):\n self.userid = data['userid']\n self.generated_key = data['generated_key']\n self.provided_key = data['provided_key']\n self.expired = data['expired']\n self.expires = data['expires']\n self.retry_count = data['retry_count']\n\nclass MockLinkStore(object):\n\n def 
__init__(self):\n self.links = {}\n\n def create_link(self, userid, generated_key, provided_key, expires):\n\n self.links[userid] = {'userid': userid, 'generated_key': generated_key, 'provided_key': provided_key, 'expired': False, 'expires': expires, 'retry_count': 0}\n return MockLink(self.links[userid])\n\n def get_link(self, userid):\n if userid in self.links:\n return MockLink(self.links[userid])\n return None\n\n def link_exists(self, userid, provided_key, generated_key):\n if userid in self.links:\n link = self.links[userid]\n if link['generated_key'] == generated_key and link['provided_key'] == provided_key:\n return True\n return False\n\n def update_link(self, link):\n self.links[link.userid] = {'userid': link.userid,\n 'generated_key': link.generated_key,\n 'provided_key': link.provided_key,\n 'expired': link.expired,\n 'expires': link.expires,\n 'retry_count': link.retry_count}\n\n def remove_link(self, userid):\n if userid in self.links.keys():\n del self.links[userid]\n return True\n return False\n\nclass MockAccountLink(object):\n pass\n\nclass MockLinkedAccount(object):\n\n def __init__(self):\n self.links = {}\n self.lookup = {}\n\n def link_accounts(self, primary_userid, linked_userid):\n if primary_userid not in self.links:\n self.links[primary_userid] = []\n if linked_userid not in self.links[primary_userid]:\n self.links[primary_userid].append(self.links[primary_userid])\n self.lookup[linked_userid] = primary_userid\n return MockAccountLink()\n\n def primary_account(self, secondary_userid):\n if secondary_userid in self.lookup:\n return self.lookup[secondary_userid]\n return None\n\n def unlink_accounts(self, userid):\n del self.links[userid]\n toremove = []\n for key, value in self.lookup.items():\n if value == userid:\n toremove.append(key)\n for key in toremove:\n del self.lookup[key]\n return True\n\nclass MockStorageEngine(object):\n\n def __init__(self):\n self.user_store = MockUserStore()\n self.link_store = MockLinkStore()\n self.linked_account_store = MockLinkedAccount()\n\n\nclass BasicAccountLinkerServiceTests(unittest.TestCase):\n\n def test_init(self):\n storage_engine = MockStorageEngine()\n\n mgr = BasicAccountLinkerService(storage_engine)\n self.assertIsNotNone(mgr)\n\n def test_generate_key(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n key = mgr._generate_key()\n self.assertIsNotNone(key)\n self.assertEqual(8, len(key))\n\n key = mgr._generate_key(size=12)\n self.assertIsNotNone(key)\n self.assertEqual(12, len(key))\n\n def test_generate_expirary(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n expires = mgr._generate_expirary(lifetime=1)\n self.assertIsNotNone(expires)\n\n def test_happy_path(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, \"testuser2\", \"facebook\")\n self.assertTrue(result)\n\n primary = mgr.primary_account(\"testuser2\")\n self.assertTrue(primary)\n self.assertEquals(primary_user, primary)\n\n def test_user_client_link_already_exists(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n 
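# re-linking the same user/client pair must be idempotent: exactly one link should remain afterwards\n        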
primary_user = \"testuser1\"\n primary_client = \"console\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n links = mgr.linked_accounts(primary_user)\n self.assertIsNotNone(links)\n self.assertEquals(1, len(links))\n self.assertEquals(primary_client, links[0])\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n links = mgr.linked_accounts(primary_user)\n self.assertIsNotNone(links)\n self.assertEquals(1, len(links))\n self.assertEquals(primary_client, links[0])\n\n def test_given_key_not_matched(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n secondary_user = \"testuser2\"\n secondary_client = \"facebook\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, \"PASSWORD2\", generated_key, secondary_user, secondary_client)\n self.assertFalse(result)\n\n def test_generated_key_not_matched(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n secondary_user = \"testuser2\"\n secondary_client = \"facebook\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key+\"X\", secondary_user, secondary_client)\n self.assertFalse(result)\n\n def test_generated_key_expired(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key, lifetime=0)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, \"testuser2\", \"facebook\")\n self.assertFalse(result)\n\n def test_lockout_after_max_retries(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n secondary_user = \"testuser2\"\n secondary_client = \"facebook\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key+\"X\", secondary_user, secondary_client)\n self.assertFalse(result)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key+\"X\", secondary_user, secondary_client)\n self.assertFalse(result)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key+\"X\", secondary_user, secondary_client)\n self.assertFalse(result)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, secondary_user, secondary_client)\n self.assertFalse(result)\n\n reset = mgr.reset_link(primary_user)\n self.assertTrue(reset)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, secondary_user, 
secondary_client)\n self.assertTrue(result)\n\n def test_unlink_user_from_client(self):\n storage_engine = MockStorageEngine()\n mgr = BasicAccountLinkerService(storage_engine)\n\n primary_user = \"testuser1\"\n primary_client = \"console\"\n given_key = \"PASSWORD1\"\n secondary_user = \"testuser2\"\n secondary_client = \"facebook\"\n\n result = mgr.link_user_to_client(primary_user, primary_client)\n self.assertTrue(result)\n\n generated_key = mgr.generate_link(primary_user, given_key)\n self.assertIsNotNone(generated_key)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, secondary_user, secondary_client)\n self.assertTrue(result)\n\n result = mgr.unlink_user_from_client(primary_user, primary_client)\n self.assertTrue(result)\n\n result = mgr.link_accounts(primary_user, given_key, generated_key, secondary_user, secondary_client)\n self.assertFalse(result)\n","sub_path":"test/programytest/security/linking/test_accountlinker.py","file_name":"test_accountlinker.py","file_ext":"py","file_size_in_byte":10482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"151650700","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018 Spanish National Research Council (CSIC)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nSatellite utils\n\nAuthor: Daniel Garcia Diaz\nDate: May 2018\n\"\"\"\n\n#Submodules\nfrom sat_modules import config\n\n#APIs\nimport zipfile, tarfile\nimport argparse\nimport numpy as np\nimport os, shutil\nimport json\nimport datetime\nimport utm\nfrom netCDF4 import Dataset\nfrom six import string_types\n\n\ndef valid_date(sd, ed):\n \"\"\"\n check if the format date input is string(\"%Y-%m-%d\") or datetime.date\n and return it as format datetime.strptime(\"YYYY-MM-dd\", \"%Y-%m-%d\")\n\n Parameters\n ----------\n sd(start_date) : str \"%Y-%m-%d\"\n ed(end_date) : str \"%Y-%m-%d\"\n\n Returns\n -------\n sd : datetime\n datetime.strptime(\"YYYY-MM-dd\", \"%Y-%m-%d\")\n ed : datetime\n datetime.strptime(\"YYYY-MM-dd\", \"%Y-%m-%d\")\n\n Raises\n ------\n FormatError\n Unsupported format date\n ValueError\n Unsupported date value\n \"\"\"\n\n if isinstance(sd, datetime.date) and isinstance(ed, datetime.date):\n\n return sd, ed\n\n elif isinstance(sd, string_types) and isinstance(ed, string_types): \n try:\n sd = datetime.datetime.strptime(sd, \"%Y-%m-%d\")\n ed = datetime.datetime.strptime(ed, \"%Y-%m-%d\")\n if sd < ed:\n return sd, ed\n else:\n msg = \"Unsupported date value: '{} or {}'.\".format(sd, ed)\n raise argparse.ArgumentTypeError(msg)\n except:\n msg = \"Unsupported format date: '{} or {}'.\".format(sd, ed)\n raise argparse.ArgumentTypeError(msg)\n else:\n msg = \"Unsupported format date: '{} or {}'.\".format(sd, ed)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef valid_region(r):\n \"\"\"\n check if the regions exits\n\n Parameters\n ----------\n r(region) : str e.g: \"CdP\"\n\n Raises\n ------\n FormatError\n Not a valid region\n \"\"\"\n\n if r in config.regions:\n pass\n else:\n msg = 
\"Not a valid region: '{0}'.\".format(r)\n raise argparse.ArgumentTypeError(msg)\n\n\ndef path():\n \"\"\"\n Configure the tree of datasets path. \n Create the folder and the downloaded_files file.\n\n Parameters\n ----------\n path : datasets path from config file\n \"\"\"\n\n file = 'downloaded_files.json'\n list_region = config.regions\n local_path = config.local_path\n\n try:\n with open(os.path.join(local_path, file)) as data_file:\n json.load(data_file)\n except:\n if not (os.path.isdir(local_path)):\n os.mkdir(local_path)\n\n dictionary = {\"Sentinel-2\": {}, \"Landsat 8\": {}}\n\n for region in list_region:\n\n os.mkdir(os.path.join(local_path, region))\n dictionary['Sentinel-2'][region] = []\n dictionary['Landsat 8'][region] = []\n\n with open(os.path.join(local_path, 'downloaded_files.json'), 'w') as outfile:\n json.dump(dictionary, outfile)\n","sub_path":"sat_modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445320750","text":"from Plasticity.FieldDynamics import FieldDynamics\n\nfrom Plasticity.NumericalMethods import *\n\n\"\"\"\nThis controls the theta parameter for MinMod limiters.\nSetting this to 0.0 reduces the scheme to a Local Lax-Friedrich like scheme.\n\"\"\"\ntheta = 1.0\n\ndef FindDerivatives(field, coord=0, Dx=None):\n \"\"\"\n one sided difference\n \"\"\"\n if Dx is None:\n Dx = 1./field.gridShape[-1]\n diff = (rollfield(field, -1, coord)-field)\n \"\"\"\n second order derivative minmod\n \"\"\" \n #theta = 1.\n #Dx = 1./state.gridShape[-1]\n diff2_1 = theta*(rollfield(diff, -1, coord) - diff)/Dx/Dx\n diff2_2 = (rollfield(diff, -1, coord) - rollfield(diff, 1, coord))/Dx/Dx*0.5\n diff2_3 = rollfield(diff2_1, 1, coord)\n diff2 = MinMod3Field(diff2_1, diff2_2, diff2_3)\n \"\"\"\n Evaluate right and left derivatives \n \"\"\"\n deriv_p = diff/Dx - 0.5*Dx*diff2\n # with N=32, this develops something weird probably a typo in the paper since it breaks the symmetry\n #deriv_m = rollfield(diff, 1, coord)/Dx + 0.5*Dx*diff2\n deriv_m = rollfield(diff, 1, coord)/Dx + 0.5*Dx*rollfield(diff2, 1, coord)\n return deriv_p, deriv_m\n\nclass CentralUpwindHJDynamics(FieldDynamics.FieldDynamics):\n def __init__(self, Dx=None):\n self.Dx = Dx\n \n def H_1D(self,field,deriv):\n pass\n def H_1Dprime(self,field,deriv):\n \"\"\"\n This must return a scalar characteristic speed field\n \"\"\"\n pass\n def H_2D(self,field,deriv_x,deriv_y,opt=None):\n pass\n def H_2Dprime(self,field,deriv_x,deriv_y,dir=0,opt=None):\n \"\"\"\n This must return a scalar characteristic speed field\n \"\"\"\n pass\n\n def CalculateFlux(self, time, state, CFLCondition=False):\n if state.dimension == 1:\n field = state.GetOrderParameterField()\n \"\"\"\n Perform the interpolation + right and left derivative part\n \"\"\"\n deriv_p, deriv_m = FindDerivatives(field, Dx=self.Dx)\n \"\"\"\n Find out right and left velocities\n\n Here we are just using two point values assuming that we do not know the analytic form of H.\n For Burgers equation this should work just fine since Hprime is linear in u_x.\n \"\"\"\n a_1 = self.H_1Dprime(field,deriv_m)\n a_2 = self.H_1Dprime(field,deriv_p)\n a_p = Max3(a_1,a_2,0)\n a_m = Min3(a_1,a_2,0).fabs()\n \"\"\"\n Calculate time derivative field and return\n \"\"\"\n # Added ME for correcting 0 velocity points\n a_tot = a_p+a_m+ME\n H_p = self.H_1D(field,deriv_p)\n H_m = self.H_1D(field,deriv_m)\n psi_int = (deriv_p*a_p + deriv_m*a_m)/(a_tot) - 
(H_p-H_m)/(a_tot)\n \"\"\"\n Use either the less diffusive method by Kurganov or the old one.\n For 2D, the new method from adaptive Central Upwind scheme does not seem to ork very well because the new term diverges\n \"\"\"\n #derivative = -(H_p*a_m+H_m*a_p)/(a_tot) + ((deriv_p-deriv_m)/(a_tot)-MinMod2Field((deriv_p-psi_int)/(a_tot), (psi_int-deriv_m)/(a_tot)))*a_m*a_p\n derivative = -(H_p*a_m+H_m*a_p)/(a_tot) + ((deriv_p-deriv_m)/(a_tot))*a_m*a_p\n\n if CFLCondition:\n return derivative, a_tot \n else:\n return derivative\n elif state.dimension == 2:\n #print 't = ', time\n field = state.GetOrderParameterField()\n #print 'field', field['u']\n deriv_x_p, deriv_x_m = FindDerivatives(field, 0, Dx=self.Dx)\n #print 'd_x_p/m', deriv_x_p['u'], deriv_x_m['u']\n deriv_y_p, deriv_y_m = FindDerivatives(field, 1, Dx=self.Dx)\n #print 'd_y_p/m', deriv_y_p['u'], deriv_y_m['u']\n #self.SetTime(time)\n self.time = time\n a_1 = self.H_2Dprime(field,deriv_x_m,deriv_y_m,0,opt='--')\n a_2 = self.H_2Dprime(field,deriv_x_m,deriv_y_p,0,opt='-+')\n a_3 = self.H_2Dprime(field,deriv_x_p,deriv_y_m,0,opt='+-')\n a_4 = self.H_2Dprime(field,deriv_x_p,deriv_y_p,0,opt='++')\n\n a_p = Max5(a_1, a_2, a_3, a_4, 0.)\n a_m = Min5(a_1, a_2, a_3, a_4, 0.).fabs()\n\n b_1 = self.H_2Dprime(field,deriv_x_m,deriv_y_m,1,opt='--')\n b_2 = self.H_2Dprime(field,deriv_x_m,deriv_y_p,1,opt='-+')\n b_3 = self.H_2Dprime(field,deriv_x_p,deriv_y_m,1,opt='+-')\n b_4 = self.H_2Dprime(field,deriv_x_p,deriv_y_p,1,opt='++')\n\n b_p = Max5(b_1, b_2, b_3, b_4, 0.)\n b_m = Min5(b_1, b_2, b_3, b_4, 0.).fabs()\n\n \"\"\"\n Lax-Friedrich like velocities - seem to cure problems, very similar to adaptive diffusion in theory\n \"\"\"\n \"\"\"\n a_p = a_p.max()\n a_m = a_m.max()\n a_p = a_m = max(a_p, a_m)\n\n b_p = b_p.max()\n b_m = b_m.max()\n b_p = b_m = max(b_p, b_m)\n \"\"\"\n \"\"\"\n a_p = Max5(self.H_2Dprime(field,deriv_x_m,deriv_y_m,0),\\\n self.H_2Dprime(field,deriv_x_m,deriv_y_p,0),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_m,0),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_p,0),\\\n ME)\n a_m = Min5(self.H_2Dprime(field,deriv_x_m,deriv_y_m,0),\\\n self.H_2Dprime(field,deriv_x_m,deriv_y_p,0),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_m,0),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_p,0),\\\n -ME).fabs()\n #print 'a_p/m', a_p['u'], a_m['u']\n b_p = Max5(self.H_2Dprime(field,deriv_x_m,deriv_y_m,1),\\\n self.H_2Dprime(field,deriv_x_m,deriv_y_p,1),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_m,1),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_p,1),\\\n ME)\n b_m = Min5(self.H_2Dprime(field,deriv_x_m,deriv_y_m,1),\\\n self.H_2Dprime(field,deriv_x_m,deriv_y_p,1),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_m,1),\\\n self.H_2Dprime(field,deriv_x_p,deriv_y_p,1),\\\n -ME).fabs()\n \"\"\"\n a_tot = a_p+a_m+ME\n b_tot = b_p+b_m+ME\n\n H_p_p = self.H_2D(field,deriv_x_p,deriv_y_p,opt='++')\n H_p_m = self.H_2D(field,deriv_x_p,deriv_y_m,opt='+-')\n H_m_p = self.H_2D(field,deriv_x_m,deriv_y_p,opt='-+')\n H_m_m = self.H_2D(field,deriv_x_m,deriv_y_m,opt='--')\n\n psi_int_x_p = (deriv_x_p*a_p+deriv_x_m*a_m)/a_tot - (H_p_p-H_m_p)/a_tot\n psi_int_x_m = (deriv_x_p*a_p+deriv_x_m*a_m)/a_tot - (H_p_m-H_m_m)/a_tot\n psi_int_y_p = (deriv_y_p*b_p+deriv_y_m*b_m)/b_tot - (H_p_p-H_p_m)/b_tot\n psi_int_y_m = (deriv_y_p*b_p+deriv_y_m*b_m)/b_tot - (H_m_p-H_m_m)/b_tot\n \"\"\" \n derivative = -(H_p_p*a_m*b_m+H_p_m*a_m*b_p+H_m_p*a_p*b_m+H_m_m*a_p*b_p)/a_tot/b_tot +\\\n ((deriv_x_p-deriv_x_m)*a_m*a_p/a_tot \\\n -MinMod2Field((deriv_x_p-psi_int_x_m)*b_p/b_tot/a_tot, 
(psi_int_x_m-deriv_x_m)/a_tot) \\\n -MinMod2Field((deriv_x_p-psi_int_x_p)*b_m/b_tot/a_tot, (psi_int_x_p-deriv_x_m)/a_tot)) +\\\n ((deriv_y_p-deriv_y_m)*b_m*b_p/b_tot \\\n -MinMod2Field((deriv_y_p-psi_int_y_m)*a_p/a_tot/b_tot, (psi_int_y_m-deriv_y_m)/b_tot) \\\n -MinMod2Field((deriv_y_p-psi_int_y_p)*a_m/a_tot/b_tot, (psi_int_y_p-deriv_y_m)/b_tot))\n )\n \"\"\" \n derivative = -(H_p_p*a_m*b_m+H_p_m*a_m*b_p+H_m_p*a_p*b_m+H_m_m*a_p*b_p)/a_tot/b_tot +\\\n ((deriv_x_p-deriv_x_m)*a_m*a_p/a_tot ) + \\\n ((deriv_y_p-deriv_y_m)*b_m*b_p/b_tot )\n if CFLCondition:\n return derivative, a_tot+b_tot\n else:\n return derivative\n else:\n \"\"\"\n Not implemented for now\n \"\"\"\n assert False \n\n\"\"\"\nFrom here on we have specific parts for Burger's equation\n\"\"\"\nfrom Plasticity.PlasticityStates import BurgersState\n\ndef Burgers1D_H(u,ux):\n #return u*ux\n return ux*(ux['u']+ux['v'])*0.5*2\n\ndef Burgers1D_Hprime(u,ux):\n #return u\n return (ux['v']+ux['u'])*2\n\ndef Burgers2D_H(u,ux,uy):\n return 0.5*(ux*ux+uy*uy)\n\ndef Burgers2D_Hprime(u,ux,uy,dir=0):\n if dir == 0:\n return ux['u']\n elif dir == 1:\n return uy['u']\n else:\n assert False\n \nclass BurgersHJDynamics(CentralUpwindHJDynamics):\n def H_1D(self,field,deriv):\n return Burgers1D_H(field,deriv)\n def H_1Dprime(self,field,deriv):\n return Burgers1D_Hprime(field,deriv)\n def H_2D(self,field,deriv_x,deriv_y):\n return Burgers2D_H(field,deriv_x,deriv_y)\n def H_2Dprime(self,field,deriv_x,deriv_y,dir=0):\n return Burgers2D_Hprime(field,deriv_x,deriv_y,dir)\n\n","sub_path":"Plasticity/FieldDynamics/CentralUpwindHJ.py","file_name":"CentralUpwindHJ.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93142629","text":"import numpy as np\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\nimport sys\nimport glob\nface_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv.CascadeClassifier('haarcascade_eye.xml')\n\nIM_SIZE = 32\nBATCH_SIZE = 100\nWINDOW_SIZE = 2\n\nsess = tf.InteractiveSession()\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.05)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n\tinitial = tf.constant(0.1, shape=shape)\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\n\ndef max_pool_2x2(x):\n\treturn tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\nx_tensor = tf.placeholder(tf.float32, shape=[None,IM_SIZE,IM_SIZE,3])\ny_ = tf.placeholder(tf.float32, shape=[None,2])\nkeep_prob = tf.placeholder(tf.float32)\n\nx_reshape = tf.reshape(x_tensor, [-1,IM_SIZE,IM_SIZE,3])\n\nW_conv1 = weight_variable([5,5,3,16])\nb_conv1 = bias_variable([16])\nh_conv1 = tf.nn.relu(conv2d(x_reshape, W_conv1) + b_conv1)\n\nW_conv2 = weight_variable([5,5,16,16])\nb_conv2 = bias_variable([16])\nh_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)\n\nW_conv3 = weight_variable([5,5,16,16])\nb_conv3 = bias_variable([16])\nh_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3) + b_conv3)\n\n\n\"\"\"\n1 pooling\n\"\"\"\nh_pool3 = max_pool_2x2(h_conv3)\n\n\nW_conv4 = weight_variable([5,5,16,32])\nb_conv4 = bias_variable([32])\nh_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)\n\nW_conv5 = weight_variable([5,5,32,32])\nb_conv5 = bias_variable([32])\nh_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5) + b_conv5)\n\nW_conv6 = weight_variable([5,5,32,32])\nb_conv6 = bias_variable([32])\nh_conv6 = 
tf.nn.relu(conv2d(h_conv5, W_conv6) + b_conv6)\n\n\n\"\"\"\n2 pooling\n\"\"\"\nh_pool6 = max_pool_2x2(h_conv6)\n\nW_conv7 = weight_variable([5,5,32,64])\nb_conv7 = bias_variable([64])\nh_conv7 = tf.nn.relu(conv2d(h_pool6, W_conv7) + b_conv7)\n\nW_conv8 = weight_variable([5,5,64,64])\nb_conv8 = bias_variable([64])\nh_conv8 = tf.nn.relu(conv2d(h_conv7, W_conv8) + b_conv8)\n\nW_conv9 = weight_variable([5,5,64,64])\nb_conv9 = bias_variable([64])\nh_conv9 = tf.nn.relu(conv2d(h_conv8, W_conv9) + b_conv9)\n\n\n\"\"\"\n3 pooling\n\"\"\"\nh_pool9 = max_pool_2x2(h_conv9)\nh_pool9_flat = tf.reshape(h_pool9, [-1, int((IM_SIZE/8)*(IM_SIZE/8)*64)])\n\n\n\nW_fc1 = weight_variable([int((IM_SIZE/8)*(IM_SIZE/8)*64), 1024])\nb_fc1 = bias_variable([1024])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool9_flat, W_fc1) + b_fc1)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\nW_fc2 = weight_variable([1024,1024])\nb_fc2 = bias_variable([1024])\nh_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\nh_fc2_drop = tf.nn.dropout(h_fc2, keep_prob)\n\nW_fc3 = weight_variable([1024,2])\nb_fc3 = bias_variable([2])\n\ny_matmul = tf.matmul(h_fc2_drop, W_fc3) + b_fc3\ny_conv = tf.nn.softmax(y_matmul)\n\n# softmax cross-entropy loss, computed manually with a small epsilon for numerical stability\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y_conv+1e-7), reduction_indices=[1]))\ntrain_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\ntf.global_variables_initializer().run()\n\nsaver = tf.train.Saver()\nSAVE_PATH = \"./checkpoint/ver1.0_iteration.64000.ckpt\"\nsaver.restore(sess, SAVE_PATH)\n\nprev_face = [(0,0,30,30)]\nprev_eyes = [(1,1,1,1), (1,1,1,1)]\ndrowsiness_check_list = [0] * WINDOW_SIZE\ndrowsiness_check_idx = 0\n\ndef atten(eyes,roi_color):\n\teye_count = 1\n\tglobal drowsiness_check_idx\n\tfor (ex,ey,ew,eh) in eyes:\n\t\tcv.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n\t\teye_image = roi_color[ey:ey+eh , ex:ex+ew]\n\t\tinput_images = cv.resize(eye_image, (32,32))\n\t\tinput_images.resize((1,32,32,3))\n\t\tinput_images = np.divide(input_images, 255.0)\n\t\tlabel = sess.run(tf.argmax(y_conv, 1), feed_dict={keep_prob:1.0, x_tensor:input_images})\n\t\tdrowsiness_check_list[drowsiness_check_idx%WINDOW_SIZE] = label[0]\n\t\tdrowsiness_check_idx += 1\n\t\tif eye_count == 2:\n\t\t\tif drowsiness_check_list == [1] * WINDOW_SIZE:\n\t\t\t\tprint(\"Face - \",face_count,\" - Not Attentive\",)\n\t\t\telif drowsiness_check_list == [0] * WINDOW_SIZE:\n\t\t\t\tprint(\"Face - \",face_count,\" - Attentive\")\n\t\teye_count+=1\n\t\tcv.imshow(\"Face\",img)\n\n\n\n\n\ncap = cv.VideoCapture(0)\nwhile True:\n\timg = cap.read()[1]\n\tgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\tfaces = face_cascade.detectMultiScale(gray, 1.1, 4)\n\tface_count = 0\n\tfor (x,y,w,h) in faces:\n\t\tcv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n\t\troi_gray = gray[y:y+h, x:x+w]\n\t\troi_color = img[y:y+h, x:x+w]\n\t\teyes = eye_cascade.detectMultiScale(roi_gray)\n\t\tface_count += 1\n\t\ttext = \"Person\" + str(face_count)\n\t\tcv.putText(img, text, (int(x + w/2), int(y)), cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)\n\t\tatten(eyes,roi_color)\n\tif cv.waitKey(1) & 0xFF == ord('q'):break\n\ncap.release()\ncv.destroyAllWindows()\n","sub_path":"FINAL/src/multi_face.py","file_name":"multi_face.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"501114517","text":"from datetime import date\nfrom 
zang.exceptions.zang_exception import ZangException\n\nfrom zang.configuration.configuration import Configuration\nfrom zang.connectors.connector_factory import ConnectorFactory\nfrom zang.domain.enums.conference_status import ConferenceStatus\n\nfrom docs.examples.credentials import sid, authToken\nurl = 'https://api.zang.io/v2'\n\nconfiguration = Configuration(sid, authToken, url=url)\nconferencesConnector = ConnectorFactory(configuration).conferencesConnector\n\n\n# view conference\ntry:\n    conference = conferencesConnector.viewConference('TestConferenceSid')\n    print(conference.friendlyName)\nexcept ZangException as ze:\n    print(ze)\n\n\n# list conferences\ntry:\n    fromDate = date(2016, 12, 31)\n    toDate = date(2017, 12, 31)\n    conferences = conferencesConnector.listConferences(\n        friendlyName='TestConferenceSid',\n        status=ConferenceStatus.COMPLETED,\n        dateCreatedGte=fromDate,\n        dateCreatedLt=toDate,\n        dateUpdatedGte=fromDate,\n        dateUpdatedLt=toDate,\n        page=0,\n        pageSize=33)\n    if conferences and conferences.elements:\n        for conference in conferences.elements:\n            print(conference.friendlyName)\nexcept ZangException as ze:\n    print(ze)\n\n\n# view conference again (same call as above)\ntry:\n    conference = conferencesConnector.viewConference('TestConferenceSid')\n    print(conference.friendlyName)\nexcept ZangException as ze:\n    print(ze)\n\n# list participants\ntry:\n    participants = conferencesConnector.listParticipants(\n        'TestConferenceSid', False, False, 0, 33)\n    print(participants.total)\nexcept ZangException as ze:\n    print(ze)\n\n# mute/deaf participant\ntry:\n    participant = conferencesConnector.deafOrMuteParticipant(\n        'TestConferenceSid', 'TestParticipantSid', True, True)\n    print(participant.muted)\nexcept ZangException as ze:\n    print(ze)\n\n# play audio to participant\ntry:\n    participant = conferencesConnector.playAudioToParticipant(\n        'TestConferenceSid', 'TestParticipantSid',\n        'https://mydomain.com/audio.mp3')\n    print(participant.duration)\nexcept ZangException as ze:\n    print(ze)\n\n# hangup participant\ntry:\n    participant = conferencesConnector.hangupParticipant(\n        'TestConferenceSid', 'TestParticipantSid')\n    print(participant.callerNumber)\nexcept ZangException as ze:\n    print(ze)\n","sub_path":"docs/examples/conferences_example.py","file_name":"conferences_example.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"138161834","text":"import pandas\r\nfrom openpyxl import load_workbook\r\n\r\nbook = load_workbook('empty_book.xlsx')\r\nwriter = pandas.ExcelWriter('empty_book.xlsx', engine='openpyxl')\r\nwriter.book = book\r\nwriter.sheets = dict((ws.title, ws) for ws in book.worksheets)\r\n\r\n# 'data_filtered' is assumed to be a DataFrame built elsewhere before this snippet runs\r\ndata_filtered.to_excel(writer, \"Main\", columns=['Diff1', 'Diff2'])\r\n\r\nwriter.save()","sub_path":"pandas.py","file_name":"pandas.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"450419162","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct  5 07:06:25 2019\n\n@author: vismujum\n\"\"\"\n\nimport numpy as np\nincome = np.array([1000, 2000 , 3000 , 4000 , 5000])\nprint(income)\n\nexpenses = income *.6\nprint(expenses)\n\nsaving = income - expenses\nprint(saving)\n\nimport pandas as pd\ndata = {\"name\":[\"ram\",\"shyam\",\"Mahesh\"], \"age\":[23, 35 , 45]}\ndf = pd.DataFrame(data)\n\nprint(df)\n\nimport os \nos.getcwd()\n\nos.chdir(\"C:\\\\dataset\\\\archive\")\nos.getcwd()\n\ndata1= 
pd.read_csv(\"yearly_sales.csv\")\nprint(data1)\n\ndata1.columns\nprint(data1.gender)\nprint(data1.gender[1])\n\nprint(data1.gender[0:10])\n\nimport matplotlib as mp\nx = np.random.normal(0,1,1000)\ny = np.random.normal(0,1,1000)\n\nprint(x)\nprint(y)\n\nmp.pyplot.scatter(x,y)\n\nimport sklearn as sklearn \n\nprint(data1.columns)\n\nx1 = data1[\"num_of_orders\"]\ny1 = data1[\"sales_total\"]\n\nx1 = x1.values.reshape(-1,1)\ny1 = y1.values.reshape(-1,1)\n\nfrom sklearn import linear_model\nreg = linear_model.LinearRegression()\nreg.fit(x1,y1)\n\nprint(\"Cofficient is : \" , reg.coef_)\nprint(\"Intercept is : \" , reg.intercept_)\n\n# Data Handling\n\ndata1= pd.read_csv(\"yearly_sales.csv\")\nprint(data1)\n\ndata2 = pd.read_excel(\"housing.xlsx\",\"housing_train\")\nprint(data2)\n\ndata1.shape\n\ndata2.shape\n\ndata1.columns.values\ndata1.dtypes\ndata1.no_of_orders.dtypes\ndata1.head()\ndata1.head(10)\ndata1.tail()\n\ndata1.describe()\n\ndata1.gender.value_counts()\ndata1.gender.isnull()\nsum(data1.gender.isnull())\n\ndata3= pd.read_csv(\"yearly_sales_missing.csv\")\nprint(data3)\ndata3.gender.isnull()\nsum(data3.gender.isnull())\n\n# Subset\n\ndata1.sample(25)\ndata1.head(25)\ndata4 = data1.iloc[[2 , 4, 6 ,8, 12]]\ndata4\n\ndata5 = data1[[\"cust_id\" , \"gender\"]]\ndata5\n\ndata6 = data1[[\"cust_id\" , \"gender\"]][1:25]\ndata6\n\n#Data filtering\ndata7 = data1[(data1[\"gender\"]==\"F\") & (data1[\"num_of_orders\"]>10)]\ndata7\ndata8 = data1[(data1[\"gender\"]==\"F\") | (data1[\"num_of_orders\"]>10)]\ndata8\n\n# Creating a New Column\ndata1[\"Unit_price\"] = (data1[\"sales_total\"])/(data1[\"num_of_orders\"])\ndata1.columns\ndata9 = data1.sort_values(\"sales_total\")\nprint(data9)\n\n#Sorting of Data\ndata10 = data1.sort_values(\"sales_total\", ascending = False)\nprint(data10)\n\n#Duplicate Values\ndata11= pd.read_csv(\"yearly_sales_dup.csv\")\nprint(data11)\n\ndata12 = data11.duplicated()\ndata12\nprint(data12)\n\nsum(data12)\n\ndata15 = data12.drop_duplicates()\ndata15.shape\n\ndata16 = data11.cust_id.drop_duplicates()\ndata16.shape\n\ndata17 = data11.drop_duplicates([\"cust_id\"])\ndata17.shape\n\n\n#merging data set\ndata18 = pd.read_csv(\"age.csv\")\nprint(data18)\n\ndata19 = pd.read_csv(\"income.csv\")\nprint(data19)\n\nsum(data18.duplicated([\"cust_id\"]))\nsum(data19.duplicated([\"cust_id\"]))\n\ndata18.shape\ndata20 = data18.drop_duplicates([\"cust_id\"])\ndata20.shape\ndata21 = data19.drop_duplicates([\"cust_id\"])\n\ndata22 = pd.merge(data20,data21,on=\"cust_id\" , how=\"inner\" )\ndata22.shape\n\ndata23 = pd.merge(data20,data21,on=\"cust_id\" , how=\"outer\" )\ndata23.shape\n\ndata24 = pd.merge(data20,data21,on=\"cust_id\" , how=\"left\" )\ndata24.shape\n\ndata25 = pd.merge(data20,data21,on=\"cust_id\" , how=\"right\" )\ndata25.shape\n\n#exporting the output\ndata25.to_csv(\"rjoin.csv\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"05-Oct-2019.py","file_name":"05-Oct-2019.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505114900","text":"# -*- coding: utf-8 -*-\n#\n# pyramid documentation build configuration file, created by\n# sphinx-quickstart on Wed Jul 16 13:18:14 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a 
default value; values that are commented out\n# serve to show the default value.\n\nimport sys\nimport os\nimport datetime\nimport inspect\nimport warnings\n\nwarnings.simplefilter('ignore', DeprecationWarning)\n\nimport pkg_resources\n\n# skip raw nodes\nfrom sphinx.writers.text import TextTranslator\nfrom sphinx.writers.latex import LaTeXTranslator\n\nfrom docutils import nodes\nfrom docutils import utils\n\n\ndef raw(*arg):\n raise nodes.SkipNode\nTextTranslator.visit_raw = raw\n\n\n# make sure :app:`Pyramid` doesn't mess up LaTeX rendering\ndef nothing(*arg):\n pass\nLaTeXTranslator.visit_inline = nothing\nLaTeXTranslator.depart_inline = nothing\n\nbook = os.environ.get('BOOK')\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'repoze.sphinx.autointerface',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.intersphinx'\n ]\n\n# Looks for objects in external projects\nintersphinx_mapping = {\n 'tutorials': ('http://docs.pylonsproject.org/projects/pyramid_tutorials/en/latest/', None),\n 'pyramid': ('http://docs.pylonsproject.org/projects/pyramid/en/latest/', None),\n 'jinja2': ('http://docs.pylonsproject.org/projects/pyramid_jinja2/en/latest/', None),\n 'tm': (\n 'http://docs.pylonsproject.org/projects/pyramid_tm/en/latest/',\n None,\n ),\n 'zcomponent': ('http://docs.zope.org/zope.component', None),\n 'webtest': ('http://webtest.pythonpaste.org/en/latest', None),\n 'webob': ('http://docs.webob.org/en/latest', None),\n 'colander': (\n 'http://docs.pylonsproject.org/projects/colander/en/latest',\n None),\n 'deform': (\n 'http://docs.pylonsproject.org/projects/deform/en/latest',\n None),\n 'sqla': ('http://docs.sqlalchemy.org/en/latest', None),\n 'who': ('http://docs.repoze.org/who/latest', None),\n 'python': ('http://docs.python.org', None),\n 'python3': ('http://docs.python.org/3', None),\n 'tstring':\n ('http://docs.pylonsproject.org/projects/translationstring/en/latest',\n None),\n 'venusian':\n ('http://docs.pylonsproject.org/projects/venusian/en/latest', None),\n 'toolbar':\n ('http://docs.pylonsproject.org/projects/pyramid_debugtoolbar/en/latest',\n None),\n 'zcml':\n ('http://docs.pylonsproject.org/projects/pyramid_zcml/en/latest',\n None),\n}\n\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General substitutions.\nproject = 'The Pyramid Web Framework'\nthisyear = datetime.datetime.now().year\ncopyright = '2008-%s, Agendaless Consulting' % thisyear\n\n# The default replacements for |version| and |release|, also used in various\n# other places throughout the built documents.\n#\n# The short X.Y version.\nversion = pkg_resources.get_distribution('pyramid').version\n\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\ntoday_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_themes/README.rst', ]\n\n# If true, the current module name will be prepended to all description\n# unit 
titles (such as .. function::).\nadd_module_names = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n#pygments_style = book and 'bw' or 'tango'\nif book:\n pygments_style = 'bw'\n\n# Options for HTML output\n# -----------------------\n\n# Add and use Pylons theme\nif 'sphinx-build' in ' '.join(sys.argv): # protect against dumb importers\n from subprocess import call, Popen, PIPE\n\n p = Popen('which git', shell=True, stdout=PIPE)\n cwd = os.getcwd()\n _themes = os.path.join(cwd, '_themes')\n git = p.stdout.read().strip()\n if not os.listdir(_themes):\n call([git, 'submodule', '--init'])\n else:\n call([git, 'submodule', 'update'])\n\n sys.path.append(os.path.abspath('_themes'))\n\nhtml_theme_path = ['_themes']\nhtml_theme = 'pyramid'\nhtml_theme_options = dict(\n github_url='https://github.com/Pylons/pyramid',\n #in_progress='true',\n )\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\nhtml_title = 'The Pyramid Web Framework v%s' % release\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = False # people use cutnpaste in some places\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pyramid'\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\nlatex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\nlatex_font_size = '10pt'\n\nlatex_additional_files = ['_static/latex-note.png', '_static/latex-warning.png']\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'pytn2014.tex',\n 'PyTennessee 2014 Pyramid Tutorial',\n 'Tres Seaver', 'manual'),\n ]\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\nlatex_use_parts = True\n\n# If false, no module index is generated.\nlatex_use_modindex = False\n\n## Say, for a moment that you have a twoside document that needs a 3cm\n## inner margin to allow for binding and at least two centimetres the\n## rest of the way around. You've been using the a4wide package up until\n## now, because you like the amount of text it places on the\n## page. 
Perhaps try something like this in your preamble:\n\n## \\usepackage[bindingoffset=1cm,textheight=22cm,hdivide={2cm,*,2cm},vdivide={*,22cm,*}]{geometry}\n\n## _PREAMBLE = r\"\"\"\\usepackage[bindingoffset=0.45in,textheight=7.25in,hdivide={0.5in,*,0.75in},vdivide={1in,7.25in,1in},papersize={7.5in,9.25in}]{geometry}\"\"\"\n\n_PREAMBLE = r\"\"\"\n\\usepackage[]{geometry}\n\\geometry{bindingoffset=0.45in,textheight=7.25in,hdivide={0.5in,*,0.75in},vdivide={1in,7.25in,1in},papersize={7.5in,9.25in}}\n\\hypersetup{\n colorlinks=true,\n linkcolor=black,\n citecolor=black,\n filecolor=black,\n urlcolor=black\n}\n\\fvset{frame=single,xleftmargin=9pt,numbersep=4pt}\n\n\\pagestyle{fancy}\n\n% header and footer styles\n\\renewcommand{\\chaptermark}[1]%\n {\\markboth{\\MakeUppercase{\\thechapter.\\ #1}}{}\n }\n\\renewcommand{\\sectionmark}[1]%\n {\\markright{\\MakeUppercase{\\thesection.\\ #1}}\n }\n\n% defaults for fancy style\n\\renewcommand{\\headrulewidth}{0pt}\n\\renewcommand{\\footrulewidth}{0pt}\n\\fancyhf{}\n\\fancyfoot[C]{\\thepage}\n\n% plain style\n\\fancypagestyle{plain}{\n \\renewcommand{\\headrulewidth}{0pt} % ho header line\n \\renewcommand{\\footrulewidth}{0pt}% no footer line\n \\fancyhf{} % empty header and footer\n \\fancyfoot[C]{\\thepage}\n}\n\n% title page styles\n\\makeatletter\n\\def\\@subtitle{\\relax}\n\\newcommand{\\subtitle}[1]{\\gdef\\@subtitle{#1}}\n\\renewcommand{\\maketitle}{\n \\begin{titlepage}\n {\\rm\\Huge\\@title\\par}\n {\\em\\large\\py@release\\releaseinfo\\par}\n \\if\\@subtitle\\relax\\else\\large\\@subtitle\\par\\fi\n {\\large\\@author\\par}\n \\end{titlepage}\n}\n\\makeatother\n\n% Redefine link and title colors\n\\definecolor{TitleColor}{rgb}{0,0,0}\n\\definecolor{InnerLinkColor}{rgb}{0.208,0.374,0.486}\n\\definecolor{OuterLinkColor}{rgb}{0.216,0.439,0.388}\n% Redefine these colors to something not white if you want to have colored\n% background and border for code examples.\n\\definecolor{VerbatimColor}{rgb}{1,1,1}\n\\definecolor{VerbatimBorderColor}{rgb}{1,1,1}\n\n\\makeatletter\n\\renewcommand{\\py@noticestart@warning}{\\py@heavybox}\n\\renewcommand{\\py@noticeend@warning}{\\py@endheavybox}\n\\renewcommand{\\py@noticestart@note}{\\py@heavybox}\n\\renewcommand{\\py@noticeend@note}{\\py@endheavybox}\n\\makeatother\n\n% icons in note and warning boxes\n\\usepackage{ifthen}\n% Keep a copy of the original notice environment\n\\let\\origbeginnotice\\notice\n\\let\\origendnotice\\endnotice\n\n% Redefine the notice environment so we can add our own code to it\n\\renewenvironment{notice}[2]{%\n \\origbeginnotice{#1}{}% equivalent to original \\begin{notice}{#1}{#2}\n % load graphics\n \\ifthenelse{\\equal{#1}{warning}}{\\includegraphics{latex-warning.png}}{}\n \\ifthenelse{\\equal{#1}{note}}{\\includegraphics{latex-note.png}}{}\n % etc.\n}{%\n \\origendnotice% equivalent to original \\end{notice}\n}\n\n% try to prevent code-block boxes from splitting across pages\n\\sloppy\n\\widowpenalty=300\n\\clubpenalty=300\n\\setlength{\\parskip}{3ex plus 2ex minus 2ex}\n\n% suppress page numbers on pages showing part title\n\\makeatletter\n\\let\\sv@endpart\\@endpart\n\\def\\@endpart{\\thispagestyle{empty}\\sv@endpart}\n\\makeatother\n\n% prevent page numbers in TOC (reset to fancy by frontmatter directive)\n\\pagestyle{empty}\n\"\"\"\n\nlatex_elements = {\n 'preamble': _PREAMBLE,\n 'wrapperclass': 'book',\n 'date': '',\n 'releasename': 'Version',\n 'title': r'The Pyramid Web Framework',\n# 'pointsize':'12pt', # uncomment for 12pt version\n}\n\n# secnumdepth counter reset to 2 
causes numbering in related matter;\n# reset to -1 causes chapters to not be numbered, reset to -2 causes\n# parts to not be numbered.\n\n#part\t -1\n#chapter 0\n#section 1\n#subsection 2\n#subsubsection 3\n#paragraph 4\n#subparagraph 5\n\n\ndef frontmatter(name, arguments, options, content, lineno,\n content_offset, block_text, state, state_machine):\n return [nodes.raw(\n '',\n r\"\"\"\n\\frontmatter\n% prevent part/chapter/section numbering\n\\setcounter{secnumdepth}{-2}\n% suppress headers\n\\pagestyle{plain}\n% reset page counter\n\\setcounter{page}{1}\n% suppress first toc pagenum\n\\addtocontents{toc}{\\protect\\thispagestyle{empty}}\n\"\"\",\n format='latex')]\n\n\ndef mainmatter(name, arguments, options, content, lineno,\n content_offset, block_text, state, state_machine):\n return [nodes.raw(\n '',\n r\"\"\"\n\\mainmatter\n% allow part/chapter/section numbering\n\\setcounter{secnumdepth}{2}\n% get headers back\n\\pagestyle{fancy}\n\\fancyhf{}\n\\renewcommand{\\headrulewidth}{0.5pt}\n\\renewcommand{\\footrulewidth}{0pt}\n\\fancyfoot[C]{\\thepage}\n\\fancyhead[RO]{\\rightmark}\n\\fancyhead[LE]{\\leftmark}\n\"\"\",\n format='latex')]\n\n\ndef backmatter(name, arguments, options, content, lineno,\n content_offset, block_text, state, state_machine):\n return [nodes.raw('', '\\\\backmatter\\n\\\\setcounter{secnumdepth}{-1}\\n',\n format='latex')]\n\n\ndef app_role(role, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"custom role for :app: marker, does nothing in particular except allow\n :app:`Pyramid` to work (for later search and replace).\"\"\"\n if 'class' in options:\n assert 'classes' not in options\n options['classes'] = options['class']\n del options['class']\n return [nodes.inline(rawtext, utils.unescape(text), **options)], []\n\n\ndef setup(app):\n app.add_role('app', app_role)\n app.add_directive('frontmatter', frontmatter, 1, (0, 0, 0))\n app.add_directive('mainmatter', mainmatter, 1, (0, 0, 0))\n app.add_directive('backmatter', backmatter, 1, (0, 0, 0))\n app.connect('autodoc-process-signature', resig)\n\n\ndef resig(app, what, name, obj, options, signature, return_annotation):\n \"\"\" Allow for preservation of ``@action_method`` decorated methods\n in configurator \"\"\"\n docobj = getattr(obj, '__docobj__', None)\n if docobj is not None:\n argspec = inspect.getargspec(docobj)\n if argspec[0] and argspec[0][0] in ('cls', 'self'):\n del argspec[0][0]\n signature = inspect.formatargspec(*argspec)\n return signature, return_annotation\n\n# turn off all line numbers in latex formatting\n\n## from pygments.formatters import LatexFormatter\n## from sphinx.highlighting import PygmentsBridge\n\n## class NoLinenosLatexFormatter(LatexFormatter):\n## def __init__(self, **options):\n## LatexFormatter.__init__(self, **options)\n## self.linenos = False\n\n## PygmentsBridge.latex_formatter = NoLinenosLatexFormatter\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = 'The Pyramid Web Framework, Version %s' \\\n % release\nepub_author = 'Chris McDonough'\nepub_publisher = 'Agendaless Consulting'\nepub_copyright = '2008-%d' % thisyear\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\nepub_language = 'en'\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\nepub_scheme = 'ISBN'\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\nepub_identifier = '0615445675'\n\n# A unique identification for the text.\nepub_uid = 'The Pyramid Web Framework, Version %s' \\\n % release\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['_static/opensearch.xml', '_static/doctools.js',\n '_static/jquery.js', '_static/searchtools.js', '_static/underscore.js',\n '_static/basic.css', 'search.html', '_static/websupport.js']\n\n\n# The depth of the table of contents in toc.ncx.\nepub_tocdepth = 3\n\n# For a list of all settings, visit http://sphinx-doc.org/config.html\n","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":14025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154799148","text":"\"\"\"\n Contains widgets used for Slumber.\n\"\"\"\nfrom django import forms\nfrom django.contrib.admin.widgets import AdminURLFieldWidget\n\nfrom slumber.connector.api import _InstanceProxy, get_instance\nfrom slumber.scheme import from_slumber_scheme\nfrom slumber.server import get_slumber_services\nimport copy\nfrom slumber.scheme import to_slumber_scheme, from_slumber_scheme\nfrom slumber.server import get_slumber_services\n\nclass RemoteForeignKeyWidget(forms.TextInput):\n \"\"\"A widget that allows the URL to be edited.\n \"\"\"\n def render(self, name, value, **kw):\n if isinstance(value, basestring):\n return super(RemoteForeignKeyWidget, self).render(\n name, value, **kw)\n else:\n return super(RemoteForeignKeyWidget, self).render(\n name, value._url if value else None, **kw)\n\n\nclass RemoteForeignKeyField(forms.Field):\n \"\"\"A simple widget that allows the URL for the remote object to be\n seen and edited.\n \"\"\"\n def __init__(self, max_length=None, verify_exists=True,\n model_url=None, **kwargs):\n assert model_url, \"RemoteForiegnKeyField must be passed a model_url\"\n self.max_length = max_length\n self.model_url = model_url\n self.verify_exists = verify_exists\n default = {'widget': RemoteForeignKeyWidget}\n default.update(kwargs)\n if default['widget'] == AdminURLFieldWidget:\n # We have to ignore a request for admin's broken widget\n default['widget'] = RemoteForeignKeyWidget\n super(RemoteForeignKeyField, self).__init__(**default)\n\n def clean(self, value):\n if not value:\n if self.required:\n raise forms.ValidationError('This field is required')\n return None\n elif isinstance(value, _InstanceProxy):\n return value\n else:\n try:\n model_url = from_slumber_scheme(\n self.model_url, get_slumber_services())\n instance = get_instance(model_url, value, None)\n unicode(instance)\n \n except AssertionError:\n raise forms.ValidationError(\"The remote object doesn't exist\")\n return instance\n\nfrom django.forms.widgets import Select\nclass RemoteSelect(Select):\n def render(self, name, value, attrs=None, choices=()):\n # we had to cast slumber field to string\n if value is not None:\n value = to_slumber_scheme(value._url, get_slumber_services())\n return super(RemoteSelect, self).render(name, value, attrs=attrs, choices=choices) \n\nclass TypedRemoteChoiceField(RemoteForeignKeyField):\n def __init__(self, coerce=None, *args, **kwargs):\n self.empty_value = kwargs.pop('empty_value', '')\n kwargs['widget'] = RemoteSelect()\n choices = kwargs.pop('choices', [])\n super(TypedRemoteChoiceField, self).__init__(*args, **kwargs)\n self.choices = choices\n\n def __deepcopy__(self, memo):\n result = super(TypedRemoteChoiceField, self).__deepcopy__(memo)\n 
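# forms.Field.__deepcopy__ copies the widget and validators but knows
        # nothing about _choices, so copy it here (as Django's ChoiceField does)
        # or the copy would share one mutable choices list with the original.
        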
result._choices = copy.deepcopy(self._choices, memo)\n return result\n\n def _get_choices(self):\n return self._choices\n\n def _set_choices(self, value):\n # Setting choices also sets the choices on the widget.\n # choices can be any iterable, but we call list() on it because\n # it will be consumed more than once.\n self._choices = self.widget.choices = list(value)\n\n choices = property(_get_choices, _set_choices)\n \n def to_python(self, value):\n if not value:\n return None\n if isinstance(value, _InstanceProxy):\n return value\n instance_url = from_slumber_scheme(\n super(TypedRemoteChoiceField, self).to_python(value),\n get_slumber_services())\n model_url = from_slumber_scheme(\n self.model_url, get_slumber_services())\n return get_instance(model_url, instance_url, None)\n \n# def to_python(self, value):\n# \"\"\"\n# Validates that the value is in self.choices and can be coerced to the\n# right type.\n# \"\"\"\n# value = super(TypedChoiceField, self).to_python(value)\n# if value == self.empty_value or value in self.empty_values:\n# return self.empty_value\n# try:\n# value = self.coerce(value)\n# except (ValueError, TypeError, ValidationError):\n# raise ValidationError(\n# self.error_messages['invalid_choice'],\n# code='invalid_choice',\n# params={'value': value},\n# )\n# return value","sub_path":"slumber/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"652396663","text":"from google.cloud import firestore\n\n\ndef write_quotes():\n db = firestore.Client()\n\n coll_ref = db.collection(u\"quotes\")\n coll_ref.add({\n u\"text\": u\"Believe in yourself! Have faith in your abilities! Without a humble but reasonable confidence in your own powers you cannot be successful or happy.\",\n u\"author\": u\"Norman Vincent Peale\"\n })\n\n quotes = [\n u\"Your focus determines your reality.\", \"Do. Or do not. There is no try.\",\n u\"In my experience there is no such thing as luck.\",\n u\"Your eyes can deceive you. Don’t trust them.\",\n u\"The Force will be with you. Always.\",\n u\"There’s always a bigger fish.\",\n u\"You can’t stop the change, any more than you can stop the suns from setting.\",\n u\"Fear is the path to the dark side. Fear leads to anger; anger leads to hate; hate leads uto suffering. I sense much fear in you.\",\n u\"I’m one with the Force. 
The Force is with me.\"\n ]\n author = u\"Star Wars\"\n\n for q in quotes:\n quote = {\n \"text\": q,\n \"author\": author\n }\n coll_ref.add(quote)\n\n\ndef write_users():\n db = firestore.Client()\n\n coll_ref = db.collection(u\"quotes\")\n coll_ref.add({\n\n })\n\n\nif __name__ == '__main__':\n write_quotes()\n","sub_path":"backend/seed_database.py","file_name":"seed_database.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"271491064","text":"# -*- mode: python; coding: utf-8 -*-\n# Copyright 2012-2014 Peter Williams and collaborators.\n# Licensed under the MIT License.\n\n\"\"\"pwkit.cli - miscellaneous utilities for command-line programs.\n\nFunctions:\n\ncheck_usage - Print usage and exit if --help is in argv.\ndie - Print an error and exit.\npop_option - Check for a single command-line option.\nshow_usage - Print a usage message.\nunicode_stdio - Ensure that sys.std{in,out,err} accept unicode strings.\nwarn - Print a warning.\nwrong_usage - Print an error about wrong usage and the usage help.\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n__all__ = (b'check_usage die pop_option show_usage unicode_stdio warn '\n b'wrong_usage').split ()\n\nimport codecs, sys\nfrom .. import text_type\n\n\ndef unicode_stdio ():\n \"\"\"Make sure that the standard I/O streams accept Unicode.\n\n The standard I/O streams accept bytes, not Unicode characters. This means\n that in principle every Unicode string that we want to output should be\n encoded to utf-8 before print()ing. But Python 2.X has a hack where, if\n the output is a terminal, it will automatically encode your strings, using\n UTF-8 in most cases.\n\n BUT this hack doesn't kick in if you pipe your program's output to another\n program. So it's easy to write a tool that works fine in most cases but then\n blows up when you log its output to a file.\n\n The proper solution is just to do the encoding right. This function sets\n things up to do this in the most sensible way I can devise. This approach\n sets up compatibility with Python 3, which has the stdio streams be in\n text mode rather than bytes mode to begin with.\n\n Basically, every command-line Python program should call this right at\n startup. I'm tempted to just invoke this code whenever this module is\n imported since I foresee many accidentally omissions of the call.\n\n \"\"\"\n enc = sys.stdin.encoding or 'utf-8'\n sys.stdin = codecs.getreader (enc) (sys.stdin)\n enc = sys.stdout.encoding or enc\n sys.stdout = codecs.getwriter (enc) (sys.stdout)\n enc = sys.stderr.encoding or enc\n sys.stderr = codecs.getwriter (enc) (sys.stderr)\n\n\ndef die (fmt, *args):\n \"\"\"Raise a :exc:`SystemExit` exception with a formatted error message.\n\n :arg str fmt: a format string\n :arg args: arguments to the format string\n\n If *args* is empty, a :exc:`SystemExit` exception is raised with the\n argument ``'error: ' + str (fmt)``. Otherwise, the string component is\n ``fmt % args``. 
If uncaught, the interpreter exits with an error code and\n prints the exception argument.\n\n Example::\n\n if ndim != 3:\n die ('require exactly 3 dimensions, not %d', ndim)\n\n \"\"\"\n if not len (args):\n raise SystemExit ('error: ' + text_type (fmt))\n raise SystemExit ('error: ' + (fmt % args))\n\n\ndef warn (fmt, *args):\n if not len (args):\n s = text_type (fmt)\n else:\n s = fmt % args\n\n print ('warning:', s, file=sys.stderr)\n\n\n# Simple-minded argument handling -- see also kwargv.\n\ndef pop_option (ident, argv=None):\n \"\"\"A lame routine for grabbing command-line arguments. Returns a boolean\n indicating whether the option was present. If it was, it's removed from\n the argument string. Because of the lame behavior, options can't be\n combined, and non-boolean options aren't supported. Operates on sys.argv\n by default.\n\n Note that this will proceed merrily if argv[0] matches your option.\n\n \"\"\"\n if argv is None:\n from sys import argv\n\n if len (ident) == 1:\n ident = '-' + ident\n else:\n ident = '--' + ident\n\n found = ident in argv\n if found:\n argv.remove (ident)\n\n return found\n\n\ndef show_usage (docstring, short, stream, exitcode):\n \"\"\"Print program usage information and exit.\n\n :arg str docstring: the program help text\n\n This function just prints *docstring* and exits. In most cases, the\n function :func:`check_usage` should be used: it automatically checks\n :data:`sys.argv` for a sole \"-h\" or \"--help\" argument and invokes this\n function.\n\n This function is provided in case there are instances where the user\n should get a friendly usage message that :func:`check_usage` doesn't catch.\n It can be contrasted with :func:`wrong_usage`, which prints a terser usage\n message and exits with an error code.\n\n \"\"\"\n if stream is None:\n from sys import stdout as stream\n\n if not short:\n print ('Usage:', docstring.strip (), file=stream)\n else:\n intext = False\n for l in docstring.splitlines ():\n if intext:\n if not len (l):\n break\n print (l, file=stream)\n elif len (l):\n intext = True\n print ('Usage:', l, file=stream)\n\n print ('\\nRun with a sole argument --help for more detailed '\n 'usage information.', file=stream)\n\n raise SystemExit (exitcode)\n\n\ndef check_usage (docstring, argv=None, usageifnoargs=False):\n \"\"\"Check if the program has been run with a --help argument; if so,\n print usage information and exit.\n\n :arg str docstring: the program help text\n :arg argv: the program arguments; taken as :data:`sys.argv` if\n given as :const:`None` (the default). (Note that this implies\n ``argv[0]`` should be the program name and not the first option.)\n :arg bool usageifnoargs: if :const:`True`, usage information will be\n printed and the program will exit if no command-line arguments are\n passed. If \"long\", print long usasge. Default is :const:`False`.\n\n This function is intended for small programs launched from the command\n line. The intention is for the program help information to be written in\n its docstring, and then for the preamble to contain something like::\n\n \\\"\\\"\\\"myprogram - this is all the usage help you get\\\"\\\"\\\"\n import sys\n ... # other setup\n check_usage (__doc__)\n ... 
# go on with business\n\n If it is determined that usage information should be shown,\n :func:`show_usage` is called and the program exits.\n\n See also :func:`wrong_usage`.\n\n \"\"\"\n if argv is None:\n from sys import argv\n\n if len (argv) == 1 and usageifnoargs:\n show_usage (docstring, (usageifnoargs != 'long'), None, 0)\n if len (argv) == 2 and argv[1] in ('-h', '--help'):\n show_usage (docstring, False, None, 0)\n\n\ndef wrong_usage (docstring, *rest):\n \"\"\"Print a message indicating invalid command-line arguments and exit with an\n error code.\n\n :arg str docstring: the program help text\n :arg rest: an optional specific error message\n\n This function is intended for small programs launched from the command\n line. The intention is for the program help information to be written in\n its docstring, and then for argument checking to look something like\n this::\n\n \\\"\\\"\\\"mytask \n\n Do something to the input to create the output.\n \\\"\\\"\\\"\n ...\n import sys\n ... # other setup\n check_usage (__doc__)\n ... # more setup\n if len (sys.argv) != 3:\n wrong_usage (__doc__, \"expect exactly 2 arguments, not %d\",\n len (sys.argv))\n\n When called, an error message is printed along with the *first stanza* of\n *docstring*. The program then exits with an error code and a suggestion to\n run the program with a --help argument to see more detailed usage\n information. The \"first stanza\" of *docstring* is defined as everything up\n until the first blank line, ignoring any leading blank lines.\n\n The optional message in *rest* is treated as follows. If *rest* is empty,\n the error message \"invalid command-line arguments\" is printed. If it is a\n single item, the stringification of that item is printed. If it is more\n than one item, the first item is treated as a format string, and it is\n percent-formatted with the remaining values. 
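So
    ``wrong_usage (__doc__, "got %d args", 5)`` prints ``error: got 5 args`` to
    stderr before the usage stanza and exits with code 1. 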
See the above example.\n\n See also :func:`check_usage` and :func:`show_usage`.\n\n \"\"\"\n intext = False\n\n if len (rest) == 0:\n detail = 'invalid command-line arguments'\n elif len (rest) == 1:\n detail = rest[0]\n else:\n detail = rest[0] % tuple (rest[1:])\n\n print ('error:', detail, '\\n', file=sys.stderr) # extra NL\n show_usage (docstring, True, sys.stderr, 1)\n","sub_path":"pwkit/cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"55372040","text":"from List import *\n\ndef main():\n\n lst = EnumerateList(0,20,2)\n print(lst)\n lstRec = EnumerateList(0,20,2)\n print(lstRec)\n\ndef EnumerateList(low, high, step):\n\n lst = NodeCreate(low, None)\n node = lst\n count = low + step\n while ( count < high):\n nextNode = NodeCreate(count, None)\n NodeSetNext(node, nextNode)\n count += step\n node = nextNode\n return lst\n\ndef EnumerateListRec(low, high, step):\n\n if (low >= high):\n return None\n else:\n return NodeCreate(low,EnumerateListRec(low+step, high, step))\n\n\n\n\nmain()\n","sub_path":"Practice/lists/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"70052763","text":"import nltk\nimport numpy\nimport codecs\n\ntext = codecs.open(\"test.txt\", \"r\", \"utf-8\").read()\n\n##tokenize text using nltk word tokenizer\nte = nltk.word_tokenize(text)\n\n##label each token with POS Tag\ntags = nltk.pos_tag(te)\nnouns = []\nnouns_2 = []\n\nf = codecs.open(\"nouns_1.txt\", \"w\", 'utf-8')\n\nstart = 0\noutput = \"\"\n\n## extract all noun Phrases\nfor i, item in enumerate(tags) :\n word = item[0]\n ## start == 0 if currently outside Noun Phrase , otherwise start == 1\n if ( start == 0 ) :\n ## Check if noun Phrase has not begun, otherwise start noun phrase\n if ( item[1] not in ['NNS', 'NNP', 'NN', 'NNPS'] or (word in ['(', '{', '[', ')', '}', ']']) ) :\n continue\n else :\n start = 1\n output += item[0] + \" \"\n else :\n ## Check if noun Phrase has ended, output if true,\n ## otherwise continue building noun phrase\n if ( item[1] not in ['NNS', 'NNP', 'NN', 'NNPS'] or word in ['(', '{', '[', ')', '}', ']'] ) :\n output.strip()\n nouns.append(output)\n output = \"\"\n start = 0\n else :\n output += item[0] + \" \"\n\n## extract all noun phrases of length 2\nfor item in nouns :\n arr = item.strip().split(\" \")\n nouns_2.append(item)\n f.write(item + \"\\n\")\n\nf.close()\n","sub_path":"POS_GNA/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"104152648","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom decimal import Decimal\nimport logging\nimport random\nimport time\n\nfrom dbunit.generator import Generator\nfrom lxml import etree\n\nfrom chengpin import ctx, inject\nfrom chengpin.boot import CpConfig\nfrom chengpin.bll.order import OrderService\n\nctx.register(CpConfig)\nctx.refresh()\nsvc = inject('OrderService', OrderService)\n\nlogging.basicConfig(level=logging.DEBUG)\n\norder_ids = set()\nuser_id = 3326133\n\ndetail_id = 1000000\ndef get_detail_id():\n global detail_id\n detail_id += 1\n return detail_id\n\naddress_id = 100000\ndef get_address_id():\n global address_id\n address_id += 1\n return address_id\n\ndef to_xml(_):\n row = etree.Element('row')\n for c in 
_.__columns__:\n cld = etree.SubElement(row, c)\n v = getattr(_, c)\n if type(v) == Decimal:\n v = '%.2f' % v\n if type(v) == datetime:\n v = v.strftime('%Y-%m-%d %H:%M:%S')\n cld.text = str(v)\n\n return etree.tostring(row, pretty_print=True)\n\norangers = {\n 'id': None,\n 'preprocess_state': 200,\n 'payment_state': list((0,)),\n 'order_state': list((0,)),\n 'user_id': user_id, #range(1, 1000000),\n 'is_hidden': list((1, 0, 0, 0, 0, 0, 0, 0, 0, 0)),\n 'subtotal_price': lambda : Decimal(random.random() * 100),\n 'grand_total_price': lambda : Decimal(random.random() * 1000 + 1000),\n 'shipping_cost': lambda : Decimal(random.random() * 200),\n 'note': '备注信息',\n 'ordered_datetime': lambda: datetime.now(),\n 'paid_datetime': lambda: datetime.now(),\n 'shipping_datetime': lambda: datetime.now(),\n 'completed_datetime': lambda: datetime.now(),\n 'created_at': lambda: datetime.now(),\n 'updated_at': lambda: datetime.now(),\n}\n\nodrangers = {\n 'id': lambda : get_detail_id(),\n 'order_id': None,\n 'product_id': lambda : int(random.random() * 1000000),\n 'sku_id': lambda : int(random.random() * 10000),\n 'snapshot_key': 'c4ca4238a0b923820dcc509a6f75849b',\n 'unit_price': lambda : Decimal(random.random() * 100 + 50),\n 'real_unit_price': lambda: Decimal(random.random() * 50),\n 'quantity': lambda : int(random.random() * 10),\n 'created_at': lambda: datetime.now(),\n 'updated_at': lambda: datetime.now(),\n}\n\nasrangers = {\n 'id': None,\n 'aftersales_type': None,\n 'aftersales_state': list((10, 20)),\n 'user_id': user_id,\n 'order_id': None,\n 'product_id': None,\n 'sku_id': None,\n 'snapshot_key': None,\n 'note': '描述',\n 'refund_amount': 0,\n 'quantity': 0,\n 'payment_no': '',\n}\n\noarangers = {\n 'id': lambda : get_address_id(),\n 'area_id': range(3650),\n 'city_id': range(390),\n 'province_id': range(36),\n 'order_id': None,\n 'shipto_name': list(('张三', '李四', '王五', '赵六')),\n 'contact_name': list(('张三', '李四', '王五', '赵六')),\n 'phone': 13812345678,\n 'address1': '这是一个测试用的地址嗯',\n 'address2': '这是另一个测试用的地址嗯',\n}\n\n\nog = Generator(orangers)\nodg = Generator(odrangers)\noag = Generator(oarangers)\n\norders = []\ndetails = []\naddresses = []\n\n\ndef generate_order(state, count):\n for i in range(count + 1):\n while True:\n o = og.generate()\n order_id = svc.generate_order_no(o.created_at, o.user_id)\n if order_id not in order_ids:\n order_ids.add(order_id)\n break\n time.sleep(0.1)\n o.id = order_id\n if o.preprocess_state == 200:\n o.order_state = state\n if o.order_state > 100:\n o.payment_state = 10\n s = to_xml(o)\n\n d = odg.generate()\n d.order_id = order_id\n\n a = oag.generate()\n a.order_id = order_id\n\n orders.append(to_xml(o).decode('UTF-8'))\n details.append(to_xml(d).decode('UTF-8'))\n addresses.append(to_xml(a).decode('UTF-8'))\n\ngenerate_order(100, 10)\ngenerate_order(200, 10)\ngenerate_order(300, 10)\ngenerate_order(400, 10)\ngenerate_order(500, 10)\ngenerate_order(600, 10)\ngenerate_order(610, 10)\n\nprint('')\nprint('')\nprint('')\nprint('')\nprint(''.join(orders))\nprint('')\nprint('
')\nprint('')\nprint('')\nprint(''.join(details))\nprint('')\nprint('
')\n\nprint('')\nprint('')\nprint(''.join(addresses))\nprint('')\nprint('
')\n\nprint('
')\n","sub_path":"python/dbunit/o.py","file_name":"o.py","file_ext":"py","file_size_in_byte":4475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"559215242","text":"def rgbExclusion(filename,rgblist):\r\n\r\n#!/usr/bin/env python3\r\n import cv2\r\n from PIL import Image\r\n import numpy\r\n import sys\r\n \r\n im = Image.open(filename).convert(\"RGB\")\r\n Rmult, Gmult, Bmult = 1, 1, 1\r\n\r\n Rmult = rgblist[0]\r\n Gmult = rgblist[1]\r\n Bmult = rgblist[2]\r\n\r\n# Select one (or more) channels to zero out\r\n Matrix = ( Rmult, 0, 0, 0,\r\n 0, Gmult, 0, 0,\r\n 0, 0, Bmult, 0)\r\n\r\n im = im.convert(\"RGB\", Matrix)\r\n im.save('result.jpg')\r\n\r\n","sub_path":"rgbex.py","file_name":"rgbex.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"488164987","text":"import itertools\nimport numpy as np\n\n\ndef calc_sal_ranges(num_players, max_salary):\n return np.array(list(range(3500*num_players, max_salary+100, 100)))\n\n\ndef combine_single_position(position, num_players, max_salary, df):\n posdf = df[df.pos == position]\n posdf_indicies = posdf.index.values\n points = posdf['pts'].values\n sals = posdf['sal'].values\n\n #splitting the data fram into the necessary fields\n ids_comb = np.array(list(itertools.combinations(posdf_indicies, num_players)))\n points_comb = np.array(list(itertools.combinations(points, num_players)))\n sal_comb = np.array(list(itertools.combinations(sals, num_players)))\n\n sal_ranges = calc_sal_ranges(num_players, max_salary)\n\n return restrict_and_merge(ids_comb, points_comb, sal_comb, sal_ranges)\n\n\ndef combine_multiple_positions(pos1, pos2, max_salary):\n\n #pos1 varaibles don't have a 1 at the end, pos 2 variables do at the beginning\n\n _, ids, points, sals = pos1\n _, ids2, points2, sals2 = pos2\n\n _, inds = np.unique(points, return_index=True)\n _, inds2 = np.unique(points2, return_index=True)\n\n #shrinking the data to remove repeats. 
Again, we're wanting to keep them all the same length\n #so the same ids reference the correct data\n shrunk_sals = sals[inds]\n shrunk_sals2 = sals2[inds2]\n shrunk_points = points[inds]\n shrunk_points2 = points2[inds2]\n shrunk_ids = ids[inds]\n shrunk_ids2 = ids2[inds2]\n\n #combine the status using the product\n sals_comb = np.array(list(itertools.product(shrunk_sals, shrunk_sals2)))\n points_comb = np.array(list(itertools.product(shrunk_points, shrunk_points2)))\n ids_comb = np.array([ np.concatenate((x,y)) for x,y in list(itertools.product(shrunk_ids, shrunk_ids2)) ] )\n\n num_players = ids.shape[1] + ids2.shape[1]\n sal_ranges = calc_sal_ranges(num_players, max_salary)\n\n return restrict_and_merge(ids_comb, points_comb, sals_comb, sal_ranges)\n\n\ndef combine_all_positions(tops_array, max_salary):\n tops_array_len = len(tops_array)\n if tops_array_len == 1:\n return tops_array[0]\n else:\n half_len = tops_array_len // 2 #we want this non floating\n half1 = tops_array[:half_len]\n half2 = tops_array[half_len:]\n half1top = combine_all_positions(half1, max_salary)\n half2top = combine_all_positions(half2, max_salary)\n\n players_in_first = len(half1top[1][1])\n players_in_second = len(half2top[1][1])\n players_in_combo = players_in_first + players_in_second\n print('Players in combining combo:', players_in_combo)\n\n return combine_multiple_positions(half1top, half2top, max_salary)\n\n\ndef restrict_and_merge(ids_comb, points_comb, sal_comb, sal_ranges):\n\n player_combination_size, num_ids_size = ids_comb.shape\n sal_ranges_size = sal_ranges.size #used to know how big to make the arrays\n\n sal_ranges_full = np.broadcast_to(sal_ranges,(player_combination_size, sal_ranges_size)).T\n\n #creating an array where we add the sals together to get a 1d array\n sal_sum = sal_comb.sum(axis=1)\n points_sum = points_comb.sum(axis=1)\n\n sal_sum_full = np.broadcast_to(sal_sum,(sal_ranges_size, player_combination_size))\n #adding the points of the combinations and making them zero if the salary sum is\n #higher than the max_salary\n points_sum_full = np.broadcast_to(points_sum,(sal_ranges_size, player_combination_size))\n\n #used to snag the best players who've been selected\n ids_comb_full = np.broadcast_to(ids_comb, (sal_ranges_size, player_combination_size, num_ids_size))\n\n under_sal_limit = sal_sum_full <= sal_ranges_full\n\n calculated_points = points_sum_full * under_sal_limit\n\n #we're finding the max indicies\n #argmax() returns the index of the max value\n top_inds = calculated_points.argmax(axis=1)\n\n #now that we know the index of the maximum, we return the relevant info\n row_selectors = np.arange(sal_ranges_size)\n max_points = points_sum_full[row_selectors, top_inds]\n max_sals = sal_sum_full[row_selectors, top_inds]\n max_inds = ids_comb_full[row_selectors, top_inds]\n return sal_ranges, max_inds, max_points, max_sals\n\ndef solve(combo_positions_dict, max_salary, df):\n '''\n Solving the projections.\n '''\n\n print(f'Solving with {max_salary}')\n tops={}\n for position, num_players in combo_positions_dict.items():\n print(f\"Calculating initial positions for {position}\")\n if num_players == 0:\n continue\n tops[position] = combine_single_position(position, num_players, max_salary, df)\n\n tops_array = [vals for pos, vals in tops.items()]\n return combine_all_positions(tops_array, 
max_salary)\n","sub_path":"lib/optimize/calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"437967311","text":"# Python-exercise 'Letter Game'\n# Done by timosarkka\n# Import Python 3.0 print functions\n# Import random library for random number generation\n# Import os for building 'clear screen'-function\n# Import sys for enabling a structured exit\nfrom __future__ import print_function\nimport random\nimport os\nimport sys\n\n# Creates a list of words to be used by the app\n# Opens up a file, the \"Oxford English Dictionary\" and reads all the lines for words\n# Splits them up and appends them to the list 'words'.\nwords = []\nwordlist = open('largelist.txt', 'r')\nfor word in wordlist.read().split():\n\twords.append(word)\n\n# A function to clear the screen\n# If the operating system is Windows or equivalent (nt), use command cls\n# In other cases, use clear\ndef clear():\n\tif os.name == 'nt':\n\t\tos.system('cls')\n\telse:\n\t\tos.system('clear')\n\n# A function to display the lines, if the guess was wrong; or display the letters, if the guess was right\n# Strikes displays the number of bad_guesses using length of list 'bad_guesses'\n# Note that the bad_guesses and good_guesses are empty lists at first, which get appended with the guesses by the player\n# Note that secret_word is a string which gets looped through for every letter and searched for the right letters\ndef draw(bad_guesses, good_guesses, secret_word):\n\tclear\n\n\tprint('Strikes: {}/20'.format(len(bad_guesses)))\n\tprint('')\n\t\n\tfor letter in bad_guesses:\n\t\tprint(letter,end=' ')\n\tprint('\\n\\n')\n\t\n\tfor letter in secret_word:\n\t\tif letter in good_guesses:\n\t\t\tprint(letter,end='')\n\t\telse:\n\t\t\tprint('_',end='')\n\t\t\t\n\tprint('')\n\n# A function to get a guess from the player\n# Runs an infinite while-loop\n# Checks first, that the player is not trying to use multiple letters,\n# not trying to guess an already used letter and that the symbol guessed is actually a letter\n# If all of that checks out, the function returns the players guess\ndef get_guess(bad_guesses, good_guesses):\n\twhile True:\n\t\tguess = raw_input(\"Guess a letter: \").lower()\n\t\tif len(guess) != 1:\n\t\t\tprint(\"You can only guess a single letter!\")\n\t\telif guess in bad_guesses or guess in good_guesses:\n\t\t\tprint(\"You've already guessed that letter!\")\n\t\telif not guess.isalpha():\n\t\t\tprint(\"You can only guess letters!\")\n\t\telse:\n\t\t\treturn guess\n\n# The *actual* game function\n# Clears the screen first\n# Takes out a random word from the list of words\n# Initializes the bad and good guesses lists\ndef play(done):\n\tclear()\n\tsecret_word = random.choice(words)\n\tbad_guesses = []\n\tgood_guesses = []\n\t\n\t# The game loop begins\n\t# First the draw function is used to display the starting state = empty lines\n\t# A guess is gotten from the player\n\twhile True:\n\t\tdraw(bad_guesses, good_guesses, secret_word)\n\t\tguess = get_guess(bad_guesses, good_guesses)\n\t\t\n\t\t# If the letter is found from the secret_word string, the list of good guesses is appended by that letter\n\t\t# At the same time, variable found is set to True\n\t\t# If there is still a letter missing from the good_guesses list, the game will continue\n\t\t# If all the letters are there, a win message is displayed and the variable done is set to True\n\t\t\t# If done is True, the player will be asked 
whether or not to play again. If the choice is yes, play()-function is repeated.\n\t\t\t# If not, the program is terminated.\n\t\t# Else, the letter is added to the list of bad_guesses. If the amount of bad guesses is equal to 20, the game will terminate.\n\t\t# The done-sequence is called in this case too.\n\t\tif guess in secret_word:\n\t\t\tgood_guesses.append(guess)\n\t\t\tfound = True\n\t\t\tfor letter in secret_word:\n\t\t\t\tif letter not in good_guesses:\n\t\t\t\t\tfound = False\n\t\t\tif found:\n\t\t\t\tprint(\"You win!\")\n\t\t\t\tprint(\"The secret word was {}\".format(secret_word))\n\t\t\t\tdone = True\n\t\telse:\n\t\t\tbad_guesses.append(guess)\n\t\t\tif len(bad_guesses) == 20:\n\t\t\t\tdraw(bad_guesses, good_guesses, secret_word)\n\t\t\t\tprint(\"You lost!\")\n\t\t\t\tprint(\"The secret word was {}\".format(secret_word))\n\t\t\t\tdone = True\n\t\t\n\t\tif done:\n\t\t\tplay_again = raw_input(\"Play again? Y/n \").lower()\n\t\t\tif play_again != 'n':\n\t\t\t\treturn play(done=False)\n\t\t\telse:\n\t\t\t\tsys.exit()\n\n# A function to verify that the player really wants to start the game. If not, the program will terminate.\n# Exit is carried out with sys.exit()\n# variable done is initialized to be 'False' so that the game loop can go on.\ndef welcome():\n\tprint('Welcome to Letter Guess!')\n\tstart = raw_input(\"Press Enter to start or Q to quit. \").lower()\n\tif start == 'q':\n\t\tprint(\"Bye!\")\n\t\tsys.exit()\n\telse:\n\t\treturn True\n\ndone = False\n\nwhile True:\n\tclear()\n\twelcome()\n\tplay(done)","sub_path":"letter_game.py","file_name":"letter_game.py","file_ext":"py","file_size_in_byte":4511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"122517545","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom dynamicforms import fields, serializers\nfrom dynamicforms.action import Actions, TableAction, TablePosition\nfrom dynamicforms.viewsets import ModelViewSet\nfrom ..models import RefreshType\n\n\nclass RefreshTypesSerializer(serializers.ModelSerializer):\n form_titles = {\n 'table': 'Refresh type list',\n 'new': 'New refresh type object',\n 'edit': 'Editing refresh type object',\n }\n actions = Actions(\n # Add actions\n # refresh record\n TableAction(TablePosition.HEADER, label=_('+ Add (refresh record)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'record', __TABLEID__);\"),\n # refresh table\n TableAction(TablePosition.HEADER, label=_('+ Add (refresh table)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'table', __TABLEID__);\"),\n # no refresh\n TableAction(TablePosition.HEADER, label=_('+ Add (no refresh)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'no refresh', __TABLEID__);\"),\n # page reload\n TableAction(TablePosition.HEADER, label=_('+ Add (page reload)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'page', __TABLEID__);\"),\n # redirect\n TableAction(TablePosition.HEADER, label=_('+ Add (redirect)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'redirect:{% url 'validated-list' format='html' %}', __TABLEID__);\"),\n # custom function\n 
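# ('testRefreshType' is none of the built-in refresh types above, so
        # dynamicforms resolves it client-side as the name of a JavaScript
        # function to call -- the rendered page must define testRefreshType)
        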
TableAction(TablePosition.HEADER, label=_('+ Add (custom function)'), title=_('Add new record'),\n action_js=\"dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'\"\n \", 'testRefreshType', __TABLEID__);\"),\n\n # Edit actions\n TableAction(TablePosition.ROW_CLICK, label=_('Edit'), title=_('Edit record'),\n action_js=\"dynamicforms.editRow('{% url url_reverse|add:'-detail' pk='__ROWID__' format='html'\"\n \" %}'.replace('__ROWID__', $(event.target.parentElement).closest('tr[class=\\\"df-table-row\\\"]').attr('data-id'))\"\n \", 'record', __TABLEID__);\"),\n\n # Delete actions\n # refresh record\n TableAction(TablePosition.ROW_END, label=_('Delete (refresh record)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'record', __TABLEID__);\"),\n # refresh table\n TableAction(TablePosition.ROW_END, label=_('Delete (refresh table)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'table', __TABLEID__);\"),\n # no refresh\n TableAction(TablePosition.ROW_END, label=_('Delete (no refresh)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'no refresh', __TABLEID__);\"),\n # The following action is duplicated unnecessarily just to later eliminate it in suppress_action\n TableAction(TablePosition.ROW_END, name='del 1', label=_('Delete (no refresh)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'no refresh', __TABLEID__);\"),\n # page reload\n TableAction(TablePosition.ROW_END, label=_('Delete (page reload)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'page', __TABLEID__);\"),\n # redirect\n TableAction(TablePosition.ROW_END, label=_('Delete (redirect)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'redirect:{% url 'validated-list' format='html' %}', __TABLEID__);\"),\n # custom function\n TableAction(TablePosition.ROW_END, label=_('Delete (custom function)'), title=_('Delete record'),\n action_js=\"dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', \"\n + \"{{row.id}}, 'testRefreshType', __TABLEID__);\"),\n )\n\n rich_text_field = fields.RTFField(required=False, allow_blank=True)\n\n def suppress_action(self, action, request, viewset):\n if action.name == 'del 1':\n return True\n return super().suppress_action(action, request, viewset)\n\n class Meta:\n model = RefreshType\n exclude = ()\n\n\nclass RefreshTypesViewSet(ModelViewSet):\n template_context = dict(url_reverse='refresh-types')\n\n queryset = RefreshType.objects.all()\n serializer_class = RefreshTypesSerializer\n","sub_path":"examples/rest/refresh_types.py","file_name":"refresh_types.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"279914666","text":"#!/usr/bin/env python\nimport os\nimport rospkg\nimport rospy\nimport tf\nimport random\nimport tf_conversions\nfrom gazebo_msgs.srv import DeleteModel, SpawnModel\nfrom geometry_msgs.msg import *\n\nif __name__ == \"__main__\":\n print(\"Waiting for gazebo services...\")\n rospy.init_node(\"spawn_products_in_bins\")\n 
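# wait_for_service blocks until Gazebo advertises these services, so the
    # ServiceProxy handles created below cannot race the simulator's startup
    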
rospy.wait_for_service(\"gazebo/delete_model\")\n rospy.wait_for_service(\"gazebo/spawn_sdf_model\")\n print(\"Got it.\")\n delete_model = rospy.ServiceProxy(\"gazebo/delete_model\", DeleteModel)\n spawn_model = rospy.ServiceProxy(\"gazebo/spawn_sdf_model\", SpawnModel)\n\n rospack = rospkg.RosPack()\n filedir = os.path.join(rospack.get_path(\"proj1\"), \"urdf/cube.urdf\")\n # filedir = (\n # \"/home/morten/repos/software-frameworks/catkin_ws/src/proj1/urdf/cube.urdf\"\n # )\n with open(filedir, \"r\") as f:\n product_xml = f.read()\n\n orient = Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0.0, 0.0, 0.785398)\n )\n\n num_of_cubes = random.randint(2, 6)\n\n for num in range(0, num_of_cubes):\n bin_y = random.uniform(0, 0.5)\n bin_x = random.uniform(0, 0.5)\n item_name = \"cube{}\".format(num)\n print(\"Spawning model:%s\", item_name)\n item_pose = Pose(Point(x=bin_x, y=bin_y, z=1), orient)\n spawn_model(item_name, product_xml, \"\", item_pose, \"world\")\n\n filedir = os.path.join(rospack.get_path(\"proj1\"), \"urdf/bucket.urdf\")\n # filedir = (\n # \"/home/morten/repos/software-frameworks/catkin_ws/src/proj1/urdf/bucket.urdf\"\n # )\n with open(filedir, \"r\") as f:\n product_xml = f.read()\n\n item_pose = Pose(Point(x=0.53, y=-0.23, z=0.78), orient)\n print(\"Spawning model:%s\", \"bucket\")\n spawn_model(\"bucket\", product_xml, \"\", item_pose, \"world\")\n","sub_path":"catkin_ws_project_1/src/proj1/scripts/cube_spawn.py","file_name":"cube_spawn.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"629706291","text":"\"\"\"\nCopyright 2016 Peter Urda\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom unittest import TestCase\n\nfrom httpsdns import RecordTypes\n\n\nclass TestRecordTypes(TestCase):\n def test_lookup_tables_integrity(self):\n \"\"\"\n Confirm lookup table integrity\n\n - The tables are the same length\n - For each item x in _lookup_table:\n - Confirm that value exist as a key in _lookup_table_reverse\n - Confirm that the key matches the value stored in\n _lookup_table_reverse[value]\n \"\"\"\n\n self.assertEqual(\n len(RecordTypes._lookup_table),\n len(RecordTypes._lookup_table_reverse),\n )\n\n for record_name, record_int in RecordTypes._lookup_table.items():\n self.assertIn(record_int, RecordTypes._lookup_table_reverse)\n self.assertEqual(\n record_name,\n RecordTypes._lookup_table_reverse.get(record_int)\n )\n\n def test_lookup(self):\n \"\"\"\n Verify the standard lookup operation for a record name\n \"\"\"\n\n expected = 1\n actual = RecordTypes.get('A')\n\n self.assertEqual(expected, actual)\n\n def test_lookup_default(self):\n \"\"\"\n Verify the default argument on a lookup operation results in 'None'\n \"\"\"\n\n self.assertIsNone(RecordTypes.get('NOT A RECORD'))\n\n def test_lookup_reverse(self):\n \"\"\"\n Verify the reverse lookup operation for a record int\n \"\"\"\n\n expected = \"A\"\n actual = RecordTypes.get_reverse(1)\n\n 
self.assertEqual(expected, actual)\n\n def test_lookup_reverse_default(self):\n \"\"\"\n Verify the default argument on a reverse lookup results in 'None'\n \"\"\"\n\n self.assertIsNone(RecordTypes.get_reverse(-1))\n","sub_path":"tests/unit/httpsdns/models/test_record_types.py","file_name":"test_record_types.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"414734122","text":"from django.core.mail import EmailMultiAlternatives\n\n\ndef notify_askhat(booking):\n text = 'Name: %s\\nEmail: %s\\nPhone number: %s\\nMessage: %s' % (\n booking.name, booking.email, booking.phone_number, booking.message\n )\n msg = EmailMultiAlternatives('New booking', text, 'GuideMe team', ['askhat.bolatkhan@gmail.com'])\n msg.attach_alternative(text, \"text/html\")\n msg.send()\n","sub_path":"tours/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"652047034","text":"#! /usr/bin/env python\n#\n\n\"\"\" Access SEDM data from pharos \"\"\"\n\n\nPHAROS_BASEURL = \"http://pharos.caltech.edu\"\nimport os\nimport requests\nimport json\nimport numpy as np\nimport pandas\nfrom . import io\n\nSEDMLOCAL_BASESOURCE = io.LOCALSOURCE+\"SEDM\"\nSEDMLOCALSOURCE = SEDMLOCAL_BASESOURCE+\"/redux\"\nif not os.path.exists(SEDMLOCAL_BASESOURCE):\n os.makedirs(SEDMLOCAL_BASESOURCE)\nif not os.path.exists(SEDMLOCALSOURCE):\n os.makedirs(SEDMLOCALSOURCE)\n \n\n#######################\n# #\n# High level method #\n# #\n#######################\ndef _download_sedm_data_(night, pharosfile, fileout=None, verbose=False):\n \"\"\" \"\"\"\n url = PHAROS_BASEURL+\"/data/%s/\"%night+pharosfile\n if verbose:\n print(url)\n return io.download_single_url(url,fileout=fileout,\n auth=io._load_id_(\"pharos\"),\n cookies=\"no_cookies\")\n\ndef _relative_to_source_(relative_datapath, source=None):\n \"\"\" \"\"\"\n if source is None:\n return relative_datapath\n if source in [\"pharos\"]:\n return [PHAROS_BASEURL+\"/data/\"+l for l in relative_datapath]\n if source in [\"local\"]:\n return [SEDMLOCALSOURCE+\"/\"+l for l in relative_datapath]\n \ndef get_night_file(night):\n \"\"\" get the what.list for a given night \n night format: YYYYMMDD \n \"\"\"\n response = _download_sedm_data_(night, \"what.list\")\n return response.text.splitlines()\n\n#######################\n# #\n# INTERNAL JSON DB #\n# #\n#######################\nclass _SEDMFiles_():\n \"\"\" \"\"\"\n SOURCEFILE = SEDMLOCAL_BASESOURCE+\"whatfiles.json\"\n def __init__(self):\n \"\"\" \"\"\"\n self.load()\n\n def download_nightrange(self, start=\"2018-08-01\", end=\"now\"):\n \"\"\" \"\"\"\n if end is None or end in [\"today\", \"now\"]:\n from datetime import datetime \n today = datetime.today()\n end = today.isoformat().split(\"T\")[0]\n \n self.add_night([\"%4d%02d%02d\"%(tt.year,tt.month, tt.day) for tt in pandas.date_range(start=start, end=end) ])\n \n def add_night(self, night, update=False):\n \"\"\" night (or list of) with the given format YYYYMMDD \n if the given night is already known, this will the download except if update is True \n \"\"\"\n for night_ in np.atleast_1d(night):\n if night_ in self.data and not update:\n continue\n self.data[night_] = get_night_file(night_)\n \n self.dump()\n \n def load(self):\n \"\"\" \"\"\"\n if os.path.isfile( self.SOURCEFILE ):\n self.data = json.load( open(self.SOURCEFILE, 'r') )\n else:\n self.data = {}\n \n def 
dump(self):\n \"\"\" \"\"\"\n with open(self.SOURCEFILE, 'w') as outfile:\n json.dump(self.data, outfile)\n\n def nights_with_target(self, target):\n \"\"\" \"\"\"\n return [n for n,v in self.data.items() if target in \"\\n\".join(v)]\n \n \n\n##################\n# #\n# PHAROS #\n# #\n##################\nclass SEDMQuery( object ):\n \"\"\" \"\"\"\n PROPERTIES = [\"auth\", \"date\"]\n def __init__(self, auth=None, date=None):\n \"\"\" \"\"\"\n self._sedmwhatfiles = _SEDMFiles_()\n self.reset()\n self.set_date(date)\n self.set_auth(io._load_id_(\"pharos\") if auth is None else auth)\n \n def reset(self):\n \"\"\" set the authentification, date and any other properties to default \"\"\"\n self._properties = {k:None for k in self.PROPERTIES}\n \n # -------- #\n # SETTER #\n # -------- #\n def set_date(self, date):\n \"\"\" attach a date for faster night access interation \"\"\"\n self._properties[\"date\"] = date\n \n def set_auth(self, auth):\n \"\"\" provide your authentification. \"\"\"\n self._properties[\"auth\"] = auth\n\n\n def download_target_data(self, target, which=\"cube\", extension=\"fits\", timerange=[\"2018-09-01\", None],\n nodl=False, auth=None,\n show_progress=False, notebook=False, verbose=True,\n overwrite=False, nprocess=None ):\n \"\"\" \n download SEDM data associated to the given target. \n \n Parameters\n ----------\n target: [string] \n Name of a source (e.g. ZTF18abuhzfc) of any part of a filename (i.e. 20180913_06_28_51)\n \n which: [string] -optional-\n kind oif data you want. \n - cube / spec / ccd / all\n\n extension: [string] -optional-\n Extension of the file \n - these exist depending on the file you want: fits / png / pdf / pkl / all\n\n timerange: [iso format dates] -optional-\n time range between which you are looking for file.\n If the dates are not yet stored in you whatfiles.json, this will first download it.\n if the second data is None, it means 'today'\n\n nodl: [bool] -optional-\n do not launch the download, instead, returns \n list of queried url and where they are going to be stored.\n \n download_dir: [string] -optional-\n Directory where the file should be downloaded.\n If th\n \n overwrite: [bool] -optional-\n Check if the requested data already exist in the target download directory. \n If so, this will skip the download except if overwrite is set to True.\n\n nprocess: [None/int] -optional-\n Number of parallel downloading you want to do. \n If None, it will be set to 1 and will not use multiprocess\n\n auth: [str, str] -optional-\n [username, password] of you IRSA account.\n If used, information stored in ~/.ztfquery will be ignored.\n \n Returns\n -------\n Void or list (see nodl)\n \"\"\"\n # Build the path (local and url)\n relative_path = self.get_data_path(target, which=which,extension=extension, timerange=timerange, source=\"pharos\")\n self.to_download_urls, self.download_location = [_relative_to_source_(relative_path, \"pharos\"),\n _relative_to_source_(relative_path, \"local\")]\n if nodl:\n return self.to_download_urls, self.download_location\n \n # Actual Download\n io.download_url(self.to_download_urls, self.download_location,\n show_progress = show_progress, notebook=notebook, verbose=verbose,\n overwrite=overwrite, nprocess=nprocess,\n auth=self._properties[\"auth\"] if auth is None else auth)\n # -------- #\n # GETTER # \n # -------- #\n def get_data_path(self, target, which=\"cube\", extension=\"fits\", source=\"pharos\", timerange=[\"2018-09-01\", None]):\n \"\"\" get the datapath for the given target. 
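A path here is
        relative: _relative_to_source_ later prefixes it with either the pharos
        /data/ url or your local SEDM redux directory.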
\n this is used to build the url that will be queried (see, download_target_data) and the look \n for file in your computer (see, get_local_data)\n \n Parameters\n ----------\n target: [string] \n Name of a source (e.g. ZTF18abuhzfc) of any part of a filename (i.e. 20180913_06_28_51)\n \n which: [string] -optional-\n kind oif data you want. \n - cube / spec / ccd / all\n\n extension: [string] -optional-\n Extension of the file \n - these exist depending on the file you want: fits / png / pdf / pkl / all\n\n timerange: [iso format dates] -optional-\n time range between which you are looking for file.\n If the dates are not yet stored in you whatfiles.json, this will first download it.\n if the second data is None, it means 'today'\n \n source: [string] -optional-\n Where are you looking for data.\n - pharos (online)\n - local (your computer)\n \n Returns\n -------\n list of Path\n \"\"\"\n targetinfo = self.get_target_info(target, timerange=timerange)\n all_data = []\n for night, fileid in targetinfo.items():\n for k_ in fileid:\n all_data+=[l for l in self.get_night_data(night, source=source) if k_.split(\".\")[0] in l\n and (which in [\"*\", \"all\"] or \n (which in [\"cube\"] and \"/e3d\" in l) or\n (which in [\"spec\"] and \"/spec_\" in l) or\n (which in [\"ccd\"] and \"/crr_\" in l)\n )\n and (extension in [\"*\", \"all\"] or extension in l)\n ]\n return all_data\n \n def get_local_data(self, target, which=\"cube\", extension=\"fits\", **kwargs):\n \"\"\" get existing for in you computer associated to the given target\n\n Parameters\n ----------\n target: [string] \n Name of a source (e.g. ZTF18abuhzfc) of any part of a filename (i.e. 20180913_06_28_51)\n \n which: [string] -optional-\n kind oif data you want. \n - cube / spec / ccd / all\n\n extension: [string] -optional-\n Extension of the file \n - these exist depending on the file you want: fits / png / pdf / pkl / all\n\n kwargs goes to get_data_path()\n\n Returns\n -------\n full path\n \"\"\"\n return self.get_data_path(target, which=which, extension=extension, source=\"local\", **kwargs)\n\n def get_target_info(self, target, timerange=[\"2018-09-01\", None]):\n \"\"\" dictionary containing the dates and file id corresponding to the given target.\n this is based on the whatfiles.json stored in your computer under the SEDM directory\n \n Parameters\n ----------\n target: [string] \n Name of a source (e.g. ZTF18abuhzfc) of any part of a filename (i.e. 
20180913_06_28_51)\n\n timerange: [iso format dates] -optional-\n time range between which you are looking for file.\n If the dates are not yet stored in you whatfiles.json, this will first download it.\n if the second data is None, it means 'today'\n\n Returns\n -------\n dict {date:[list of fileid ],...}\n \"\"\"\n self._sedmwhatfiles.download_nightrange(*timerange)\n nights_with_target = self._sedmwhatfiles.nights_with_target(target)\n \n return {n:[l.split()[0] for l in self._sedmwhatfiles.data[n] if target in l]\n for n in nights_with_target}\n\n # = Get Night Data = #\n def get_night_data(self, date, source=\"pharos\"):\n \"\"\" get all the data of the given date you have access to:\n \n Parameters\n ----------\n date: [string]\n format YYYYMMDD\n\n source: [string] -optional-\n Where are you looking for data.\n - pharos (online, only the one you have access to)\n - local (your computer, only the one you have already downloaded)\n \n Returns\n -------\n list of file\n \"\"\"\n if source in [\"pharos\", \"sedm\"]:\n return [l.replace(\"/data/\",\"\") for l in self._get_pharos_night_data_(date)]\n if source in [\"what\"]:\n if date not in self._sedmwhatfiles.data:\n self._sedmwhatfiles.add_night(date)\n return self._sedmwhatfiles.data[date]\n \n elif source in [\"local\"]:\n return self._get_local_night_data_(date)\n raise ValueError(\"unknown source: %s, use 'local' or 'pharos'\"%source)\n\n def _get_pharos_night_data_(self, date):\n \"\"\" \"\"\"\n requests_prop = {\"data\":json.dumps({\"obsdate\":date,\n \"username\":self._properties[\"auth\"][0],\n \"password\":self._properties[\"auth\"][1],\n }),\n \"headers\":{'content-type': 'application/json'}}\n \n t = requests.post(PHAROS_BASEURL+\"/get_user_observations\", **requests_prop).text\n if \"data\" not in t:\n raise IOError(\"night file download fails. Check you authentification maybe?\")\n return np.sort(json.loads(t)[\"data\"])\n\n def _get_local_night_data_(self, date):\n \"\"\" \"\"\"\n date_dir = SEDMLOCALSOURCE+\"/%s/\"%date\n if not os.path.exists( date_dir ):\n return []\n return [date_dir+l for l in os.listdir( date_dir )]\n # ============== #\n # Properties #\n # ============== #\n @property\n def date(self):\n \"\"\" \"\"\"\n return self._properties[\"date\"]\n\n @property\n def night_files(self):\n \"\"\" \"\"\"\n if not hasattr(self, \"_night_files\") or self._night_files is None:\n self._night_files = self.get_night_data(self.date, source=\"pharos\")\n \n return self._night_files\n \n","sub_path":"ztfquery/sedm.py","file_name":"sedm.py","file_ext":"py","file_size_in_byte":13085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"173594726","text":"# This is a custom blender plugin I wrote to auto-export to GLB. 
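(It registers a save_post
# handler, so every save of the .blend also rewrites a sibling .glb file.)
# 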
Its pretty shitty, but gets the job done.\n\nimport shutil\nimport os\nimport bpy\nbl_info = {\n \"name\": \"AutoGLTF\",\n \"blender\": (2, 80, 0),\n \"category\": \"Object\",\n}\n\n\n# bpy.ops.wm.save_mainfile()\n\n\nclass ObjectAutoExportGltf(bpy.types.Operator):\n \"\"\"My Object Exporting Script\"\"\" # Use this as a tooltip for menu items and buttons.\n bl_idname = \"object.autoexport\" # Unique identifier for buttons and menu items to reference.\n bl_label = \"Export to GLTF\" # Display name in the interface.\n bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.\n\n # execute() is called when running the operator.\n def execute(self, context):\n # The original script\n # scene = context.scene\n # filepath = os.path.join(export_path, name)\n # filepath = bpy.path.ensure_ext(filepath, \".x3d\")\n\n # bpy.ops.export_scene.gltf(\n\n # )\n # for obj in scene.objects:\n # obj.location.x += 1.0\n\n # Lets Blender know the operator finished successfully.\n return {'FINISHED'}\n\n\ndef menu_func(self, context):\n self.layout.operator(ObjectAutoExportGltf.bl_idname)\n\n\ndef my_save_handler(scene):\n path = bpy.data.filepath\n saveto = path.split('.')[0] + '.glb'\n print('saveto')\n print(saveto)\n bpy.ops.export_scene.gltf(\n filepath=saveto,\n export_format='GLB',\n ui_tab='GENERAL',\n export_copyright='',\n export_image_format='AUTO',\n export_texture_dir='',\n export_texcoords=True,\n export_normals=True,\n export_draco_mesh_compression_enable=False,\n export_draco_mesh_compression_level=6,\n export_draco_position_quantization=14,\n export_draco_normal_quantization=10,\n export_draco_texcoord_quantization=12,\n export_draco_color_quantization=10,\n export_draco_generic_quantization=12,\n export_tangents=False,\n export_materials='EXPORT',\n export_colors=True,\n use_mesh_edges=False,\n use_mesh_vertices=False,\n export_cameras=False,\n export_selected=False,\n use_selection=False,\n use_visible=False,\n use_renderable=False,\n use_active_collection=False,\n export_extras=False,\n export_yup=True,\n export_apply=False,\n export_animations=True,\n export_frame_range=True,\n export_frame_step=1,\n export_force_sampling=True,\n export_nla_strips=True,\n export_def_bones=False,\n export_current_frame=False,\n export_skins=True,\n export_all_influences=False,\n export_morph=True,\n export_morph_normal=True,\n export_morph_tangent=False,\n export_lights=False,\n export_displacement=False,\n will_save_settings=False,\n filter_glob='*.glb;*.gltf'\n )\n\n\ndef register():\n bpy.utils.register_class(ObjectAutoExportGltf)\n # Adds the new operator to an existing menu.\n bpy.types.VIEW3D_MT_object.append(menu_func)\n bpy.app.handlers.save_post.append(my_save_handler)\n\n\ndef unregister():\n # Removes the new operator from an existing menu.\n bpy.types.VIEW3D_MT_object.remove(menu_func)\n bpy.utils.unregister_class(ObjectAutoExportGltf)\n\n\n# This allows you to run the script directly from Blender's Text editor\n# to test the add-on without having to install it.\nif __name__ == \"__main__\":\n register()","sub_path":"build/models/autoexport.py","file_name":"autoexport.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"95977343","text":"# -*- coding: utf-8 -*-\nimport json\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\n\nfrom django.conf import settings\n\nfrom mock import patch\nfrom nose.tools import eq_, ok_\n\nfrom lib.sellers.models import Seller, 
SellerProduct\nfrom lib.transactions import constants\nfrom lib.transactions.constants import STATUS_COMPLETED\nfrom lib.transactions.models import Transaction\nfrom solitude.base import APITest\n\nfrom ..constants import CANCEL\nfrom ..utils import sign\n\nimport samples\nimport utils\n\n\nclass TestNotification(APITest):\n api_name = 'bango'\n\n def setUp(self):\n self.trans_uuid = 'some-transaction-uid'\n sellers = utils.make_sellers(uuid='seller-uuid')\n self.seller = sellers.seller\n self.product = sellers.product\n self.trans = Transaction.objects.create(\n amount=1, provider=constants.SOURCE_BANGO,\n seller_product=self.product,\n uuid=self.trans_uuid,\n uid_pay='external-trans-uid'\n )\n self.url = self.get_list_url('notification')\n\n def data(self, overrides=None):\n data = {'moz_transaction': self.trans_uuid,\n 'moz_signature': sign(self.trans_uuid),\n 'billing_config_id': '1234',\n 'bango_trans_id': '56789',\n 'bango_response_code': 'OK',\n 'amount': '0.99',\n 'currency': 'EUR',\n 'bango_response_message': 'Success'}\n if overrides:\n data.update(overrides)\n return data\n\n def post(self, data, expected_status=201):\n res = self.client.post(self.url, data=data)\n eq_(res.status_code, expected_status, res.content)\n return json.loads(res.content)\n\n def test_success(self):\n data = self.data()\n self.post(data)\n tr = self.trans.reget()\n eq_(tr.status, constants.STATUS_COMPLETED)\n eq_(tr.amount, Decimal(data['amount']))\n eq_(tr.currency, data['currency'])\n ok_(tr.uid_support)\n\n def test_no_price(self):\n data = self.data()\n del data['amount']\n del data['currency']\n self.post(data)\n tr = self.trans.reget()\n eq_(tr.amount, None)\n eq_(tr.currency, '')\n\n def test_empty_price(self):\n data = self.data()\n data['amount'] = ''\n data['currency'] = ''\n self.post(data)\n tr = self.trans.reget()\n eq_(tr.amount, None)\n eq_(tr.currency, '')\n\n def test_failed(self):\n self.post(self.data(overrides={'bango_response_code': 'NOT OK'}))\n tr = self.trans.reget()\n eq_(tr.status, constants.STATUS_FAILED)\n\n def test_cancelled(self):\n self.post(self.data(overrides={'bango_response_code':\n CANCEL}))\n tr = self.trans.reget()\n eq_(tr.status, constants.STATUS_CANCELLED)\n\n def test_incorrect_sig(self):\n data = self.data({'moz_signature': sign(self.trans_uuid) + 'garbage'})\n self.post(data, expected_status=400)\n\n def test_missing_sig(self):\n data = self.data()\n del data['moz_signature']\n self.post(data, expected_status=400)\n\n def test_missing_transaction(self):\n data = self.data()\n del data['moz_transaction']\n self.post(data, expected_status=400)\n\n def test_unknown_transaction(self):\n self.post(self.data({'moz_transaction': 'does-not-exist'}),\n expected_status=400)\n\n def test_already_completed(self):\n self.trans.status = constants.STATUS_COMPLETED\n self.trans.save()\n self.post(self.data(), expected_status=400)\n\n def test_expired_transaction(self):\n self.trans.created = datetime.now() - timedelta(seconds=62)\n self.trans.save()\n with self.settings(TRANSACTION_EXPIRY=60):\n self.post(self.data(), expected_status=400)\n\n\n@patch.object(settings, 'BANGO_BASIC_AUTH', {'USER': 'f', 'PASSWORD': 'b'})\nclass TestEvent(APITest):\n api_name = 'bango'\n\n def setUp(self):\n self.trans_uuid = 'some-transaction-uid'\n self.seller = Seller.objects.create(uuid='seller-uuid')\n self.product = SellerProduct.objects.create(seller=self.seller,\n external_id='xyz')\n self.trans = Transaction.objects.create(\n amount=1, provider=constants.SOURCE_BANGO,\n 
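# same fixture shape as TestNotification.setUp above, just built without
            # the utils.make_sellers helper
            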
seller_product=self.product,\n uuid=self.trans_uuid,\n uid_pay='external-trans-uid'\n )\n self.url = self.get_list_url('event')\n\n def post(self, data=None, notice=samples.event_notification,\n expected=201):\n if data is None:\n data = {\n 'notification': notice,\n 'password': 'b',\n 'username': 'f'\n }\n res = self.client.post(self.url, data=data)\n eq_(res.status_code, expected, res.content)\n return json.loads(res.content)\n\n def test_missing(self):\n self.post(data={}, expected=400)\n\n def test_good(self):\n self.post()\n trans = self.trans.reget()\n eq_(trans.status, STATUS_COMPLETED)\n\n def test_no_action(self):\n self.post(notice=samples.event_notification_no_action,\n expected=400)\n\n def test_no_data(self):\n self.post(notice=samples.event_notification_no_data,\n expected=400)\n\n def test_not_changed(self):\n self.trans.status = STATUS_COMPLETED\n self.trans.save()\n self.post()\n trans = self.trans.reget()\n eq_(trans.status, STATUS_COMPLETED)\n\n def test_wrong_auth(self):\n data = {'notification': samples.event_notification,\n 'password': 'nope',\n 'username': 'yes'}\n self.post(data, expected=400)\n","sub_path":"lib/bango/tests/test_notification.py","file_name":"test_notification.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"33077972","text":"from __future__ import print_function\n\nimport Queue\nimport logging\nimport subprocess\nimport sys\nimport threading\nfrom threading import Timer\n\nfrom ADCThread import ADCThread\nfrom DeploymentThread import DeploymentThread\nfrom EPSThread import EPSThread\nfrom GPSThread import GPSThread\nfrom GlobalstarThread import GlobalstarThread\nfrom HealthThread import HealthThread\nfrom RFM22BThread import RFM22BThread\n\n'''\nIMPORTANT::\nHow to handle the interplay between low power mode and deployment attempts - possible cases:\n low power mode in the queue when executing deployment\n deployment return from GPS in queue when in low power mode (both commands at approx. same time)\n deployment command in the midst of low power mode\nIn low power mode should the executive thread still look through messages from other threads and log them?\nShould it wait till after low power mode to sift through these commands?\n Should it execute all commands that it received during low power mode?\n if not which commands should it execute/ignore?\nWhat to do if the low power command is called from ground when the satellite is already in low power mode\nHow to return from Low power mode? Should we continue executing what we were doing when we left?\n or should we first look through the messages in the queue and execute those?\n should we rank the messages that we missed and the tasks that we were doing so that we execute the most important\n ones first? or should we do them based on when they came in?\n\nEdit the dictionaries to do error handling if the string isn't correct\n'''\n\n'''\nShould we check all the messages in the queue before we enter low power mode to see if they indicate that we finished\noperations for the state we were in (i.e deployment complete?) 
to ensure that we transition back to the correct state\nwhen we return from low power mode\n'''\n\n# Health Status Report File location\nHealthBeaconFile = \"/home/debian/Maria/healthFiles/healthBeacon.txt\"\nGlobalstarHealthFile = \"/home/debian/Maria/healthFiles/Globalstarhealth.txt\"\nGPSHealthFile = \"/home/debian/Maria/healthFiles/GPShealth.txt\"\n\n# IMPORTANT:: need a method to start the radio on the Mule if the node ejected was the first one\nstartNetwork = 0\nHealthFreq = 5\nADCHealth = 0\nEPSHealth = 0\nDeploymentHealth = 0\nglobalstarHealth = 0\ngpsHealth = 0\nRFM22BHealth = 0\n\n# setup threads and thread communication\ninputQueue = Queue.Queue() # E\nDeployer = DeploymentThread(inputQueue) # D\nPower = EPSThread(inputQueue) # P\nComm = GlobalstarThread(inputQueue) # C\nRFM22B = RFM22BThread(inputQueue) # R\nHealth = HealthThread(inputQueue) # H\nADC = ADCThread(inputQueue) # A\nGPS = GPSThread(inputQueue) # G\n\n\ndef initialization():\n    # this will require initialization of necessary components required for\n    # individual threads to do their own hardware initialization\n    # specifics need to be identified\n    print(\"*** BEGINNING ANDESITE FLIGHT COMPUTER OPERATIONS***\")\n    # watchdog = Watchdog()\n    # working with BeagleBone\n    threading.Timer(8, Comm.GlobalstarEnable).start()\n    threading.Timer(HealthFreq, Health.healthBeacon, [ADCHealth, EPSHealth, DeploymentHealth, globalstarHealth, gpsHealth, RFM22BHealth]).start()\n\n    # Test initializing the health file\n    with open(HealthBeaconFile, \"w\") as healthFile:\n        subprocess.call([\"echo\", \"Successful health file initialization\"], stdout=healthFile)\n\n\ndef loop():\n    pass\n\n\nclass Watchdog:\n    def __init__(self, timeout, userHandler=None): # timeout in seconds\n        self.timeout = timeout\n        self.handler = userHandler if userHandler is not None else self.defaultHandler\n        self.timer = Timer(self.timeout, self.handler)\n\n    def reset(self):\n        self.timer.cancel()\n        self.timer = Timer(self.timeout, self.handler)\n\n    def stop(self):\n        self.timer.cancel()\n\n    def defaultHandler(self):\n        # raise a real exception when the watchdog expires\n        raise RuntimeError(\"watchdog timeout expired\")\n\n\ndef ADCInterpreter(string):\n    global ADCHealth\n    global PointingState\n    global DeployState\n    if string == \"EA:Pointing\":\n        PointingState = 1\n        print(\"Done Detumbling Entering Pointing Mode\")\n        if DeployState == 1:\n            return 2\n        else:\n            return CurrentState\n    elif string == \"EA:HealthUpdate\":\n        ADCHealth = 1\n        return CurrentState # a health update does not change state\n    else:\n        return CurrentState\n\n\n## develop profile for comm interpreter\n\ndef CommInterpreter(string):\n    # ground commands to configure which b-dot detumbling and pointing algorithm\n    # get and set methods for which method were using\n    global EPSReturnState\n    global globalstarHealth\n    global PointingState\n    global DeployState\n    if string == \"EC:lowPowerMode\":\n        EPSReturnState = CurrentState\n        return 5\n    elif string == \"EC:safeMode\":\n        processMessage(\"CE:safeMode\")\n        processMessage(\"RE:safeMode\")\n        return 5\n    elif string == \"EC:deployState\":\n        DeployState = 1\n        print(\"***RECEIVED GROUND COMMAND: 'EC:deployNow'***\")\n        if PointingState == 1:\n            return 2\n        else:\n            return CurrentState\n    elif string == \"EC:deployNow\" and CurrentState != 5:\n        # should we also not be able to deploy now from state 0?\n        return\n    elif string == \"EC:HealthUpdate\":\n        globalstarHealth = 1\n        # add restarts for temperature sensors and motor controller micro controllers\n        return CurrentState # a health update does not change state\n    else:\n        return CurrentState\n\n\ndef DeployerInterpreter(string):\n    global DeploymentHealth\n
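    # Illustrative note (added for clarity, not original flight code): each\n    # *Interpreter returns the state number the executive should enter next,\n    # or CurrentState to stay put -- e.g. \"ED:NextDeployment\" while in state 3\n    # returns 1 to hand control back to the ADC algorithms state.\n    if string == \"ED:DeploymentComplete\" and 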
CurrentState == 3:\n        # send update to ground on current deployment state\n        # set boolean that says we should accept ground commands for ejection attempts\n        return 4\n    elif string == \"ED:NextDeployment\" and CurrentState == 3:\n        return 1\n    elif string == \"ED:AttemptsExceeded\":\n        # send update to ground on current deployment state\n        # set boolean that says we should accept ground commands for ejection attempts\n        return 4\n    elif string == \"ED:HealthUpdate\":\n        DeploymentHealth = 1\n        return CurrentState # a health update does not change state\n    else:\n        return CurrentState\n\n\ndef GPSInterpreter(string):\n    global gpsHealth\n    if string == \"EG:DeployLat\" and CurrentState == 2:\n        return 3\n    elif string == \"EG:HealthUpdate\":\n        gpsHealth = 1\n        return CurrentState # a health update does not change state\n    else:\n        return CurrentState\n\n\n## Develop profile for power interpreter\n\ndef HealthInterpreter(string):\n    global EPSHealth\n    global ADCHealth\n    global DeploymentHealth\n    global globalstarHealth\n    global gpsHealth\n    global RFM22BHealth\n    if string == \"CH:healthBeacon\":\n        # set all health variables to 0\n        EPSHealth = 0\n        ADCHealth = 0\n        DeploymentHealth = 0\n        globalstarHealth = 0\n        gpsHealth = 0\n        RFM22BHealth = 0\n        threading.Timer(HealthFreq, Health.healthBeacon, [ADCHealth, EPSHealth, DeploymentHealth, globalstarHealth, gpsHealth, RFM22BHealth]).start()\n    return CurrentState\n\n\ndef PowerInterpreter(string):\n    global EPSReturnState\n    global MessageRecovery\n    global EPSHealth\n    global DeployState\n    if string == \"EP:HealthUpdate\":\n        EPSHealth = 1\n        return CurrentState # a health update does not change state\n    else:\n        return CurrentState\n\n\ndef RFM22BInterpreter(string):\n    global RFM22BHealth\n    if string == \"ER:HealthUpdate\":\n        RFM22BHealth = 1\n    elif \"ER:Done:\" in string:\n        processMessage(\"CE\" + string[2:])\n    return CurrentState\n\n\ndef processMessage(string):\n    global MessageRecovery\n    thread_char = string[0]\n    subsystem_char = string[1]\n    if thread_char == 'E':\n\n        # include error handling for if subsystem char is not one of the items in the dictionary\n        next_state = interpretMessage[subsystem_char](string)\n        # how/where to update deploy state?\n        return next_state\n    else:\n        # include error handling for if the thread char is not one of the items in the dictionary\n        print(\"sending to thread: \" + string)\n\n        routeMessage[thread_char].put(string)\n        return CurrentState\n\n\n# logging setup\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format=\"%(asctime)s.%(msecs)03d:%(name)s:%(message)s\",\n                    datefmt='%Y-%m-%d,%H:%M:%S')\nlogger = logging.getLogger(\"main\")\n\n# system Initialization\ninitialization()\n# failureStatus()\n# threading.Thread(1080, loop())\n\n# create reserve queue for messages received while in low power mode\nreserveQueue = Queue.Queue()\n\nrouteMessage = {'A': ADC.inputQueue, 'C': Comm.inputQueue, 'D': Deployer.inputQueue, 'G': GPS.inputQueue,\n                'H': Health.inputQueue, 'P': Power.inputQueue, 'R': RFM22B.inputQueue}\ninterpretMessage = {'A': ADCInterpreter, 'C': CommInterpreter, 'D': DeployerInterpreter, 'G': GPSInterpreter,\n                    'H': HealthInterpreter, 'P': PowerInterpreter, 'R': RFM22BInterpreter}\n\nEPSReturnState = 0\nCurrentState = 1\nMessageRecovery = 0\nPointingState = 0\nDeployState = 0\n\n# begin threading algorithms\nComm.resume()\nGPS.resume()\nHealth.resume()\nPower.resume()\nADC.resume()\nRFM22B.resume()\n
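# Illustrative recap (added for clarity): processMessage routes on a two-letter\n# prefix -- the first letter names the destination thread and the second the sender:\n#   processMessage(\"AE:Detumbling\")   # Executive -> ADC thread's queue\n#   processMessage(\"EC:lowPowerMode\") # Comm -> Executive, handled by CommInterpreter\n# when to start rfm22b radio? 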
 should not start while still deep in detumbling mode?\n\nprint(\"performing ADC detumbling algorithms\")\n# print(\"waiting for deploy ready command from ground\")\n\n# wd = open(\"/dev/watchdog\", \"w+\")\n\n# State Machine\nwhile True:\n    if CurrentState == 1:  # ADC algorithms state\n        processMessage(\"AE:Detumbling\")\n        print(\"waiting for pointing mode and deploy ready command from ground\")\n    elif CurrentState == 2:  # waiting to deploy state and pointing\n        # should we tell GPS thread to send message when it reaches the equator && right orbit count?\n        # or should it constantly be sending the messages and we only begin to listen to them now?\n        processMessage(\"GE:LatWait\")\n        print(\"successfully entered pointing mode - waiting on orbit number and GPS location\")\n        # wait on GPS and orbit count number to prep for deployment\n    elif CurrentState == 3:  # deployment state\n        print(\"GPS indicates ready to deploy - running single deployment algorithm\")\n        if startNetwork == 0:\n            startNetwork = 1\n            processMessage(\"RE:radioInit\")\n        Deployer.singleRun()\n        # Should we wait here while the node is deploying? We should keep routing messages, but we shouldn't be able to\n        # transition to a different state (unless that state is low power mode?)\n    elif CurrentState == 4:  # data collection and transmission state\n        processMessage(\"RE:ScienceMode\")\n        processMessage(\"GE:ScienceMode\")\n        print(\"Deployment state complete - Entering Science Mode\")\n    elif CurrentState == 5:  # safe mode\n        # Be very careful with conflicts between low power mode and other modes\n        # make sure that all other running processes are stopped and transitioned to low power mode before\n        # hardware is turned off - also make sure that messages that should indicate a state transition that come\n        # during low power mode (or on the cusp before and after) are handled correctly\n        # lots of potential for edge cases that could cause a lot of problems!\n        Comm.GlobalstarDisable()\n        print(\"entering safe mode state in executive thread\")\n        tempState = None\n        while tempState is None or tempState == CurrentState:\n            try:\n                if MessageRecovery == 1:\n                    threadResponse = reserveQueue.get(False)\n                else:\n                    threadResponse = inputQueue.get(False)\n                tempState = processMessage(threadResponse)\n                # print(threadResponse)\n            except Queue.Empty:\n                MessageRecovery = 0\n                tempState = None\n        CurrentState = tempState\n\nlogger.info(\"After calling terminate on threads\")\nsys.exit(0)\n","sub_path":"MULE/ExecutiveThread.py","file_name":"ExecutiveThread.py","file_ext":"py","file_size_in_byte":11694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"52762009","text":"class Library:\n    def __init__(self, books, name):\n        self.booklist = books\n        self.name = name\n        self.lendDict = {}\n\n    def displaybook(self):\n        print(f\"We have the following books in our library: {self.name}\")\n        for book in self.booklist:\n            print(book)\n\n    def lendbook(self,user,book):\n        if book not in self.lendDict.keys():\n            self.lendDict.update({book:user})\n            print(\"lender book database has been updated, you can take the book now\")\n        else:\n            print(f\"book is already being used by {self.lendDict[book]}\")\n    \n    def addbook(self,book):\n        self.booklist.append(book)\n        print(\"book has been added to the list\")\n\n    def returnbook(self,book):\n        # returning a book frees it up to be lent again\n        self.lendDict.pop(book, None)\n\n\n    \npnna=Library([\"python\",\"rich Daddy poor Daddy\",\"harry potter\",\"c++ basics\",\"algorithm by CLRS\"],\"code with harry\") \n\nwhile True:\n    print(f\"welcome to the {pnna.name} 
library. Enter your choice to continue\")\n    print(\"1. Display Books\")\n    print(\"2. Lend a Book\")\n    print(\"3. Add a Book\")\n    print(\"4. Return a Book\")\n    user_choice=int(input())\n\n    if user_choice ==1:\n        pnna.displaybook()\n\n    elif user_choice ==2:\n        book =input(\"enter the book you want to lend:\")\n        user = input(\"enter your name:\")\n        pnna.lendbook(user,book)\n    elif user_choice ==3:\n        book = input(\"enter the book you want to add:\")\n        pnna.addbook(book)\n    elif user_choice ==4:\n        book = input(\"enter the book you want to return:\")\n        pnna.returnbook(book)\n    else:\n        print(\"not a valid option\")\n\n    print(\"press q to quit and c to continue\")\n    user_choice2 =input()\n    if user_choice2==\"q\":\n        exit()\n    elif user_choice2==\"c\":\n        continue","sub_path":"numpy sclicing.py","file_name":"numpy sclicing.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"346455630","text":"#########\n# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#        http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  * See the License for the specific language governing permissions and\n#  * limitations under the License.\n\nimport os\nimport json\nfrom os.path import join\n\nfrom ..components_constants import (\n    SSL_INPUTS,\n    SSL_ENABLED,\n    SSL_CLIENT_VERIFICATION,\n    CLUSTER_JOIN\n)\nfrom ..base_component import BaseComponent\nfrom ..service_names import (\n    POSTGRESQL_CLIENT,\n    STAGE,\n)\nfrom ...config import config\nfrom ...logger import get_logger\nfrom ...utils import (\n    certificates,\n    common,\n    files,\n    service\n)\nfrom ...utils.network import wait_for_port\nfrom ...utils.install import is_premium_installed\nfrom ...constants import (NEW_POSTGRESQL_CA_CERT_FILE_PATH,\n                          NEW_POSTGRESQL_CLIENT_CERT_FILE_PATH)\n\nlogger = get_logger(STAGE)\n\nSTAGE_USER = '{0}_user'.format(STAGE)\nSTAGE_GROUP = '{0}_group'.format(STAGE)\n\nHOME_DIR = join('/opt', 'cloudify-{0}'.format(STAGE))\nCONF_DIR = join(HOME_DIR, 'conf')\n\n# These are all the same key as the other db keys, but postgres is very strict\n# about permissions (no group or other permissions allowed)\nDB_CLIENT_KEY_PATH = '/etc/cloudify/ssl/stage_db.key'\nDB_CLIENT_CERT_PATH = '/etc/cloudify/ssl/stage_db.crt'\nDB_CA_PATH = join(CONF_DIR, 'db_ca.crt')\n\n\nclass Stage(BaseComponent):\n    def _set_community_mode(self):\n        community_mode = '' if is_premium_installed else '-mode community'\n\n        # This is used in the stage systemd service file\n        config[STAGE]['community_mode'] = community_mode\n\n    def _run_db_migrate(self):\n        if config.get(CLUSTER_JOIN):\n            logger.debug('Joining cluster - not creating the stage db')\n            return\n        backend_dir = join(HOME_DIR, 'backend')\n        npm_path = join('/usr', 'bin', 'npm')\n        common.run(\n            [\n                'sudo', '-u', STAGE_USER, 'bash', '-c',\n                'cd {path}; {npm} run db-migrate'.format(\n                    path=backend_dir,\n                    npm=npm_path,\n                ),\n            ],\n        )\n\n    def _handle_ca_certificate(self):\n        certificates.use_supplied_certificates(\n            component_name=POSTGRESQL_CLIENT,\n            logger=self.logger,\n            ca_destination=DB_CA_PATH,\n            owner=STAGE_USER,\n            group=STAGE_GROUP,\n            
update_config=False,\n )\n\n def _handle_cert_and_key(self):\n certificates.use_supplied_certificates(\n component_name=SSL_INPUTS,\n prefix='postgresql_client_',\n logger=self.logger,\n cert_destination=DB_CLIENT_CERT_PATH,\n key_destination=DB_CLIENT_KEY_PATH,\n owner=STAGE_USER,\n group=STAGE_GROUP,\n key_perms='400',\n update_config=False,\n )\n\n def replace_certificates(self):\n # The certificates are validated in the PostgresqlClient component\n replacing_ca = os.path.exists(NEW_POSTGRESQL_CA_CERT_FILE_PATH)\n replacing_cert_and_key = os.path.exists(\n NEW_POSTGRESQL_CLIENT_CERT_FILE_PATH)\n\n if config[POSTGRESQL_CLIENT][SSL_ENABLED]:\n if replacing_ca:\n self.log_replacing_certs('CA cert')\n self._handle_ca_certificate()\n\n if (config[POSTGRESQL_CLIENT][SSL_CLIENT_VERIFICATION] and\n replacing_cert_and_key):\n self.log_replacing_certs('cert and key')\n self._handle_cert_and_key()\n\n service.restart(STAGE)\n service.verify_alive(STAGE)\n\n def log_replacing_certs(self, certs_type):\n self.logger.info(\n 'Replacing {0} on stage component'.format(certs_type))\n\n def _set_db_url(self):\n config_path = os.path.join(HOME_DIR, 'conf', 'app.json')\n # We need to use sudo to read this or we break on configure\n stage_config = json.loads(files.sudo_read(config_path))\n\n host_details = config[POSTGRESQL_CLIENT]['host'].split(':')\n database_host = host_details[0]\n database_port = host_details[1] if 1 < len(host_details) else '5432'\n\n stage_config['db']['url'] = \\\n 'postgres://{0}:{1}@{2}:{3}/stage'.format(\n config[POSTGRESQL_CLIENT]['cloudify_username'],\n config[POSTGRESQL_CLIENT]['cloudify_password'],\n database_host,\n database_port)\n\n # For node-postgres\n dialect_options = stage_config['db']['options']['dialectOptions']\n # For building URL string\n params = {}\n\n if config[POSTGRESQL_CLIENT][SSL_ENABLED]:\n self._handle_ca_certificate()\n\n params.update({\n 'sslmode': 'verify-full',\n 'sslrootcert': DB_CA_PATH,\n })\n\n dialect_options['ssl'] = {\n 'ca': DB_CA_PATH,\n 'rejectUnauthorized': True,\n }\n\n if config[POSTGRESQL_CLIENT][SSL_CLIENT_VERIFICATION]:\n self._handle_cert_and_key()\n\n params.update({\n 'sslcert': DB_CLIENT_CERT_PATH,\n 'sslkey': DB_CLIENT_KEY_PATH,\n })\n\n dialect_options['ssl']['key'] = DB_CLIENT_KEY_PATH\n dialect_options['ssl']['cert'] = DB_CLIENT_CERT_PATH\n else:\n dialect_options = {\n 'ssl': False\n }\n\n if any(params.values()):\n query = '&'.join('{0}={1}'.format(key, value)\n for key, value in params.items()\n if value)\n stage_config['db']['url'] = '{0}?{1}'.format(\n stage_config['db']['url'], query)\n\n content = json.dumps(stage_config, indent=4, sort_keys=True)\n\n # Using `write_to_file` because the path belongs to the stage user, so\n # we need to move with sudo\n files.write_to_file(contents=content, destination=config_path)\n common.chown(STAGE_USER, STAGE_GROUP, config_path)\n common.chmod('640', config_path)\n\n def _set_internal_manager_ip(self):\n config_path = os.path.join(HOME_DIR, 'conf', 'manager.json')\n with open(config_path) as f:\n stage_config = json.load(f)\n\n if config[SSL_INPUTS]['internal_manager_host']:\n stage_config['ip'] = config[SSL_INPUTS]['internal_manager_host']\n content = json.dumps(stage_config, indent=4, sort_keys=True)\n # Using `write_to_file` because the path belongs to the stage user,\n # so we need to move with sudo\n files.write_to_file(contents=content, destination=config_path)\n common.chown(STAGE_USER, STAGE_GROUP, config_path)\n common.chmod('640', config_path)\n\n def 
_verify_stage_alive(self):\n        service.verify_alive(STAGE)\n        wait_for_port(8088)\n\n    def configure(self):\n        logger.notice('Configuring Stage...')\n        self._set_db_url()\n        self._set_internal_manager_ip()\n        self._set_community_mode()\n        external_configure_params = {}\n        if self.service_type == 'supervisord':\n            external_configure_params['service_user'] = STAGE_USER\n            external_configure_params['service_group'] = STAGE_GROUP\n        service.configure(\n            STAGE,\n            user=STAGE_USER,\n            group=STAGE_GROUP,\n            external_configure_params=external_configure_params\n        )\n        logger.notice('Stage successfully configured!')\n\n    def remove(self):\n        logger.notice('Removing Stage...')\n        service.remove(STAGE, service_file=False)\n        logger.notice('Removing Stage data....')\n        common.sudo(['rm', '-rf', '/opt/cloudify-stage'])\n        logger.notice('Stage successfully removed')\n\n    def start(self):\n        logger.notice('Starting Stage...')\n        self._run_db_migrate()\n        service.restart(STAGE)\n        self._verify_stage_alive()\n        logger.notice('Stage successfully started')\n\n    def stop(self):\n        logger.notice('Stopping Stage...')\n        service.stop(STAGE)\n        logger.notice('Stage successfully stopped')\n","sub_path":"cfy_manager/components/stage/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"12362965","text":"pac_x, pac_y = list(map(int, input().split()))\nDest_x, Dest_y = list(map(int, input().split()))\nn, m = list(map(int, input().split()))\n\ngrid = []\nfor i in range(n):\n    grid.append(list(map(str, input())))\n\nexplore = []\nnode_visited = []\nfinal_route = None\n\nPossibleMoves = [[-1, 0], [0, -1], [0, 1], [1, 0]]\n\nexplore.append([pac_x, pac_y, []])\n\nwhile len(explore) > 0:\n    x, y, temp = explore.pop()\n    routes = list(temp)\n    routes.append([x, y])\n    node_visited.append([x, y])\n\n    if x == Dest_x and y == Dest_y:\n        if final_route is None:\n            final_route = routes\n        break\n\n    for move in PossibleMoves:\n        next_x, next_y = x + move[0], y + move[1]\n        # next_x indexes the n rows and next_y the m columns\n        if next_x < 0 or next_x >= n or next_y < 0 or next_y >= m:\n            continue\n\n        if grid[next_x][next_y] == '-' or grid[next_x][next_y] == '.':\n            grid[next_x][next_y] = '='\n            explore.append([next_x, next_y, routes])\nprint(str(len(node_visited)))\nfor nodes in node_visited:\n    print(str(nodes[0]) + \" \" +str(nodes[1]))\n\nprint(str(len(final_route)-1))\nfor nodes in final_route:\n    print(str(nodes[0]) + \" \" +str(nodes[1]))\n","sub_path":"Pacman_DFS.py","file_name":"Pacman_DFS.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"429328316","text":"import sys, time\nsys.path.append('entrez/entrez/')\n\nimport entrez as ez\n\nwith open('pmids_need_date.txt') as f:\n    pmids = set(x.strip() for x in f)\n\ns = open('scraped_pmids.txt', 'w')\nf = open('failed_pmids.txt', 'w')\n\nct = 0\nfor p in pmids:\n    ct += 1\n    time.sleep(.25) # guessing this will take 30 minutes to an hour for 4000 pmids.\n\n    try:\n        lines = [\n            line.replace('{', '').replace('}', '').replace(',', '').strip()\n            for line in ez.equery(tool='fetch', db='pubmed', id=p)\n            if 'date' in line or 'year' in line or 'month' in line\n        ]\n\n        date_idx = lines.index('date std')\n        relevant_lines = lines[date_idx + 1:date_idx + 3]\n\n        year = relevant_lines[0].replace('year', '').strip()\n        assert year.isdigit()\n        month = relevant_lines[1].replace('month', '').strip()\n        assert month.isdigit()\n\n        print(ct, p + '\t' + year, month)\n
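        # Added arithmetic note (not in the original script): 0.25 s of sleep per\n        # PMID times ~4000 PMIDs is only ~17 minutes, so the 30-60 minute guess\n        # above mostly budgets for request latency.\n        print(p + '\t' + 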
year, month, file=s)\n\n except:\n print(ct, 'failed:', p)\n print(p, file=f)\n\ns.close()\nf.close()\n","sub_path":"ncbi-scrape/scrape_pmid_years.py","file_name":"scrape_pmid_years.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"481960141","text":"from __future__ import annotations\n\nimport logging\nimport os\nimport types\nfrom typing import Callable, List, Any, Optional, cast\n\nimport pytesseract as tess\nimport pyttsx3 as sp\nimport speech_recognition as sr\nimport googletrans\nfrom googletrans.models import Translated\n\n\nfrom maybe import Maybe\nfrom subtypes import Enum, Str, Process\nfrom pathmagic import File, Dir, PathLike\nfrom subtypes.process import CompletedProcess\n\n\nclass ImageToText:\n def __init__(self, file: os.PathLike) -> None:\n self.file = File.from_pathlike(file)\n self.text: str = tess.image_to_string(str(self.file))\n\n def __str__(self) -> str:\n return self.text\n\n\nclass TextToSpeech:\n def __init__(self, male: bool = True) -> None:\n self.engine = sp.init()\n self.male = male\n\n @property\n def male(self) -> bool:\n return self._male\n\n @male.setter\n def male(self, val: bool) -> None:\n # noinspection PyUnresolvedReferences\n self.engine.setProperty(\"voice\", self.engine.getProperty(\"voices\")[0 if val else 2].id)\n self._male = val\n\n def speak(self, text: str) -> TextToSpeech:\n self.engine.say(text)\n self.engine.runAndWait()\n return self\n\n\nclass SpeechToText:\n def __init__(self, api: str = \"google\", logger: logging.Logger = None) -> None:\n self.rec = Recognizer(handler=self)\n self.mic = sr.Microphone()\n self.text: Optional[str] = None\n\n api_options = {\"google\": self.rec.recognize_google}\n self.api = api_options[api]\n\n if logger is not None:\n self.logger = logger\n else:\n self.logger = logging.Logger(\"listener\", level=logging.INFO)\n handler = logging.FileHandler(Dir.from_desktop().new_file(\"listener.log\"))\n handler.setFormatter(logging.Formatter(\"%(name)s - %(asctime)s - %(levelname)s - %(message)s\"))\n self.logger.addHandler(handler)\n\n self.background: List[str] = []\n self.killer: Optional[Callable] = None\n\n def __str__(self) -> str:\n return self.text\n\n def from_file(self, filepath: os.PathLike, duration: int = None, offset: int = None, adjust_for_ambient_noise: bool = False) -> SpeechToText:\n file = File.from_pathlike(filepath)\n\n if file.extension != \"wav\":\n wav_file = file.new_rename(f\"{file.stem}.wav\")\n wav_file.content = file.content\n else:\n wav_file = file\n\n audio_file = sr.AudioFile(wav_file.path)\n with audio_file as source:\n if adjust_for_ambient_noise:\n self.rec.adjust_for_ambient_noise(source)\n audio = self.rec.record(source, duration=duration, offset=offset)\n\n self.text = self.api(audio)\n return self\n\n def from_mic(self, phrase_time_limit: int = 60, timeout: int = 300) -> SpeechToText:\n with self.mic as source:\n audio = self.rec.listen(source, phrase_time_limit=phrase_time_limit, timeout=timeout)\n self.text = self.api(audio)\n return self\n\n def from_background(self, action: Callable = None, phrase_time_limit: int = 60) -> SpeechToText:\n if self.killer is None:\n callback = Maybe(action).else_(self._append_to_background_and_log)\n with self.mic as source:\n self.rec.adjust_for_ambient_noise(source)\n self.killer = self.rec.listen_in_background(self.mic, callback=callback, phrase_time_limit=phrase_time_limit)\n return self\n else:\n raise RuntimeError(\"Cannot spawn another 
background listener while one already exists.\")\n\n def kill_background_listener(self) -> SpeechToText:\n if self.killer is not None:\n self.killer(wait_for_stop=False)\n self.killer = None\n return self\n else:\n raise RuntimeError(\"No background listener exists.\")\n\n @staticmethod\n def _append_to_background_and_log(recognizer: Recognizer, audio: sr.AudioData) -> None:\n try:\n text = recognizer.handler.api(audio)\n recognizer.handler.background.append(text)\n recognizer.handler.logger.info(text)\n if text.strip().lower() in [\"exit\", \"quit\", \"bye\", \"enough\", \"stop\", \"stop listening\"]:\n TextToSpeech().speak(\"listener is now exiting...\")\n recognizer.handler.kill_background_listener()\n except sr.UnknownValueError:\n pass\n except sr.RequestError as ex:\n recognizer.handler.logger.error(f\"Could not request results from Google Speech Recognition service: {ex}\")\n\n\nclass Recognizer(sr.Recognizer):\n def __init__(self, handler: SpeechToText) -> None:\n super().__init__()\n self.handler = handler\n\n\nclass YoutubeToFile:\n class AudioFormats(Enum):\n Mp3, Wav = \"mp3\", \"wav\"\n\n class VideoFormats(Enum):\n Mp4, Mkv, Avi = \"mp4\", \"mkv\", \"avi\"\n\n def __init__(self, default_youtube_dl_args: List[str] = None) -> None:\n from convert import resources\n self.args, self.exe = Maybe(default_youtube_dl_args).else_([]), File.from_resource(package=cast(types.ModuleType, resources), name=\"downloader\", extension=\"exe\")\n\n def download_as_audio(self, url: str, location: PathLike = None, file_format: str = \"mp3\") -> File:\n path = Maybe(location).else_(os.getcwd())\n proc = Process([str(self.exe), *self.args, \"-x\", \"--audio-format\", file_format, \"-o\", f\"{os.fspath(path)}{os.sep}%(title)s.%(ext)s\", str(url)]).wait()\n\n return self._get_file_from_proc(proc=proc, path=path)\n\n def download_as_video(self, url: str, location: PathLike = None, file_format: str = \"mp4\") -> File:\n path = Maybe(location).else_(os.getcwd())\n proc = Process([str(self.exe), *self.args, \"-f\", file_format, \"-o\", f\"{os.fspath(path)}{os.sep}%(title)s.%(ext)s\", str(url)]).wait()\n\n return self._get_file_from_proc(proc=proc, path=path)\n\n def raw_youtube_dl_call(self, youtube_dl_args: List[str] = None) -> CompletedProcess:\n return Process([str(self.exe), *youtube_dl_args]).wait()\n\n def update_downloader(self) -> CompletedProcess:\n return Process([str(self.exe), \"-U\"]).wait()\n\n @staticmethod\n def _get_file_from_proc(proc: Any, path: PathLike) -> File:\n filename = Str(proc.stdout).slice.after_last(r\"Destination: \").slice.before_first(r\"\\n\").slice.after_last(os.sep if os.sep != \"\\\\\" else \"\\\\\\\\\")\n\n outdir = Dir.from_pathlike(path)\n best_match, = list(filename.fuzzy.best_n_matches(outdir.files(), num=1))\n\n return outdir.files[best_match]\n\n\nclass Translate(googletrans.Translator):\n def __init__(self, dest: str = \"en\", src: str = \"auto\", **kwargs: Any) -> None:\n super().__init__(**kwargs)\n self.default_src, self.default_dest = src, dest\n\n def translate(self, *args: Any, **kwargs: Any) -> Translated:\n return super().translate(*args, src=self.default_src, dest=self.default_dest, **kwargs)\n\n def __call__(self, *args: Any, **kwargs: Any) -> Translated:\n return self.translate(*args, **kwargs)\n\n\n","sub_path":"convert/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71697729","text":"from 
pyimagesearch.object_detection.objectdetector import ObjectDetector\nfrom pyimagesearch.descriptors.hog import HOG\nfrom pyimagesearch.utils.conf import Conf\nimport imutils\nimport argparse\nimport _pickle as cPickle\nimport cv2\n\n# construct the argument parser and parse the argument\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--conf\", required = True, help = \"path to configuration file\")\nap.add_argument(\"-i\", \"--image\", required = True, help = \"path to the image being classified\")\nargs = vars(ap.parse_args())\n\n# load the configuration file\nconf = Conf(args[\"conf\"])\n\n#load the classifier\nmodel = cPickle.loads(open(conf[\"classifier_path\"]).read())\nhog = HOG(orientations=conf[\"orientations\"], pixelsPerCell = tuple(conf[\"pixels_per_cell\"]),\ncellsPerBlock=tuple(conf[\"cells_per_block\"]), normalize=conf[\"normalize\"])\nod = ObjectDetector(model, hog)\n\n# load the image and convert it to grayscale\nimage = cv2.imread(args[\"image\"])\nimage = imutils.resize(image, width=min(260,image.shape[1]))\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n#detect the objects\n(boxes, prob) = od.detect(gray, conf[\"window_dim\"], winStep=conf[\"window_step\"],\npyramidScale=conf[\"pyramid_scale\"], minProb=conf[\"min_probability\"])\n\n#loop over the bounding boxes and draw them\nfor (startX, startY, endX, endY) in boxes:\n    cv2.rectangle(image,(startX, startY),(endX,endY), (0,0,255), 2)\n\ncv2.imshow(\"Image\",image)\ncv2.waitKey(0)\n","sub_path":"pyimagesearch/02_object_detection/test_model_no_nms.py","file_name":"test_model_no_nms.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"545207143","text":"import logging\n\nfrom qwci.commands.options import NAME\nfrom qwci.exception import QwciError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Build(object):\n    \"\"\"build the jobs\"\"\"\n\n    name = 'build'\n    help = __doc__\n    params = [\n        NAME\n    ]\n\n    def run(self, name):\n        raise QwciError('command not implemented yet')\n","sub_path":"qwci/commands/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"250986187","text":"class Board:\n    def __init__(self, height, width):\n        \"\"\"constructs a new Board object by initializing\n        the following three attributes\n        \"\"\"\n        self.height=height\n        self.width=width\n        self.slots = [[' '] * self.width for row in range(self.height)]\n\n    def __repr__(self):\n        \"\"\" Returns a string representation for a Board object.\n        \"\"\"\n        s = ''         # begin with an empty string\n        for row in range(self.height):\n            s += '|'   # one vertical bar at the start of the row\n            \n            for col in range(self.width):\n                s += self.slots[row][col] + '|'\n\n            s += '\n'  # newline at the end of the row\n\n        # Add code here for the hyphens at the bottom of the board\n        s+='--'*self.width+ '-'\n        # and the numbers underneath it.\n        s += '\n'\n        x=0\n        for count in range(self.width):\n            if x>9:\n                x=0\n            x+=1\n            s+= ' ' + str(x-1)\n\n        return s\n\n\n    def add_checker(self, checker, col):\n        \"\"\" adds checker ('X' or 'O') to the lowest open row of column col\n        \"\"\"\n        assert(checker == 'X' or checker == 'O')\n        assert(0 <= col < self.width)\n\n        row = self.height-1\n        \n        while self.slots[row][col] != ' ':\n            row-=1\n        self.slots[row][col]=checker\n\n    def reset(self):\n        \"\"\" reset the Board object on which it is called\n            by setting all slots to contain a space character\n        \"\"\"\n        for col in range(self.width):\n            for row in range 
(self.height):\n                if self.slots[row][col]!=' ':\n                    self.slots[row][col]=' ' \n\n\n    def add_checkers(self, colnums):\n        \"\"\" takes in a string of column numbers and places alternating\n            checkers in those columns of the called Board object, \n            starting with 'X'.\n        \"\"\"\n        checker = 'X'   # start by playing 'X'\n        \n        for col_str in colnums:\n            col = int(col_str)\n            if 0 <= col < self.width:\n                self.add_checker(checker, col)\n\n            # switch to the other checker\n            if checker == 'X':\n                checker = 'O'\n            else:\n                checker = 'X'\n  \n    def can_add_to(self, col):\n        \"\"\"returns True if it is valid to place a checker in the column col\n        on the calling Board object. Otherwise, it should return False.\n        \"\"\"\n        \n        if 0<=col<(self.width):\n            \n            if self.slots[0][col] == ' ':\n                return True\n            else:\n                \n                return False\n        else:\n            \n            return False\n    \n    def is_full(self):\n        \"\"\"returns True if the called Board object is completely full of checkers,\n        and returns False otherwise\n        \"\"\"\n        # only return True once every column has been checked\n        for col in range(self.width):\n            if self.can_add_to(col):\n                return False\n        return True\n        \n        \n    def remove_checker(self, col):\n        \"\"\"removes the top checker from column col of the called Board\n        object\n        \"\"\"\n        row=0\n        while self.slots[row][col]==' ' and row<(self.height-1):\n            row+=1\n        self.slots[row][col]=' '\n        \n\n    def is_horizontal_win(self, checker):\n        \"\"\" Checks for a horizontal win for the specified checker.\n        \"\"\"\n        for row in range(self.height):\n            for col in range(self.width-3):\n                # Check if the next four columns in this row\n                # contain the specified checker.\n                if self.slots[row][col] == checker and \\\n                   self.slots[row][col + 1] == checker and \\\n                   self.slots[row][col + 2] == checker and \\\n                   self.slots[row][col + 3] == checker:\n                    return True\n\n        # if we make it here, there were no horizontal wins\n        return False\n    \n    def is_vertical_win(self, checker):\n        \"\"\" Checks for a vertical win for the specified checker.\n        \"\"\"\n        for row in range(self.height-3):\n            for col in range(self.width):\n                # Check if the next four rows in this column\n                # contain the specified checker.\n                if self.slots[row][col] == checker and \\\n                   self.slots[row+1][col] == checker and \\\n                   self.slots[row+2][col] == checker and \\\n                   self.slots[row+3][col] == checker:\n                    return True\n\n        # if we make it here, there were no vertical wins\n        return False\n    def is_down_diagonal_win(self,checker):\n        \"\"\" Checks for a down diagonal win for the specified checker.\n        \"\"\"\n        \n        for row in range(self.height-3):\n            for col in range(self.width-3):\n                # Check if the next four slots down this diagonal\n                # contain the specified checker.\n                if self.slots[row][col] == checker and \\\n                   self.slots[row+1][col+1] == checker and \\\n                   self.slots[row+2][col+2] == checker and \\\n                   self.slots[row+3][col+3] == checker:\n                    return True\n        return False\n\n    def is_up_diagonal_win(self,checker):\n        \"\"\" Checks for an up diagonal win for the specified checker.\n        \"\"\"\n        for row in range(3, self.height):\n            for col in range(self.width-3):\n                # Check if the next four slots up this diagonal\n                # contain the specified checker.\n                if self.slots[row][col] == checker and \\\n                   self.slots[row-1][col+1] == checker and \\\n                   self.slots[row-2][col+2] == checker and \\\n                   self.slots[row-3][col+3] == checker:\n                    return True\n        return False\n    \n    def is_win_for(self, checker):\n        \"\"\" accepts a parameter checker that is either 'X' or 'O', and returns True if there are\n            four consecutive slots containing checker on the board\n        \"\"\"\n        assert(checker == 'X' or checker == 'O')\n\n
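        # Illustrative note (added): a win is four consecutive slots of checker in\n        # any direction; the helpers above cover horizontal, vertical, and both\n        # diagonals, so it suffices to OR their results.\n        # call the helper 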
functions and use their return values to\n # determine whether to return True or False\n if self.is_horizontal_win(checker) or self.is_vertical_win(checker) or self.is_down_diagonal_win(checker) or self.is_up_diagonal_win(checker)==True:\n return True\n else:\n return False\n \n\n \n\n\n \n\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":6436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"489803379","text":"from ..sqlalchemy.db_support.init_params import Init_Params\r\nfrom ..sqlalchemy.make_conn import MakeConn\r\n\r\nparams_hq = Init_Params()\r\nparams_hq.DBTYPE = 'mssql'\r\nparams_hq.HOST = '10.20.233.103'\r\nparams_hq.UID = 'ad'\r\nparams_hq.PWD = 'ca$hc0w'\r\n#params_hq.DB = '01_HQ'\r\nparams_hq.DB = 'HQ_TEST'\r\n\r\npp_conn = MakeConn(params_hq)\r\n\r\nparams_rh = Init_Params()\r\nparams_rh.DBTYPE = 'mssql'\r\nparams_rh.HOST = '10.20.233.103'\r\nparams_rh.UID = 'ad'\r\nparams_rh.PWD = 'ca$hc0w'\r\n#params_rh.DB = '01_RH'\r\nparams_rh.DB = 'HR_TEST'\r\n\r\nss_conn = MakeConn(params_rh)\r\n","sub_path":"srm_db_tool/config_mgr/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"435488755","text":"from .base import BaseEncoder\nimport io\nimport numpy\n\nclass NDArrayEncoder(BaseEncoder):\n @classmethod\n def can_load(cls, file):\n test_bytes = file.peek(1024)\n try:\n magic = numpy.lib.npyio.format.read_magic(io.BytesIO(test_bytes))\n if magic:\n return True\n except ValueError:\n pass\n finally:\n file.seek(0)\n\n return False\n\n @classmethod\n def load(cls, file):\n return numpy.lib.npyio.format.read_array(file)\n\n @classmethod\n def can_dump(cls, obj):\n return type(obj) == numpy.ndarray\n\n @classmethod\n def dump(cls, obj, file):\n numpy.lib.npyio.format.write_array(file, obj)\n","sub_path":"jug/backends/encoders/numpy_encoder.py","file_name":"numpy_encoder.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"560494702","text":"from itertools import chain\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport os\n\nclass findElementsDemo:\n\n def findELemetsMethod(self):\n\n chromedriverpath=\"D:\\Shishu Raj Pandey\\Software\\Browser Drivers\\chromedriver\\chromedriver.exe\"\n os.environ['webdriver.chrome.driver']=chromedriverpath\n driver=webdriver.Chrome(chromedriverpath)\n\n driver.get(\"https://learn.letskodeit.com/p/practice\")\n id_elements= driver.find_elements_by_id('carselect')\n tag_elements=driver.find_elements_by_tag_name('a')\n class_elements=driver.find_elements(By.CLASS_NAME,'class1')\n\n print(len(id_elements))\n print(len(tag_elements))\n print(len(class_elements))\n\n\nfindByElements=findElementsDemo()\nfindByElements.findELemetsMethod()","sub_path":"findingElements/findElementsDemo.py","file_name":"findElementsDemo.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"446227086","text":"from sklearn.preprocessing import StandardScaler\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\ndf_wine = pd.read_csv(\"./wine_data.txt\", header=None)\ndf_wine.columns = [\"class_label\", \"alcohol\", \"malic_acid\", \"ash\",\n \"alcalinity_of_ash\", 
\"magnesium\", \"total_phenols\",\n \"flavanoids\", \"nonflavanoid_phenols\", \"proanthocyanins\",\n \"color_intensity\", \"hue\", \"od280/od315_of_diluted_wines\",\n \"proline\"]\n\nX, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=0,\n stratify=y)\n\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std = stdsc.transform(X_test)\n\nlr = LogisticRegression(penalty=\"l1\", C=1.0, solver=\"saga\",\n multi_class=\"auto\", max_iter=1000)\nlr.fit(X_train_std, y_train)\nprint(\"Training Accuracy: {}.\\n\"\n \"Test Accuracy: {}.\".format(lr.score(X_train_std, y_train),\n lr.score(X_test_std, y_test)))\nprint(lr.coef_)","sub_path":"henris_coding/chapter_04/i_L1_regularisation.py","file_name":"i_L1_regularisation.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"173173298","text":"import bs4, requests\n\nres = requests.get('https://www.amazon.com/dp/B019O5F7TK?ref=emc_b_5_t')\nprint(res.raise_for_status())\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\n#print(soup)\nelems = soup.select('#priceblock_ourprice')\nprint(elems[0].text.strip()) #need .text to call out just the text\n\n\n\n\n\n#import requests, os\n#\n#res = requests.get('https://automatetheboringstuff.com/files/rj.txt')\n#\n#print(res.status_code)\n#print(len(res.text))\n#print(res.raise_for_status())\n##print(res.text[:501])\n#os.chdir('/Users/mich4911/PycharmProjects')\n#with open('RomioAndJuliet.txt', 'wb') as playfile:\n# for chunk in res.iter_content(100000):\n# playfile.write(chunk)\n#","sub_path":"Request.py","file_name":"Request.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"457454311","text":"from django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom django.template import Context, loader, Template, RequestContext\nfrom django.template.loader import get_template\nfrom AACIapp.models import *\nfrom django.shortcuts import render, render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.mail import send_mail, BadHeaderError\nimport datetime\nimport urllib2, simplejson, json\nimport csv, time\nfrom django.utils import timezone\nimport time\n\nfrom django.contrib import messages\nfrom django.forms.formsets import formset_factory\nfrom django.shortcuts import render_to_response\nfrom django.template.context import RequestContext\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n\nfrom bootstrap_toolkit.widgets import BootstrapUneditableInput\n\n# from .forms import TestForm, TestModelForm, TestInlineForm, WidgetsForm, FormSetInlineForm\n\n\ndef register_navigator(request, error=None):\n error = False\n error_list = []\n if request.method == 'POST':\n form = NavigatorRegistrationForm(request.POST)\n if form.is_valid():\n if form.clean_password():\n cd = form.cleaned_data\n email = cd['email']\n if NavigatorVerifiedEmail.objects.filter(email=email) != None:\n user = form.save()\n email_navigator_after_registration(cd['first_name'], email)\n email_managers_notifying_new_navigator(cd['first_name'] + \" \" + cd['last_name'], email)\n return HttpResponseRedirect(\"/\")\n else:\n error = True\n 
error_list.append(\"The email you have chosen to register with has not been authorized by an AACI manager.\")\n else:\n error = True\n error_list.append(\"Please select a password and ensure both password fields match.\")\n else:\n form = NavigatorRegistrationForm()\n return render_to_response(\"create_user_n.html\", {'user_type':get_user_type(request),\n 'form': form, 'error': error, 'error_list':error_list, 'worker': False,\n }, context_instance=RequestContext(request))\n\ndef register_worker(request, error=None):\n if request.method == 'POST':\n form = WorkerRegistrationForm(request.POST)\n if form.is_valid():\n if form.clean_password():\n user = form.save()\n return HttpResponseRedirect(\"/\")\n else:\n return HttpResponseRedirect(\"/Register/Password\")\n else:\n form = WorkerRegistrationForm()\n return render_to_response(\"create_user_w.html\", {\n 'form': form, 'error': error, 'worker': True,'user_type':get_user_type(request)\n }, context_instance=RequestContext(request))\ndef index(request):\n return render_to_response(\"index.html\", {'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\ndef login_view(request, error=None):\n if request.method=='POST':\n email= request.POST['email']\n password = request.POST['password']\n user=authenticate(email=email,password=password)\n if user is not None:\n if ManagerVerifiedEmail.objects.filter(email=email) or NavigatorVerifiedEmail.objects.filter(email=email):\n login(request, user)\n return HttpResponseRedirect(\"/\")\n else:\n return render_to_response(\"login.html\", {'error': \"expired_user\",'user_type':get_user_type(request)}, context_instance=RequestContext(request))\n else:\n return render_to_response(\"login.html\", {\n 'error': \"incorrect_login\",'user_type':get_user_type(request)\n }, context_instance=RequestContext(request))\n return render_to_response(\"login.html\", {'error':None,'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\ndef logout_view(request):\n logout(request)\n return render_to_response(\"logout.html\", {'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\n#########################################################\n#########################################################\n#########################################################\n######## Report CSV Views #########\n#########################################################\n#########################################################\n#########################################################\ndef reports(request):\n if not request.user.is_worker:\n return HttpResponse(\"User must be signed in as a manager to view this page.\")\n if request.method == 'POST':\n if 'navigator_reports' in request.POST:\n return navigator_reports(request)\n if 'manager_reports' in request.POST:\n return manager_reports(request)\n if 'appointment_reports' in request.POST:\n return appointment_reports(request)\n if 'navigator_feedback_reports' in request.POST:\n return navigator_feedback_reports(request)\n return render_to_response(\"work_reports.html\", {'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\ndef navigator_feedback_reports(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename= \"AACI_Navigator_Reports_{0}.csv\"'.format(time.strftime(\"%m/%d/%Y\"))\n writer = csv.writer(response, dialect='excel')\n writer.writerow(['id','Appointment ID', 'Navigator ID', 'Patient ID', 'Navigator Name','Date/Time',\n 
'Hospital','Picked Up','Arrived On Time','Received Care','Dropped Off', 'Additional Comments'])\n    feedback_reports = Report.objects.all()\n    for report in feedback_reports:\n        writer.writerow([report.id, report.appointment.id, report.appointment.navigator.id, report.appointment.patient.id,\n            report.navigator.full_name, report.appointment.appointment_datetime, report.appointment.hospital,\n            report.picked_up, report.arrived_on_time, report.received_care, report.dropped_off, report.additional_comments])\n    return response\n\ndef appointment_reports(request):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename= \"AACI_Appointment_Reports_{0}.csv\"'.format(time.strftime(\"%m/%d/%Y\"))\n    writer = csv.writer(response, dialect='excel')\n    writer.writerow(['id',\n                    'Patient ID',\n                    'Navigator ID',\n                    'Manager ID',\n                    'Date/Time',\n                    'Navigator Name',\n                    'Navigator Phone',\n                    'Manager Name',\n                    'Manager Phone',\n                    'Patient Name',\n                    'Patient Phone',\n                    'Patient Language',\n                    'Hospital',\n                    'Department',\n                    'Gender Preference',\n                    'Special Instructions',\n                    'Check-In Address',\n                    'Check-In Date/Time',\n                    'Claimed',\n                    'Cancelled'])\n    appointments = Appointment.objects.all()\n    for appointment in appointments:\n        navigator_id, navigator_phone, navigator_full_name = \"\", \"\", \"\"\n        if appointment.navigator:\n            navigator_id = appointment.navigator.id\n            navigator_full_name = appointment.navigator.full_name\n            navigator_phone = appointment.navigator.phone\n        writer.writerow([appointment.id, appointment.patient.id,\n                    navigator_id,\n                    appointment.worker.id,\n                    appointment.appointment_datetime,\n                    navigator_full_name,\n                    navigator_phone,\n                    appointment.worker.full_name,\n                    appointment.worker.phone,\n                    appointment.patient.full_name,\n                    appointment.patient.phone,\n                    appointment.patient.language,\n                    appointment.hospital,\n                    appointment.department,\n                    appointment.gender_preference,\n                    appointment.special_instructions,\n                    appointment.checkin_address,\n                    appointment.checkin_time,\n                    appointment.is_claimed,\n                    appointment.is_canceled])\n    return response\n\ndef navigator_reports(request):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename= \"AACI_Navigators_{0}.csv\"'.format(time.strftime(\"%m/%d/%Y\"))\n    writer = csv.writer(response, dialect='excel')\n    writer.writerow(['id','Navigator Name','Phone','Email','Gender','# Late Drops'])\n    navigators = AACIUser.objects.filter(is_navigator = True)\n    for navigator in navigators:\n        writer.writerow([navigator.id, navigator.full_name, navigator.phone, navigator.email,navigator.gender,\n            navigator.num_late_drops])\n    return response\n\ndef manager_reports(request):\n    response = HttpResponse(content_type='text/csv')\n    response['Content-Disposition'] = 'attachment; filename= \"AACI_Managers_{0}.csv\"'.format(time.strftime(\"%m/%d/%Y\"))\n    writer = csv.writer(response, dialect='excel')\n    writer.writerow(['id','Manager Name','Phone','Email','Hospital'])\n    managers = AACIUser.objects.filter(is_worker = True)\n    for manager in managers:\n        writer.writerow([manager.id, manager.full_name, manager.phone, manager.email, manager.hospital])\n    return response\n\ndef registration_complete(request):\n    return render_to_response(\"registration_complete.html\", {'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\ndef appointment_claimed(request):\n    return render_to_response(\"appointment_claimed.html\", 
{'user_type':get_user_type(request)},context_instance=RequestContext(request))\n\ndef send_confirmation_email(appointment):\n navigator = AACIUser.objects.get(id=appointment.navigator_id)\n navigator_email = navigator.email\n manager = AACIUser.objects.get(id=appointment.worker_id)\n manager_email = manager.email\n subject = \"AACI Patient Navigator Appointment Confirmation\"\n datetime = parse_datetime(appointment.appointment_datetime)\n navigator_message = \"Hello \" + navigator.first_name + \",\\n\\nThank you for accepting the appointment to provide Patient Navigation services on \" +\\\n datetime + \". Please review the summary information for your appointment.\\n\" +\\\n \"\\n\\tDate: \"+ datetime[:10]+\\\n \"\\n\\tTime: \"+ datetime[14:]+\\\n \"\\n\\tPatient Name: \" + appointment.patient_first_name+ \" \" + appointment.patient_last_name +\\\n \"\\n\\tPatient Phone: \" + appointment.patient_phone +\\\n \"\\n\\tPatient Language: \" + appointment.patient_language +\\\n \"\\n\\tLocation of Appointment: \" + appointment.hospital +\\\n \"\\n\\tDepartment: \" + appointment.department +\\\n \"\\n\\tSpecial Instructions: \" + appointment.special_instructions +\\\n \"\\n\\tMeet-up Location: (default selections: AACI Health Clinic - Moorpark, Valley Medical Center - Hospital, Valley Medical Center - Specialty Center OR Contact Patient to arrange meetup location)\" +\\\n \"\\n\\nSincerely,\\n\\nPatient Navigation Center\\nAsian Americans for Community Involvement (AACI)\"\n manager_message = \"Hello \" + manager.first_name + \",\\n\\nA patient navigator has signed up to volunteer for an appointment you created. \" +\\\n \"Below are the details for this appointment:\\n\" +\\\n \"\\n\\tDate: \"+ datetime[:10]+\\\n \"\\n\\tTime: \"+ datetime[14:]+\\\n \"\\n\\tPatient Name: \" + appointment.patient_first_name+ \" \" + appointment.patient_last_name +\\\n \"\\n\\tPatient Phone: \" + appointment.patient_phone +\\\n \"\\n\\tPatient Language: \" + appointment.patient_language +\\\n \"\\n\\tLocation of Appointment: \" + appointment.hospital +\\\n \"\\n\\tDepartment: \" + appointment.department +\\\n \"\\n\\tSpecial Instructions: \" + appointment.special_instructions +\\\n \"\\n\\tMeet-up Location: (default selections: AACI Health Clinic - Moorpark, Valley Medical Center - Hospital, Valley Medical Center - Specialty Center OR Contact Patient to arrange meetup location)\" +\\\n \"\\n\\nSincerely,\\n\\nPatient Navigation Center\\nAsian Americans for Community Involvement (AACI)\"\n from_email = \"Patient Navigation Center \"\n\n if subject and navigator_message and from_email:\n try:\n send_mail(subject, navigator_message, from_email, [navigator_email,])\n send_mail(subject, manager_message, from_email, [manager_email,])\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n return HttpResponseRedirect('/contact/thanks/')\n else:\n # In reality we'd use a form class\n # to get proper validation errors.\n return HttpResponse('Make sure all fields are entered and valid.')\n\ndef late_drop_email(appointment, dropped_navigator):\n manager = AACIUser.objects.get(id=appointment.worker_id)\n navigator = dropped_navigator\n manager_email= manager.email\n subject = \"Navigator Dropped Appointment Within 24 Hours!\"\n datetime = parse_datetime(appointment.appointment_datetime)\n\n message = \"Hello \" + manager.first_name +\",\\n\\nThis is an alert notifying you that a navigator dropped an appointment \"+\\\n \"within 24 hours of its scheduled time. 
Below is the information for the appointment along with contact information \"+\\\n              \"for the navigator who dropped.\"+\\\n              \"\\nAppointment Details \"+\\\n              \"\\n\\tDate: \"+ datetime[:10]+\\\n              \"\\n\\tTime: \"+ datetime[14:]+\\\n              \"\\n\\tPatient Name: \" + appointment.patient_first_name+ \" \" + appointment.patient_last_name +\\\n              \"\\n\\tPatient Phone: \" + appointment.patient_phone +\\\n              \"\\n\\tPatient Language: \" + appointment.patient_language +\\\n              \"\\n\\tHospital: \" + appointment.hospital +\\\n              \"\\n\\tDepartment: \" + appointment.department +\\\n              \"\\n\\tSpecial Instructions: \" + appointment.special_instructions +\\\n              \"\\n\\nNavigator Who Dropped\"+\\\n              \"\\n\\tName: \" + navigator.first_name + \" \" + navigator.last_name +\\\n              \"\\n\\tEmail: \" + navigator.email +\\\n              \"\\n\\tPhone: \" + navigator.phone +\\\n              \"\\n\\nSincerely,\\n\\nPatient Navigation Center\\nAsian Americans for Community Involvement (AACI)\"\n    from_email = \"Patient Navigation Center \"\n\n    if subject and message and from_email:\n        try:\n            send_mail(subject, message, from_email, [manager_email,])\n        except BadHeaderError:\n            return HttpResponse('Invalid header found.')\n        return HttpResponseRedirect('/contact/thanks/')\n    else:\n        # In reality we'd use a form class\n        # to get proper validation errors.\n        return HttpResponse('Make sure all fields are entered and valid.')\n\ndef send_three_cancellation_email(email):\n    subject = \"Navigator Account Suspended\"\n    from_email = \"Patient Navigation Center \"\n    message = \"Dear Navigator,\\n\\nWe regret to inform you that your ability \"+\\\n              \"to accept appointments has been suspended. This is due to the cancellation \"+\\\n              \"of 3 navigation appointments within 24 hours of the appointment. Last minute \"+\\\n              \"appointment cancellations create scheduling conflicts for our clinic staff and patients.\\n\\n\"+\\\n              \"You will not be permitted to accept appointments from the PNC website until you have checked in with \"+\\\n              \"your Clinic Manager at XXX.XXXX@aaci.org.\\n\\nSincerely,\\n\\nPatient Navigation Team\"\n    send_mail(subject, message, from_email, [email,])\n\ndef parse_datetime(datetime):\n    datetime = str(datetime)\n    hour, minute = datetime[11:13], datetime[14:16]\n    hour, am_pm = int(hour), \"am\" \n    hour-=8\n    # decide am/pm before converting from 24-hour to 12-hour form\n    if hour >=12:\n        am_pm = \"pm\"\n    if hour>=13:\n        hour-=12\n    return datetime[5:7]+\"/\"+datetime[8:10]+\"/\"+datetime[0:4]+\" at \"+str(hour)+\":\"+minute+am_pm\n\ndef create_appointment(request):\n    if not request.user.is_worker:\n        return HttpResponse(\"User must be signed in as a manager to view this page.\")\n    if request.method == 'POST':\n        patient_form = PatientForm(request.POST)\n        if patient_form.is_valid():\n            new_patient = patient_form.save()\n            appointment_form = AppointmentForm(request.POST)\n            # if appointment_form.is_valid():\n            new_appointment = appointment_form.save()\n            new_appointment.worker = request.user\n            new_appointment.patient = new_patient\n            new_appointment.save()\n            # else:\n            #     raise Exception(\"Appointment not valid!\")\n            return HttpResponseRedirect('/ProcessAppointment')\n    else:\n        appointment_form = AppointmentForm()\n        patient_form = PatientForm()\n    return render_to_response(\"newappointment.html\", {'worker':request.user.id, 'user_type':get_user_type(request)}, context_instance=RequestContext(request))\n\n\n@csrf_exempt\ndef process_appointment(request):\n    t = loader.get_template('processForm.html')\n    #return render(request, t , {'form': form,})\n    return HttpResponse(t.render(Context({'user_type':get_user_type(request)})))\n\ndef 
\ndef demo_form(request):\n messages.success(request, 'I am a success message.')\n layout = request.GET.get('layout')\n if not layout:\n layout = 'vertical'\n if request.method == 'POST':\n form = TestForm(request.POST)\n form.is_valid()\n else:\n form = TestForm()\n form.fields['title'].widget = BootstrapUneditableInput()\n return render_to_response('form.html', RequestContext(request, {\n 'form': form,\n 'layout': layout,\n }))\n\n\n\ndef navigator_verified_email(request):\n valid_form = True\n verified_emails = NavigatorVerifiedEmail.objects.all()[::-1]\n if request.method == 'POST':\n if 'add_email' in request.POST:\n form = NavigatorVerifiedEmailForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n valid_form = False\n if 'drop_email' in request.POST:\n remove_navigator_email(request.POST['drop_email'])\n return render_to_response(\"navigator_verified_email.html\", {'valid_form':valid_form, 'verified_emails':verified_emails, 'user_type':get_user_type(request)}, context_instance=RequestContext(request))\n\n\ndef remove_navigator_email(email):\n NavigatorVerifiedEmail.objects.filter(email=email).delete()\n\ndef manager_verified_email(request):\n valid_form = True\n verified_emails = ManagerVerifiedEmail.objects.all()[::-1]\n if request.method == 'POST':\n if 'add_email' in request.POST:\n form = ManagerVerifiedEmailForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n valid_form = False\n if 'drop_email' in request.POST:\n remove_manager_email(request.POST['drop_email'])\n return render_to_response(\"manager_verified_email.html\", {'valid_form':valid_form, 'verified_emails':verified_emails, 'user_type':get_user_type(request)}, context_instance=RequestContext(request))\n\n\ndef remove_manager_email(email):\n ManagerVerifiedEmail.objects.filter(email=email).delete()\n\n#########################################################\n#########################################################\n#########################################################\n######## Navigator Appointment Views #########\n#########################################################\n#########################################################\n#########################################################\n\n@login_required\ndef all_navigator_appointments(request):\n if not request.user.is_navigator:\n return HttpResponse(\"User must be signed in as a navigator to view this page.\")\n if request.method=='POST':\n if 'claimed_appointment' in request.POST:\n claim_appointment(request)\n if 'drop_appointment' in request.POST:\n drop_appointment(request)\n\n query_results = Appointment.objects.filter(appointment_datetime__gt=datetime.datetime.now()).order_by('appointment_datetime')\n appointments = []\n for appointment in query_results:\n item = [appointment]\n if (request.user.gender == appointment.gender_preference or appointment.gender_preference == \"Neutral\") and appointment.is_claimed == False:\n item.append(True)\n else:\n item.append(False)\n if appointment.navigator == request.user:\n item.append(True)\n else:\n item.append(False)\n appointments.append(item)\n return render_to_response(\"all_navigator_appointments.html\", {'appointments': appointments,'user_type':get_user_type(request)} , context_instance=RequestContext(request))\n
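\n# Hedged sketch (not in the original file): the claimable/mine flags appended to each\n# appointment row above could be factored into a small helper; the name is illustrative.\ndef _navigator_row(user, appointment):\n claimable = (not appointment.is_claimed) and \\\n (user.gender == appointment.gender_preference or appointment.gender_preference == \"Neutral\")\n return [appointment, claimable, appointment.navigator == user]\n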
\n@login_required\ndef unclaimed_navigator_appointments(request):\n if not request.user.is_navigator:\n return HttpResponse(\"User must be signed in as a navigator to view this page.\")\n if request.method=='POST':\n if 'claimed_appointment' in request.POST:\n claim_appointment(request)\n\n query_results = Appointment.objects.filter(appointment_datetime__gt=datetime.datetime.now(), is_claimed=False).order_by('appointment_datetime')\n appointments = []\n for appointment in query_results:\n item = [appointment]\n if request.user.gender == appointment.gender_preference or appointment.gender_preference == \"Neutral\":\n item.append(True)\n else:\n item.append(False)\n appointments.append(item)\n return render_to_response(\"unclaimed_navigator_appointments.html\", {'appointments': appointments, 'user_type':get_user_type(request)} , context_instance=RequestContext(request))\n\n@login_required\ndef my_navigator_upcoming_appointments(request):\n if not request.user.is_navigator:\n return HttpResponse(\"User must be signed in as a navigator to view this page.\")\n if request.method=='POST':\n if 'drop_appointment' in request.POST:\n drop_appointment(request)\n\n query_results = [[item,] for item in Appointment.objects.filter(appointment_datetime__gt=datetime.datetime.now(), navigator_id=request.user.id).order_by('appointment_datetime')]\n return render_to_response(\"my_navigator_upcoming_appointments.html\", {'appointments': query_results, 'user_type':get_user_type(request)} , context_instance=RequestContext(request))\n\n@login_required\ndef my_navigator_past_appointments(request):\n if not request.user.is_navigator:\n return HttpResponse(\"User must be signed in as a navigator to view this page.\")\n if request.method=='POST':\n if 'appointment_report' in request.POST:\n appointment_report(request)\n if 'appointment_checkin' in request.POST:\n appointment_checkin(request)\n query_results = Appointment.objects.filter(appointment_datetime__lt=datetime.datetime.now(), navigator_id=request.user.id).order_by('-appointment_datetime')\n appointments = []\n for appointment in query_results:\n item = [appointment]\n try:\n x = appointment.report\n item.append(False)\n except Exception: # no report attached to this appointment yet\n item.append(True)\n if appointment.checkin_address == \"\":\n item.append(True)\n else:\n item.append(False)\n appointments.append(item)\n return render_to_response(\"my_navigator_past_appointments.html\", {'appointments': appointments, 'user_type':get_user_type(request)} , context_instance=RequestContext(request))\n\n\n# def submit_report(request):\n
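\n# Hedged sketch (not in the original file): the try/except probe in\n# my_navigator_past_appointments above tests whether an appointment already has a\n# report attached; with a one-to-one 'report' relation it can be written explicitly.\n# The helper name is illustrative.\nfrom django.core.exceptions import ObjectDoesNotExist\n\ndef _has_report(appointment):\n try:\n return appointment.report is not None\n except ObjectDoesNotExist:\n return False\n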
\n#########################################################\n#########################################################\n#########################################################\n######## Manager Appointment Views #########\n#########################################################\n#########################################################\n#########################################################\n\n\"\"\"\nView - all_manager_appointments(request)\nURL: /Manager/appointments\nA view to display all upcoming appointments on the manager appointments page.\nHandles edit and delete requests submitted from the page.\n\"\"\"\n@login_required\ndef all_manager_appointments(request):\n if not request.user.is_worker:\n return HttpResponse(\"User must be signed in as a manager to view this page.\")\n if request.method=='POST':\n if 'edit_appointment' in request.POST:\n edit_appointment(request)\n if 'delete_appointment' in request.POST:\n delete_appointment(request)\n\n query_results = Appointment.objects.filter(appointment_datetime__gt=datetime.datetime.now()).order_by('appointment_datetime')\n appointments = []\n\n for appointment in query_results:\n item = [appointment]\n if request.user.id == appointment.worker.id:\n item.append(True)\n else:\n item.append(False)\n item.append(appointment.appointment_datetime.strftime(\"%Y-%m-%d %H:%M\"))\n if appointment.navigator == None:\n item.append(\"no_navigator\")\n else:\n item.append(appointment.navigator)\n appointments.append(item)\n return render_to_response(\"all_manager_appointments.html\", {'appointments': appointments, 'user_type':get_user_type(request)} , context_instance=RequestContext(request))\n\n\n\n\n\ndef ajax_test(request):\n return render_to_response(\"ajax_test.html\", {}, context_instance=RequestContext(request))\n\n#########################################################\n#########################################################\n#########################################################\n######## Email Handling Functions #########\n#########################################################\n#########################################################\n#########################################################\n\n\ndef email_navigator_after_registration(navigator_name, navigator_email):\n message = \"Hello {0},\\n\\nThank you for registering on the PNC Portal. Once an AACI Manager has confirmed your account, you\\\n will be able to view and select appointments. PNC staff will also provide you with training on the PNC Portal and review \\\n policies and procedures.\\n\\nWelcome to the AACI Patient Navigation Team!\\n\\nSincerely,\\nPNC Team\".format(navigator_name)\n subject = \"Thank You for Registering as a Patient Navigator\"\n send_mail(subject, message, \"Patient Navigation Center \", [navigator_email,])\n\ndef email_navigator_appointment_cancellation(appointment):\n message = \"Hello {0},\\n\\nThis email is to inform you that the following appointment you accepted has been cancelled.\\n\\n\\\n Appointment Date/Time: {1}\\nPatient: {2}\\nLocation: {3}\\nPlease log back into the PNC Portal to view appointments\\\n available for selection.\\n\\nThank you for your understanding,\\nPNC Team\".format\\\n (appointment.navigator.first_name, appointment.appointment_datetime, appointment.patient.first_name + \" \" + appointment.patient.last_name, appointment.hospital)\n subject = \"An Appointment of Yours has been Cancelled\"\n send_mail(subject, message, from_email, [appointment.navigator.email])\n\ndef email_managers_notifying_new_navigator(navigator_name, navigator_email):\n message = \"Dear AACI Manager,\\n\\nPlease be advised that the following individual has signed up for access to the PNC Portal:{0}\\n{1}\\n\\nPlease confirm the account as soon as possible so the patient navigator can begin accessing the PNC Portal. Please contact PNC staff if this new account has been created in error.\\n\\nThank you,\\nPNC Team\".format(navigator_name, navigator_email)\n subject = \"A New Navigator Has Registered for AACI's PNC System\"\n send_mail(subject, message, \"Patient Navigation Center \", [user.email for user in AACIUser.objects.filter(is_worker = True)])\n\n# def email_navigator_three_day_reminder():\n\ndef email_navigator_appointment_acceptance(appointment):\n navigator = AACIUser.objects.get(id=appointment.navigator_id)\n navigator_email = navigator.email\n manager = AACIUser.objects.get(id=appointment.worker_id)\n manager_email = manager.email\n subject = \"AACI Patient Navigator Appointment Confirmation\"\n datetime = parse_datetime(appointment.appointment_datetime)\n navigator_message = \"Hello \" + navigator.first_name + \",\\n\\nThank you for accepting an appointment to provide Patient Navigation services on \" +\\\n datetime + \". 
Please review the summary information for your appointment.\\n\" +\\\n \"\\n\\tDate: \"+ datetime[:10]+\\\n \"\\n\\tTime: \"+ datetime[14:]+\\\n \"\\n\\tPatient Name: \" + appointment.patient_first_name+ \" \" + appointment.patient_last_name +\\\n \"\\n\\tPatient Phone: \" + appointment.patient_phone +\\\n \"\\n\\tPatient Language: \" + appointment.patient_language +\\\n \"\\n\\tLocation of Appointment: \" + appointment.hospital +\\\n \"\\n\\tDepartment: \" + appointment.department +\\\n \"\\n\\tSpecial Instructions: \" + appointment.special_instructions +\\\n \"\\n\\tMeet-up Location: (default selections: AACI Health Clinic - Moorpark, Valley Medical Center - Hospital, Valley Medical Center - Specialty Center OR Contact Patient to arrange meetup location)\" +\\\n \"\\n\\nSincerely,\\n\\nPatient Navigation Center\\nAsian Americans for Community Involvement (AACI)\"\n manager_message = \"Hello \" + manager.first_name + \",\\n\\nA patient navigator has signed up to volunteer for an appointment you created. \" +\\\n \"Below are the details for this appointment:\\n\" +\\\n \"\\n\\tDate: \"+ datetime[:10]+\\\n \"\\n\\tTime: \"+ datetime[14:]+\\\n \"\\n\\tPatient Name: \" + appointment.patient_first_name+ \" \" + appointment.patient_last_name +\\\n \"\\n\\tPatient Phone: \" + appointment.patient_phone +\\\n \"\\n\\tPatient Language: \" + appointment.patient_language +\\\n \"\\n\\tLocation of Appointment: \" + appointment.hospital +\\\n \"\\n\\tDepartment: \" + appointment.department +\\\n \"\\n\\tSpecial Instructions: \" + appointment.special_instructions +\\\n \"\\n\\tMeet-up Location: (default selections: AACI Health Clinic - Moorpark, Valley Medical Center - Hospital, Valley Medical Center - Specialty Center OR Contact Patient to arrange meetup location)\" +\\\n \"\\n\\nSincerely,\\n\\nPatient Navigation Center\\nAsian Americans for Community Involvement (AACI)\"\n from_email = \"Patient Navigation Center \"\n\n if subject and navigator_message and from_email:\n try:\n send_mail(subject, navigator_message, from_email, [navigator_email,])\n send_mail(subject, manager_message, from_email, [manager_email,])\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n return HttpResponseRedirect('/contact/thanks/')\n else:\n # In reality we'd use a form class\n # to get proper validation errors.\n return HttpResponse('Make sure all fields are entered and valid.')\n\ndef email_manager_appointment_acceptance(appointment):\n email = AACIUser.objects.get(id=appointment.worker_id).email\n subject = \"An AACI Navigator has Accepted an Appointment You Created\"\n\n\n\n\n#########################################################\n#########################################################\n#########################################################\n######## Helper Functions #########\n#########################################################\n#########################################################\n#########################################################\n\n\"\"\"\nHelper Function - appointment_details(appointment)\nReturns a String, appropriately formatted, to provide details for an appointment.\nThis function is intended to be called when generating emails that need to include appointment details\nin the body of the message\n\"\"\"\n# def appointment_details(appointment):\n# return \"\\tDate: {0} at {1}\\n\\tPatient Name: {2}\\n\\tPatient Phone: {3}\\n\\tPatient Language: \\\n# {4}\\n\\tLocation of Appointment: {5}\\n\\tDepartment: {6}\\n\\tSpecial Instructions: \\\n# {7}\\n\".format(, 
appointment.patient.full_name, appointment.patient_phone,\\\n# )\n\ndef get_user_type(request):\n try:\n if request.user.is_navigator:\n return \"navigator\"\n if request.user.is_worker:\n return \"manager\"\n except:\n return \"none\"\n return \"none\"\n\ndef edit_appointment(request):\n appointment = Appointment.objects.get(id=request.POST['edit_appointment'])\n appointment.patient_first_name = request.POST['patient_first_name']\n appointment.patient_last_name = request.POST['patient_last_name']\n appointment.patient_phone = request.POST['patient_phone']\n appointment.patient_language = request.POST['patient_language']\n appointment.special_instructions = request.POST['special_instructions']\n appointment.gender_preference = request.POST['gender_preference']\n appointment.appointment_datetime = request.POST['appointment_datetime']\n appointment.save()\n\ndef delete_appointment(request):\n appointment = Appointment.objects.get(id=request.POST['delete_appointment'])\n appointment.is_canceled = True\n appointment.save()\n email_navigator_appointment_cancellation(appointment)\n\ndef claim_appointment(request):\n appointment = Appointment.objects.get(id=request.POST['claimed_appointment'])\n appointment.is_claimed = True\n appointment.navigator=request.user\n appointment.save()\n send_confirmation_email(appointment)\n return HttpResponseRedirect(\"/AppointmentClaimed\")\n\n# def cancel_appointment(request):\n# appointment_id = request.POST['cancel_appointment']\n# appointment = Appointment.objects.get(id=appointment_id)\n# appointment.is_canceled = True\n# appointment.save()\n\ndef drop_appointment(request):\n appointment_id = request.POST['drop_appointment']\n appointment = Appointment.objects.get(id=appointment_id)\n appointment.is_claimed = False\n dropped_navigator = appointment.navigator\n appointment.navigator = None\n appointment.save()\n tomorrow = timezone.make_aware(datetime.datetime.now(), timezone.get_default_timezone())+datetime.timedelta(hours=24)\n if appointment.appointment_datetime <= tomorrow:\n late_drop_email(appointment, dropped_navigator)\n request.user.num_late_drops = request.user.num_late_drops+1\n request.user.save()\n if request.user.num_late_drops >= 3:\n send_three_cancellation_email(request.user.email)\n\ndef appointment_report(request):\n report = Report()\n report.appointment = Appointment.objects.get(id=request.POST['appointment_report'])\n report.picked_up = request.POST['picked_up']\n report.arrived_on_time = request.POST['arrived_on_time']\n report.received_care = request.POST['received_care']\n report.dropped_off = request.POST['dropped_off']\n report.additional_comments = request.POST['additional_comments']\n report.save()\n\ndef appointment_checkin(request):\n appointment_id = request.POST['appointment_checkin']\n appointment = Appointment.objects.get(id=appointment_id)\n latitude = request.POST['checkin_latitude']\n longitude = request.POST['checkin_longitude']\n address = lookup_address(latitude, longitude)\n appointment.checkin_address = address\n appointment.checkin_time = time.strftime(\"%m/%d/%Y %H:%M:%S\")\n appointment.save()\n\ndef lookup_address(latitude, longitude):\n url = \"https://maps.googleapis.com/maps/api/geocode/json?latlng={0},{1}&sensor=false&key=AIzaSyBoqvjg9YvxFhwQWy40r-HOiwIoLOO8zcg\".format(latitude, longitude)\n json = urllib2.urlopen(url).read()\n address_json = simplejson.loads(json)\n return address_json['results'][0]['formatted_address']\n\ndef find_patient_from_phone(phone):\n phone_list = Patient.objects.filter(phone = 
phone)\n if len(phone_list) == 1:\n return phone_list[0]\n\ndef find_patient_from_dob(dob):\n dob_list = Patient.objects.filter(dob = dob)\n if len(dob_list) == 1:\n return dob_list[0]\n\ndef find_patient(phone, dob):\n patient_list = Patient.objects.filter(phone = phone, dob = dob)\n if len(patient_list) == 1:\n return patient_list[0]\n\n#########################################################\n#########################################################\n#########################################################\n######## Global Variables #########\n#########################################################\n#########################################################\n#########################################################\n\nfrom_email = \"Patient Navigation Center \"\n\n\n\ndef api_patient_lookup(request, dob, phone):\n patient, dob_provided, phone_provided = None, dob != \"9999999999\", phone != \"9999999999999\"\n if dob_provided:\n dob = datetime.date(int(dob[6:]), int(dob[0:2]), int(dob[3:5]))\n if phone_provided:\n phone = phone[0:5] + \" \" + phone[5:]\n if not dob_provided and phone_provided:\n patient = find_patient_from_phone(phone)\n elif dob_provided and not phone_provided: \n patient = find_patient_from_dob(dob)\n elif dob_provided and phone_provided:\n patient = find_patient(phone, dob)\n if patient:\n data = {'num_patients' : 1, 'first_name' : patient.first_name, 'last_name': patient.last_name,\n 'dob' : patient.dob.strftime(\"%m/%d/%Y\"), 'phone' : patient.phone, 'language' : patient.language}\n return HttpResponse(json.dumps(data))\n data = {\"num_patients\": 0}\n return HttpResponse(json.dumps(data))\n\n\n\n","sub_path":"AACIapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":37587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"503807356","text":"#https://leetcode.com/problems/binary-number-with-alternating-bits/description/\r\n\"\"\"\r\nGiven a positive integer, check whether it has alternating bits: namely, if two adjacent bits will always have different values.\r\n\r\nExample 1:\r\n\r\nInput: 5\r\nOutput: True\r\nExplanation:\r\nThe binary representation of 5 is: 101\r\n\r\nExample 2:\r\n\r\nInput: 7\r\nOutput: False\r\nExplanation:\r\nThe binary representation of 7 is: 111.\r\n\"\"\"\r\nclass Solution:\r\n def hasAlternatingBits(self, n):\r\n \"\"\"\r\n :type n: int\r\n :rtype: bool\r\n \"\"\"\r\n binary = bin(n)[2::]\r\n index = 0\r\n \r\n while index < len(binary)-1:\r\n if binary[index] == binary[index+1]:\r\n return False\r\n index += 1\r\n return True\r\n \r\n","sub_path":"LeetCode/Binary Number with Alternating Bits.py","file_name":"Binary Number with Alternating Bits.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"473136125","text":"import math\nimport time\nimport random\nimport redis\nimport copy\n\nclass Graph():\n\n \"\"\"\n elems = [left, middle, right]\n ops = [clockwise face, anticlockwise face, clockwise neighbour, anticlockwise neighbour]\n \"\"\"\n\n def __init__(self):\n self._elements = {}\n self._index = None\n\n def addElement(self, index, elems, ops, buddy):\n self._elements[index] = (elems, ops, buddy)\n self._index = index\n\n def getElements(self, index):\n return self._elements[index][0]\n\n def getBuddy(self, index):\n return self._elements[index][2]\n\n def getNeighbours(self, index):\n return self._elements[index][1]\n\n def getIndex(self):\n return 
self._index\n\n def __str__(self):\n str_desc = \"\"\n for key in range(len(self._elements.keys())):\n key = str(key)\n str_desc += \"%s: (%s)(%s){|%s||%s||%s|}(%s)(%s) Buddy: %s\\n\" % (key, self._elements[key][1][3], self._elements[key][1][1], \\\n self._elements[key][0][0], self._elements[key][0][1], self._elements[key][0][2], \\\n self._elements[key][1][0], self._elements[key][1][2], self._elements[key][2])\n return str_desc\n\ndef buildGraph(f_name):\n g = Graph()\n f = open(f_name, \"r\")\n for line in f.readlines():\n line = line.split(\":\")\n index = line[0]\n e = line[1].split(\",\")\n elems = [int(e[0]), int(e[1]), int(e[2])]\n o = line[2].split(\",\")\n ops = [o[0], o[1], o[2], o[3]]\n buddy = line[3].strip()\n g.addElement(index, elems, ops, buddy)\n f.close()\n return g\n\nclass Cube:\n\n \"\"\"\n Cube is the main cube and holds all the functions for representing and manipulating the cube.\n\n VARIABLES:\n _graph - a graph representing the cube. It has the structure of a node being a group of three stickers on the cube and the edges are all the possible nodes that\n can be reached from that node.\n _middle - a graph representing the middle part of the cube. It has the same structure as _graph\n _opposites - maps a colour of a face to the face colour on the opposite side of the cube\n _translation - maps a face colour to its letter representation for rotation\n _inverts - the opposite rotation to a letter representation\n _not_effected - maps a letter representation to the letter representation that is not affected by its rotation\n _cube - a list representing the stickers on the cube\n _r - the database that holds the F2L algorithms\n _oll_r - the database that holds the OLL algorithms\n _pll_r - the database that holds the PLL algorithms\n _wide_rotations - maps the letter representation of a wide rotation to its parameters for rotation\n _face_rotations - maps the letter representation of a face rotation to its parameters for rotation\n _cube_rotations - maps the letter representation of a cube rotation to its parameters for rotation\n \"\"\"\n\n def __init__(self):\n self._graph = buildGraph(\"../data/config.txt\")\n self._middle = buildGraph(\"../data/middle_config.txt\")\n self._opposites = {\"R\":\"O\", \"W\":\"Y\", \"G\":\"B\", \"Y\":\"W\", \"O\":\"R\", \"B\":\"G\"}\n self._translation = {\"R\":\"U\", \"W\":\"L\", \"G\":\"F\", \"Y\":\"R\", \"O\":\"D\", \"B\":\"B\"}\n self._inverts = {\"U\": \"U'\", \"D\": \"D'\", \"L\": \"L'\", \"R\": \"R'\", \"F\": \"F'\", \"B\": \"B'\", \"U'\": \"U\", \"D'\": \"D\", \"L'\": \"L\", \"R'\": \"R\", \"F'\": \"F\", \"B'\": \"B\", \"M\":\"M'\", \"M'\":\"M\"}\n self._not_effected = {\"R\":\"L\", \"L\":\"R\", \"U\":\"D\", \"D\":\"U\", \"F\":\"B\", \"B\":\"F\"}\n self._cube = self._createCube()\n self._readable_solution = []\n self._r = redis.Redis(host='localhost', port=6379, db=0)\n self._oll_r = redis.Redis(host='localhost', port=6379, db=1)\n self._pll_r = redis.Redis(host='localhost', port=6379, db=2)\n\n self._wide_rotations = {\"u\":('0', 0, '5', 0), \"u'\":('0', 1, '5', 1), \"d\":('16', 0, '5', 1), \"d'\":('16', 1, '5', 0),\n \"l\":('3', 2, '0', 0), \"l'\":('3', 3, '0', 1), \"r\":('1', 2, '0', 1), \"r'\":('1', 3, '0', 0),\n \"f\":('2', 2, '1', 0), \"f'\":('2', 3, '1', 1), \"b\":('0', 2, '1', 1), \"b'\":('0', 3, '1', 0)}\n\n self._face_rotations = {\"U\":('0', 0), \"U'\":('0', 1), \"D\":('10', 2), \"D'\":('10', 3),\n \"L\":('3', 2), \"L'\":('3', 3), \"R\":('1', 2), \"R'\":('1', 3),\n \"F\":('2', 2), \"F'\":('2', 3), \"B\":('0', 2), \"B'\":('0', 3),\n \"M\":('0', 0), \"M'\":('0', 1)}\n\n self._cube_rotations = {\"x\":(0, 0), \"x'\":(0, 1), \"y\":(1, 0), \"y'\":(1, 1), \"z\":(2, 0), \"z'\":(2, 1)}\n
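\n # Hedged helper sketch (not in the original class): shows how a notation letter\n # resolves against the three lookup tables built in __init__ above.\n def _notation_params(self, letter):\n if letter in self._wide_rotations:\n return (\"wide\",) + self._wide_rotations[letter]\n if letter in self._cube_rotations:\n return (\"cube\",) + self._cube_rotations[letter]\n return (\"face\",) + self._face_rotations[letter] # e.g. \"R'\" -> (\"face\", '1', 3)\n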
\n def _createCube(self):\n cube = []\n c = [\"R\", \"W\", \"G\", \"Y\", \"O\", \"B\"]\n for i in range(54):\n #if i < 9:\n # s = Square(str(i))\n #else:\n face = i // 9 # one face per nine consecutive stickers (inlines faceNumber(), which is not defined in the code shown)\n s = Square(c[face])\n #s = Square(str(i))\n cube.append(s)\n\n return cube\n\n def Rotate(self, index, op):\n buddyOp = op\n for _ in range(2):\n if buddyOp == 3:\n buddyOp = 0\n else:\n buddyOp += 1\n self._rotate(index, op, self._graph)\n self._rotate(self._graph.getBuddy(index), buddyOp, self._graph)\n self._translateToNotation(index, op)\n\n def RotateWithNotation(self, letter):\n rotates = 1\n if letter[-1] == \"2\":\n rotates = 2\n letter = letter[0]\n\n if letter in self._wide_rotations.keys():\n params = self._wide_rotations[letter]\n for _ in range(rotates):\n self.RotateWide(params[0], params[1], params[2], params[3])\n elif letter in self._cube_rotations.keys():\n params = self._cube_rotations[letter]\n for _ in range(rotates):\n self.RotateCube(params[0], params[1])\n else:\n face = self._face_rotations[letter]\n if letter[0] != \"M\":\n for _ in range(rotates):\n self.Rotate(face[0], face[1])\n else:\n for _ in range(rotates):\n self.RotateMiddle(face[0], face[1])\n\n def _rotate(self, index, op, graph):\n elems = []\n next_n = []\n for i in range(4):\n current_elems = graph.getElements(index)\n elems.append([None, [self._cube[current_elems[x]] for x in range(3)]])\n index = graph._elements[index][1][op]\n next_elems = graph.getElements(index)\n elems[i][0] = next_elems\n if i == 3:\n for j in range(1, 5):\n for k in range(3):\n self._cube[elems[j*-1][0][k]] = elems[j*-1][1][k]\n\n def RotateMiddle(self, index, op):\n self._rotate(index, op, self._middle)\n\n def RotateWide(self, index, op, mid_index, mid_op):\n self.Rotate(index, op)\n self.RotateMiddle(mid_index, mid_op)\n\n def RotateCube(self, index, op):\n \"\"\"\n index ==> 0:x, 1:y, 2:z\n op ==> 0:clockwise, 1:anticlockwise\n \"\"\"\n if index == 0:\n if op == 0:\n wide = self._wide_rotations[\"l'\"]\n face = self._face_rotations[\"R\"]\n else:\n wide = self._wide_rotations[\"l\"]\n face = self._face_rotations[\"R'\"]\n elif index == 1:\n if op == 0:\n wide = self._wide_rotations[\"u\"]\n face = self._face_rotations[\"D'\"]\n else:\n wide = self._wide_rotations[\"u'\"]\n face = self._face_rotations[\"D\"]\n else:\n if op == 0:\n wide = self._wide_rotations[\"f\"]\n face = self._face_rotations[\"B'\"]\n else:\n wide = self._wide_rotations[\"f'\"]\n face = self._face_rotations[\"B\"]\n self.RotateWide(wide[0], wide[1], wide[2], wide[3])\n self.Rotate(face[0], face[1])\n\n def printNodes(self):\n for index in range(24):\n index = str(index)\n elems = self._graph.getElements(index)\n print(\"|%s||%s||%s|\" % (self._cube[elems[0]].colour, self._cube[elems[1]].colour, self._cube[elems[2]].colour))\n\n def _representCube(self):\n str_desc = \"\"\n i = 0\n while i < 54:\n str_desc += \"|%s||%s||%s|\\n|%s||%s||%s|\\n|%s||%s||%s|\\n---------\\n\" % (self._cube[i].colour, self._cube[i+1].colour, self._cube[i+2].colour, self._cube[i+7].colour, self._cube[i+8].colour, self._cube[i+3].colour, self._cube[i+6].colour, self._cube[i+5].colour, self._cube[i+4].colour)\n i += 9\n return str_desc\n\n def SolveCross(self):\n \"\"\"\n Returns the steps to solving the first stage of the solve\n \"\"\"\n \n return self._solveCross()\n
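\n # Illustrative usage (hedged; constructing Cube() requires the config files and\n # the local redis databases referenced in __init__, so this is a sketch, not a test):\n #\n # cube = Cube()\n # cross_turns = cube.SolveCross() # letter moves applied, e.g. ['F', \"D'\", 'R2']\n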
\n def _solveCross(self):\n \"\"\"\n Main function of the cross solve. Starts by getting the positions of each of the cross squares. Checks whether one of them is in the bottom layer. If not\n then the closest square to the bottom layer is found and inserted. After that the anchor square is found - which is the square that the positions of all\n other cross squares are based off. Next it finds where the other cross squares are supposed to go in relation to this anchor - called anchorTargets. It then\n finds the shortest path to the respective anchor targets. It then changes this path into its letter representation, passing in squares that are already in the\n correct position so they don't get moved. This is repeated until every cross square is in the bottom layer in the correct position in respect to the other\n cross squares. Then the bottom layer is turned until it's in the correct position.\n\n #VARIABLES:\n cross_turns - the turns undertaken while solving the cross\n positions - the positions of the cross squares in the form (x, y) where x is the index in _graph and y is the index in _cube. positions[0] is the cross colour\n sticker positions, positions[1] is the corresponding colour sticker positions for those squares\n cross_positions - takes on positions[0]\n coloured_positions - takes on positions[1]\n sq_in_bottom - True if there's a square in the bottom layer, False otherwise\n close - holds the index (in cross_positions) of the square that is closest to the bottom layer, and the path to this target\n close_index - takes on close[0] (closest square's index)\n path - takes on close[1] (path to the bottom layer)\n p - the letter representation of path\n anchor_index - the index (in cross_positions) of the cross square in the bottom layer to base the other cross square positions off\n cross_solved - False until each piece is in the correct orientation in the bottom layer\n anchor_targets - the targets in the bottom layer for each cross square not inserted yet represented as a dictionary where the key is the colour of the\n corresponding sticker on the cross square and the value is a tuple ((index in _graph, index in _cube), index in cross_positions)\n both_paths - returns the path from the closest cross square to the bottom layer, the path from that square's anchor_target, the starting position of the cross\n square, and the starting position of the anchor_target square\n in_position - the colours of all the cross squares in position\n correct_path - the letter representation of the path to insert the closest cross square\n actual_turns - used to clean up cross_turns\n \"\"\"\n \n cross_turns = []\n cross_colour = \"O\" #TODO: move to global variable\n positions = self._findCrossSquares()\n cross_positions = positions[0]\n coloured_positions = positions[1]\n sq_in_bottom = False\n for i in range(len(cross_positions)):\n if cross_positions[i][1] >= 37 and cross_positions[i][1] <= 43:\n sq_in_bottom = True\n break\n if not sq_in_bottom:\n close = self._closestInitialSquare(cross_positions)\n close_index = close[0]\n path = close[1]\n built_path = []\n current = cross_positions[close_index][0]\n for item in path:\n built_path.append((current, [item]))\n current = item\n p = self._pathNotation(cross_positions[close_index][0], path, [])\n positions = self._rotateReturnPosition(cross_positions, coloured_positions, p, [], built_path)\n cross_positions = positions[0][0]\n coloured_positions = positions[0][1]\n for i in positions[1]:\n cross_turns.append(i)\n\n anchor_index = self._findAnchor(cross_positions, coloured_positions)\n cross_solved = False\n 
bottom_layer = ['16', '17', '18', '19']\n while not cross_solved:\n anchor_targets = self._getAnchorTargets(cross_positions, coloured_positions, anchor_index)\n both_paths = self._decideOnBestPath(anchor_targets, cross_positions, coloured_positions, anchor_index)\n if not both_paths:\n break\n in_position = self._findInPosition(anchor_targets, cross_positions, coloured_positions)\n in_position.append(coloured_positions[anchor_index][2])\n correct_path = self._decidePath(both_paths, in_position)\n positions = self._rotateReturnPosition(cross_positions, coloured_positions, correct_path[0], in_position, correct_path[1])\n cross_positions = positions[0][0]\n coloured_positions = positions[0][1]\n for i in positions[1]:\n cross_turns.append(i)\n anchor_targets = self._getAnchorTargets(cross_positions, coloured_positions, anchor_index)\n in_position = self._findInPosition(anchor_targets, cross_positions, coloured_positions)\n in_position.append(coloured_positions[anchor_index][2])\n #print(in_position)\n #correct = 0\n #for i in bottom_layer:\n # cube_i = self._graph.getElements(i)[1]\n # if self._cube[cube_i].colour == cross_colour:\n # correct += 1\n if len(in_position) == 4:\n cross_solved = True\n\n buddy = self._graph.getBuddy(bottom_layer[0])\n buddy_i = self._graph.getElements(buddy)[1]\n center = (((buddy_i / 9) + 1) * 9) - 1\n turns = []\n while self._cube[buddy_i].colour != self._cube[center].colour:\n self.RotateWithNotation(\"D\")\n turns.append(\"D\")\n buddy = self._graph.getBuddy(bottom_layer[0])\n buddy_i = self._graph.getElements(buddy)[1]\n center = (((buddy_i / 9) + 1) * 9) - 1\n\n actual_turns = []\n i = 0\n while i < len(cross_turns):\n if (i+1 < len(cross_turns) and cross_turns[i] == cross_turns[i+1]) and (i+2 < len(cross_turns) and cross_turns[i] == cross_turns[i+2]):\n actual_turns.append(self._inverts[cross_turns[i]])\n i += 3\n elif (i+1 < len(cross_turns) and cross_turns[i] == cross_turns[i+1]):\n actual_turns.append(cross_turns[i]+\"2\")\n i += 2\n elif i+1 < len(cross_turns) and cross_turns[i] == self._inverts[cross_turns[i+1]]:\n i += 2\n continue\n else:\n actual_turns.append(cross_turns[i])\n i += 1\n\n if len(turns) > 0:\n if len(turns) == 3:\n t = turns[0] + \"'\"\n elif len(turns) == 2:\n t = turns[0] + \"2\"\n else:\n t = turns[0]\n actual_turns.append(t)\n\n return actual_turns\n\n def _rotateReturnPosition(self, cross_positions, coloured_positions, path, in_position, path_indices):\n \"\"\"\n Rotates the cube based on path. It checks whether one of the turns in path moves one of the correctly oriented cross squares in the bottom out of position and\n corrects it after the current cross square is moved into position. It then finds the new positions of the cross squares and updates the values in positions. It\n keeps the positioning of the squares in positions consistent for simplicity for other functions. 
Returns the updated values for cross_positions and\n coloured_positions and the letter representation of the moves taken\n\n #VARIABLES:\n old_cross - holds the values of the old cross positions\n old_coloured - holds the values of the old coloured stickers of each cross square\n collisions - holds the values of any collisions that happen during turning\n taken - holds the letter representation of every turn taken, including readjustments for collisions\n new_positions - holds the new values for cross positions and their corresponding coloured stickers\n \"\"\"\n \n cross_colour = \"O\" #TODO: make global\n old_cross = cross_positions\n old_coloured = coloured_positions\n collisions = []\n taken = []\n for t in range(len(path)):\n if path[t][0] != \"D\":\n neigh = self._graph.getNeighbours(path_indices[t][0])\n for n in range(len(neigh)):\n if neigh[n] == path_indices[t][1][0]:\n op = n\n break\n if self._returnCollision(['16', '17', '18', '19'], op, path_indices[t][0], in_position):\n collisions.append(t)\n self.RotateWithNotation(path[t])\n taken.append(path[t])\n for t in collisions:\n self.RotateWithNotation(self._inverts[path[t]])\n taken.append(self._inverts[path[t]])\n new_positions = self._findCrossSquares()\n for i in range(len(old_coloured)):\n for j in range(len(new_positions[1])):\n if new_positions[1][j][2] == old_coloured[i][2]:\n old_coloured[i] = new_positions[1][j]\n old_cross[i] = new_positions[0][j]\n return ((old_cross, old_coloured), taken)\n \n def _findCrossSquares(self):\n \"\"\"\n Runs through _graph checking if the middle element of the node is the same as cross_colour, taking note of their index in _graph and _cube. It also\n takes note of the positions of the corresponding colour sticker, along with the colour of the sticker. Returns these values\n\n #VARIABLES:\n cross_positions - the index of the cross square in _graph and _cube\n buddy_positions - the index of the corresponding colour sticker in _graph and _cube, as well as the colour\n \"\"\"\n \n cross_colour = \"O\" #TODO: make global\n cross_positions = []\n buddy_positions = []\n for i in range(len(self._graph._elements)):\n sq_index = self._graph.getElements(str(i))[1]\n if self._cube[sq_index].colour == cross_colour:\n bud_g = self._graph.getBuddy(str(i))\n bud_e = self._graph.getElements(bud_g)[1]\n buddy_positions.append((bud_g, bud_e, self._cube[bud_e].colour))\n cross_positions.append((str(i), sq_index))\n return (cross_positions, buddy_positions)\n\n def _closestInitialSquare(self, positions):\n \"\"\"\n Runs through _graph looking for cross squares and takes note of the distance of each of these squares to the bottom layer. Returns the index (in\n cross_positions) of the closest square and the path to the bottom layer\n\n #VARIABLES:\n distances - the paths of each of the cross squares to the bottom_layer\n closest - the number of moves of the closest square\n index - the index of the closest square\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n distances = []\n for sq in positions:\n distances.append(self._bfs(sq[0], bottom_layer))\n closest = None\n index = 0\n for i in range(len(distances)):\n if closest == None or len(distances[i]) < closest:\n closest = len(distances[i])\n index = i\n return (index, distances[index])\n\n def _bfs(self, initial, target):\n \"\"\"\n Implementation of the Breadth-First Search algorithm to search for the shortest path between a cross square and its target position in the bottom layer. 
First\n has to check if the initial square is in the bottom layer, because if it is then it will not be able to find a correct path to its target, so it must be\n moved out. After that it then runs the actual Breadth-First Search algorithm, returning the path to it.\n\n #VARIABLES:\n bottom_layer - used to check if the square is in the bottom layer\n found - bool to tell if the target has been found\n target_found - the index in _graph of the target when it is found\n q - queue used to keep track of the next node to check\n path - the path taken from each node, the key is the node and the value is the node you came from to get to the current node\n visited - a list of all the visited nodes so we don't continuously check the same node\n current - the current node to check\n neigh - the neighbours of the current node\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n found = False\n target_found = None\n q = Queue()\n path = {}\n visited = [initial]\n current = initial\n path[initial] = None\n\n if current in bottom_layer:\n neigh = self._graph.getNeighbours(current)\n for n in neigh:\n if n not in bottom_layer:\n path[n] = current\n if current in target:\n return [n, current]\n current = n\n break\n \n while not found:\n neigh = self._graph.getNeighbours(current)\n for n in range(len(neigh)):\n if found:\n break\n if neigh[n] not in visited:\n q.enqueue(neigh[n])\n visited.append(neigh[n])\n path[neigh[n]] = current\n for i in target:\n if i == neigh[n]:\n found = True\n target_found = neigh[n]\n break\n current = q.dequeue()\n return self._buildPath(path, target_found)\n\n def _buildPath(self, path, current):\n \"\"\"\n Takes the dictionary from _bfs and builds the path taken from the initial square to the target. Simply adds current to the path list if it is not None, then\n updates current to be the value at path[current]\n\n #VARIABLES:\n p - the path taken from the initial square to its target\n path - a dictionary representing the path from _bfs\n current - the target node, where the trail to the path starts\n \"\"\"\n\n p = []\n while True:\n if path[current] != None:\n p = [current] + p\n current = path[current]\n else:\n break\n return p\n
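\n # Hedged aside (assumption, not the original class): _bfs() above relies on a\n # Queue with enqueue/dequeue methods, presumably defined elsewhere in this\n # module. A deque-backed equivalent would look like:\n #\n # from collections import deque\n # class Queue:\n # def __init__(self):\n # self._items = deque()\n # def enqueue(self, item):\n # self._items.append(item)\n # def dequeue(self):\n # return self._items.popleft() if self._items else None\n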
\n def _pathNotation(self, index, path, in_position):\n #TODO: clean up - remove in_position and commented out variables (bottom_layer and corrections)\n \"\"\"\n Takes the path derived from _buildPath and changes it from the indices of the squares in _graph to their actual letter representation in the cube. Does so by\n looping through path, getting the neighbours of the current node in question, taking the matching neighbour and determining which way it moves (clockwise or\n anticlockwise)\n\n #VARIABLES:\n notation - translation from each index to its corresponding letter representation\n p - the path in letter format\n current - the current node in the path\n path - the path in indices format from the initial square to its target\n index - the index in _graph of the initial square\n \"\"\"\n \n #bottom_layer = ['16', '17', '18', '19']\n notation = {'0':['U', 'B'], '1':['U', 'R'], '2':['U', 'F'], '3':['U', 'L'], '4':['L', 'U'], '5':['U', 'F'], '6':['L', 'D'], '7':['L', 'B'], '8':['F', 'U'], \\\n '9':['F', 'R'], '10':['F', 'F'], '11':['F', 'L'], '12':['R', 'U'], '13':['R', 'B'], '14':['R', 'D'], '15':['R', 'F'], '16':['D', 'F'], \\\n '17':['D', 'R'], '18':['D', 'B'], '19':['D', 'L'], '20':['B', 'D'], '21':['B', 'R'], '22':['B', 'U'], '23':['B', 'L']}\n\n p = []\n current = index\n #corrections = []\n for i in path:\n neigh = self._graph.getNeighbours(current)\n for n in range(len(neigh)):\n if neigh[n] == i:\n t = notation[current][n/2]\n \"\"\"\n This checks if the matching node is a clockwise or anticlockwise rotation from the current node. Because the rotations are represented like this:\n [clockwise_face, anticlockwise_face, clockwise_neighbour, anticlockwise_neighbour], the turn is clockwise if it's even and anticlockwise if it's\n odd\n \"\"\"\n if n%2 == 1:\n t += \"'\"\n p.append(t)\n current = neigh[n]\n break\n return p\n\n def _returnCollision(self, bottom_layer, op, current, in_position):\n \"\"\"\n Determines whether the movement of one cross piece interferes and moves a solved cross piece out of its correct position, taking note of the rotation that\n caused the interference. It does this by taking all the side pieces of the affected squares during the rotation and checks if they are both in the bottom\n layer and if they are correctly oriented cross pieces\n\n #VARIABLES:\n bottom_layer - the indices in _graph of the bottom layer\n op - the index of the operation that rotated the cube\n current - the index of the square that caused the rotation\n in_position - list of the cross pieces correctly oriented\n buddy_index - the index of the sticker that is on the same square as the current sticker being checked\n cube_i - the index in _cube of the current square\n buddy_i - the index in _cube of the current square's buddy\n bottom_clr - the colour of the current sticker\n buddy_clr - the colour of the current sticker's buddy\n \"\"\" \n for i in range(4):\n buddy_index = self._graph.getBuddy(current)\n if current in bottom_layer or buddy_index in bottom_layer:\n cube_i = self._graph.getElements(current)[1]\n buddy_i = self._graph.getElements(self._graph.getBuddy(current))[1]\n bottom_clr = self._cube[cube_i].colour\n buddy_clr = self._cube[buddy_i].colour\n if (bottom_clr == \"O\" and buddy_clr in in_position) or (buddy_clr == \"O\" and bottom_clr in in_position):\n return True\n current = self._graph.getNeighbours(current)[op]\n return False\n
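\n # Worked illustration of _pathNotation (hedged, since the actual adjacency comes\n # from config.txt): if node '2' lists node '10' as its clockwise-face neighbour\n # (neighbour index 0), the emitted turn is notation['2'][0] = 'U'; an odd\n # neighbour index would append \"'\" for the anticlockwise variant.\n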
\n def _findAnchor(self, cross_positions, coloured_positions):\n \"\"\"\n Finds the cross square in the bottom layer that will be used as the reference point for the positioning of the other cross squares. Goes through the bottom\n layer and returns the index in cross_positions of the first cross square it finds\n\n #VARIABLES:\n cross_positions - the positions of the cross squares\n coloured_positions - the positions of the corresponding coloured stickers\n bottom_layer - the coordinates of the bottom layer\n anchor - the index in cross_positions of the anchor\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n anchor = None\n for c in range(len(cross_positions)):\n for i in range(len(bottom_layer)):\n if cross_positions[c][0] == bottom_layer[i]:\n anchor = c\n return anchor\n\n def _getAnchorTargets(self, cross_positions, coloured_positions, anchor_index):\n \"\"\"\n Returns the positions of the cross squares that are not the anchor in respect to this anchor. First it gets the correct position in the bottom layer of the\n anchor. This position is the index in both _graph and _cube. Next it rotates the cube clockwise in respect to the bottom layer, taking note of the order of\n the colours of the center squares of each of the buddies of the bottom layer squares. This order represents the positioning of the other cross squares relative\n to the anchor. It then starts at the current position of the anchor square and rotates clockwise, adding the colour as the key and the index in both _graph\n and _cube, along with the index in cross_positions of the corresponding cross square.\n\n #VARIABLES:\n cross_positions - the positions of the cross squares\n coloured_positions - the positions of the corresponding coloured stickers\n anchor_index - the index in cross_positions of the anchor square\n bottom_layer - the positions of the bottom layer squares\n anchor_position - the index of the anchor in bottom_layer, as well as the index in _cube\n colour_order - the actual order of the colours in respect to the anchor\n order - the order in respect to the current position of the anchor in the bottom layer\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n anchor_position = None\n buddy = self._graph.getElements(self._graph.getBuddy(cross_positions[anchor_index][0]))[1]\n buddy_colour = self._cube[buddy].colour\n for i in bottom_layer:\n buddy_i = int(self._graph.getElements(self._graph.getBuddy(i))[1])\n center = (((buddy_i / 9) + 1) * 9) - 1\n if self._cube[center].colour == buddy_colour:\n anchor_position = (i, self._graph.getElements(i)[1])\n break\n current = anchor_position[0]\n colour_order = []\n for _ in range(3):\n current = self._graph.getNeighbours(current)[0]\n current_cube = self._graph.getElements(self._graph.getBuddy(current))[1]\n center = (((current_cube / 9) + 1) * 9) - 1\n current_colour = self._cube[center].colour\n colour_order.append(current_colour)\n current = cross_positions[anchor_index][0]\n order = {}\n for i in range(3):\n current = self._graph.getNeighbours(current)[0]\n elem = self._graph.getElements(current)[1]\n index = None\n for j in range(len(coloured_positions)):\n if coloured_positions[j][2] == colour_order[i]:\n index = j\n break\n order[colour_order[i]] = ((current, elem), index)\n return order\n
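\n # Worked illustration of the centre-index arithmetic used above (Python 2\n # integer division): each face occupies nine consecutive sticker indices with\n # the centre stored last, so for buddy_i = 12 the face is 12 / 9 = 1 and the\n # centre index is ((1 + 1) * 9) - 1 = 17.\n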
\n def _decideOnBestPath(self, anchor_targets, cross_positions, coloured_positions, anchor_index):\n \"\"\"\n Determines which cross square is the closest to being in its solved space. Does so by first getting the cross squares that should be excluded - the ones already\n in position. It then loops through cross_positions, taking the ones that are not excluded and determining their path to their solved slot. It does this in two\n steps. First it finds the path of the cross square to the closest square in the bottom layer, not taking into account its target position with respect to the\n anchor square. Next it takes its target square and finds the path from this to where the just-mentioned bottom layer square is. It then finds the square with\n the least number of moves to its solved state.\n\n #VARIABLES:\n anchor_targets - the positions of the cross squares in respect to the anchor square\n cross_positions - the positions of the cross squares\n coloured_positions - the positions of the corresponding stickers\n anchor_index - the index in cross_positions of the anchor square\n bottom_layer - the indices in _graph of the bottom layer\n exclude - the cross squares that are already in the correct position and should be excluded from path determining\n path_to_bottom - list containing the paths of each cross square not excluded from their closest bottom layer square\n target_to_position - list containing the paths of the anchor position of each cross square to the target position from path_to_bottom\n target - the index of the bottom layer position found by taking the final value of path\n clr - the sticker colour of the current cross square's anchor position\n least - the length of the shortest path\n index - the index in path_to_bottom and target_to_position of the shortest path\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n exclude = self._findInPosition(anchor_targets, cross_positions, coloured_positions)\n exclude.append(coloured_positions[anchor_index][2])\n if len(exclude) == 4:\n return None\n path_to_bottom = []\n target_to_position = []\n for i in range(len(cross_positions)):\n if coloured_positions[i][2] not in exclude:\n path = self._bfs(cross_positions[i][0], bottom_layer)\n path_to_bottom.append((path, cross_positions[i][0]))\n target = path[-1]\n clr = coloured_positions[i][2]\n bottom_path = self._findBottomToTarget(anchor_targets[clr][0][0], target)\n target_to_position.append((bottom_path, anchor_targets[clr][0][0]))\n least = None\n index = None\n for i in range(len(path_to_bottom)):\n if least == None or (len(path_to_bottom[i][0]) + len(target_to_position[i][0])) < least:\n least = len(path_to_bottom[i][0]) + len(target_to_position[i][0])\n index = i\n return (path_to_bottom[index][0], target_to_position[index][0], path_to_bottom[index][1], target_to_position[index][1])\n
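\n # Worked illustration (hedged): for a candidate square two moves away from the\n # bottom layer whose slot sits one quarter-turn of D from where it lands, the\n # cost compared by the loop above is len(path_to_bottom) + len(target_to_position)\n # = 2 + 1 = 3, and the square with the smallest such sum is chosen.\n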
\n def _findBottomToTarget(self, start, target):\n \"\"\"\n Determines the path from start (the position of the anchor_target of the current cross square in question) to the target of said cross square. We can't use\n _bfs here because of the way it is designed, in that it will move the start square out of the bottom layer at the beginning of the search, which here wouldn't\n make sense. Instead it just starts at the start square and rotates the bottom layer until it finds the correct position, returning the path taken\n\n #VARIABLES:\n start - the position of the anchor_target of the current cross square\n target - the target of the current cross square\n path - the path taken from start to target\n \"\"\"\n \n if start == target:\n return []\n path = []\n neigh = self._graph.getNeighbours(start)\n if neigh[1] == target:\n path.append(neigh[1])\n elif neigh[0] == target:\n path.append(neigh[0])\n else:\n path.append(neigh[0])\n new = neigh[0]\n neigh = self._graph.getNeighbours(new)\n path.append(neigh[0])\n return path\n\n def _findInPosition(self, anchor_targets, cross_positions, coloured_positions):\n \"\"\"\n Finds the cross squares that are correctly oriented in the bottom layer. Simply takes the values in anchor_targets and cross_positions and compares them\n\n VARIABLES:\n anchor_targets - positions of the target positions relative to the anchor for each cross square\n cross_positions - positions of each cross square\n coloured_positions - positions of each corresponding sticker\n \"\"\"\n \n in_position = []\n for key in anchor_targets:\n i = anchor_targets[key][1]\n if anchor_targets[key][0][0] == cross_positions[i][0]:\n in_position.append(coloured_positions[i][2])\n return in_position\n\n def _decidePath(self, both_paths, in_position):\n \"\"\"\n Builds the path of the closest cross square based on both_paths. It builds the path by doing a check first, taking an action or not based on this check, moving\n the bottom layer into the target position, and finally also moving the cross square itself into the target position. The check is to see whether the square\n piece is in the bottom layer or not. If it is, then the first move will be to move the piece out of this position, so it executes the first move in square_path\n and then deletes it so it doesn't get executed again\n\n VARIABLES:\n both_paths - contains the path for the cross square and the path for the anchor_target\n in_position - contains the cross squares that are in position (used for _pathNotation)\n bottom_layer - the positions in _graph of the bottom layer\n square_path - holds the path of the cross square\n starting_point - the starting point of the cross square\n bottom_start - the starting point of the anchor_target\n bottom_path - the path of the anchor_target to the target\n path_to_take - the actual path to be taken (a mixture of both paths) to be returned\n path_indices - the list of the corresponding indices in _graph of the turns in path_to_take, used in _rotateReturnPosition to check if correctly oriented\n cross squares are moved out of position\n starting_neighbour - the corresponding sticker to check if the square is in the bottom layer\n \"\"\"\n \n bottom_layer = ['16', '17', '18', '19']\n square_path = both_paths[0]\n starting_point = both_paths[2]\n bottom_start = both_paths[3]\n bottom_path = both_paths[1]\n path_to_take = []\n path_indices = []\n starting_neighbour = self._graph.getBuddy(starting_point)\n if starting_point in bottom_layer or starting_neighbour in bottom_layer:\n path_indices.append((starting_point, [square_path[0]]))\n p = self._pathNotation(starting_point, [square_path[0]], in_position)\n starting_point = square_path[0]\n for t in p:\n path_to_take.append(t)\n del square_path[0]\n if len(bottom_path) > 0:\n current = bottom_start\n for item in bottom_path:\n path_indices.append((bottom_start, [item]))\n current = item\n p = 
self._pathNotation(bottom_start, bottom_path, [])\n for t in p:\n path_to_take.append(t)\n if len(square_path) > 0:\n current = starting_point\n for item in square_path:\n path_indices.append((current, [item]))\n current = item\n p = self._pathNotation(starting_point, square_path, in_position)\n for t in p:\n path_to_take.append(t)\n return (path_to_take, path_indices)\n \n #Start of F2L\n def SolveF2L(self):\n \"\"\"\n Returns the path to take to solve the F2L stage of the solve in the fewest number of turns based on the cross solve\n \"\"\"\n \n return self._solveF2L()\n\n def _calculatePair(self, pair):\n \"\"\"\n Calculates the value associated with an F2L pair and returns the algorithm for that pair\n \"\"\"\n \n a = pair[5]**pair[0]\n b = pair[1][1]**pair[6]\n c = pair[7]**pair[2][1]\n d = pair[3][1]**pair[8]\n e = pair[9]**pair[4][1]\n val = a + b + c + d + e\n alg = self._r.get(val)\n return alg\n\n def _solveF2L(self):#def OptimisedF2L(self):\n \"\"\"\n Returns the series of F2L algorithms that has the shortest number of steps. It does so recursively. First it gets all of the F2L pairs for the current cube\n configuration. It then goes through these pairs one by one and builds a tree which contains all possible F2L algorithms that occur after executing that\n algorithm. This tree has the following structure: the root of the tree is the first algorithm to be executed, then it takes the next series of pairs and adds\n them as its children, and then it does so recursively for each of the children and the children's children until there are no F2L pairs left. It then goes\n through the leaves of the tree and builds upwards to the root to form the path of each of the possible solutions. Finally it takes the shortest of these paths\n and returns it\n\n VARIABLES:\n all_pairs - the initial F2L pair coordinates that form the roots of each of the trees (one per pair)\n roots - the root nodes of the trees\n nodes - each node in every tree, to keep track of which ones are leaves\n orig_cube - a snapshot of the cube's configuration so it can test out all the algorithms at that level\n orig_graph - a snapshot of the graph's configuration\n new_pairs - the F2L pair coordinates after the initial F2L pair's algorithm has been executed\n least - the length of the shortest path\n path - the shortest path\n \"\"\"\n \n all_pairs = self._getF2LPairs()\n roots = []\n nodes = []\n for pair in all_pairs:\n orig_cube = copy.deepcopy(self._cube)\n orig_graph = copy.deepcopy(self._graph)\n alg = self._calculatePair(pair)\n root = Node(None, alg)\n nodes.append(root)\n a = alg.split(\" \")\n for t in a:\n self.RotateWithNotation(t)\n new_pairs = self._getF2LPairs()\n self._buildF2LTree(root, new_pairs, nodes)\n roots.append(root)\n self._cube = orig_cube\n self._graph = orig_graph\n paths = []\n for n in nodes:\n if n.leaf:\n paths.append(self._returnPath(n))\n\n least = None\n path = None\n for p in paths:\n total = 0\n for alg in p:\n total += len(alg)\n if least == None or total < least:\n least = total\n path = p\n return path\n\n def _getF2LPairs(self):\n \"\"\"\n Returns all unsolved F2L pair coordinates. First goes through each corner position on the cube to test for a corner piece (one that has a cross colour on it),\n taking note of the position of each of the stickers that are a part of that piece. It then finds the corresponding side piece for the corner, based on the non\n cross colour stickers on the corner. 
It then adds the coordinates of all these pieces into the pair list before finding the coordinates of where this pair\n is to be inserted. slot_deciders is used to determine where the pair is to be inserted. They are the coordinates of the stickers of the cross squares that\n lie either side of the corner piece's correct slot. The list is looped through until a match is found and the coordinates are then added to the pair list. It\n then checks whether the pair is already in its correct slot, and if it is not, it is added to the all_pairs list, which is returned after all pairs are\n checked.\n\n VARIABLES:\n slot_deciders - the coordinates of the colours either side of each of the corner slots, used to determine where each pair needs to go\n slot - the coordinates of the slots of each of the corners\n corner_positions - the positions of all the corners on the cube, including the other two stickers' coordinates in a clockwise manner. They are in a clockwise\n manner so that the values for the pair are consistent\n side_positions - the positions of all the possible side pieces on the cube (must exclude bottom layer pieces)\n corner - list of the three coordinates of the current corner\n side - list of the two coordinates of the current side\n pair - list of the coordinates of the corner and side pair as well as the coordinates of the slot where they need to be inserted\n all_pairs - the list of all unsolved F2L pairs to be returned\n \"\"\"\n \n cross_colour = \"O\"\n slot_deciders = [(14, 23), (23, 32), (32, 46), (46, 14)]\n slots = [(36, 13, 24, 12, 25), (38, 22, 33, 21, 34), (40, 31, 47, 30, 48), (42, 45, 15, 52, 16)]\n corner_positions = {0:[9, 51], 9:[51, 0], 51:[0, 9], 2:[49, 29], 49:[29, 2], 29:[2, 49], \\\n 4:[27, 20], 27:[20, 4], 20:[4, 27], 6:[18, 11], 18:[11, 6], 11:[6, 18], \\\n 13:[24, 36], 24:[36, 13], 36:[13, 24], 45:[15, 42], 15:[42, 45], 42:[45, 15], \\\n 31:[47, 40], 47:[40, 31], 40:[31, 47], 22:[33, 38], 33:[38, 22], 38:[22, 33]}\n\n side_positions = {1:50, 3:28, 5:19, 7:10, 10:7, 12:25, 16:52, 19:5, 21:34, \\\n 25:12, 28:3, 30:48, 34:21, 50:1, 52:16, 48:30}\n \n corner = [0, 0, 0]\n side = [0, 0]\n pair = []\n all_pairs = []\n\n for k in corner_positions.keys():\n if self._cube[k].colour == cross_colour:\n corner[0] = k\n corner[1] = (self._cube[corner_positions[k][0]].colour, corner_positions[k][0])\n corner[2] = (self._cube[corner_positions[k][1]].colour, corner_positions[k][1])\n\n for sk in side_positions.keys():\n s2_position = side_positions[sk]\n s2_colour = self._cube[s2_position].colour\n \n if self._cube[sk].colour == corner[1][0] and s2_colour == corner[2][0]:\n s1_position = sk\n s1_colour = self._cube[sk].colour\n side[0] = (s1_colour, s1_position)\n side[1] = (s2_colour, s2_position)\n\n break\n\n pair = [corner[0], corner[1], corner[2], side[0], side[1], 0, 0, 0, 0, 0]\n\n s = 0\n c1 = None\n c2 = None\n s1 = None\n s2 = None\n while s < len(slot_deciders):\n if pair[1][0] == self._cube[slot_deciders[s][0]].colour and pair[2][0] == self._cube[slot_deciders[s][1]].colour:\n c1 = slots[s][1]\n c2 = slots[s][2]\n s1 = slots[s][3]\n s2 = slots[s][4]\n break\n elif pair[1][0] == self._cube[slot_deciders[s][1]].colour and pair[2][0] == self._cube[slot_deciders[s][0]].colour:\n c1 = slots[s][2]\n c2 = slots[s][1]\n s1 = slots[s][4]\n s2 = slots[s][3]\n break\n s += 1\n\n pair[5] = slots[s][0]\n pair[6] = c1\n pair[7] = c2\n pair[8] = s1\n pair[9] = s2\n\n total = abs(pair[0] - pair[5]) + abs(pair[1][1] - pair[6]) + abs(pair[2][1] - pair[7]) + abs(pair[3][1] - pair[8]) 
+ abs(pair[4][1] - pair[9])\n\n \"\"\"\n If total is 0 then the pair has already been inserted\n \"\"\"\n if total != 0:\n all_pairs.append(pair)\n\n return all_pairs\n\n def _buildF2LTree(self, parent, pair_list, nodes):\n \"\"\"\n Algorithm to build the tree of all possible algorithms that can occur after a certain initial F2L algorithm. Does so by looping through the F2L pairs in\n pair_list, getting the corresponding algorithm, creating a new Node object with this algorithm and parent as the parent node, adding this node to the list of\n children in the parent node, indicating the parent node is no longer a leaf if this has not already been done, taking a snapshot of the current cube and graph\n configuration, executing the algorithm, getting the list of the remaining F2L pairs after this algorithm, and repeating with this new pair list if it is not empty\n\n VARIABLES:\n parent - the parent node of all pairs in pair_list\n pair_list - the current list of all unsolved F2L pairs of the current cube configuration\n nodes - the list of nodes in each tree\n new_pair_list - the list of F2L pairs after the algorithm is executed\n \"\"\"\n \n for pair in pair_list:\n alg = self._calculatePair(pair)\n new_alg = Node(parent, alg)\n parent.addChild(new_alg)\n parent.switchLeaf()\n nodes.append(new_alg)\n old_cube = copy.deepcopy(self._cube)\n old_graph = copy.deepcopy(self._graph)\n a = alg.split(\" \")\n for t in a:\n self.RotateWithNotation(t)\n new_pair_list = self._getF2LPairs()#self._optimisedF2L()\n if len(new_pair_list) > 0:\n self._buildF2LTree(new_alg, new_pair_list, nodes)\n \n self._cube = old_cube\n self._graph = old_graph\n\n def _returnPath(self, leaf):\n \"\"\"\n Returns the list of F2L algorithms based on the provided leaf node. It simply adds the algorithm associated with the current node (starting with the\n provided leaf) to the front of the path, then moves to that node's parent, until eventually the node is set to None, which is the parent of the root node\n\n VARIABLES:\n leaf - the leaf node that is the last algorithm of the particular F2L path\n node - the current node to be added to the path\n p - the path to be returned\n \"\"\"\n \n node = leaf\n p = []\n while node:\n p = [node.alg] + p\n node = node.parent\n return p\n\n def SolveOLL(self):\n \"\"\"\n Returns the algorithm to solve the OLL stage\n \"\"\"\n \n return self._solveOLL()\n\n def _solveOLL(self, stats=False):\n \"\"\"\n Works out the value associated with the cube configuration and returns the algorithm for that value. It runs through the squares on the top layer of the cube,\n marking each square as 1 if it is the correct colour and 0 otherwise. The correct colour is the opposite of that\n of the cross colour. This sequence of 0s and 1s is treated as a binary sequence and converted into an int. Before this conversion, the number of 1s is counted.\n If that number is 9, then the OLL stage is already solved and there is no need to continue. The corresponding stickers of each of the top layer squares are\n then taken into consideration and the same thing is done with them. These two integers are then multiplied together, obtaining the value for the algorithm,\n which is used to return the corresponding algorithm. 
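As an illustrative sketch with a hypothetical configuration (not taken from the algorithm tables): if only the four edges and the centre of the top layer are oriented, top_bits comes out as [0, 1, 0, 1, 1, 1, 0, 1, 0], so top_val = _bToI(top_bits) = 2 + 8 + 16 + 32 + 128 = 186. 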
Each top layer pattern maps to an algorithm, but only in one orientation, so if no algorithm is returned\n for a given pattern, the top layer is rotated and the process is repeated until an algorithm is found, adding the number of rotations to the start of the\n algorithm\n\n VARIABLES:\n top_squares - the indices in _cube of the top layer in the order they are to be checked\n side_squares - the indices in _cube of the side stickers of the top layer in the order they are to be checked\n match_colour - the colour of the top layer\n turns - the list of turns until the correct pattern is found\n top_bits - the bits associated with the top layer\n side_bits - the bits associated with the side stickers of the top layer\n num_top_bits - the number of bits that are 1 in top_bits\n top_val - the integer value of the top_bits sequence\n side_val - the integer value of the side_bits sequence\n act - the actual algorithm to be returned, including the turns that occurred to find the correct pattern\n \"\"\"\n \n cross_colour = \"O\"\n top_squares = [0, 1, 2, 7, 8, 3, 6, 5, 4]\n side_squares = [51, 50, 49, 29, 28, 27, 20, 19, 18, 11, 10, 9]\n match_colour = self._opposites[cross_colour]\n turns = []\n alg = None\n\n while True:\n top_bits = []\n side_bits = []\n num_top_bits = 0\n\n for i in top_squares:\n if self._cube[i].colour == match_colour:\n num_top_bits += 1\n top_bits.append(1)\n else:\n top_bits.append(0)\n\n if num_top_bits == 9:\n return []\n\n for i in side_squares:\n if self._cube[i].colour == match_colour:\n side_bits.append(1)\n else:\n side_bits.append(0)\n \n top_val = self._bToI(top_bits)\n side_val = self._bToI(side_bits)\n alg_value = top_val * side_val\n alg = self._oll_r.get(str(alg_value))\n if alg != None:\n if stats:\n f = open(\"../data/oll_stats.txt\", \"a\")\n top = \"\"\n side = \"\"\n for i in top_bits:\n top += str(i)\n for i in side_bits:\n side += str(i)\n f.write(top + \"-\" + side + \"\\n\")\n f.close()\n break\n else:\n turns.append(\"U\")\n self.RotateWithNotation(\"U\")\n act = []\n if len(turns) > 0:\n if len(turns) == 3:\n t = turns[0] + \"'\"\n elif len(turns) == 2:\n t = turns[0] + \"2\"\n else:\n t = turns[0]\n act.append(t)\n\n a = alg.split(\" \")\n for i in a:\n self.RotateWithNotation(i)\n act.append(i)\n return act\n\n def _bToI(self, bits):\n \"\"\"\n Function that returns the integer value associated with the bit sequence\n\n VARIABLES:\n bits - the list of 0s and 1s to be converted, with the first element treated as the least significant bit\n total - the integer value to be returned\n \"\"\"\n \n total = 0\n for bit in range(len(bits)):\n total += bits[bit] * (2**bit)\n return total\n\n def SolvePLL(self):\n \"\"\"\n Returns the steps involved in solving the PLL stage\n \"\"\"\n \n return self._solvePLL()\n\n def _solvePLL(self, stats=False):\n \"\"\"\n Works out the value of the pattern on the side stickers of the top layer and returns the algorithm associated with this value. First it checks if the PLL stage\n is already solved; if so, there is no need to continue. If not then the side stickers of the top layer are checked. When a new colour is seen, it is added to\n a dictionary where the colour is the key and the value is the number in the sequence of new colours that have been seen. This number is then added into the\n correct list, by taking the index of the value in side_positions and dividing by 3, as the side stickers are in groups of 3. These values are then used in\n an equation to determine the unique value that represents the pattern. 
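As a worked illustration with hypothetical sequence numbers, l = [[1, 1, 2], [2, 3, 3], [4, 4, 1], [2, 3, 4]] gives a = 1*(1+2) = 3, b = 2 + (3*3) = 11, c = (4*4) + 1 = 17 and d = (2+3)*4 = 20, so val = 3**17 + 11**20. 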
This pattern is then used to return the corresponding algorithm; again, if no algorithm\n is returned, the top layer is rotated until a recognizable pattern is found\n\n VARIABLES:\n turns - the rotations undertaken until a recognizable pattern is found\n side_positions - the indices in _cube of the side stickers of the top layer\n l - the number associated with each sticker in each of the four groups\n cols - the colours and their associated sequence number in the pattern\n val - the unique value associated with the pattern\n act - the actual algorithm that gets returned\n \"\"\"\n \n if (self._cube[51].colour == self._cube[50].colour and self._cube[50].colour == self._cube[49].colour) and \\\n (self._cube[29].colour == self._cube[28].colour and self._cube[28].colour == self._cube[27].colour):\n return [self._alignAfterPLL()]\n alg = None\n turns = []\n side_positions = [51, 50, 49, 29, 28, 27, 20, 19, 18, 11, 10, 9]\n\n while alg == None:\n l = [[], [], [], []]\n cols = {}\n for i in range(len(side_positions)):\n colour = self._cube[side_positions[i]].colour\n if colour not in cols.keys():\n cols[colour] = len(cols)+1\n l[i/3].append(cols[colour])\n\n a = l[0][0] * (l[0][1] + l[0][2])\n b = l[1][0] + (l[1][1] * l[1][2])\n c = (l[2][0] * l[2][1]) + l[2][2]\n d = (l[3][0] + l[3][1]) * l[3][2]\n\n val = a**c + b**d\n alg = self._pll_r.get(str(val))\n\n if alg == None:\n turns.append(\"U\")\n self.RotateWithNotation(\"U\")\n\n if stats:\n f = open(\"../data/pll_stats.txt\", \"a\")\n side = \"\"\n for lis in l:\n for item in lis:\n side += str(item)\n f.write(side + \"\\n\")\n f.close()\n\n act = []\n if len(turns) > 0:\n if len(turns) == 3:\n t = turns[0]+\"'\"\n elif len(turns) == 2:\n t = turns[0]+\"2\"\n else:\n t = turns[0]\n act.append(t)\n\n a = alg.split(\" \")\n for i in a:\n self.RotateWithNotation(i)\n act.append(i)\n align = self._alignAfterPLL()\n if len(align) > 0:\n act.append(align)\n return act\n\n def _alignAfterPLL(self):\n \"\"\"\n Aligns the top layer of the cube after the PLL stage, solving the cube. Simply rotates the top layer until the cube is aligned\n\n VARIABLES:\n turns - the rotations undertaken\n t - the turns to be returned\n \"\"\"\n \n turns = []\n while self._cube[51].colour != self._cube[52].colour:\n self.RotateWithNotation(\"U\")\n turns.append(\"U\")\n\n t = \"\"\n if len(turns) > 0:\n if len(turns) == 3:\n t = turns[0]+\"'\"\n elif len(turns) == 2:\n t = turns[0]+\"2\"\n else:\n t = turns[0]\n\n return t\n\n def _translateToNotation(self, state, op):\n \"\"\"\n Returns the letter representation of the operation given the index in _cube of the piece being moved. 
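For example (hypothetical values): an op of 0 or 1 keys on the face the piece itself is on, any other op keys on the face of its buddy sticker, and an odd op appends a prime, so \"R\" becomes \"R'\". 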
Gets the center piece of the face on which the piece\n is and returns the associated value of the layer it is on.\n \"\"\"\n \n t = \"\"\n if op < 2:\n key = self._cube[((int(state)/4) * 9) + 8].colour\n else:\n buddy = self._graph.getBuddy(state)\n key = self._cube[((int(buddy)/4) * 9) + 8].colour\n t += self._translation[key]\n\n if op % 2 != 0:\n t += \"'\"\n self._readable_solution.append(t)\n\n def _cleanUpSolution(self, solution):\n \"\"\"\n Cleans up a solution by merging a pair of identical turns, separated only by turns on the face they do not affect, into a single double turn\n \"\"\"\n \n new_solution = []\n null = []\n\n for i in range(len(solution)):\n if i in null:\n continue\n elif i == len(solution)-1:\n new_solution.append(solution[i])\n\n for j in range(i+1, len(solution)):\n if solution[j][0] == self._not_effected[solution[i][0]]:\n continue\n elif solution[j] == solution[i]:\n new_solution.append(solution[i][0]+\"2\")\n null.append(j)\n break\n else:\n new_solution.append(solution[i])\n break\n \n return new_solution\n\n def _translateToMechanismInstructions(self, turns):\n \"\"\"\n Translates turns in cube notation into (arm index, direction) tuples for the turning mechanism, where direction 0 is clockwise and 1 is anticlockwise; a double turn produces two tuples\n \"\"\"\n \n letter_to_arm = {\"R\":(\"arm3\", 3), \"L\":(\"arm1\", 1), \"U\":(\"arm0\", 0), \"D\":(\"arm4\", 4), \"F\":(\"arm2\", 2), \"B\":(\"arm5\", 5)}\n arm_rotations = []\n for item in turns:\n d = 0\n direction = \"clockwise\"\n arm = letter_to_arm[item[0]]\n if len(item) > 1 and item[1] == \"'\":\n direction = \"anticlockwise\"\n d = 1\n\n arm_rotations.append((arm[1], d))\n #print(\"Rotating \" + arm[0] + \" \" + direction + \"...\")\n \n if len(item) > 1 and item[1] == \"2\":\n arm_rotations.append((arm[1], d))\n #print(\"Rotating \" + arm[0] + \" \" + direction + \"...\")\n\n return arm_rotations\n\n def __getstate__(self):\n return self.__dict__\n \n def __setstate__(self, d):\n self.__dict__.update(d)\n\n def _checkEquality(self, l1, l2):\n for i in range(len(l1)):\n if l1[i].colour != l2[i].colour:\n return False\n return True\n \n def __str__(self):\n return self._representCube()\n\nclass Face:\n \n \"\"\"\n Face represents the faces of the cube - 6 in total.\n Colour is the colour of the face (based on the center square) and is an int.\n Neighbours are the positions of the neighbours of the face\n Square_positions is to make turning the face easier (Rethink this - can do it better probably)\n \"\"\"\n\n def __init__(self, colour, opposite, neighbours):\n self._colour = colour\n self._opposite = opposite\n self._neighbours = neighbours\n\nclass Square:\n\n \"\"\"\n Square represents the individual squares of each face\n \"\"\"\n\n def __init__(self, colour):\n self._colour = colour\n\n def getColour(self):\n return self._colour\n\n def setColour(self, c):\n self._colour = c\n\n colour = property(getColour, setColour)\n\nclass Queue:\n\n def __init__(self):\n self._q = []\n\n def enqueue(self, item):\n self._q.append(item)\n\n def dequeue(self):\n item = self._q[0]\n del self._q[0]\n return item\n\n def empty(self):\n if len(self._q) == 0:\n return True\n return False\n\n def __contains__(self, item):\n for i in self._q:\n if item == i:\n return True\n return False\n\nclass Node:\n\n def __init__(self, parent, alg):\n self._parent = parent\n self._children = []\n self._alg = alg\n self._leaf = True\n\n def getParent(self):\n return self._parent\n\n def setParent(self, p):\n self._parent = p\n\n def getChildren(self):\n return self._children\n\n def addChild(self, c):\n self._children.append(c)\n\n def getAlg(self):\n return self._alg\n\n def setAlg(self, a):\n self._alg = a\n\n def getLeaf(self):\n return self._leaf\n\n def switchLeaf(self):\n if self._leaf:\n self._leaf = not self._leaf\n\n def __str__(self):\n strDesc = \"\"\n if self.parent:\n strDesc += self.parent.alg\n else:\n strDesc += \"None\"\n\n strDesc += 
\" ---> \" + self.alg + \" ---> |\"\n\n for item in self.children:\n strDesc += \"| \" + item.alg + \" \"\n\n strDesc += \"||\"\n\n return strDesc\n\n parent = property(getParent, setParent)\n children = property(getChildren)\n alg = property(getAlg, setAlg)\n leaf = property(getLeaf)\n\ndef listPosition(face, face_position):\n return (9 * face) + face_position\n\ndef faceNumber(list_position):\n return int(list_position / 9)\n\ndef facePosition(list_position):\n return list_position - (9 * faceNumber(list_position))\n\ndef neighbourNumber(face_position):\n return face_position / 2\n\ndef targetSpace(face, list_position):\n return (9 * face) + facePosition(list_position)\n\ndef increaseIndexByTwo(index, direction):\n t = index + (2 * direction)\n if t < 0 or t > 7:\n return index + (6 * (direction * -1))\n else:\n return t\n\ndef increaseIndexByOne(index, direction):\n t = index + (1 * direction)\n if t < 0 or t > 7:\n return index + (7 * (direction * -1))\n else:\n return t\n\ndef listToStr(l):\n s = \"\" + l[0]\n for i in range(1, len(l)):\n s += \" \" + l[i]\n return s\n\ndef SolveCube():\n moves = []\n steps = 0\n start = time.time()\n c = Cube()\n scramble = CreateScramble()\n ns = listToStr(scramble)\n print(\"SCRAMBLE:\")\n print(ns)\n\n for r in scramble:\n c.RotateWithNotation(r)\n\n print(\"\")\n print(\"SOLVE:\")\n\n cross = c.SolveCross()\n moves += cross\n if len(cross) > 0:\n nc = listToStr(cross)\n steps += len(cross)\n\n f2l = c.SolveF2L()\n if f2l:\n for alg in f2l:\n a = alg.split(\" \")\n moves += a\n for r in a:\n c.RotateWithNotation(r)\n steps += 1\n\n oll = c.SolveOLL()\n moves += oll\n steps += len(oll)\n if len(oll) > 0:\n no = listToStr(oll)\n\n pll = c.SolvePLL()\n moves += pll\n steps += len(pll)\n if len(pll) > 0:\n np = listToStr(pll)\n\n fin = time.time()\n\n total = fin - start\n\n if len(cross) > 0:\n print(nc)\n if f2l:\n for alg in f2l:\n print(alg)\n if len(oll) > 0:\n print(no)\n if len(pll) > 0:\n print(np)\n print(\"\")\n print(\"Time taken: \" + str(total))\n print(\"Steps taken: \" + str(steps))\n print(\"\")\n mech = c._translateToMechanismInstructions(moves)\n print(\"Mechanical movement instructions:\")\n print(mech)\n\ndef main():\n SolveCube()\n #SolveMultipleCubes()\n\ndef SolveMultipleCubes():\n total = 0\n for i in range(1000):\n start = time.time()\n c = Cube()\n scramble = CreateScramble()\n ns = listToStr(scramble)\n print(\"SCRAMBLE:\")\n print(ns)\n\n for r in scramble:\n c.RotateWithNotation(r)\n\n print(\"\")\n print(\"SOLVE:\")\n\n\n cross = c.SolveCross()\n if len(cross) > 0:\n nc = listToStr(cross)\n\n f2l = c.SolveF2L()\n for alg in f2l:\n a = alg.split(\" \")\n for r in a:\n c.RotateWithNotation(r)\n\n oll = c.SolveOLL()\n if len(oll) > 0:\n no = listToStr(oll)\n\n pll = c.SolvePLL()\n if len(pll) > 0:\n np = listToStr(pll)\n\n fin = time.time()\n\n total += fin - start\n\n if len(cross) > 0:\n print(nc)\n for alg in f2l:\n print(alg)\n if len(oll) > 0:\n print(no)\n if len(pll) > 0:\n print(np)\n print(\"\")\n print(\"Time taken: \" + str(total))\n\ndef CreateScramble():\n c = Cube()\n s = []\n moves = [\"U\", \"D\", \"L\", \"R\", \"F\", \"B\", \"U'\", \"D'\", \"L'\", \"R'\", \"F'\", \"B'\"]\n counter = 0\n while counter < 30:\n i = random.randint(0, 11)\n if len(s) == 0:\n s.append(moves[i])\n counter += 1\n else:\n j = len(s)-1\n doub = 0\n while j >= 0:\n if s[j] == moves[i]:\n doub += 1\n j -= 1\n if doub == 2:\n break\n else:\n if moves[i] == c._inverts[s[j]]:\n break\n elif s[j][0] == c._not_effected[moves[i][0]]:\n j -= 
1\n else:\n s.append(moves[i])\n counter += 1\n break\n\n s = c._cleanUpSolution(s)\n\n c._readable_solution = []\n\n return s\n\ndef CheckOLLAlgs():\n f = open(\"../data/rpiOLLValues.txt\", \"r\")\n f2 = open(\"../data/rpiOLLAlgs.txt\", \"r\")\n x = {\"1\":\"R\", \"0\":\"X\"}\n y = 0\n\n top_squares = [0, 1, 2, 7, 8, 3, 6, 5, 4]\n side_squares = [51, 50, 49, 29, 28, 27, 20, 19, 18, 11, 10, 9]\n\n for line in f:\n y += 1\n c = Cube()\n l = line.split(\"\\n\")[0].split(\"-\")\n alg = f2.readline().split(\"\\n\")[0].split(\" \")\n for i in range(len(l[0])):\n c._cube[top_squares[i]].colour = x[l[0][i]]\n for i in range(len(l[1])):\n c._cube[side_squares[i]].colour = x[l[1][i]]\n\n for a in alg:\n c.RotateWithNotation(a)\n\n for i in top_squares:\n if c._cube[i].colour != \"R\":\n print(\"ERROR: \" + str(y))\n print(l)\n print(alg)\n print(c)\n return\n\n f.close()\n f2.close()\n print(\"FIN\")\n\ndef CheckPLLAlgs():\n f = open(\"../data/rpiPLLValues.txt\", \"r\")\n f2 = open(\"../data/rpiPLLAlgs.txt\", \"r\")\n num_to_col = {\"1\":\"W\", \"2\":\"G\", \"3\":\"Y\", \"4\":\"B\"}\n side_positions = [51, 50, 49, 29, 28, 27, 20, 19, 18, 11, 10, 9]\n y = 0\n\n for line in f:\n y += 1\n cu = Cube()\n l = line.split(\"\\n\")[0].split(\"-\")\n l2 = []\n for item in l:\n for item2 in item:\n l2.append(item2)\n\n for i in range(len(side_positions)):\n cu._cube[side_positions[i]].colour = num_to_col[l2[i]]\n\n a = int(l[0][0]) * (int(l[0][1]) + int(l[0][2]))\n b = int(l[1][0]) + (int(l[1][1]) * int(l[1][2]))\n c = (int(l[2][0]) * int(l[2][1])) + int(l[2][2])\n d = (int(l[3][0]) + int(l[3][1])) * int(l[3][2])\n\n val = a**c + b**d\n alg = cu._pll_r.get(str(val))\n a = alg.split(\" \")\n for item in a:\n cu.RotateWithNotation(item)\n\n for i in range(9):\n if cu._cube[i].colour != \"R\":\n print(\"ERROR TOP:\")\n print(y)\n print(l)\n print(alg)\n print(cu)\n return\n\n pos_to_check = [51, 29, 20, 11]\n\n for i in pos_to_check:\n if cu._cube[i].colour != cu._cube[i-1].colour or cu._cube[i].colour != cu._cube[i-2].colour:\n print(\"ERROR SIDE:\")\n print(y)\n print(l)\n print(alg)\n print(cu)\n return\n\n f.close()\n f2.close()\n print(\"FIN\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"rpi/rpi_cube.py","file_name":"rpi_cube.py","file_ext":"py","file_size_in_byte":70215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"266217945","text":"\"\"\"\n221. 
Maximal Square\nIn a 2D matrix made up of 0s and 1s, find the largest square containing only 1s and return its area.\n\n1 0 1 0 0\n1 0 1 1 1\n1 1 1 1 1\n1 0 0 1 0\n\nOutput: 4\n\"\"\"\ndef maximalSquare(matrix):\n m = 0\n a = len(matrix)\n b = 0 if a==0 else len(matrix[0])\n for i in range(0,a):\n for j in range(0,b):\n matrix[i][j] = int(matrix[i][j])\n if (matrix[i][j] != 0):\n n = min(\n 0 if (i*j==0) else matrix[i-1][j-1],\n 0 if i==0 else matrix[i-1][j],\n 0 if j==0 else matrix[i][j-1])+1\n if n>m:m=n\n matrix[i][j] = n\n return m*m\n\nm= [[\"1\"]]\nmaximalSquare(m)\n","sub_path":"p0221.py","file_name":"p0221.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"341769605","text":"\"\"\"\nGet all downtimes returns \"OK\" response\n\"\"\"\n\nfrom datadog_api_client import ApiClient, Configuration\nfrom datadog_api_client.v1.api.downtimes_api import DowntimesApi\n\nconfiguration = Configuration()\nwith ApiClient(configuration) as api_client:\n api_instance = DowntimesApi(api_client)\n response = api_instance.list_downtimes(\n with_creator=True,\n )\n\n print(response)\n","sub_path":"examples/v1/downtimes/ListDowntimes.py","file_name":"ListDowntimes.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"259849868","text":"import os\nimport tempfile\nimport time\n\nimport yaml\n\nfrom fabric.api import env, execute, get, hide, cd, lcd, local, put, require, run, settings, sudo, task\nfrom fabric.colors import red\nfrom fabric.contrib import files, project, console\nfrom fabric.contrib.console import confirm\nfrom fabric.utils import abort\n\nDEFAULT_SALT_LOGLEVEL = 'info'\nPROJECT_ROOT = os.path.dirname(__file__)\nCONF_ROOT = os.path.join(PROJECT_ROOT, 'conf')\n\nVALID_ROLES = (\n 'salt-master',\n 'web',\n 'worker',\n 'balancer',\n 'db-master',\n 'queue',\n 'cache',\n)\n\n\nenvs = {\n 'staging': {\n 'master': '107.20.144.189',\n 'host_string': 'rapidpro-staging.cakt.us',\n\n },\n 'production': {\n 'master': '54.77.58.154',\n 'host_string': 'rapidpro-prod.cakt.us',\n },\n 'local': {\n 'user': 'vagrant',\n }\n}\n\n\ndef _common_env():\n env.forward_agent = True\n env.project = 'rapidpro_community_portal'\n env.project_root = os.path.join('/var', 'www', env.project)\n for key, value in envs[env.environment].items():\n setattr(env, key, value)\n\n\n@task\ndef staging():\n env.environment = 'staging'\n _common_env()\n\n\n@task\ndef production():\n env.environment = 'production'\n _common_env()\n\n\n@task\ndef vagrant():\n env.environment = 'local'\n _common_env()\n # convert vagrant's ssh-config output to a dictionary\n ssh_config_output = local('vagrant ssh-config', capture=True)\n ssh_config = dict(line.split() for line in ssh_config_output.splitlines())\n env.master = '{HostName}:{Port}'.format(**ssh_config)\n env.key_filename = ssh_config['IdentityFile']\n\n\n@task\ndef setup_master():\n \"\"\"Provision master with salt-master.\"\"\"\n with settings(warn_only=True):\n with hide('running', 'stdout', 'stderr'):\n installed = run('which salt')\n if not installed:\n sudo('apt-get update -qq -y')\n sudo('apt-get install python-software-properties -qq -y')\n sudo('add-apt-repository ppa:saltstack/salt -y')\n sudo('apt-get update -qq')\n sudo('apt-get install salt-master -qq -y')\n # make sure git is installed for gitfs\n with settings(warn_only=True):\n with hide('running', 'stdout', 'stderr'):\n installed = run('which git')\n if not installed:\n sudo('apt-get install python-pip 
git-core python-git -qq -y')\n put(local_path='conf/master.conf', remote_path=\"/etc/salt/master\", use_sudo=True)\n sudo('service salt-master restart')\n\n\n@task\ndef sync():\n \"\"\"Rsync local states and pillar data to the master.\"\"\"\n # Check for missing local secrets so that they don't get deleted\n # project.rsync_project fails if host is not set\n with settings(host=env.master, host_string=env.master):\n if not have_secrets():\n get_secrets()\n else:\n # Check for differences in the secrets files\n for environment in ['staging', 'production']:\n remote_file = os.path.join('/srv/pillar/', environment, 'secrets.sls')\n with lcd(os.path.join(CONF_ROOT, 'pillar', environment)):\n if files.exists(remote_file):\n get(remote_file, 'secrets.sls.remote')\n else:\n local('touch secrets.sls.remote')\n with settings(warn_only=True):\n result = local('diff -u secrets.sls.remote secrets.sls')\n if (result.failed and\n not confirm(red(\n \"Above changes will be made to secrets.sls. Continue?\"))):\n abort(\n \"Aborted. File has been copied to secrets.sls.remote. \" +\n \"Resolve conflicts, then retry.\")\n else:\n local(\"rm secrets.sls.remote\")\n salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/'\n project.rsync_project(local_dir=salt_root, remote_dir='/tmp/salt', delete=True)\n sudo('rm -rf /srv/salt /srv/pillar')\n sudo('mv /tmp/salt/* /srv/')\n sudo('rm -rf /tmp/salt/')\n\n\ndef have_secrets():\n \"\"\"Check if the local secret files exist for all environments.\"\"\"\n found = True\n for environment in ['staging', 'production']:\n local_file = os.path.join(CONF_ROOT, 'pillar', environment, 'secrets.sls')\n found = found and os.path.exists(local_file)\n return found\n\n\n@task\ndef get_secrets():\n \"\"\"Grab the latest secrets file from the master.\"\"\"\n with settings(host=env.master):\n for environment in ['staging', 'production']:\n local_file = os.path.join(CONF_ROOT, 'pillar', environment, 'secrets.sls')\n if os.path.exists(local_file):\n local('cp {0} {0}.bak'.format(local_file))\n remote_file = os.path.join('/srv/pillar/', environment, 'secrets.sls')\n get(remote_file, local_file)\n\n\n@task\ndef setup_minion(*roles):\n \"\"\"Set up a minion server with a set of roles.\"\"\"\n require('environment')\n for r in roles:\n if r not in VALID_ROLES:\n abort('%s is not a valid server role for this project.' 
% r)\n # install salt minion if it's not there already\n with settings(warn_only=True):\n with hide('running', 'stdout', 'stderr'):\n installed = run('which salt-minion')\n if not installed:\n # install salt-minion from PPA\n sudo('apt-get update -qq -y')\n sudo('apt-get install python-software-properties -qq -y')\n sudo('add-apt-repository ppa:saltstack/salt -y')\n sudo('apt-get update -qq')\n sudo('apt-get install salt-minion -qq -y')\n config = {\n 'master': 'localhost' if env.master == env.host else env.master,\n 'output': 'mixed',\n 'grains': {\n 'environment': env.environment,\n 'roles': list(roles),\n },\n 'mine_functions': {\n 'network.interfaces': []\n },\n }\n _, path = tempfile.mkstemp()\n with open(path, 'w') as f:\n yaml.dump(config, f, default_flow_style=False)\n put(local_path=path, remote_path=\"/etc/salt/minion\", use_sudo=True)\n sudo('service salt-minion restart')\n # queries server for its fully qualified domain name to get minion id\n key_name = run('python -c \"import socket; print socket.getfqdn()\"')\n time.sleep(5)\n execute(accept_key, key_name)\n\n\n@task\ndef add_role(name):\n \"\"\"Add a role to an existing minion configuration.\"\"\"\n if name not in VALID_ROLES:\n abort('%s is not a valid server role for this project.' % name)\n _, path = tempfile.mkstemp()\n get(\"/etc/salt/minion\", path)\n with open(path, 'r') as f:\n config = yaml.safe_load(f)\n grains = config.get('grains', {})\n roles = grains.get('roles', [])\n if name not in roles:\n roles.append(name)\n else:\n abort('Server is already configured with the %s role.' % name)\n grains['roles'] = roles\n config['grains'] = grains\n with open(path, 'w') as f:\n yaml.dump(config, f, default_flow_style=False)\n put(local_path=path, remote_path=\"/etc/salt/minion\", use_sudo=True)\n sudo('service salt-minion restart')\n\n\n@task\ndef salt(cmd, target=\"'*'\", loglevel=DEFAULT_SALT_LOGLEVEL):\n \"\"\"Run arbitrary salt commands.\"\"\"\n with settings(warn_only=True, host_string=env.master):\n sudo(\"salt -v {0} -l{1} {2} \".format(target, loglevel, cmd))\n\n\n@task\ndef highstate(target=\"'*'\", loglevel=DEFAULT_SALT_LOGLEVEL):\n \"\"\"Run highstate on master.\"\"\"\n with settings(host_string=env.master):\n print(\"This can take a long time without output, be patient\")\n salt('state.highstate', target, loglevel)\n\n\n@task\ndef accept_key(name):\n \"\"\"Accept minion key on master.\"\"\"\n with settings(host_string=env.master):\n sudo('salt-key --accept={0} -y'.format(name))\n sudo('salt-key -L')\n\n\n@task\ndef delete_key(name):\n \"\"\"Delete specific key on master.\"\"\"\n with settings(host_string=env.master):\n sudo('salt-key -L')\n sudo('salt-key --delete={0} -y'.format(name))\n sudo('salt-key -L')\n\n\n@task\ndef deploy(loglevel=DEFAULT_SALT_LOGLEVEL):\n \"\"\"Deploy to a given environment by pushing the latest states and executing the highstate.\"\"\"\n require('environment')\n with settings(host_string=env.master):\n if env.environment != \"local\":\n sync()\n target = \"-G 'environment:{0}'\".format(env.environment)\n salt('saltutil.sync_all', target, loglevel)\n highstate(target)\n\n\n@task\ndef manage_run(command):\n \"\"\"\n Run a Django management command on the remote server.\n \"\"\"\n require('environment')\n # Setup the call\n manage_sh = u\"/var/www/{project}/manage.sh \".format(**env)\n sudo(manage_sh + command, user=env.project)\n\n\n@task\ndef manage_shell():\n manage_run('shell')\n\n\n@task\ndef get_db_dump(clean=False):\n \"\"\"Get db dump of remote environment.\"\"\"\n 
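# For example, for the staging environment (using the project and envs values defined above), the assembled command is:\n # pg_dump -Ox rapidpro_community_portal_staging -U rapidpro_community_portal_staging > /var/www/rapidpro_community_portal/rapidpro_community_portal_staging.sql\n 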
require('environment')\n db_name = '%(project)s_%(environment)s' % env\n dump_file = db_name + '.sql'\n project_root = os.path.join('/var', 'www', env.project)\n temp_file = os.path.join(project_root, dump_file)\n flags = '-Ox'\n if clean:\n flags += 'c'\n dump_command = 'pg_dump %s %s -U %s > %s' % (flags, db_name, db_name, temp_file)\n with settings(host_string=env.host_string):\n sudo(dump_command, user=env.project)\n get(temp_file, dump_file)\n\n\n@task\ndef reset_local_db():\n \"\"\" Reset local database from remote host \"\"\"\n require('environment')\n question = 'Are you sure you want to reset your local ' \\\n 'database with the %(environment)s database?' % env\n if not console.confirm(question, default=False):\n abort('Local database reset aborted.')\n remote_db_name = '%(project)s_%(environment)s' % env\n db_dump_name = remote_db_name + '.sql'\n local_db_name = env.project\n get_db_dump()\n with settings(warn_only=True):\n local('dropdb %s' % local_db_name)\n local('createdb -E UTF-8 %s' % local_db_name)\n local('cat %s | psql %s' % (db_dump_name, local_db_name))\n\n\n@task\ndef reset_local_media():\n \"\"\" Reset local media from remote host \"\"\"\n require('environment')\n media_source = os.path.join('/var', 'www', env.project, 'public', 'media')\n media_target = os.path.join(PROJECT_ROOT, 'public')\n with settings():\n local(\"rsync -rvaz %s:%s %s\" % (env.master, media_source, media_target))\n\n\n@task\ndef refresh_environment():\n \"\"\" Refresh this environment's database and media from production \"\"\"\n require('environment')\n if env.environment == 'production':\n abort('Production cannot be refreshed!')\n\n source_env = 'production'\n\n db_name = '%s_%s' % (env.project, source_env)\n dump_file_name = '%s.sql' % db_name\n full_dump_file_path = os.path.join(env.project_root, dump_file_name)\n src_env_host = envs[source_env]['host_string']\n\n with settings(host_string=src_env_host):\n sudo('pg_dump -Ox %s -U %s > %s' % (db_name, db_name, full_dump_file_path))\n\n sudo('supervisorctl stop all')\n db_name = db_user = '%s_%s' % (env.project, env.environment)\n media_full_path = '%s/public/media' % env.project_root\n with cd('/tmp'):\n run('scp %s:%s %s' % (src_env_host, full_dump_file_path, dump_file_name))\n sudo('dropdb %s_backup' % db_name, user='postgres')\n sudo('psql -c \"alter database %s rename to %s_backup\"' % (db_name, db_name), user='postgres')\n sudo('createdb -E UTF-8 -O %s %s' % (db_user, db_name), user='postgres')\n sudo('psql -U %s -d %s -f %s' % (db_user, db_name, dump_file_name))\n run('rsync -zPae ssh %s:%s .' 
% (src_env_host, media_full_path))\n sudo('rm -rf %s.backup' % media_full_path)\n sudo('mv %s %s.backup' % (media_full_path, media_full_path))\n sudo('mv media %s' % media_full_path)\n sudo('chown -R %s:%s %s' % (env.project, env.project, media_full_path))\n\n manage_run(\"migrate\")\n sudo('supervisorctl start all')\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":11954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"407656343","text":"# pylint: disable = W0703, C0301, W0611\n\"\"\" this program asks for the parent directory and the file suffix on the command line, then returns the\nlist of files in each directory that have the given suffix \"\"\"\nimport argparse\nimport os\nimport pathlib\nimport pprint\n\n\n# PARSER = argparse.ArgumentParser(description=\"Finding JPG files!\")\n# PARSER.add_argument('-p', '--parent', help=\"need to enter the path\", required=True)\n# PARSER.add_argument('-s', '--suffix', help='for entering the file suffix', default='.png')\n# ARGS = PARSER.parse_args()\n\n\ndef finding_files_with_os_walk(directory):\n \"\"\" find the files with the given suffix using the os.walk method from the os module\"\"\"\n total_list = []\n for paths, _, files in os.walk(directory):\n file_list = []\n for file in files:\n if os.path.splitext(file)[1] == '.png':\n file_list.append(file)\n total_list.append(paths)\n total_list.append(file_list)\n return total_list\n\n\nSUFFIX_LIST = []\n\n\ndef list_jpg_files(directory):\n \"\"\" find the files with the suffix using a recursive function\"\"\"\n file_list = []\n folder = set()\n for i in directory.iterdir():\n if i.suffix == \".png\":\n folder.add(i.parent)\n file_list.append(i.name)\n if file_list:\n SUFFIX_LIST.append(folder.pop())\n SUFFIX_LIST.append(file_list)\n for i in directory.iterdir():\n if i.is_dir():\n list_jpg_files(i)\n return SUFFIX_LIST\n","sub_path":"students/navid_bahadoran/lesson09/assignment/src/jpgdiscover.py","file_name":"jpgdiscover.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"287541822","text":"import torch\nimport numpy as np\nimport cv2\nimport pdb\nimport matplotlib.pyplot as plt\n\ndef extract_ampl_phase(fft_im):\n # fft_im: size should be bx3xhxwx2\n # compute the magnitude\n fft_amp = fft_im[:,:,:,:,0]**2 + fft_im[:,:,:,:,1]**2\n fft_amp = torch.sqrt(fft_amp)\n fft_pha = torch.atan2( fft_im[:,:,:,:,1], fft_im[:,:,:,:,0] )\n return fft_amp, fft_pha\n\ndef low_freq_mutate( amp_src, amp_trg, L=0.1 ):\n _, _, h, w = amp_src.size()\n b = ( np.floor(np.amin((h,w))*L) ).astype(int) # get b\n amp_src[:,:,0:b,0:b] = amp_trg[:,:,0:b,0:b] # top left\n amp_src[:,:,0:b,w-b:w] = amp_trg[:,:,0:b,w-b:w] # top right\n amp_src[:,:,h-b:h,0:b] = amp_trg[:,:,h-b:h,0:b] # bottom left\n amp_src[:,:,h-b:h,w-b:w] = amp_trg[:,:,h-b:h,w-b:w] # bottom right\n return amp_src\n\ndef low_freq_mutate_np( amp_src, amp_trg, L=0.1 ):\n a_src = np.fft.fftshift( amp_src, axes=(-2, -1) )\n a_trg = np.fft.fftshift( amp_trg, axes=(-2, -1) )\n\n _, h, w = a_src.shape\n b = ( np.floor(np.amin((h,w))*L) ).astype(int)\n c_h = np.floor(h/2.0).astype(int)\n c_w = np.floor(w/2.0).astype(int)\n\n h1 = c_h-b\n h2 = c_h+b+1\n w1 = c_w-b\n w2 = c_w+b+1\n\n a_src[:,h1:h2,w1:w2] = a_trg[:,h1:h2,w1:w2]\n a_src = np.fft.ifftshift( a_src, axes=(-2, -1) )\n return a_src\ndef FDA_source_to_target_test(src_img, trg_img, L=0.1):\n return src_img\n\n\n\ndef FDA_source_to_target(src_img, trg_img, 
L=0.1):\n # exchange magnitude\n # input: src_img, trg_img\n\n # get fft of both source and target\n # torch.fft.rfft2(input, s=None, dim=(-2, -1), norm=None) → Tensor\n fft_src = torch.fft.fft( src_img.clone(), n=None, dim=-1,norm=None)\n pdb.set_trace()\n fft_trg = torch.rfft( trg_img.clone(), signal_ndim=2, onesided=False )\n\n # extract amplitude and phase of both ffts\n amp_src, pha_src = extract_ampl_phase( fft_src.clone())\n amp_trg, pha_trg = extract_ampl_phase( fft_trg.clone())\n\n # replace the low frequency amplitude part of source with that from target\n amp_src_ = low_freq_mutate( amp_src.clone(), amp_trg.clone(), L=L )\n\n # recompose fft of source\n fft_src_ = torch.zeros( fft_src.size(), dtype=torch.float )\n fft_src_[:,:,:,:,0] = torch.cos(pha_src.clone()) * amp_src_.clone()\n fft_src_[:,:,:,:,1] = torch.sin(pha_src.clone()) * amp_src_.clone()\n\n # get the recomposed image: source content, target style\n _, _, imgH, imgW = src_img.size()\n src_in_trg = torch.irfft( fft_src_, signal_ndim=2, onesided=False, signal_sizes=[imgH,imgW] )\n\n return src_in_trg\n\ndef FDA_source_to_target_np( src_img, trg_img, L=0.1 ):\n # exchange magnitude\n # input: src_img, trg_img\n\n src_img_np = src_img #.cpu().numpy()\n trg_img_np = trg_img #.cpu().numpy()\n\n # get fft of both source and target\n fft_src_np = np.fft.fft2( src_img_np, axes=(-2, -1) )\n fft_trg_np = np.fft.fft2( trg_img_np, axes=(-2, -1) )\n\n # extract amplitude and phase of both ffts\n amp_src, pha_src = np.abs(fft_src_np), np.angle(fft_src_np)\n amp_trg, pha_trg = np.abs(fft_trg_np), np.angle(fft_trg_np)\n\n # mutate the amplitude part of source with target\n amp_src_ = low_freq_mutate_np( amp_src, amp_trg, L=L )\n\n # mutated fft of source\n fft_src_ = amp_src_ * np.exp( 1j * pha_src )\n\n # get the mutated image\n src_in_trg = np.fft.ifft2( fft_src_, axes=(-2, -1) )\n src_in_trg = np.real(src_in_trg)\n\n return src_in_trg\n\nif __name__ == '__main__':\n source_file_name ='/mnt/lustre/hemengzhe/datasets/cityscapes/leftImg8bit/train/aachen/aachen_000001_000019_leftImg8bit.png'\n target_file_name = '/mnt/lustre/hemengzhe/datasets/cityscapes/leftImg8bit_foggy/train/bochum/bochum_000000_000313_leftImg8bit_foggy_beta_0.02.png'\n # source_image = torch.Tensor(cv2.imread(source_file_name)).permute(2,0,1).unsqueeze(0) #[h,w,3]\n # target_image = torch.Tensor(cv2.imread(target_file_name)).permute(2,0,1).unsqueeze(0) #[h,w,3]\n # s_t_image = FDA_source_to_target(source_image, target_image).squeeze(0)\n\n source_image = cv2.imread(source_file_name).transpose(2,0,1)\n target_image = cv2.imread(target_file_name).transpose(2,0,1)\n source_image = source_image[np.newaxis,:]\n target_image = target_image[np.newaxis,:]\n print(\"transfer image\")\n s_t_image = FDA_source_to_target_np(source_image,target_image)\n print(\"transfer done\")\n s_t_image = np.array(s_t_image)\n s_t_image = s_t_image.astype(int)\n # pdb.set_trace()\n rtn = plt.imshow(s_t_image)\n plt.show()\n\n","sub_path":"fft_debug.py","file_name":"fft_debug.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"116448680","text":"import chainer\nimport chainerrl\nfrom chainer import functions as F, links as L\n\n\nclass DQNModel(chainer.Chain, chainerrl.q_function.StateQFunction):\n \"\"\"A Q-function model that returns Q-values from a one-dimensional vector.\n\n Args:\n in_size: number of input dimensions\n out_size: number of output dimensions (the number of actions)\n gpu_id (int): GPU number (set to None when not using a GPU)\n \"\"\"\n\n def __init__(self, in_size, out_size, gpu_id):\n 
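# Two fully connected hidden layers (50 units each) followed by a linear output head.\n # gpu_id is kept for interface consistency; it is not used inside this constructor.\n 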
unit_sizes = [in_size, 50, 50]\n super(DQNModel, self).__init__(\n l_1=L.Linear(unit_sizes[0], unit_sizes[1]),\n l_2=L.Linear(unit_sizes[1], unit_sizes[2]),\n l_out=L.Linear(unit_sizes[-1], out_size),\n )\n\n def __call__(self, x):\n h = x\n h = self.l_1(h)\n h = F.tanh(h)\n h = self.l_2(h)\n h = F.tanh(h)\n h = self.l_out(h)\n return chainerrl.action_value.DiscreteActionValue(h)\n","sub_path":"dl_exp_rl/agents/models/dqn_model_complete.py","file_name":"dqn_model_complete.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"357025876","text":"import os\nfrom string import Template\n\n\nclass CtiOsTemplate:\n \"\"\"\n CTIOS class which reads an XML template and prepares it for the CtiOs service\n \"\"\"\n\n def __init__(self, template_dir):\n \"\"\"\n Constructor of CtiOsTemplate class\n\n Args:\n template_dir (str): Template directory has to be an absolute path; relative paths are not supported\n \"\"\"\n if template_dir and os.path.isabs(template_dir):\n self.template_dir = template_dir\n else:\n self.template_dir = os.path.join(\n os.path.dirname(__file__)\n )\n\n def _read_template(self, template_name):\n \"\"\"\n Read template\n\n Args:\n template_name (str): name of template xml file\n\n Returns:\n template.read()\n \"\"\"\n with open(os.path.join(self.template_dir, template_name)) as template:\n return template.read()\n\n def render(self, template_name, **kwargs):\n \"\"\"\n Render template using arguments\n\n Args:\n template_name (str): name of template xml file\n **kwargs: arguments\n\n Returns:\n Template()\n \"\"\"\n return Template(\n self._read_template(template_name)\n ).substitute(**kwargs)\n","sub_path":"ctios/templates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"193767391","text":"'''\r\nfrom time import time\r\nfrom Project_Euler import all_factors, miller_rabin, sieve_of_eratosthenes\r\n\r\n\r\nprimes = sieve_of_eratosthenes(100000000)\r\nt1 = time()\r\ntot = 0\r\nfor p in primes:\r\n i = p - 1\r\n factors = all_factors(i)\r\n all_good = True\r\n for f in factors:\r\n if not miller_rabin(f + i // f):\r\n all_good = False\r\n break\r\n if all_good:\r\n tot += i\r\n\r\nprint(tot)\r\n'''\r\ndef sqrt(x):\r\n assert x >= 0\r\n i = 1\r\n while i * i <= x:\r\n i *= 2\r\n y = 0\r\n while i > 0:\r\n if (y + i)**2 <= x:\r\n y += i\r\n i //= 2\r\n return y\r\n\r\n\r\ndef list_primality(n):\r\n result = [True] * (n + 1)\r\n result[0] = result[1] = False\r\n for i in range(sqrt(n) + 1):\r\n if result[i]:\r\n for j in range(i * i, len(result), i):\r\n result[j] = False\r\n return result\r\n\r\n\r\ndef compute():\r\n LIMIT = 10 ** 8\r\n\r\n isprime = list_primality(LIMIT + 1)\r\n\r\n def is_prime_generating(n):\r\n return all(\r\n (n % d != 0 or isprime[d + n // d])\r\n for d in range(2, sqrt(n) + 1))\r\n\r\n ans = sum(n for n in range(LIMIT + 1)\r\n if isprime[n + 1] and is_prime_generating(n))\r\n return str(ans)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(compute())","sub_path":"PE357.py","file_name":"PE357.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"13888065","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: amaris\n\"\"\"\nimport os\nfrom threading import Timer\nfrom datetime import datetime\n\ndef get_tomorrow():\n x = 
datetime.today()\n year = x.year\n month = x.month\n day = x.day\n # Next day\n day += 1\n if day > 31:\n day = 1\n elif day > 30 and month in [4,6,9,11]:\n day = 1\n elif month==2 and ((day>29 and year%4==0) or (day>28 and not year%4==0)):\n day = 1\n happy_new_year = False\n if day == 1:\n month += 1\n if month == 13:\n month = 1\n happy_new_year = True\n if happy_new_year:\n year += 1\n return x.replace(year=year,month=month,day=day,hour=6,minute=15,second=0,microsecond=0)\n #return x.replace(year=2018,month=5,day=31,hour=16,minute=1,second=10,microsecond=0)\n #print('year:'+str(year)+' month:'+str(month)+' day:'+str(day))\n #return None\n\ndef launch_daily():\n os.popen('echo \"amaris2017\" | sudo -S ./activator.sh pass amaris2017')\n\nif __name__ == \"__main__\":\n now = datetime.today()\n #y = x.replace(day=x.day+1, hour=5, minute=10, second=0, microsecond=0)\n tomorrow = get_tomorrow()\n print(tomorrow)\n print(now)\n print(tomorrow-now)\n delta_t = tomorrow-now\n secs = delta_t.seconds+1\n t = Timer(secs, launch_daily)\n print('Waiting...')\n t.start()","sub_path":"trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"222903316","text":"# Implementation language: Python\nb=[0]\na=[-1,0,-3,1,5,6,-11,5]\nglobal count,bi,bj\ncount=0\n# Triple-loop method\ndef MaxSum3(n,a,besti,bestj):\n sum=0\n global count\n for i in range(0,n):\n for j in range (i,n):\n thissum=0\n for k in range(i,j+1):\n thissum+=a[k]\n if(thissum>sum) :\n sum=thissum\n besti=i\n bestj=j\n count+=1\n return sum,besti,bestj\n# Divide-and-conquer method\n# Search outward from the middle for the maximum contiguous subarray\ndef findmid(a,left,right):\n global count\n count+=1\n # When the left and right indices are equal the array holds a single element, so return it directly\n if(right==left):\n besti=left\n bestj=right\n sum=a[left]\n return sum,besti,bestj\n # Variable definitions\n center=int((left+right)/2)\n s1=0\n lefts=0\n s2=0\n rights=0\n cibest=0\n cjbest=0\n i=center\n j=center+1\n # Accumulate leftwards from the middle of the array to find the best contiguous subarray extending to the left\n # Initialise s1 from a[i]; this prevents s1 from being stuck at 0 when every partial sum is negative and never updating correctly\n if(a[i]<0):\n s1=a[i]\n while(i>=left):\n lefts+=a[i] \n if(lefts>s1):\n s1=lefts\n cibest=i\n i-=1\n if(i==left-1)and(lefts>a[center]):\n cibest=left\n # Accumulate rightwards from the middle; the procedure mirrors the left-hand side\n j=center+1\n if(a[j]<0):\n s2=a[j]\n while(j<=right):\n rights+=a[j]\n if(rights>s2):\n s2=rights\n cjbest=j\n j+=1\n if(j==right+1)and(rights>a[center+1]):\n cjbest=right\n # Adding the two results gives the value and indices of the best subarray that crosses the middle\n crosssum=s1+s2\n return crosssum,cibest,cjbest\ndef Maxsubsum(a,left,right):\n sum=0\n global count\n # When the left and right indices are equal the array holds a single element, so return it directly\n if(right==left):\n sum=a[left]\n return sum,left,right\n else:\n center=int((left+right)/2)\n # Recursively compute the maximum contiguous subarray of the right half\n (rightsum,ribest,rjbest)=Maxsubsum(a,center+1,right)\n # Recursively compute the maximum contiguous subarray of the left half\n (leftsum,libest,ljbest)=Maxsubsum(a,left,center)\n # Compute the maximum contiguous subarray crossing the middle\n (crosssum,cibest,cjbest)=findmid(a,left,right)\n count+=1\n # Compare the three candidates to obtain the answer\n if(crosssum>leftsum)and(crosssum>rightsum):\n return crosssum,cibest,cjbest\n elif(leftsum>rightsum):\n return leftsum,libest,ljbest\n else:\n return rightsum,ribest,rjbest\n# Double-loop method\ndef MaxSum2(n,a,besti,bestj):\n sum=0\n global count\n for i in range(0,n):\n thissum=0\n for j in range(i,n):\n thissum+=a[j]\n if(thissum>sum):\n sum=thissum\n besti=i\n bestj=j\n count+=1\n return sum,besti,bestj\nprint(\"Test array:\",a)\nprint(\"Triple-loop method:\",MaxSum3(8,a,0,0))\nprint(\"Iteration count:\",count)\ncount=0\nprint(\"Double-loop method:\",MaxSum2(8,a,0,0))\nprint(\"Iteration count:\",count)\ncount=0\nprint(\"Divide and conquer:\",Maxsubsum(a,0,7))\nprint(\"Iteration count:\",count)\n","sub_path":"求最长连续子数组及其下标的三种方法_三次循环二次循环分治法.py","file_name":"求最长连续子数组及其下标的三种方法_三次循环二次循环分治法.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}