diff --git "a/4278.jsonl" "b/4278.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4278.jsonl"
@@ -0,0 +1,591 @@
+{"seq_id":"35483398599","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 10 21:12:09 2019\n\nCreates the plausible prior samples (stage 1, beta 0)\n@author: duttar\n\"\"\"\n# %%\n# load libraries \nimport numpy as np\nimport scipy.io as sio\nimport random\nimport sys\nimport os\nfrom scipy.optimize import lsq_linear\n#import matplotlib.pyplot as plt\n#from mpl_toolkits.mplot3d import Axes3D\n#simport matplotlib.tri as mtri\nfrom collections import namedtuple\nsys.path.append('additional_scripts/')\nsys.path.append('additional_scripts/geompars/')\nsys.path.append('additional_scripts/greens/')\nfrom Gorkhamakemesh import *\nfrom greenfunction import * \nfrom posteriorGorkha import *\nimport time\n\n#%% Define functions\n\ndef lin_inv(model, subdisp, subloc, sublos, W, musq, disct_x, disct_z, surf_pts, LB, UB):\n '''\n Regularize non-negative linear least squares slip inversion RNNLSQ\n '''\n bestgeo = model[:10]\n NT1 = namedtuple('NT1', \\\n 'trired p q r xfault yfault zfault disct_x disct_z surfpts model')\n mesh = NT1(None, None, None, None, None, None, None, \\\n disct_x, disct_z, surf_pts, bestgeo)\n \n finalmesh = Gorkhamesh(mesh, NT1)\n grn1, obsdata = grn_func(subloc, subdisp, sublos, finalmesh.trired, \\\n finalmesh.p, finalmesh.q, finalmesh.r)\n \n obsdata = np.reshape(obsdata, (obsdata.shape[0], 1))\n\n numpars = 2*finalmesh.trired.shape[0]\n numdata = subdisp.shape[0]\n greens1 = grn1\n lb = LB[-numpars:]; ub = UB[-numpars:]\n \n laplac1 = laplacian(finalmesh.trired, finalmesh.p, finalmesh.q, finalmesh.r)\n \n laplac = np.r_[np.c_[laplac1.todense()*musq, np.zeros(laplac1.todense().shape)], \\\n np.c_[np.zeros(laplac1.todense().shape), laplac1.todense()*musq]]\n \n Amat = np.r_[np.matmul(W, greens1), laplac]\n Bmat = np.r_[np.matmul(W, obsdata), np.zeros((numpars, 1))]\n \n tikhA = np.matmul(np.transpose(Amat), Amat) + \\\n .005**2* np.eye(Amat.shape[1])\n tikhA = np.array(tikhA)\n \n tikhB = np.matmul(np.transpose(Amat), Bmat)\n tikhB = np.array(tikhB).flatten('F')\n \n res = lsq_linear(tikhA, tikhB, bounds=(lb, ub), method = 'trf', \\\n lsmr_tol='auto', max_iter= 500, verbose=1)\n \n return res\n\n# %%\n\nmat_c1 = sio.loadmat('additional_scripts/GPS_subsampledGorkha.mat')\ncovall = mat_c1['covall']\nsubdisp = mat_c1['subdisp']\nsubloc1 = mat_c1['subloc']\nsublos = mat_c1['sublos']\nnumdis = subloc1.shape[0]\n\nsubloc = np.hstack((subloc1,np.zeros((numdis,1))))\n\nfor i in range(numdis):\n covall[i,i] = 1.19*covall[i,i]\n\ninvcov = np.linalg.inv(covall)\nW = np.linalg.cholesky(invcov).T\n\nsurf_pts = np.array([[215.8972,3.0950e+03],[442.8739,3.0097e+03]])\nbestgeo = np.array([1.0927 ,0.0241, 5.6933,-20.0000, 2.1345, \\\n 0.3529, 4.4643, 0.0336, -12, -2.8494, 0.0043])\ndisct_x = 20; disct_z = 12\n\nNT1 = namedtuple('NT1', \\\n'trired p q r xfault yfault zfault disct_x disct_z surfpts model')\nmesh = NT1(None, None, None, None, None, None, None, \\\n disct_x, disct_z, surf_pts, bestgeo)\n \n# get the geometry \nstart = time.time()\nfinalmesh = Gorkhamesh(mesh, NT1) \nend = time.time()\nprint(end - start)\n\n################# make a plot #################################\n#fig = plt.figure(figsize=plt.figaspect(0.5))\n\n#ax = fig.add_subplot(1, 1, 1, projection='3d')\n#ax.plot_trisurf(finalmesh.p, finalmesh.q, finalmesh.r, \\\n# triangles=finalmesh.trired, cmap=plt.cm.Spectral)\n#plt.axis('equal')\n#ax.set(xlim=(200, 500), ylim=(2900, 3400), zlim=(-25, -5))\n#plt.show()\n##############################################################\n\n# run the greens function \nstart = 
time.time()\n#grn1, obsdata = grn_func(subloc, subdisp, sublos, finalmesh.trired, \\\n# finalmesh.p, finalmesh.q, finalmesh.r)\nend = time.time()\nprint(end - start)\n\n##############################################################\n\nlowslip1 = np.zeros((1,finalmesh.trired.shape[0]))\nlowslip2 = -10*np.ones((1,finalmesh.trired.shape[0]))\nmaxslip1 = 25*np.ones((1,finalmesh.trired.shape[0]))\nmaxslip2 = 10*np.ones((1,finalmesh.trired.shape[0]))\n\nLBslip = np.append(lowslip1, lowslip2)\nUBslip = np.append(maxslip1, maxslip2)\n\nLB = np.append(np.array([-5, -.5, 3, -25, -5, -.5, -5, -.5, -13, -8, -8]),LBslip)\nUB = np.append(np.array([7, .5, 25, -16, 7, .5, 7, .5, -6, 8, 8]),UBslip)\n\nedges = np.array([], dtype = int)\nfor i in range(1, disct_x+1):\n if i == 1:\n edges = np.append(edges, np.arange(1,(disct_z)*2+1))\n elif i < disct_x and i > 1:\n edges = np.append(edges, np.array([(i-1)*disct_z*2+1, (i-1)*disct_z*2+2, \\\n i*(disct_z)*2, i*(disct_z)*2-1]))\n elif i == disct_x:\n edges = np.append(edges, np.arange((i-1)*(disct_z)*2+1,(i)*(disct_z)*2+1))\nedges = edges-1\nLB[edges+bestgeo.shape[0]] = 0\nUB[edges+bestgeo.shape[0]] = 1e-5\nLB[edges+bestgeo.shape[0]+finalmesh.trired.shape[0]] = 0\nUB[edges+bestgeo.shape[0]+finalmesh.trired.shape[0]] = 1e-5\n \nNTpostin = namedtuple('NTpostin', \\\n 'surf_pts disct_x disct_z LB UB subdisp subloc sublos W') \noptall = NTpostin(surf_pts, disct_x, disct_z, LB, UB, subdisp, subloc, \\\n sublos, W)\n\nNTpostout = namedtuple('NTpostout', \\\n 'logpost, reslaplac, resdata, momag, trired, p, q, r, xfault, yfault, zfault')\noutput = NTpostout(None, None, None, None, None, None, None, None, None, None, None)\n\ndef postGorkha(x):\n postout = posterior(x, optall, NTpostin, output, NTpostout)\n return postout.logpost\n\n################################################################\n \nNmarkov = 4000; Nchains = 100\ndiffbnd = UB - LB\ndiffbndN = np.tile(diffbnd, (Nmarkov, 1))\nLBN = np.tile(np.transpose(LB), (Nmarkov, 1))\nrandadd = np.random.rand(Nmarkov, LB.shape[0])\n#samplestage = LBN + randadd*diffbndN\n#sio.savemat('sample0stage.mat', {'samplestage':samplestage})\nmat_samplestage = sio.loadmat('samples1/stage1/sample1stage.mat')\nsamplestage = mat_samplestage['samplestage']\n\n#indarray = np.int(os.environ['arrayindex'])\n#numind = 8; \n#index = np.arange((indarray-1)*numind, indarray*numind)\n\n# %%\n# sliparray = np.zeros((numind, 2*finalmesh.trired.shape[0]))\n# for i in index:\n# j = np.where(index == i)\n# arraymodel = samplestage[i, :]\n# slip = lin_inv(arraymodel, subdisp, subloc, sublos, W, .4, disct_x, disct_z, surf_pts, \\\n# LB, UB)\n# slipall = slip.x \n# sliparray[j,:] = slipall\n\n# sampstage = np.c_[samplestage[index,:11], sliparray]\n\n# varname = 'samples1/stage1/samples/sample1stage' + np.str(indarray) + '.mat'\n# sio.savemat(varname, {'sampstage':sampstage})\n#%%\n\npostval = np.zeros((4000, 1))\nfor i in range(4000):\n print(i)\n sample_i = samplestage[i,:]\n postsamp = postGorkha(sample_i)\n postval[i] = postsamp\n \nbeta = np.array([0]) \nstage = np.array([1]) \n\nNT2 = namedtuple('NT2', 'allsamples postval beta stage covsmpl resmpl')\nsamples = NT2(samplestage, postval, beta, stage, None, None)\n\nvarname = 'samples1/stage1/sample1stage.mat' \nsio.savemat(varname, {'samplestage':samples.allsamples, 'postval':samples.postval, \\\n 'beta':samples.beta, 'stage':samples.stage})\n 
\n\n\n\n\n\n","repo_name":"rishabhdutta/nonplanar_gorkha","sub_path":"stage_1_run.py","file_name":"stage_1_run.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"2957823244","text":"import datetime\nimport PySimpleGUI as psg\nimport logging.config\nfrom configparser import ConfigParser\nfrom pathlib import Path\nfrom attendance import Attendance\nfrom session_data import SessionData\nimport cv2\n\nfrom ui_register import RegisterGui\n\n\nclass SimpleGui:\n selectable_size = (23, 1)\n button_size = (20, 1)\n font = ('Helvetica', 10)\n\n def __init__(self, config):\n self.config = config\n self.diplay_image_height = self.config.getint('DISPLAY_IMAGE_HEIGHT')\n self.diplay_image_width = self.config.getint('DISPLAY_IMAGE_WIDTH')\n\n self.attendance = Attendance(self.config)\n\n\n psg.theme('LightGreen3') \n\n self.button_start = self.config['BUTTON_START_LABEL']\n self.button_stop = self.config['BUTTON_STOP_LABEL']\n self.button_exit = self.config['BUTTON_EXIT_LABEL']\n self.button_photo = self.config['BUTTON_PHOTO_LABEL']\n\n self.select_course_label = self.config['SELECT_COURSE_LABEL']\n self.select_course_type_label = self.config['SELECT_COURSE_TYPE_LABEL']\n self.select_room_label = self.config['SELECT_ROOM_LABEL']\n self.select_week_label = self.config['SELECT_WEEK_LABEL']\n self.select_specialization_label = self.config['SELECT_SPECIALIZATION_LABEL']\n\n self.course_list = self.config['COURSE_LIST'].split(',')\n\n self.specialization_list = self.config['SPECIALIZATION_LIST'].split(',')\n\n self.room_list = self.config['ROOM_LIST'].split(',')\n\n self.week_list = self.config['WEEK_LIST'].split(',')\n\n self.course_type_list = self.config['COURSE_TYPE_LIST'].split(',')\n\n data_column = [\n [psg.Text(self.select_course_label)],\n [psg.Combo(self.course_list, size=self.selectable_size, key='subject', default_value=self.course_list[0])],\n [psg.Text(self.select_course_type_label)],\n [psg.Combo(self.course_type_list, size=self.selectable_size, key='type', default_value=self.course_type_list[0])],\n [psg.Text(self.select_specialization_label)],\n [psg.Combo(self.specialization_list, size=self.selectable_size, key='class', default_value=self.specialization_list[0])],\n [psg.Text(self.select_week_label)],\n [psg.Combo(self.week_list, size=self.selectable_size, key='week', default_value=self.week_list[0])],\n [psg.Text(self.select_room_label)],\n [psg.Combo(self.room_list, size=self.selectable_size, key='classroom', default_value=self.room_list[0])],\n [psg.Text(size=(25, 1), k='-OUTPUT-')],\n [psg.Button(self.button_start, size=self.button_size)],\n [psg.Button(self.button_stop, size=self.button_size)],\n [psg.Button(self.button_exit, size=self.button_size)],\n [psg.Button(self.button_photo, size=self.button_size)],\n ]\n\n\n self.layout = [\n [\n psg.Image(key='image_box', size=(\n self.diplay_image_height, self.diplay_image_width)),\n psg.Column(data_column),\n ]\n ]\n\n \n self.window = psg.Window('Jelenlétkezelő',\n self.layout, font=self.font)\n\n \n\n def run(self):\n while True:\n frame_bytes = self.attendance.get_latest_frame(resized_bytes=True)\n\n event, values = self.window.read(timeout=10)\n\n if event == self.button_exit or event == psg.WIN_CLOSED:\n break\n\n if event == self.button_photo:\n register_gui = RegisterGui(self.config)\n register_gui.run()\n\n if event == self.button_start:\n\n if values['subject'] != \"\" and values['type'] != \"\" and values['class'] != \"\" and values['week'] != \"\" and values['classroom'] != \"\":\n psg.popup('OK', 'Az adatok mentése elkezdődik!')\n\n session_data = SessionData(subject=values['subject'],\n type=values['type'],\n className=values['class'],\n week=values['week'],\n 
classroom=values['classroom'],\n                                           date=datetime.datetime.now().strftime(\"%Y-%m-%d\"),\n                                           time=datetime.datetime.now().strftime(\"%H:%M:%S\"))\n                    self.attendance.start(session_data)\n                else:\n                    psg.popup('Hiba', 'Minden mező kitöltése kötelező!')\n\n            if event == self.button_stop:\n                self.attendance.stop()\n\n            self.window[\"image_box\"].update(data=frame_bytes)\n\n        self.window.close()\n        self.attendance.stop()\n\n\n\n\ndef main():\n    config_object = ConfigParser()\n    config_object.read(Path(\"config.ini\"))\n    config = config_object[\"DEFAULT\"]\n\n    logging.config.fileConfig(Path(\"log_config.ini\"))\n\n    display = SimpleGui(config)\n    display.run()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Szilvia97/LiveFaceRecognition","sub_path":"live-face-recognition/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"75149028652","text":"#!/usr/bin/env python\n# File: mandelbrotgp.py\n# Name: D.Saravanan\n# Date: 21/04/2023\n\n\"\"\" Script for the Mandelbrot set with gnuplot \"\"\"\n\nimport os\nimport numpy as np\n\n\ndef mandelbrot(rmin, rmax, imin, imax):\n \"\"\"an algorithm to generate an image of the Mandelbrot set\"\"\"\n\n max_iters = 256\n upper_bound = 2.5\n width = height = 512\n\n real_vals = np.linspace(rmin, rmax, width)\n imag_vals = np.linspace(imin, imax, height)\n\n # we will represent members as 1, non-members as 0\n mandelbrot_graph = np.ones((height, width), dtype=np.int32)\n\n for x in range(width):\n for y in range(height):\n c = np.complex64(real_vals[x] + imag_vals[y] * 1j)\n z = np.complex64(0)\n\n for _ in range(max_iters):\n z = z**2 + c\n\n if np.abs(z) > upper_bound:\n mandelbrot_graph[y, x] = 0\n break\n\n return mandelbrot_graph\n\n\nif __name__ == \"__main__\":\n mandel = mandelbrot(-2, 2, -2, 2)\n\n gp = os.popen(\"gnuplot -persist\", \"w\")\n gp.write(\"set colorsequence classic\\n\")\n gp.write(\"set output 'mandelbrotgp.png'\\n\")\n gp.write(\"set terminal pngcairo font 'Times,12'\\n\")\n gp.write(\"set autoscale xfix; set autoscale yfix\\n\")\n gp.write(\"set cbrange [0:1]; set autoscale cbfix\\n\")\n gp.write(\"set palette defined (0 'blue', 1 'white')\\n\")\n gp.write(\"plot '-' matrix with image pixels notitle\\n\")\n for i in range(512):\n for j in range(512):\n gp.write(\"%d \" % mandel[i][j])\n gp.write(\"\\n\")\n gp.write(\"e\\n\")\n gp.close()\n","repo_name":"dsarvan/codelearn","sub_path":"python/mandelbrotgp.py","file_name":"mandelbrotgp.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"19603736798","text":"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport imp,nltk,requests,re,heapq\nfrom gensim.summarization import summarize\nfrom bs4 import BeautifulSoup\nstopwords = nltk.corpus.stopwords.words('english')\ndef summ(s):\n sentence_list = nltk.sent_tokenize(s) \n formatted_article_text = s\n word_frequencies = {} \n for word in nltk.word_tokenize(formatted_article_text): \n if word not in stopwords:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n maximum_frequncy = max(word_frequencies.values())\n\n for word in word_frequencies.keys(): \n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy)\n sentence_scores = {} \n for sent in sentence_list: \n for word in nltk.word_tokenize(sent.lower()):\n if word in word_frequencies.keys():\n if len(sent.split(' ')) < 50:\n if sent not in sentence_scores.keys():\n sentence_scores[sent] = word_frequencies[word]\n else:\n sentence_scores[sent] += word_frequencies[word]\n summary_sentences = heapq.nlargest(15, sentence_scores, key=sentence_scores.get)\n summary = ' '.join(summary_sentences) \n return summary\n","repo_name":"vinaykirpalani/NewsNow","sub_path":"Summarize.py","file_name":"Summarize.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"55"}
+{"seq_id":"30028783445","text":"#!/usr/bin/env python\nfrom __future__ import division\n\nimport pybullet as p\nimport pybullet_data\n\nimport time\nimport sys\nimport os\nimport copy\nimport math\nimport numpy as np\nimport random\nfrom collections import OrderedDict\n\nimport rospy\nimport rospkg\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import Point\n\nfrom MotomanRobot import MotomanRobot\nfrom WorkspaceTable import WorkspaceTable\nfrom Planner import Planner\nfrom Planner import PositionCandidateConfigs\nimport utils\n\nfrom uniform_object_rearrangement.msg import ArmTrajectory\nfrom uniform_object_rearrangement.msg import ObjectRearrangePath\nfrom uniform_object_rearrangement.msg import ArrState\nfrom uniform_object_rearrangement.msg import ObjArrStates\nfrom uniform_object_rearrangement.srv import ReproduceInstanceCylinder, ReproduceInstanceCylinderResponse\nfrom uniform_object_rearrangement.srv import GenerateConfigsForStartPositions, GenerateConfigsForStartPositionsResponse\nfrom uniform_object_rearrangement.srv import DetectInvalidArrStates, DetectInvalidArrStatesResponse\nfrom uniform_object_rearrangement.srv import RearrangeCylinderObject, RearrangeCylinderObjectResponse\nfrom uniform_object_rearrangement.srv import GetCurrRobotConfig, GetCurrRobotConfigResponse\nfrom uniform_object_rearrangement.srv import UpdateCertainObjectPose, UpdateCertainObjectPoseResponse\nfrom uniform_object_rearrangement.srv import ResetRobotCurrConfig, ResetRobotCurrConfigResponse\nfrom uniform_object_rearrangement.srv import UpdateManipulationStatus, UpdateManipulationStatusResponse\nfrom uniform_object_rearrangement.srv import SetSceneBasedOnArrangement, SetSceneBasedOnArrangementResponse\nfrom uniform_object_rearrangement.srv import SelectObjectAndBuffer, SelectObjectAndBufferResponse\nfrom uniform_object_rearrangement.srv import ResetPlanningInstance, ResetPlanningInstanceResponse\nfrom uniform_object_rearrangement.srv import ClearPlanningInstance, ClearPlanningInstanceResponse\nfrom uniform_object_rearrangement.srv import ResetRobotHome, ResetRobotHomeResponse\n\n################################## description #####################################\n### This class defines a PybulletPlanScene class which\n### entertain an planning scene that\n### (1) reproduces the instance from the execution scene based on perception output\n### (2) performs planning for manipulation (transit + transfer)\n### (3) communicates with grasp_pose node to get the grasp pose for grasping objects\n####################################################################################\n\n\n# Disable\ndef blockPrint():\n sys.stdout = open(os.devnull, 'w')\n\n# Restore\ndef enablePrint():\n sys.stdout = sys.__stdout__\n\n\nclass PybulletPlanScene(object):\n\n def __init__(self, args):\n ### read in relevant ros parameters for plan scene\n basePosition, baseOrientation, urdfFile, \\\n leftArmHomeConfiguration, rightArmHomeConfiguration, torsoHomeConfiguration, \\\n standingBase_dim, table_dim, table_offset_x, \\\n cylinder_radius, cylinder_height, \\\n discretization_x, discretization_y, \\\n object_interval_x, object_interval_y, \\\n side_clearance_x, side_clearance_y, \\\n ceiling_height, thickness_flank, \\\n object_mesh_path = self.readROSParam()\n \n ### set the rospkg path\n rospack = rospkg.RosPack()\n self.rosPackagePath = rospack.get_path(\"uniform_object_rearrangement\")\n\t\n ### set the server for the pybullet plan scene\n # self.planningClientID = 
p.connect(p.DIRECT)\n self.planningClientID = p.connect(p.GUI)\n # p.setAdditionalSearchPath(pybullet_data.getDataPath())\n # self.egl_plugin = p.loadPlugin(egl.get_filename(), \"_eglRendererPlugin\")\n # print(\"plugin=\", self.egl_plugin)\n\n ### configure the robot\n self.configureMotomanRobot(urdfFile, basePosition, baseOrientation, \\\n leftArmHomeConfiguration, rightArmHomeConfiguration, torsoHomeConfiguration, False)\n ### setup the workspace\n self.setupWorkspace(standingBase_dim, table_dim, table_offset_x, object_mesh_path, False)\n self.workspace_p.addConstrainedArea(ceiling_height, thickness_flank)\n self.workspace_p.setDeploymentParam(\n cylinder_radius, cylinder_height, side_clearance_x, side_clearance_y, \\\n discretization_x, discretization_y, object_interval_x, object_interval_y)\n self.workspace_p.deployAllPositionCandidates(generateMesh=True)\n\n ### create a planner assistant\n self.planner_p = Planner(\n self.rosPackagePath, self.planningClientID,\n isObjectInLeftHand=False, isObjectInRightHand=False,\n objectInLeftHand=None, objectInRightHand=None)\n\n\n def configureMotomanRobot(self, \n urdfFile, basePosition, baseOrientation,\n leftArmHomeConfiguration, rightArmHomeConfiguration, torsoHomeConfiguration, isPhysicsTurnOn):\n ### This function configures the robot in the real scene\n self.robot_p = MotomanRobot(\n os.path.join(self.rosPackagePath, urdfFile), \n basePosition, baseOrientation, \n leftArmHomeConfiguration, rightArmHomeConfiguration, torsoHomeConfiguration,\n isPhysicsTurnOn, self.planningClientID)\n\n def setupWorkspace(self,\n standingBase_dim, table_dim, table_offset_x,\n object_mesh_path, isPhysicsTurnOn):\n ### This function sets up the workspace\n self.workspace_p = WorkspaceTable(self.rosPackagePath, self.robot_p.basePosition,\n standingBase_dim, table_dim, table_offset_x, \n os.path.join(self.rosPackagePath, object_mesh_path),\n isPhysicsTurnOn, self.planningClientID)\n\n\n def rosInit(self):\n ### This function specifies the role of a node instance for this class\n ### and initialize a ros node\n self.reproduce_instance_cylinder_server = rospy.Service(\n \"reproduce_instance_cylinder\", ReproduceInstanceCylinder,\n self.reproduce_instance_cylinder_callback)\n\n self.rearrange_cylinder_object_server = rospy.Service(\n \"rearrange_cylinder_object\", RearrangeCylinderObject,\n self.rearrange_cylinder_object_callback)\n\n self.generate_configs_for_start_positions_server = rospy.Service(\n \"generate_configs_for_start_positions\", GenerateConfigsForStartPositions,\n self.generate_configs_for_start_positions_callback)\n\n self.detect_invalid_arr_states_server = rospy.Service(\n \"detect_invalid_arr_states\", DetectInvalidArrStates,\n self.detect_invalid_arr_states_callback)\n\n self.get_curr_robot_config_server = rospy.Service(\n \"get_curr_robot_config\", GetCurrRobotConfig,\n self.get_curr_robot_config_callback)\n\n self.update_certain_object_pose_server = rospy.Service(\n \"update_certain_object_pose\", UpdateCertainObjectPose,\n self.update_certain_object_pose_callback)\n\n self.reset_robot_curr_config_server = rospy.Service(\n \"reset_robot_curr_config\", ResetRobotCurrConfig,\n self.reset_robot_curr_config_callback)\n\n self.update_manipulation_status_server = rospy.Service(\n \"update_manipulation_status\", UpdateManipulationStatus,\n self.update_manipulation_status_callback)\n\n self.set_scene_basedOn_arrangement_server = rospy.Service(\n \"set_scene_based_on_arrangement\", SetSceneBasedOnArrangement,\n 
self.set_scene_basedOn_arrangement_callback)\n\n self.select_object_and_buffer_server = rospy.Service(\n \"select_object_and_buffer\", SelectObjectAndBuffer,\n self.select_object_and_buffer_callback)\n\n self.reset_planning_instance_server = rospy.Service(\n \"reset_planning_instance\", ResetPlanningInstance,\n self.reset_planning_instance_callback)\n\n self.clear_planning_instance_server = rospy.Service(\n \"clear_planning_instance\", ClearPlanningInstance,\n self.clear_planning_instance_callback)\n\n self.reset_robot_home_server = rospy.Service(\n \"reset_robot_home\", ResetRobotHome,\n self.reset_robot_home_callback)\n\n rospy.init_node(\"pybullet_plan_scene\", anonymous=True)\n\n\n def reproduce_instance_cylinder_callback(self, req):\n ### given the estimated cylinder objects\n rospy.logwarn(\"REPRODUCE REARRANGEMENT INSTANCE\")\n initial_arrangement, final_arrangement, success = \\\n self.workspace_p.reproduceInstance_cylinders(req.cylinder_objects)\n if success == True:\n print(\"successfully reproduce an instance\")\n else:\n print(\"fail to reproduce an instance\")\n return ReproduceInstanceCylinderResponse(initial_arrangement, final_arrangement, success)\n\n def generate_configs_for_start_positions_callback(self, req):\n rospy.logwarn(\"GENERATE CONFIGS FOR START POSITIONS OF ALL OBJECTS\")\n self.planner_p.generateAllConfigPoses_startPositions(self.robot_p, self.workspace_p, req.armType)\n return GenerateConfigsForStartPositionsResponse(True)\n\n def detect_invalid_arr_states_callback(self, req):\n rospy.logwarn(\"DETECT INVALID ARR STATES\")\n ### data initialization\n self.planner_p.invalid_arr_states_per_obj = OrderedDict()\n for obj_idx in range(len(req.start_arrangement)):\n self.planner_p.invalid_arr_states_per_obj[obj_idx] = []\n ### reason about each object to be manipulated\n all_objects = [i for i in range(len(req.start_arrangement)) \\\n if req.start_arrangement[i] != req.target_arrangement[i]]\n for obj_idx in all_objects:\n print(\"obj_idx: \" + str(obj_idx))\n #################################################################################\n ### get the object's all pre-picking + picking configPoses\n curr_object_configPoses = \\\n self.planner_p.obtainCurrObjectConfigPoses(self.workspace_p, obj_idx)\n ### get picking_configPoses_constraints (a list of list of objects) for this object\n picking_configPoses_constraints = self.planner_p.getConstraintsFromLabels(\n curr_object_configPoses, obj_idx, req.target_arrangement, \"picking\")\n # print(\"picking_configPoses_constraints: \")\n # print(picking_configPoses_constraints)\n self.planner_p.addInvalidArrStates(picking_configPoses_constraints, obj_idx)\n ##################################################################################\n ##################################################################################\n ### get the object's all placing configPoses\n target_object_configPoses = \\\n self.planner_p.position_candidates_configPoses[req.target_arrangement[obj_idx]]\n ### get placing_configPoses_constraints (a list of list of objects) for this object\n placing_configPoses_constraints = self.planner_p.getConstraintsFromLabels(\n target_object_configPoses, obj_idx, req.target_arrangement, \"placing\")\n # print(\"placing_configPoses_constraints: \")\n # print(placing_configPoses_constraints)\n self.planner_p.addInvalidArrStates(placing_configPoses_constraints, obj_idx)\n ##################################################################################\n # print(\"invalid_arr_states_per_obj: \")\n # 
print(self.planner_p.invalid_arr_states_per_obj)\n # input(\"Check invalid arr states per object, to get an overall idea why this instance is non-monotone...\")\n ### prepare the response\n detect_invalid_arr_states_response = DetectInvalidArrStatesResponse()\n for obj_idx, obj_arr_states in self.planner_p.invalid_arr_states_per_obj.items():\n obj_arr_states_msg = ObjArrStates()\n #################################################\n obj_arr_states_msg.obj_idx = obj_idx\n for arr_state in obj_arr_states:\n ### each arr_state is a dict, construct it as ArrState msg\n arr_state_msg = ArrState()\n for obj, isAtTarget in arr_state.items():\n arr_state_msg.obj_indices.append(obj)\n arr_state_msg.isAtTarget.append(isAtTarget)\n obj_arr_states_msg.invalid_arr_states.append(arr_state_msg)\n #################################################\n detect_invalid_arr_states_response.all_obj_invalid_arr_states.append(obj_arr_states_msg)\n # print(\"check ros messages\")\n # for obj_arr_states_msg in detect_invalid_arr_states_response.all_obj_invalid_arr_states:\n # print(\"obj_idx: \" + str(obj_arr_states_msg.obj_idx))\n # for arr_state_msg in obj_arr_states_msg.invalid_arr_states:\n # print(\"obj_indices: \")\n # print(arr_state_msg.obj_indices)\n # print(\"isAtTarget: \")\n # print(arr_state_msg.isAtTarget)\n # print(\"========================\")\n return detect_invalid_arr_states_response\n\n\n def get_curr_robot_config_callback(self, req):\n ### get the current robot config\n joint_state = JointState()\n joint_state.position = self.robot_p.getRobotCurrConfig()\n # print(\"successfully get the current robot configuration\")\n return GetCurrRobotConfigResponse(joint_state)\n\n def update_certain_object_pose_callback(self, req):\n ### update the geometry mesh of a certain object to the target pose\n self.workspace_p.updateObjectMesh(req.object_idx, req.object_position_idx)\n # print(\"successfully update certain object geometry to the target pose\" + str(position))\n return UpdateCertainObjectPoseResponse(True)\n\n def reset_robot_curr_config_callback(self, req):\n ### reset the robot to the specified configuration\n joint_positions = list(req.robot_config.position)\n self.robot_p.resetArmConfig_torso(joint_positions[1:15], joint_positions[0])\n ### update the hand as well (I doubt it is a redundant step at this point)\n self.robot_p.resetRightHandConfig(joint_positions[15:21])\n # print(\"successfully reset the robot current configuration\")\n return ResetRobotCurrConfigResponse(True)\n\n def update_manipulation_status_callback(self, req):\n ### disable any relationship between the robot and any of the object\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n # print(\"successfully update the manipulation status\")\n return UpdateManipulationStatusResponse(True)\n\n def set_scene_basedOn_arrangement_callback(self, req):\n ### (i) put all the objects to the position specified in the arrangement\n self.workspace_p.set_scene_for_objects(req.arrangement)\n ### (ii) reset the robot to the specified configuration\n joint_positions = list(req.robot_config.position)\n # self.robot_p.setSingleArmToConfig_torso(joint_positions[8:15], joint_positions[0], req.armType)\n self.robot_p.resetArmConfig_torso(joint_positions[1:15], joint_positions[0])\n ### update the hand as well (I doubt it is a redundant step at this point)\n self.robot_p.resetRightHandConfig(joint_positions[15:21])\n ### (iii) disable any relationship between the robot and any of the object\n 
self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n return SetSceneBasedOnArrangementResponse(True)\n\n def select_object_and_buffer_callback(self, req):\n ############################## first select an object ##############################\n object_path = ObjectRearrangePath()\n if req.heuristic_level == 0:\n object_idx = random.choice(list(req.objects_to_move))\n if req.heuristic_level == 1:\n object_ranking = self.workspace_p.getObjectConstraintRanking(req.objects_to_move, req.final_arrangement)\n object_idx = object_ranking[0]\n ### Once select the object, check if it can be reached based on current arrangement\n transit_success, transit_traj, pickingPose_neighbors_idx, pickingPose_neighbors_cost = \\\n self.transit_cylinder_object(object_idx, req)\n if transit_success == False:\n ### the selected object is not reachable\n print(\"The selected object is not reachable, let alone selecting buffer\")\n return SelectObjectAndBufferResponse(transit_success, -1, -1, object_path)\n ####################################################################################\n\n ### before move on to selecting and putting on a buffer, do some book keeping\n obj_position_idx_transit = self.workspace_p.object_geometries[object_idx].curr_position_idx\n robot_config_transit = self.robot_p.getRobotCurrConfig()\n\n ############################### then select a buffer ###############################\n ### 3 chances are given for selecting a buffer\n max_trials = 3\n current_trials = 1\n buffer_success = False\n while (current_trials < max_trials) and (buffer_success == False):\n ### select a buffer, which \n ### (1) is not the current/target position of the selected object\n ### (2) should not collide with any objects other than itself\n buffer_select_success, buffer_idx = self.workspace_p.selectNoCollisionBuffer(\n object_idx, req.final_arrangement[object_idx])\n if buffer_select_success:\n ### check the transfer path to the buffer location for that object\n transfer_success, transfer_traj, finish_traj = \\\n self.transfer_cylinder_object(\n object_idx, buffer_idx, pickingPose_neighbors_idx, pickingPose_neighbors_cost, req)\n if transfer_success == True:\n ### congrats! 
you successfully transfer the object to the buffer\n buffer_success = True\n break\n else:\n ### it does not work, put it back to the end of transit stage\n ### (i) put the object back to the end of transit\n self.workspace_p.updateObjectMesh(object_idx, obj_position_idx_transit)\n ### (ii) put the arm back to the end of transit\n self.robot_p.resetArmConfig_torso(robot_config_transit[1:15], robot_config_transit[0])\n ### update the hand as well (I doubt it is a redundant step at this point)\n self.robot_p.resetRightHandConfig(robot_config_transit[15:21])\n ### (iii) update manipulation status\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n current_trials += 1\n continue\n else:\n current_trials += 1\n continue\n ### reach here either success or not\n if buffer_success == True:\n object_path.transit_trajectory = self.generateArmTrajectory(\n transit_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.transfer_trajectory = self.generateArmTrajectory(\n transfer_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.finish_trajectory = self.generateArmTrajectory(\n finish_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.object_idx = object_idx\n return SelectObjectAndBufferResponse(buffer_success, object_idx, buffer_idx, object_path)\n else:\n return SelectObjectAndBufferResponse(buffer_success, -1, -1, object_path)\n ####################################################################################\n\n\n def reset_planning_instance_callback(self, req):\n ### reset the instance in the planning scene, which involves\n ### (i) reset all object meshes (current collision bodies) in the workspace\n self.workspace_p.reset_planning_instance()\n ### (ii) reset some planner parameters\n self.planner_p.resetPlannerParams()\n ### (iii) reset the robot back to the home configuration\n self.robot_p.resetRobotToHomeConfiguration()\n return ResetPlanningInstanceResponse(True)\n\n def clear_planning_instance_callback(self, req):\n ### clear the instance in the planning scene, which involves\n ### (i) delete all object meshes (current collision bodies/goal visualization) in the workspace\n self.workspace_p.clear_planning_instance()\n ### (ii) reset some planner parameters\n self.planner_p.resetPlannerParams()\n ### (iii) reset the robot back to the home configuration\n self.robot_p.resetRobotToHomeConfiguration()\n return ClearPlanningInstanceResponse(True)\n\n def reset_robot_home_callback(self, req):\n resetHome_trajectory = ArmTrajectory()\n ### reset the robot to home configuration\n ######################## check currConfig's neighboring connectivity ########################\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n connectSuccess, currConfig_neighbors_idx, currConfig_neighbors_cost = self.planner_p.connectToNeighbors(\n currConfig, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"There exists connection problem for current config. 
Not be able to reset the robot home\")\n return ResetRobotHomeResponse(False, resetHome_trajectory)\n ##############################################################################################\n\n ######################## check homeConfig's neighboring connectivity ########################\n if req.armType == \"Right_torso\":\n homeConfig = [self.robot_p.torsoHomeConfiguration] + self.robot_p.rightArmHomeConfiguration\n connectSuccess, homeConfig_neighbors_idx, homeConfig_neighbors_cost = self.planner_p.connectToNeighbors(\n homeConfig, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"home config has problem of connecting to neighbors, which should not happen. Not be able to reset the robot home\")\n return ResetRobotHomeResponse(False, resetHome_trajectory)\n ##############################################################################################\n\n ####################### motion planning from currConfig to homeConfig ########################\n resetHome_traj = self.planner_p.AstarPathFinding(currConfig, homeConfig,\n currConfig_neighbors_idx, currConfig_neighbors_cost, \n homeConfig_neighbors_idx, homeConfig_neighbors_cost,\n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n if resetHome_traj != []:\n print(\"The reset_home path for %s arm is successfully found\" % req.armType)\n ### generate ArmTrajectory from resetHome_traj\n resetHome_trajectory = self.generateArmTrajectory(resetHome_traj, req.armType, self.robot_p.motomanRJointNames)\n return ResetRobotHomeResponse(True, resetHome_trajectory)\n else:\n print(\"The reset_home path for %s arm is not successfully found\" % req.armType)\n print(\"Not be able to reset the robot home\")\n return ResetRobotHomeResponse(False, resetHome_trajectory)\n ##############################################################################################\n \n\n def rearrange_cylinder_object_callback(self, req):\n rearrange_success, object_manipulation_path = self.rearrange_cylinder_object(req)\n return RearrangeCylinderObjectResponse(rearrange_success, object_manipulation_path)\n\n def rearrange_cylinder_object(self, req):\n ### given the specified cylinder object and the armType\n rospy.logwarn(\"PLANNING TO REARRANGE THE OBJECT %s\", str(req.object_idx))\n object_path = ObjectRearrangePath()\n transit_traj = []\n transfer_traj = []\n finish_traj = []\n blockPrint()\n\n curr_object_configPoses = self.planner_p.obtainCurrObjectConfigPoses(self.workspace_p, req.object_idx)\n\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ############################# select the right picking pose until it works #############################\n transit_success = False\n for config_id in range(len(curr_object_configPoses.grasping_configs)):\n configToPickingPose = curr_object_configPoses.grasping_configs[config_id]\n ############## check the collision of the selected configToPickingPose ##############\n self.planner_p.setRobotToConfig(configToPickingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This picking pose is not even valid.\")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n 
print(\"The picking pose works. Check its neighboring connections.\")\n ### when to check the connection of the picking pose, you have to attach the object\n temp_object_curr_pos = self.workspace_p.object_geometries[req.object_idx].curr_pos\n self.planner_p.attachObject(req.object_idx, self.workspace_p, self.robot_p, req.armType)\n connectSuccess, pickingPose_neighbors_idx, pickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPickingPose, self.robot_p, self.workspace_p, req.armType)\n ############## after check, disattach the object and put the object back ##############\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n p.resetBasePositionAndOrientation(\n self.workspace_p.object_geometries[req.object_idx].geo, \n temp_object_curr_pos, [0, 0, 0, 1.0], physicsClientId=self.planningClientID)\n self.workspace_p.object_geometries[req.object_idx].curr_pos = temp_object_curr_pos\n #######################################################################################\n\n if not connectSuccess:\n print(\"This picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n else:\n print(\"The picking pose is valid, generate pre-picking\")\n configToPrePickingPose = curr_object_configPoses.approaching_configs[config_id]\n ############## check the collision of the selected configToPrePickingPose ##############\n self.planner_p.setRobotToConfig(configToPrePickingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This pre-picking pose is not even valid. \")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The pre-picking pose works. Check its neighboring connections.\")\n connectSuccess, prePickingPose_neighbors_idx, prePickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPrePickingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This pre-picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"Both picking pose and pre-picking pose are legitimate. 
Proceed to planning for pre-picking.\")\n ###########################################################################################\n\n ################### plan the path to pre-picking configuration ############################\n connectSuccess, currConfig_neighbors_idx, currConfig_neighbors_cost = self.planner_p.connectToNeighbors(\n currConfig, self.robot_p, self.workspace_p, req.armType)\n prePicking_traj = self.planner_p.AstarPathFinding(currConfig, configToPrePickingPose, \n currConfig_neighbors_idx, currConfig_neighbors_cost, \n prePickingPose_neighbors_idx, prePickingPose_neighbors_cost, \n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed) \n ### the planning has been finished, either success or failure\n if prePicking_traj != []:\n print(\"The transit (pre-picking) path for %s arm is successfully found\" % req.armType)\n transit_traj += prePicking_traj\n ################# cartesian path from pre-picking to picking configuration #####################\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ### you are reaching here since pre-picking has been reached, \n ### now get the path from pre-picking to picking\n prePickToPickTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPickingPose, self.robot_p, req.armType, self.workspace_p)\n transit_traj += prePickToPickTraj\n #################################################################################################\n transit_success = True\n break\n else:\n print(\"The transit (pre-picking) path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ###########################################################################################\n\n if not transit_success:\n print(\"No picking pose is qualified, either failed (1) picking pose (2) pre-picking pose (3) planning to pre-picking\")\n return False, object_path\n \n ### Otherwise, congrats! Transit is successful!\n ######################################### attach the object ###########################################\n ### Now we need to attach the object in hand before transferring the object\n self.planner_p.attachObject(req.object_idx, self.workspace_p, self.robot_p, req.armType)\n ####################################################################################################### \n\n target_object_configPoses = self.planner_p.position_candidates_configPoses[req.target_position_idx]\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ############################# select the right placing pose until it works #############################\n transfer_success = False\n for config_id in range(len(target_object_configPoses.grasping_configs)):\n configToPlacingPose = target_object_configPoses.grasping_configs[config_id]\n ############## check the collision of the selected configToPlacingPose ##############\n self.planner_p.setRobotToConfig(configToPlacingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This placing pose is not even valid.\")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The placing pose works. 
Check its neighboring connections.\")\n connectSuccess, placingPose_neighbors_idx, placingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPlacingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This placing pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"The placing pose is legitimate. Proceed to planning for placing.\")\n \n ################### plan the path to placing configuration ###################\n placing_traj = self.planner_p.AstarPathFinding(currConfig, configToPlacingPose, \n pickingPose_neighbors_idx, pickingPose_neighbors_cost, \n placingPose_neighbors_idx, placingPose_neighbors_cost,\n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n ### the planning has been finished, either success or failure\n if placing_traj != []:\n print(\"The transfer placing path for %s arm is successfully found\" % req.armType)\n transfer_traj += placing_traj\n transfer_success = True\n ### after transferring the object, \n ### update the object's current position_idx and collision_position_idx\n self.workspace_p.object_geometries[req.object_idx].setCurrPosition(\n req.target_position_idx, req.target_position_idx)\n break\n else:\n print(\"The transfer placing path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ############################################################################################\n \n if not transfer_success:\n print(\"No placing pose is qualified, either failed (1) placing pose (2) planning to placing\")\n return False, object_path\n \n ### Otherwise, congrats! Transfer is successful!\n ######################################### detach the object ###########################################\n ### Now we need to detach the object in hand before retracting the object (post-placing)\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n #######################################################################################################\n ############# generate post-placing pose + cartesian move from placing to post-placing ################\n ### The arm leaves the object from ABOVE\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n placingPose = self.robot_p.getCurrentEEPose(req.armType)\n postPlacingPose = copy.deepcopy(placingPose)\n postPlacingPose[0][2] += 0.05\n isPoseValid, FLAG, configToPostPlacingPose = self.planner_p.generateConfigBasedOnPose(\n postPlacingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n placeToPostPlaceTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPostPlacingPose, self.robot_p, req.armType, self.workspace_p)\n finish_traj += placeToPostPlaceTraj\n ########################################################################################################\n\n ################################# prepare the path for the object ######################################\n ### get the current state\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ### congrat! 
No problem of rearranging the current object\n ### prepare the object path\n object_path.transit_trajectory = self.generateArmTrajectory(\n transit_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.transfer_trajectory = self.generateArmTrajectory(\n transfer_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.finish_trajectory = self.generateArmTrajectory(\n finish_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.object_idx = req.object_idx\n enablePrint()\n return True, object_path\n ########################################################################################################\n\n def transit_cylinder_object(self, object_idx, req):\n '''This function plans a transit to a specified object given the current arrangement'''\n rospy.logwarn(\"PLANNING TO TRANSIT TO THE OBJECT %s\", str(object_idx))\n transit_traj = []\n blockPrint()\n\n curr_object_configPoses = self.planner_p.obtainCurrObjectConfigPoses(self.workspace_p, object_idx)\n\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ############################# select the right picking pose until it works #############################\n transit_success = False\n for config_id in range(len(curr_object_configPoses.grasping_configs)):\n configToPickingPose = curr_object_configPoses.grasping_configs[config_id]\n ############## check the collision of the selected configToPickingPose ##############\n self.planner_p.setRobotToConfig(configToPickingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This picking pose is not even valid.\")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The picking pose works. 
Check its neighboring connections.\")\n ### when to check the connection of the picking pose, you have to attach the object\n temp_object_curr_pos = self.workspace_p.object_geometries[object_idx].curr_pos\n self.planner_p.attachObject(object_idx, self.workspace_p, self.robot_p, req.armType)\n connectSuccess, pickingPose_neighbors_idx, pickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPickingPose, self.robot_p, self.workspace_p, req.armType)\n ############## after check, disattach the object and put the object back ##############\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n p.resetBasePositionAndOrientation(\n self.workspace_p.object_geometries[object_idx].geo, \n temp_object_curr_pos, [0, 0, 0, 1.0], physicsClientId=self.planningClientID)\n self.workspace_p.object_geometries[object_idx].curr_pos = temp_object_curr_pos\n #######################################################################################\n\n if not connectSuccess:\n print(\"This picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n else:\n print(\"The picking pose is valid, generate pre-picking\")\n configToPrePickingPose = curr_object_configPoses.approaching_configs[config_id]\n ############## check the collision of the selected configToPrePickingPose ##############\n self.planner_p.setRobotToConfig(configToPrePickingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This pre-picking pose is not even valid. \")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The pre-picking pose works. Check its neighboring connections.\")\n connectSuccess, prePickingPose_neighbors_idx, prePickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPrePickingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This pre-picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"Both picking pose and pre-picking pose are legitimate. 
Proceed to planning for pre-picking.\")\n ###########################################################################################\n \n ################### plan the path to pre-picking configuration ############################\n connectSuccess, currConfig_neighbors_idx, currConfig_neighbors_cost = self.planner_p.connectToNeighbors(\n currConfig, self.robot_p, self.workspace_p, req.armType)\n prePicking_traj = self.planner_p.AstarPathFinding(currConfig, configToPrePickingPose, \n currConfig_neighbors_idx, currConfig_neighbors_cost, \n prePickingPose_neighbors_idx, prePickingPose_neighbors_cost, \n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n ### the planning has been finished, either success or failure\n if prePicking_traj != []:\n print(\"The transit (pre-picking) path for %s arm is successfully found\" % req.armType)\n transit_traj += prePicking_traj\n ################# cartesian path from pre-picking to picking configuration #####################\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ### you are reaching here since pre-picking has been reached, \n ### now get the path from pre-picking to picking\n prePickToPickTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPickingPose, self.robot_p, req.armType, self.workspace_p)\n transit_traj += prePickToPickTraj\n #################################################################################################\n transit_success = True\n break\n else:\n print(\"The transit (pre-picking) path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ###########################################################################################\n \n ### reach here either transit_success == True or None of the option works\n if not transit_success:\n print(\"No picking pose is qualified, either failed (1) picking pose (2) pre-picking pose (3) planning to pre-picking\")\n return transit_success, transit_traj, [], []\n else:\n return transit_success, transit_traj, pickingPose_neighbors_idx, pickingPose_neighbors_cost\n\n \n def transfer_cylinder_object(self, object_idx, buffer_idx, pickingPose_neighbors_idx, pickingPose_neighbors_cost, req):\n '''This function plans an object transfer to a specified buffer'''\n rospy.logwarn(\"PLANNING TO TRANSFER OBJECT %s TO A BUFFER\", str(object_idx))\n rospy.logwarn(\"BUFFER: %s\", str(buffer_idx))\n transfer_traj = []\n finish_traj = []\n blockPrint()\n\n ######################################### attach the object ###########################################\n ### Now we need to attach the object in hand before transferring the object\n self.planner_p.attachObject(object_idx, self.workspace_p, self.robot_p, req.armType)\n #######################################################################################################\n\n target_object_configPoses = self.planner_p.position_candidates_configPoses[buffer_idx]\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ############################# select the right placing pose until it works #############################\n transfer_success = False\n for config_id in range(len(target_object_configPoses.grasping_configs)):\n configToPlacingPose = target_object_configPoses.grasping_configs[config_id]\n ############## check the collision of the selected configToPlacingPose ##############\n self.planner_p.setRobotToConfig(configToPlacingPose, self.robot_p, req.armType)\n # isConfigValid, FLAG = 
self.planner_p.checkConfig_AllCollisions(self.robot_p, self.workspace_p, req.armType)\n isConfigValid, FLAG, objectCollided = self.planner_p.checkConfig_labelCollisions(self.robot_p, self.workspace_p, req.armType)\n if not isConfigValid:\n print(\"This placing pose is not even valid.\")\n print(\"FLAG: {}, objectCollided: {}\".format(FLAG, objectCollided))\n print(\"Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The placing pose works. Check its neighboring connections.\")\n connectSuccess, placingPose_neighbors_idx, placingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPlacingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This placing pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"The placing pose is legitimate. Proceed to planning for placing.\")\n\n ################### plan the path to placing configuration ###################\n placing_traj = self.planner_p.AstarPathFinding(currConfig, configToPlacingPose, \n pickingPose_neighbors_idx, pickingPose_neighbors_cost, \n placingPose_neighbors_idx, placingPose_neighbors_cost,\n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n ### the planning has been finished, either success or failure\n if placing_traj != []:\n print(\"The transfer placing path for %s arm is successfully found\" % req.armType)\n transfer_traj += placing_traj\n transfer_success = True\n ### after transferring the object, \n ### update the object's current position_idx and collision_position_idx\n self.workspace_p.object_geometries[object_idx].setCurrPosition(buffer_idx, buffer_idx)\n break\n else:\n print(\"The transfer placing path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ############################################################################################\n \n if not transfer_success:\n print(\"No placing pose is qualified, either failed (1) placing pose (2) planning to placing\")\n return transfer_success, transfer_traj, finish_traj\n \n ### Otherwise, congrats! 
Transfer is successful!\n ######################################### detach the object ###########################################\n ### Now we need to detach the object in hand before retracting the object (post-placing)\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n #######################################################################################################\n ############# generate post-placing pose + cartesian move from placing to post-placing ################\n ### The arm leaves the object from ABOVE\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n placingPose = self.robot_p.getCurrentEEPose(req.armType)\n postPlacingPose = copy.deepcopy(placingPose)\n postPlacingPose[0][2] += 0.05\n isPoseValid, FLAG, configToPostPlacingPose = self.planner_p.generateConfigBasedOnPose(\n postPlacingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n placeToPostPlaceTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPostPlacingPose, self.robot_p, req.armType, self.workspace_p)\n finish_traj += placeToPostPlaceTraj\n ########################################################################################################\n return transfer_success, transfer_traj, finish_traj\n\n\n def generateArmTrajectory(self, traj, armType, motomanRJointNames):\n '''generate arm trajectory (a list of JointState)\n inputs\n ======\n traj (a list of list): a list of joint states [q1, q2, ..., qn]\n armType (string): the arm type (e.g., \"Left\", \"Right_torso)\n motomanRJointNames (a list of strings): the names for controllable joints\n outputs\n =======\n result_traj (ArmTrajectory()): the resulting trajectory (ArmTrajectory object)\n '''\n result_traj = ArmTrajectory()\n result_traj.armType = armType\n if armType == \"Left\" or armType == \"Left_torso\":\n first_joint_index = 1\n if armType == \"Right\" or armType == \"Right_torso\":\n first_joint_index = 8\n if armType == \"Left_torso\" or armType == \"Right_torso\":\n jointNames = [motomanRJointNames[0]] + motomanRJointNames[first_joint_index:first_joint_index+7]\n if armType == \"Left\" or armType == \"Right\":\n jointNames = motomanRJointNames[first_joint_index:first_joint_index+7]\n\n for config in traj:\n joint_state = JointState()\n joint_state.name = jointNames\n joint_state.position = config\n result_traj.trajectory.append(joint_state)\n \n return result_traj\n\n #########################################################################################\n #########################################################################################\n\n def rearrange_cylinder_object_legend(self, req):\n ### given the specified cylinder object and the armType\n rospy.logwarn(\"PLANNING TO REARRANGE THE OBJECT %s\", str(req.object_idx))\n object_path = ObjectRearrangePath()\n transit_traj = []\n transfer_traj = []\n finish_traj = []\n blockPrint()\n\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ########################## generate picking pose candidates ###################################\n pickingPose_candidates = self.planner_p.generate_pose_candidates(\n self.workspace_p.object_geometries[req.object_idx].curr_pos, self.workspace_p.cylinder_height)\n ###############################################################################################\n\n #################### select the right picking pose until it works #############################\n transit_success = False\n for pose_id, pickingPose in enumerate(pickingPose_candidates):\n 
######################## check both picking and pre-picking pose ##########################\n isPoseValid, FLAG, configToPickingPose = self.planner_p.generateConfigBasedOnPose(\n pickingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n if not isPoseValid:\n print(\"This picking pose is not even valid. Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The picking pose works. Check its neighboring connections.\")\n ### when to check the connection of the picking pose, you have to attach the object\n temp_object_curr_pos = self.workspace_p.object_geometries[req.object_idx].curr_pos\n self.planner_p.attachObject(req.object_idx, self.workspace_p, self.robot_p, req.armType)\n connectSuccess, pickingPose_neighbors_idx, pickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPickingPose, self.robot_p, self.workspace_p, req.armType)\n ############## after check, disattach the object and put the object back ##############\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n p.resetBasePositionAndOrientation(\n self.workspace_p.object_geometries[req.object_idx].geo, \n temp_object_curr_pos, [0, 0, 0, 1.0], physicsClientId=self.planningClientID)\n self.workspace_p.object_geometries[req.object_idx].curr_pos = temp_object_curr_pos\n #######################################################################################\n\n if not connectSuccess:\n print(\"This picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n else: \n print(\"The picking pose is valid, generate pre-picking\")\n isPoseValid, FLAG, prePickingPose, configToPrePickingPose = \\\n self.planner_p.generatePrePickingPose(\n pickingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n if not isPoseValid:\n print(\"The pre-picking pose is not valid. Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The pre-picking pose works. Check its neighboring connections.\")\n connectSuccess, prePickingPose_neighbors_idx, prePickingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPrePickingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This pre-picking pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"Both picking pose and pre-picking pose are legitimate. 
Proceed to planning for pre-picking.\")\n ###########################################################################################\n\n ################### plan the path to pre-picking configuration ############################\n connectSuccess, currConfig_neighbors_idx, currConfig_neighbors_cost = self.planner_p.connectToNeighbors(\n currConfig, self.robot_p, self.workspace_p, req.armType)\n prePicking_traj = self.planner_p.AstarPathFinding(currConfig, configToPrePickingPose, \n currConfig_neighbors_idx, currConfig_neighbors_cost, \n prePickingPose_neighbors_idx, prePickingPose_neighbors_cost, \n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n ### the planning has been finished, either success or failure\n if prePicking_traj != []:\n print(\"The transit (pre-picking) path for %s arm is successfully found\" % req.armType)\n transit_traj += prePicking_traj\n ################# cartesian path from pre-picking to picking configuration #####################\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ### you are reaching here since pre-picking has been reached, \n ### now get the path from pre-picking to picking\n prePickToPickTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPickingPose, self.robot_p, req.armType, self.workspace_p)\n transit_traj += prePickToPickTraj\n #################################################################################################\n transit_success = True\n break\n else:\n print(\"The transit (pre-picking) path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ###########################################################################################\n \n if not transit_success:\n print(\"No picking pose is qualified, either failed (1) picking pose (2) pre-picking pose (3) planning to pre-picking\")\n return False, object_path\n \n ### Otherwise, congrats! Transit is successful!\n ######################################### attach the object ###########################################\n ### Now we need to attach the object in hand before transferring the object\n self.planner_p.attachObject(req.object_idx, self.workspace_p, self.robot_p, req.armType)\n #######################################################################################################\n\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ########################## generate placing pose candidates ###################################\n placingPose_candidates = self.planner_p.generate_pose_candidates(\n self.workspace_p.candidate_geometries[req.target_position_idx].pos, self.workspace_p.cylinder_height)\n ###############################################################################################\n\n #################### select the right placing pose until it works #############################\n transfer_success = False\n for pose_id, placingPose in enumerate(placingPose_candidates):\n ####################### check the placing pose ###########################\n isPoseValid, FLAG, configToPlacingPose = self.planner_p.generateConfigBasedOnPose(\n placingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n if not isPoseValid:\n print(\"This placing pose is not valid. Move on to next candidate.\")\n continue\n else:\n ### check the connection with neighbors in the roadmap\n print(\"The placing pose works. 
Check its neighboring connections.\")\n connectSuccess, placingPose_neighbors_idx, placingPose_neighbors_cost = self.planner_p.connectToNeighbors(\n configToPlacingPose, self.robot_p, self.workspace_p, req.armType)\n if not connectSuccess:\n print(\"This placing pose is not valid, due to no neighboring connections.\")\n print(\"Move on to next candidate.\")\n continue\n print(\"The placing pose is legitimate. Proceed to planning for placing.\")\n \n ################### plan the path to placing configuration ###################\n placing_traj = self.planner_p.AstarPathFinding(currConfig, configToPlacingPose, \n pickingPose_neighbors_idx, pickingPose_neighbors_cost, \n placingPose_neighbors_idx, placingPose_neighbors_cost,\n self.robot_p, self.workspace_p, req.armType, req.isLabeledRoadmapUsed)\n ### the planning has been finished, either success or failure\n if placing_traj != []:\n print(\"The transfer placing path for %s arm is successfully found\" % req.armType)\n transfer_traj += placing_traj\n transfer_success = True\n ### after transferring the object, \n ### update the object's current position_idx and collision_position_idx\n self.workspace_p.object_geometries[req.object_idx].setCurrPosition(\n req.target_position_idx, req.target_position_idx)\n break\n else:\n print(\"The transfer placing path for %s arm is not successfully found\" % req.armType)\n print(\"Move on to next candidate\")\n continue\n ############################################################################################\n \n if not transfer_success:\n print(\"No placing pose is qualified, either failed (1) placing pose (2) planning to placing\")\n return False, object_path\n\n ### Otherwise, congrats! Transfer is successful!\n ######################################### detach the object ###########################################\n ### Now we need to detach the object in hand before retracting the object (post-placing)\n self.planner_p.detachObject(self.workspace_p, self.robot_p, req.armType)\n #######################################################################################################\n ############# generate post-placing pose + cartesian move from placing to post-placing ################\n ### The arm leaves the object from ABOVE\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n postPlacingPose = copy.deepcopy(placingPose)\n postPlacingPose[0][2] += 0.05\n isPoseValid, FLAG, configToPostPlacingPose = self.planner_p.generateConfigBasedOnPose(\n postPlacingPose, currConfig, self.robot_p, self.workspace_p, req.armType)\n placeToPostPlaceTraj = self.planner_p.generateTrajectory_DirectConfigPath(\n currConfig, configToPostPlacingPose, self.robot_p, req.armType, self.workspace_p)\n finish_traj += placeToPostPlaceTraj\n ########################################################################################################\n \n ################################# prepare the path for the object ######################################\n ### get the current state\n currConfig = self.robot_p.getRobotCurrSingleArmConfig(req.armType)\n ### congrat! 
No problems rearranging the current object\n ### prepare the object path\n object_path.transit_trajectory = self.generateArmTrajectory(\n transit_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.transfer_trajectory = self.generateArmTrajectory(\n transfer_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.finish_trajectory = self.generateArmTrajectory(\n finish_traj, req.armType, self.robot_p.motomanRJointNames)\n object_path.object_idx = req.object_idx\n enablePrint()\n return True, object_path\n ########################################################################################################\n\n def readROSParam(self):\n ### This function reads in the needed ROS parameters\n while not rospy.has_param('/motoman_robot/basePosition'):\n rospy.sleep(0.2)\n basePosition = rospy.get_param('/motoman_robot/basePosition')\n\n while not rospy.has_param('/motoman_robot/baseOrientation'):\n rospy.sleep(0.2)\n baseOrientation = rospy.get_param('/motoman_robot/baseOrientation')\n\n while not rospy.has_param('/motoman_robot/urdfFile'):\n rospy.sleep(0.2)\n urdfFile = rospy.get_param('/motoman_robot/urdfFile')\n\n while not rospy.has_param('/motoman_robot/leftArmHomeConfiguration'):\n rospy.sleep(0.2)\n leftArmHomeConfiguration = rospy.get_param('/motoman_robot/leftArmHomeConfiguration')\n\n while not rospy.has_param('/motoman_robot/rightArmHomeConfiguration'):\n rospy.sleep(0.2)\n rightArmHomeConfiguration = rospy.get_param('/motoman_robot/rightArmHomeConfiguration')\n\n while not rospy.has_param('/motoman_robot/torsoHomeConfiguration'):\n rospy.sleep(0.2)\n torsoHomeConfiguration = rospy.get_param('/motoman_robot/torsoHomeConfiguration')\n\n while not rospy.has_param('/workspace_table/standingBase_dim'):\n rospy.sleep(0.2)\n standingBase_dim = rospy.get_param('/workspace_table/standingBase_dim')\n\n while not rospy.has_param('/workspace_table/table_dim'):\n rospy.sleep(0.2)\n table_dim = rospy.get_param('/workspace_table/table_dim')\n\n while not rospy.has_param('/workspace_table/table_offset_x'):\n rospy.sleep(0.2)\n table_offset_x = rospy.get_param('/workspace_table/table_offset_x')\n\n while not rospy.has_param('/uniform_cylinder_object/radius'):\n rospy.sleep(0.2)\n cylinder_radius = rospy.get_param('/uniform_cylinder_object/radius')\n\n while not rospy.has_param('/uniform_cylinder_object/height'):\n rospy.sleep(0.2)\n cylinder_height = rospy.get_param('/uniform_cylinder_object/height')\n\n while not rospy.has_param('/object_goal_deployment/discretization_x'):\n rospy.sleep(0.2)\n discretization_x = rospy.get_param('/object_goal_deployment/discretization_x')\n\n while not rospy.has_param('/object_goal_deployment/discretization_y'):\n rospy.sleep(0.2)\n discretization_y = rospy.get_param('/object_goal_deployment/discretization_y')\n\n while not rospy.has_param('/object_goal_deployment/object_interval_x'):\n rospy.sleep(0.2)\n object_interval_x = rospy.get_param('/object_goal_deployment/object_interval_x')\n\n while not rospy.has_param('/object_goal_deployment/object_interval_y'):\n rospy.sleep(0.2)\n object_interval_y = rospy.get_param('/object_goal_deployment/object_interval_y')\n\n while not rospy.has_param('/object_goal_deployment/side_clearance_x'):\n rospy.sleep(0.2)\n side_clearance_x = rospy.get_param('/object_goal_deployment/side_clearance_x')\n\n while not rospy.has_param('/object_goal_deployment/side_clearance_y'):\n rospy.sleep(0.2)\n side_clearance_y = rospy.get_param('/object_goal_deployment/side_clearance_y')\n\n while not 
rospy.has_param('/constrained_area/ceiling_height'):\n rospy.sleep(0.2)\n ceiling_height = rospy.get_param('/constrained_area/ceiling_height')\n\n while not rospy.has_param('/constrained_area/thickness_flank'):\n rospy.sleep(0.2)\n thickness_flank = rospy.get_param('/constrained_area/thickness_flank')\n\n while not rospy.has_param('/object_mesh_to_drop_in_real_scene/object_mesh_path'):\n rospy.sleep(0.2)\n object_mesh_path = rospy.get_param('/object_mesh_to_drop_in_real_scene/object_mesh_path')\n\n return basePosition, baseOrientation, urdfFile, \\\n leftArmHomeConfiguration, rightArmHomeConfiguration, torsoHomeConfiguration, \\\n standingBase_dim, table_dim, table_offset_x, \\\n cylinder_radius, cylinder_height, \\\n discretization_x, discretization_y, \\\n object_interval_x, object_interval_y, \\\n side_clearance_x, side_clearance_y, \\\n ceiling_height, thickness_flank, \\\n object_mesh_path\n\n\ndef main(args):\n pybullet_plan_scene = PybulletPlanScene(args)\n pybullet_plan_scene.planner_p.loadSamples()\n\n pybullet_plan_scene.rosInit()\n rate = rospy.Rate(10) ### 10hz\n\n rospy.spin()\n\nif __name__ == '__main__':\n main(sys.argv)","repo_name":"Rui1223/uniform_object_rearrangement","sub_path":"script/PybulletPlanScene.py","file_name":"PybulletPlanScene.py","file_ext":"py","file_size_in_byte":69788,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"55"}
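The readROSParam() method in the record above repeats the same wait-then-read pattern for every parameter. A minimal consolidation sketch, using only rospy calls already present in the source (wait_for_param is a hypothetical helper name, not part of the original file):

def wait_for_param(name, poll_interval=0.2):
    # block until the ROS parameter server has the key, then return its value
    while not rospy.has_param(name):
        rospy.sleep(poll_interval)
    return rospy.get_param(name)

# usage, e.g.: basePosition = wait_for_param('/motoman_robot/basePosition')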
+{"seq_id":"26697683752","text":"import cv2\nimport numpy as np\n\ndef split_channels(img):\n (B,G,R) = cv2.split(img)\n return B,G,R\n\ndef convert_format(img , t_format):\n if t_format == 'Gray':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if t_format == 'HSV':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n if t_format == 'HSV':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n else: \n print(\"Error\")\n return img\n\ndef show_img(img,time):\n cv2.namedWindow('test',cv2.WINDOW_FREERATIO)\n cv2.imshow('test',img)\n cv2.waitKey(time)\n\n\ndef filter(src):\n imgG = cv2.GaussianBlur(src, (5, 5),0)\n imgM = cv2.medianBlur(src, 7)\n imgB = cv2.bilateralFilter(src, 51, 10, 10)\n return imgG,imgM,imgB\n\ndef do_canny(img):\n canny = cv2.Canny(img,30,200)\n return canny\n\n\nimg = cv2.imread('/home/zack/IDPoolBall/poolballs2.jpeg', cv2.IMREAD_COLOR)\nprint('Original Dimensions : ',img.shape)\nscale_percent = 400 # percent of original size\nwidth = int(img.shape[1] * scale_percent / 100)\nheight = int(img.shape[0] * scale_percent / 100)\ndim = (width, height)\nresized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n\n# cimg = convert_format(resized, 'Gray')\n# show_img(cimg,3000)\n# imgG, imgM,imgB = fliter(cimg)\n# show_img(imgM,3000)\n# cannyimg =do_canny(imgM)\n# show_img(cannyimg,3000)\n# kernal = np.ones((31,31),np.uint8)\n# dilateimg = cv2.dilate(cannyimg,kernal,1)\n# show_img(dilateimg,3000)\n\n#Convert to HSV\nimgG, imgM,imgB = filter(img)\norg_cannyed = do_canny(img)\n# cv2.imshow(\"aaa\", org_cannyed)\n# cv2.waitKey(5000)\n# cv2.imshow(\"aaa\",imgG)\n# cv2.waitKey(2000)\n# cv2.imshow(\"aaa\",imgM)\n# cv2.waitKey(2000)\n# cv2.imshow(\"aaa\",imgB)\n# cv2.waitKey(2000)\nHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n# cv2.imshow(\"aaa\",Hsv)\n# cv2.imwrite('/home/zack/IDPoolBall/Hsv.jpeg',Hsv)\ncv2.waitKey(10000)\nH, S, V = cv2.split(Hsv)\n# cv2.imshow(\"aaa\",H)\n# cv2.imwrite('/home/zack/IDPoolBall/Hsv_H.jpeg',H)\n# cv2.waitKey(2000)\n# cv2.imshow(\"aaa\",S)\n# cv2.imwrite('/home/zack/IDPoolBall/Hsv_S.jpeg',S)\n# cv2.waitKey(2000)\n# cv2.imshow(\"aaa\",V)\n# cv2.imwrite('/home/zack/IDPoolBall/Hsv_V.jpeg',V)\n# cv2.waitKey(2000)\nA, B, C = filter(img)\ncv2.imshow(\"aaa\",B)\ncv2.waitKey(2000)\ncv2.imwrite('/home/zack/IDPoolBall/medianBlur.jpeg',B)\ncannyed = do_canny(img)\ncv2.imshow(\"aaa\",cannyed)\ncv2.waitKey(10000)\ncv2.imwrite('/home/zack/IDPoolBall/org_cannyed.jpeg',cannyed)\n# cv2.waitKey(2000)\n\n\n\n# #Read image\n# img = cv2.imread('/home/zack/IDPoolBall/poolballs1.jpeg',cv2.IMREAD_COLOR)\n\n# #Convert to grayscale\n# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# cv2.imshow(\"aaa\",gray)\n# cv2.imwrite('/home/zack/IDPoolBall/gray.jpeg',gray)\n# cv2.waitKey(2000)\n\n# #Blur using 3*3 kernal\n# gray_blurred = cv2.blur(gray,(3,3))\n# cv2.imshow(\"aaa\",gray_blurred)\n# cv2.imwrite('/home/zack/IDPoolBall/gray_blurred.jpeg',gray_blurred)\n# cv2.waitKey(2000)\n\n# # Apply Hough transform on the blurred image.\n# detected_circles = cv2.HoughCircles(gray_blurred, \n# cv2.HOUGH_GRADIENT, 1, 20, param1 = 50,\n# param2 = 30, minRadius = 1, maxRadius = 40)\n\n# cv2.imshow(\"aaa\",detected_circles)\n# cv2.imwrite('/home/zack/IDPoolBall/detected_circles.jpeg',detected_circles)\n# cv2.waitKey(2000)\n\n#Draw circles that are detected\n# if detected_circles is not None:\n\n# #Convert the circle parameters a, b and r to intergers\n# detected_circles = np.uint16(np.around(detected_circles))\n\n# for pt in detected_circles[0, :]:\n# a, b, r = pt[0], pt[1], pt[2]\n\n# #Draw the 
circumference of the circles\n# cv2.circle(img, (a,b), r, (0, 255, 0), 2)\n\n# #Draw a small circle (of radius 1) t show the center\n# cv2.circle(img, (a, b), 1, (0, 0, 255), 3)\n# cv2.imshow(\"Detected Circle\", img)\n# cv2.waitKey(0)","repo_name":"zhekai-w/pooltable","sub_path":"IDPoolBall.py","file_name":"IDPoolBall.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
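The commented-out fragments in IDPoolBall.py above outline a complete ball-detection pipeline. A sketch assembling them end to end (parameter values are copied from the source rather than tuned, and the image path is the author's):

import cv2
import numpy as np

img = cv2.imread('/home/zack/IDPoolBall/poolballs1.jpeg', cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_blurred = cv2.blur(gray, (3, 3))  # light blur to suppress noise before Hough
detected_circles = cv2.HoughCircles(gray_blurred, cv2.HOUGH_GRADIENT, 1, 20,
                                    param1=50, param2=30, minRadius=1, maxRadius=40)
if detected_circles is not None:
    for a, b, r in np.uint16(np.around(detected_circles))[0, :]:
        cv2.circle(img, (a, b), r, (0, 255, 0), 2)  # circle outline
        cv2.circle(img, (a, b), 1, (0, 0, 255), 3)  # center marker
cv2.imshow('Detected Circle', img)
cv2.waitKey(0)

Note that cv2.HoughCircles returns None when nothing is found, which is why the None check must precede the drawing loop.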
+{"seq_id":"19055256683","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport mimetypes\nimport os\nimport sys\nimport tempfile\nimport traceback\n\nfrom lxml import etree as ElementTree\n\nfrom core.pipeline import Pipeline\nfrom core.utils.metadata import Metadata\nfrom core.utils.daisy_pipeline import DaisyPipelineJob\nfrom core.utils.filesystem import Filesystem\n\nif sys.version_info[0] != 3 or sys.version_info[1] < 5:\n print(\"# This script requires Python version 3.5+\")\n sys.exit(1)\n\n\ndef transfer_metadata_from_html_to_pef(html_file, pef_file, additional_metadata):\n html_xml = ElementTree.parse(html_file).getroot()\n pef_xml_document = ElementTree.parse(pef_file)\n pef_xml = pef_xml_document.getroot()\n html_meta_elements = html_xml.xpath(\"/*/*[local-name()='head']/*\")\n pef_meta = pef_xml.xpath(\"/*/*[local-name()='head']/*[local-name()='meta']\")[0]\n pef_meta_elements = pef_xml.xpath(\"/*/*[local-name()='head']/*[local-name()='meta']/*\")\n\n dc = \"{http://purl.org/dc/elements/1.1/}\"\n tail = None\n lasttail = None\n for meta in pef_meta_elements:\n if tail is None:\n tail = meta.tail\n lasttail = meta.tail\n if meta.tag in [f\"{dc}format\", f\"{dc}date\"] or meta.tag.endswith(\"sheet-count\"):\n meta.tail = tail # keep, and make sure the trailing whitespace is the same for all elements\n\n for meta in html_meta_elements:\n tag = None\n text = None\n name = meta.tag.split(\"}\")[-1]\n namespace = None\n namespace = meta.tag.split(\"}\")[0].split(\"{\")[-1]\n\n # title has its own element in HTML, so we need to handle it explicitly here\n if name == \"title\":\n namespace = \"http://purl.org/dc/elements/1.1/\"\n tag = \"{\" + namespace + \"}title\"\n text = meta.text\n\n # meta charset, link, script etc.\n elif name != \"meta\" or \"name\" not in meta.attrib or \"content\" not in meta.attrib:\n continue # not relevant\n\n # we use the dc:format from the preexisting PEF metadata, and ignore some other metadata as well\n elif meta.attrib[\"name\"] in [\"dc:format\", \"dcterms:modified\", \"viewport\"]:\n continue # ignore these\n\n # description doesn't use a prefix in HTML, so we need to handle it explicitly here\n elif meta.attrib[\"name\"] == \"description\":\n namespace = \"http://purl.org/dc/elements/1.1/\"\n tag = \"{\" + namespace + \"}description.abstract\"\n text = meta.attrib[\"content\"]\n\n # all other meta elements\n else:\n prefix = meta.attrib[\"name\"].split(\":\")[0] if \":\" in meta.attrib[\"name\"] else None\n namespace = None\n if prefix is not None:\n namespace = meta.nsmap.get(prefix)\n if namespace is None:\n continue # namespace not found - only metadata with a namespace will (can) be included\n\n tag = \"{\" + namespace + \"}\" + meta.attrib[\"name\"].split(\":\")[1]\n text = meta.attrib[\"content\"]\n\n element = ElementTree.Element(tag, nsmap={prefix: meta.nsmap[prefix] for prefix in meta.nsmap if meta.nsmap[prefix] == namespace})\n element.text = text\n if namespace == \"http://purl.org/dc/elements/1.1/\":\n element = ElementTree.Comment(\" \" + ElementTree.tounicode(element) + \" \")\n element.tail = tail\n pef_meta.append(element)\n\n for (tagname, prefix, namespace, attribname, value) in additional_metadata:\n element = ElementTree.Element(\"{\" + namespace + \"}\" + tagname, nsmap={prefix: namespace})\n if attribname is not None:\n element.attrib[\"name\"] = attribname\n element.text = value\n if namespace == \"http://purl.org/dc/elements/1.1/\":\n element = ElementTree.Comment(\" \" + ElementTree.tounicode(element) + \" \")\n 
element.tail = tail\n pef_meta.append(element)\n\n # set correct whitespace trailing the last meta element\n pef_meta.xpath(\"*\")[-1].tail = lasttail\n\n pef_xml_document.write(pef_file, method='XML', xml_declaration=True, encoding='UTF-8', pretty_print=False)\n\n\nclass NlbpubToPef(Pipeline):\n uid = \"nlbpub-to-pef\"\n title = \"NLBPUB til PEF\"\n labels = [\"Punktskrift\", \"Statped\"]\n publication_format = \"Braille\"\n expected_processing_time = 880\n\n def on_book_deleted(self):\n self.utils.report.info(\"Slettet bok i mappa: \" + self.book['name'])\n self.utils.report.title = self.title + \" HTML-kilde slettet: \" + self.book['name']\n return True\n\n def on_book_modified(self):\n self.utils.report.info(\"Endret bok i mappa: \" + self.book['name'])\n return self.on_book()\n\n def on_book_created(self):\n self.utils.report.info(\"Ny bok i mappa: \" + self.book['name'])\n return self.on_book()\n\n def on_book(self):\n self.utils.report.attachment(None, self.book[\"source\"], \"DEBUG\")\n\n self.utils.report.info(\"Lager en kopi av filsettet\")\n temp_htmldir_obj = tempfile.TemporaryDirectory()\n temp_htmldir = temp_htmldir_obj.name\n Filesystem.copy(self.utils.report, self.book[\"source\"], temp_htmldir)\n\n self.utils.report.info(\"Finner HTML-fila\")\n html_file = None\n for root, dirs, files in os.walk(temp_htmldir):\n for f in files:\n if f.endswith(\"html\"):\n html_file = os.path.join(root, f)\n if not html_file or not os.path.isfile(html_file):\n self.utils.report.error(self.book[\"name\"] + \": Klarte ikke å finne en HTML-fil.\")\n self.utils.report.title = self.title + \": \" + self.book[\"name\"] + \" feilet \"\n return False\n\n html_xml = ElementTree.parse(html_file).getroot()\n identifier = html_xml.xpath(\"/*/*[local-name()='head']/*[@name='dc:identifier']\")\n\n metadata = Metadata.get_metadata_from_book(self.utils.report, temp_htmldir)\n\n line_spacing = \"single\"\n duplex = \"true\"\n for e in html_xml.xpath(\"/*/*[local-name()='head']/*[@name='dc:format.linespacing']\"):\n if \"double\" == e.attrib[\"content\"]:\n line_spacing = \"double\"\n for e in html_xml.xpath(\"/*/*[local-name()='head']/*[@name='dc:format.printing']\"):\n if \"single-sided\" == e.attrib[\"content\"]:\n duplex = \"false\"\n self.utils.report.info(\"Linjeavstand: {}\".format(\"åpen\" if line_spacing == \"double\" else \"enkel\"))\n self.utils.report.info(\"Trykk: {}\".format(\"enkeltsidig\" if duplex == \"false\" else \"dobbeltsidig\"))\n\n bookTitle = \"\"\n bookTitle = \" (\" + html_xml.xpath(\"string(/*/*[local-name()='head']/*[local-name()='title']/text())\") + \") \"\n\n identifier = identifier[0].attrib[\"content\"] if identifier and \"content\" in identifier[0].attrib else None\n if not identifier:\n self.utils.report.error(self.book[\"name\"] + \": Klarte ikke å finne boknummer i HTML-fil.\")\n self.utils.report.title = self.title + \": \" + self.book[\"name\"] + \" feilet \"\n return False\n epub_identifier = html_xml.xpath(\"/*/*[local-name()='head']/*[@name='nlbprod:identifier.epub']\")\n epub_identifier = epub_identifier[0].attrib[\"content\"] if epub_identifier and \"content\" in epub_identifier[0].attrib else None\n\n # ---------- konverter til PEF ----------\n\n # create context for Pipeline 2 job\n html_dir = os.path.dirname(html_file)\n html_context = {}\n for root, dirs, files in os.walk(html_dir):\n for file in files:\n kind = mimetypes.guess_type(file)[0]\n if kind is not None and kind.split(\"/\")[0] in [\"image\", \"video\", \"audio\"]:\n continue # ignore media files\n 
fullpath = os.path.join(root, file)\n relpath = os.path.relpath(fullpath, html_dir)\n html_context[relpath] = fullpath\n\n script_id = \"nlb:html-to-pef\"\n pipeline_and_script_version = [\n (\"1.11.1-SNAPSHOT\", \"1.10.0-SNAPSHOT\"),\n ]\n braille_arguments = {\n \"source\": os.path.basename(html_file),\n \"braille-standard\": \"(dots:6)(grade:0)\",\n \"line-spacing\": line_spacing,\n \"duplex\": duplex,\n \"maximum-number-of-sheets\": \"72\",\n }\n\n # for custom Statped options using NLBs PIP (remove `and False` or replace with `or True` to test)\n if metadata[\"library\"].lower() == \"statped\" and False:\n # see: https://github.com/nlbdev/pipeline/blob/nlb/nlb/book-to-pef/src/main/resources/xml/html-to-pef.xpl#L146-L167\n #\n # (1) 'http://www.nlb.no/pipeline/modules/braille/pre-processing.xsl',\n # (2) 'http://www.daisy.org/pipeline/modules/braille/xml-to-pef/generate-toc.xsl',\n # (3) if ($default-table-class = '') then resolve-uri('add-table-classes.xsl') else (),\n # (4) if ($insert-boilerplate = 'true') then 'http://www.nlb.no/pipeline/modules/braille/insert-boilerplate.xsl' else (),\n # (5) if ($apply-default-stylesheet = 'true') then 'http://www.nlb.no/pipeline/modules/braille/default.scss' else (),\n # (6) if ($stylesheet) then tokenize($stylesheet,',') else ()),' ')\"/>\n\n braille_arguments[\"insert-boilerplate\"] = \"false\" # disable (4)\n braille_arguments[\"apply-default-stylesheet\"] = \"false\" # disable (5)\n\n # (1-3) will still be included. Specifying (6) let's us include replacements for (4) and (5)\n braille_arguments[\"stylesheet\"] = \",\".join([\n \"https://raw.githubusercontent.com/StatpedEPUB/nlb-scss/master/src/xslt/insert-boilerplate.xsl\",\n \"https://raw.githubusercontent.com/StatpedEPUB/nlb-scss/master/src/scss/braille.scss\"\n ])\n\n # for custom Statped options using DAISYs PIP (remove `and False` or replace with `or True` to test)\n if metadata[\"library\"].lower() == \"statped\" and True:\n # use DAISYs version of PIP instead\n script_id = \"html-to-pef\"\n pipeline_and_script_version = [\n (None, \"6.0.1\"),\n (None, \"5.0.1\"),\n (None, \"4.2.1\"),\n (None, \"4.1.1\"),\n (\"1.14.6\", \"5.0.1\"),\n (\"1.14.5\", None),\n (\"1.14.4\", \"4.2.0\"),\n (\"1.14.4-SNAPSHOT\", \"4.1.1\"),\n (\"1.14.3\", \"4.1.1\"),\n (\"1.14.2\", \"4.1.0\"),\n (\"1.13.6\", \"1.4.6\"),\n (\"1.13.4\", \"1.4.5\"),\n (\"1.12.1\", \"1.4.2\"),\n (\"1.11.1-SNAPSHOT\", \"1.3.0\"),\n ]\n\n\n braille_arguments = {\n \"html\": os.path.basename(html_file),\n \"transform\": \"(formatter:dotify)(translator:liblouis)(dots:6)(grade:0)\",\n \"stylesheet\": \" \".join([\n # 1. better volume breaking, and also removes title page and print toc, moves the colophon and copyright page to the end of the book\n # \"https://raw.githubusercontent.com/nlbdev/pipeline/nlb/nlb/book-to-pef/src/main/resources/xml/pre-processing.xsl\",\n \"https://raw.githubusercontent.com/StatpedEPUB/nlb-scss/master/src/xslt/pre-processing.xsl\",\n\n #\"https://raw.githubusercontent.com/daisy/pipeline/master/modules/braille/xml-to-pef/src/main/resources/xml/xslt/generate-toc.xsl\",\n\n # 3. NLB: Add table classes based on the dimensions of the table, for better handling of tables\n \"https://raw.githubusercontent.com/nlbdev/pipeline/nlb/nlb/book-to-pef/src/main/resources/xml/add-table-classes.xsl\",\n\n # 4. 
NLB: Generate a new title page and about page in the frontmatter\n # \"https://raw.githubusercontent.com/nlbdev/pipeline/nlb/nlb/book-to-pef/src/main/resources/xml/insert-boilerplate.xsl\",\n \"https://raw.githubusercontent.com/StatpedEPUB/nlb-scss/master/src/xslt/insert-boilerplate.xsl\",\n # 5. Statped-specific SCSS\n \"https://raw.githubusercontent.com/StatpedEPUB/nlb-scss/master/src/scss/braille.scss\",\n ]),\n \"page-width\": '38',\n \"page-height\": '29',\n \"toc-depth\": '2',\n\t\t\"maximum-number-of-sheets\": '50',\n \"include-production-notes\" : 'true',\n \"hyphenation\" : 'false',\n \"allow-volume-break-inside-leaf-section-factor\" : '10',\n\t\t\"prefer-volume-break-before-higher-level-factor\" : '1',\n \"stylesheet-parameters\": \"(skip-margin-top-of-page:true)\",\n }\n\n pef_tempdir_object = tempfile.TemporaryDirectory()\n\n self.utils.report.info(\"Konverterer fra HTML til PEF...\")\n found_pipeline_version = None\n found_script_version = None\n with DaisyPipelineJob(self,\n script_id,\n braille_arguments,\n pipeline_and_script_version=pipeline_and_script_version,\n context=html_context\n ) as dp2_job:\n found_pipeline_version = dp2_job.found_pipeline_version\n found_script_version = dp2_job.found_script_version\n\n # get conversion report\n if os.path.isdir(os.path.join(dp2_job.dir_output, \"preview-output-dir\")):\n Filesystem.copy(self.utils.report,\n os.path.join(dp2_job.dir_output, \"preview-output-dir\"),\n os.path.join(self.utils.report.reportDir(), \"preview\"))\n self.utils.report.attachment(None,\n os.path.join(self.utils.report.reportDir(), \"preview\" + \"/\" + identifier + \".pef.html\"),\n \"SUCCESS\" if dp2_job.status == \"SUCCESS\" else \"ERROR\")\n\n if dp2_job.status != \"SUCCESS\":\n self.utils.report.info(\"Klarte ikke å konvertere boken\")\n self.utils.report.title = self.title + \": \" + identifier + \" feilet 😭👎\" + bookTitle\n return False\n\n dp2_pef_dir = os.path.join(dp2_job.dir_output, \"pef-output-dir\")\n dp2_new_pef_dir = os.path.join(dp2_job.dir_output, \"output-dir\")\n if not os.path.exists(dp2_pef_dir) and os.path.exists(dp2_new_pef_dir):\n dp2_pef_dir = dp2_new_pef_dir\n\n if not os.path.isdir(dp2_pef_dir):\n self.utils.report.info(\"Finner ikke den konverterte boken.\")\n self.utils.report.title = self.title + \": \" + identifier + \" feilet 😭👎\" + bookTitle\n return False\n\n Filesystem.copy(self.utils.report, dp2_pef_dir, pef_tempdir_object.name)\n\n self.utils.report.info(\"Boken ble konvertert.\")\n\n self.utils.report.info(\"Kopierer metadata fra HTML til PEF...\")\n try:\n pef_file = None\n for root, dirs, files in os.walk(pef_tempdir_object.name):\n for f in files:\n if f.endswith(\".pef\"):\n pef_file = os.path.join(root, f)\n if not pef_file or not os.path.isfile(pef_file):\n self.utils.report.error(self.book[\"name\"] + \": Klarte ikke å finne en PEF-fil.\")\n else:\n additional_metadata = []\n additional_metadata.append((\"daisy-pipeline-engine-version\", \"nlbprod\", \"http://www.nlb.no/production\", None, found_pipeline_version))\n additional_metadata.append((\"daisy-pipeline-script-id\", \"nlbprod\", \"http://www.nlb.no/production\", None, script_id))\n additional_metadata.append((\"daisy-pipeline-script-version\", \"nlbprod\", \"http://www.nlb.no/production\", None, found_script_version))\n for argument in braille_arguments:\n if argument in [\"source\", \"html\"]:\n continue # skip HTML file path\n values = braille_arguments[argument]\n values = values if isinstance(values, list) else [values]\n for value in values:\n 
additional_metadata.append((\"daisy-pipeline-argument\", \"nlbprod\", \"http://www.nlb.no/production\", argument, value))\n\n transfer_metadata_from_html_to_pef(html_file, pef_file, additional_metadata)\n\n except Exception:\n self.utils.report.warning(traceback.format_exc(), preformatted=True)\n self.utils.report.error(\"An error occured while trying to insert metadata about the conversion\")\n\n self.utils.report.info(\"Kopierer til PEF-arkiv.\")\n archived_path, stored = self.utils.filesystem.storeBook(pef_tempdir_object.name, identifier)\n self.utils.report.attachment(None, archived_path, \"DEBUG\")\n\n self.utils.report.title = self.title + \": \" + identifier + \" ble konvertert 👍😄\" + bookTitle\n return True\n\n\nif __name__ == \"__main__\":\n NlbpubToPef().run()\n","repo_name":"nlbdev/produksjonssystem","sub_path":"produksjonssystem/nlbpub_to_pef.py","file_name":"nlbpub_to_pef.py","file_ext":"py","file_size_in_byte":16989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"}
+{"seq_id":"73887459692","text":"# System\nimport csv\nimport sys\n\n# Django\nfrom sys import stdin, stdout\nfrom django.core.management.base import BaseCommand\nfrom argparse import FileType\n\n# Models\nfrom www.models import Weather, ClothingIcon, WeatherIcon\n\n# Rows\nCODE = 0\nDAY = 1\nSCOTS = 2\nTERRIBLE = 3\nDELTA = 4\nCOLDER = 5\nCOLD = 6\nFAIR = 7\nWARM = 8\nCLOTHING_COLDER = 9\nCLOTHING_COLD = 10\nCLOTHING_FAIR = 11\nCLOTHING_WARM = 12\nWEATHER_DAY = 13\nWEATHER_NIGHT = 14\n\n\nclass Command(BaseCommand):\n help = \"Import weather codes as CSV\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"csv_path\", nargs=1, type=FileType(\"r\"), default=stdin)\n\n def valid_headers(self, row):\n return (\n row[CODE] == \"code\"\n and row[DAY] == \"day\"\n and row[SCOTS] == \"Scots\"\n and row[TERRIBLE] == \"Terrible\"\n and row[DELTA] == \"Delta\"\n and row[COLDER] == \"Colder\"\n and row[COLD] == \"Cold\"\n and row[FAIR] == \"Fair\"\n and row[WARM] == \"Warm\"\n and row[CLOTHING_COLDER] == \"Clothing Colder\"\n and row[CLOTHING_COLD] == \"Clothing Cold\"\n and row[CLOTHING_FAIR] == \"Clothing Fair\"\n and row[CLOTHING_WARM] == \"Clothing Warm\"\n and row[WEATHER_DAY] == \"Weather Day\"\n and row[WEATHER_NIGHT] == \"Weather Night\"\n )\n\n def handle(self, *args, **options):\n input_csv = options[\"csv_path\"][0].name\n\n with open(input_csv, newline=\"\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n first_row = True\n for row in reader:\n # Check for valid data\n if first_row:\n first_row = False\n if not self.valid_headers(row):\n print(\"Error: CSV Columns Invalid\")\n sys.exit()\n continue\n\n # Check if we're about to duplicate something\n if Weather.objects.filter(code=int(row[CODE])).exists():\n print(f\"FAIL - {row[0]} ({row[1]}) already in database\")\n continue\n\n # Look up fks\n clothing_colder = ClothingIcon.objects.get(icon=row[CLOTHING_COLDER])\n clothing_cold = ClothingIcon.objects.get(icon=row[CLOTHING_COLD])\n clothing_fair = ClothingIcon.objects.get(icon=row[CLOTHING_FAIR])\n clothing_warm = ClothingIcon.objects.get(icon=row[CLOTHING_WARM])\n weather_day = WeatherIcon.objects.get(icon=row[WEATHER_DAY])\n weather_night = WeatherIcon.objects.get(icon=row[WEATHER_NIGHT])\n\n # Create model\n weather = Weather(\n code=int(row[CODE]),\n description=str(row[DAY]),\n scots=str(row[SCOTS]),\n terrible=(row[TERRIBLE] == \"1\"),\n delta=float(row[DELTA]),\n colder=float(row[COLDER]),\n cold=float(row[COLD]),\n fair=float(row[FAIR]),\n warm=float(row[WARM]),\n clothing_colder=clothing_colder,\n clothing_cold=clothing_cold,\n clothing_fair=clothing_fair,\n clothing_warm=clothing_warm,\n weather_day=weather_day,\n weather_night=weather_night,\n )\n weather.save()\n print(f\"SUCCESS - {row[0]} ({row[1]}) added to database\")\n","repo_name":"ColinWaddell/TapsAff-Django","sub_path":"www/management/commands/import_weather_csv.py","file_name":"import_weather_csv.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"55"}
+{"seq_id":"71952146733","text":"# encoding: utf-8\n\"\"\"\n @project:data_structure_and_algorithm\n @author: Jiang Hui\n @language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. on linux\n @time: 2019/8/28 20:21\n @desc: 第42题\n\"\"\"\n\n\"\"\"\n 解题思路:\n 本题也是涉及到单调栈的使用,我们要求凹下去区间的面积总和,枚举每一个柱形高度时,要找到它左边第一个比它高的柱子\n \n 要知道,我们想形成一个凹区间,那么必须满足,递减栈中,存在比它矮的柱子,否则当前柱子无法形成凹区间,即不会对\n 面积area作出贡献。\n \n 假设现在是第i个柱子,如果它的高度小于前面的柱子高度,则不会形成凹区间,把它的下标添加到栈中,可以使其保持\n 递减的特性\n \n 假设第i个柱子,它的高度大于前面柱子的高度,那么递减栈中如果元素个数大于等于2的话,就可以形成一个凹区间,\n 这个面积分两种情况计算:\n (1)递减栈中栈顶元素对应的柱子比当前柱子矮的情况:\n 这部分面积等于(栈顶元素对应的柱子高度 - 上一个栈顶元素的柱子高度) * (当前元素与栈顶元素之间的柱子个数)\n \n (2)递减栈中栈顶元素对应的柱子比当前柱子高的情况:(此处是第一个比当前柱子高的柱子下标)\n 这部分面积等于(当前元素对应的柱子高度减去上一个栈顶元素的柱子高度)* (当前元素与栈顶元素之间的柱子个数)\n \n 这两种情况,我们可以利用单调递减栈来实现,具体可以看下方的代码,以及08_接雨水_计算.png\n \n 最后记得把当前柱子下标添加到递减栈中\n\"\"\"\n\n\nclass Solution:\n def trap(self, nums) -> int:\n stack = []\n area = 0\n last = 666 # 这里last初始任意值都可以,因为在下面的第一轮area计算中,i-t-1值必为0,随后last会被覆盖掉\n for i in range(len(nums)):\n while stack and nums[stack[-1]] <= nums[i]: # 第一部分的面积,见上面推理\n t = stack.pop()\n area += (nums[t] - last) * (i - t - 1)\n last = nums[t] # 循环结束时,last是递减栈中最后一个小于nums[i]的柱子高度\n if stack: # 第二部分的面积\n area += (nums[i] - last) * (i - stack[-1] - 1)\n stack.append(i) # 再把当前柱子下标添加到栈中\n return area\n\n\nif __name__ == '__main__':\n print(Solution().trap([5, 4, 3, 0, 1, 4.5]))\n","repo_name":"jh0905/data_structure_and_algorithm","sub_path":"leetcode/7_双指针&滑动窗口&单调栈&单调队列专题/08_接雨水.py","file_name":"08_接雨水.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21147245348","text":"from thread import start_new_thread\nimport socket\nimport time\n\nfrom common.robot import *\nfrom common.amap import *\nfrom algorithms.shortest_path import AStarShortestPathAlgo\n\nfrom simulators.controllers import ArduinoController\n\nclass AppSettings():\n \"\"\"\n settings for Arduino Simulation\n \"\"\"\n TEXTBOX_HEIGHT = 5\n ROBOT_ORI_LABEL = \"Robot Orientation: {}\"\n ROBOT_POS_LABEL = \"Robot position: {},{}\"\n\n\nclass ArduinoSimulationApp(BaseObserver,AppSettings):\n _controller = None # ArduinoController\n # gui elements\n _map_ui = None # MapUI object\n _label_orientation = None\n _label_position = None\n _text_status = None\n _send_data_btn = None\n _map_frame = None\n\n def __init__(self,root):\n # load map from file\n _map_ref = MapRef()\n # init map\n self._map_frame = Frame(master=root) # container\n self._map_ui = MapUI(frame=self._map_frame,map_ref=_map_ref)\n self._map_frame.grid(row=0,column=0)\n # add this app as the listener of robot\n _robot = RobotRef()\n self._robotUI = RobotUIWithTracing(robot=_robot,cells=self._map_ui.get_cells())\n #_robot.add_change_listener(self)\n # init controller\n self._controller = ArduinoController(map_ref=_map_ref,robot_ref=_robot)\n self._controller.add_change_listener(self)\n # init button\n self._control_frame = Frame(master=root)\n self._control_frame.grid(row=1,column=0)\n self._init_control_frame(self._control_frame)\n\n # init labels and text area\n info_frame = Frame(master=root)\n self.init_info_elements(root=info_frame)\n info_frame.grid(row=2,column=0)\n self._controller.run()\n\n def init_info_elements(self,root):\n self._label_orientation = Label(master=root,text=\"...\")\n self._label_orientation.grid(row=0,column=0)\n self._label_position = Label(master=root,text=\"...\")\n self._label_position.grid(row=1,column=0)\n self._text_status = Text(master=root,height=self.TEXTBOX_HEIGHT)\n self._text_status.grid(row=2,column=0)\n scrollb = Scrollbar(root, command=self._text_status.yview)\n scrollb.grid(row=2, column=1, sticky='nsew')\n self._text_status['yscrollcommand'] = scrollb.set\n self.update()\n\n def _init_control_frame(self,fr):\n self._send_data_btn = Button(master=fr,text=\"send sensor data\",command=self._controller.send_sensor_data)\n self._send_data_btn.grid(row=0,column=0)\n self._load_map_btn = Button(master=fr,text=\"load map\",command=self.load_map)\n self._load_map_btn.grid(row=0,column=1)\n self._load_map_text = Text(master=fr,height=1,width=10)\n self._load_map_text.grid(row=0,column=2)\n self._switch_sensor_btn = Button(master=fr,text=\"switch sensor\",command=self.toggle_sensor)\n self._switch_sensor_btn.grid(row=0,column=3)\n self._trace_robot_btn = Button(master=fr,text=\"trace\",command=self.start_tracing)\n self._trace_robot_btn.grid(row=0,column=4)\n self._mf_btn = Button(master=fr,text=\"forward\",command=self._controller.move_forward)\n self._mf_btn.grid(row=0,column=5)\n self._sp_btn = Button(master=fr,text=\"path\",command=self.show_path)\n self._sp_btn.grid(row=0,column=6)\n self._sr_btn = Button(master=fr,text=\"send reading\",command=self._controller.send_sensor_data)\n self._sr_btn.grid(row=0,column=7)\n\n #TODO: this is for debugging purpose only\n def show_path(self):\n map_ref = self._controller.get_map_ref()\n robot_ref = self._controller.get_robot_ref()\n algo = AStarShortestPathAlgo(map_ref=map_ref,target_pos=(13,1))\n algo._build_search_tree(robot_pos=robot_ref.get_position(),robot_ori=robot_ref.get_orientation())\n nodes = algo._get_nodes()\n # 
paint f values\n for y in range(len(nodes)):\n for x in range(len(nodes[0])):\n if (nodes[y][x]):\n f = nodes[y][x].get_f()\n self._map_ui.paint_text(x,y,\"{}\".format(f))\n # paint trace\n cur_node = nodes[1][13]\n while(cur_node.x!=1 or cur_node.y!=18):\n self._map_ui.paint_color(cur_node.x,cur_node.y,\"pink\")\n cur_node = cur_node.parent\n\n\n def start_tracing(self):\n if (self._robotUI.is_tracing()):\n self._robotUI.stop_tracing()\n else:\n self._robotUI.start_tracing()\n\n def toggle_sensor(self):\n self._controller.toggle_sensor()\n\n def load_map(self):\n filename = self._load_map_text.get(\"1.0\",END)[:-1]\n self._controller.load_map(filename)\n\n def update(self,data=None):\n \"update method as an observer\"\n if (data):\n self._text_status.insert(END,data+\"\\n\")\n pos = self._controller.get_robot_pos()\n ori = self._controller.get_robot_ori()\n self._label_position.config(text=self.ROBOT_POS_LABEL.format(pos[0],pos[1]))\n self._label_orientation.config(text=self.ROBOT_ORI_LABEL.format(ori.get_name()))\n\ndef main():\n window = Tk()\n app = ArduinoSimulationApp(root=window)\n window.title(\"arduino\")\n window.mainloop()\n\nmain()","repo_name":"xiaoxue-ma/MDP","sub_path":"simulators/arduino.py","file_name":"arduino.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"40054919096","text":"import paddle\nimport paddle.nn as nn\nimport pandas as pd\nimport numpy as np\nimport random\n\nfrom ML.model import MyDataset, MyLSTMModel\n\ndata_path = 'bag_process/finetune.csv'\ndata = pd.read_csv(data_path)\ndata.info()\nprint(data.head(3))\ndata = data.values\nprint(data.shape)\n\neval = np.array(data)\n# print(eval.shape)\ntrain = []\ntest = []\n\nfor i in data:\n n = random.randint(0,10)\n if n > 8:\n test.append(i)\n else:\n train.append(i)\ntrain = np.array(train)\ntest = np.array(test)\n\neval_dataset = MyDataset(eval,n_in=1,num_features=4)\n\npaddle.set_device('gpu:0')\nmodel = paddle.Model(MyLSTMModel())\nmodel.load('model/final')\nmodel.prepare(metrics=paddle.metric.Accuracy())\n# callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')\n# evaluate(eval_data, batch_size=1, log_freq=10, verbose=2, num_workers=0, callbacks=None, num_iters=None)\nresult = model.evaluate(eval_dataset,\n # batch_size=10,\n batch_size=1000,\n log_freq=10,\n # callbacks=callback,\n verbose=1)\nprint(result)\n","repo_name":"Feng1909/high_param_opt","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"55"}
+{"seq_id":"71276882732","text":"from django.db import models\nfrom django.core.validators import RegexValidator\nfrom django.conf import settings\nfrom user_account.models.User import User\nfrom django.utils.translation import gettext_lazy as _\nfrom subscriber.models.services import services\n\n\nclass MobileSubscriberModel(models.Model):\n\n phoneNumberRegex = RegexValidator(regex=r\"^\\+?1?\\d{8,15}$\")\n msisdn = models.CharField(\n validators=[phoneNumberRegex],\n max_length=16,\n unique=True\n )\n\n customer_id_owner = models.ForeignKey(\n User,\n on_delete=models.PROTECT,\n related_name='Customer_id_owner'\n )\n\n customer_id_user = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n related_name='Customer_id_user'\n )\n\n service_type = models.CharField(\n max_length=50,\n choices=services,\n default=0,\n )\n\n service_start_date = models.DateTimeField(\n auto_now_add=True\n )\n\n def __str__(self):\n return self.msisdn\n","repo_name":"alhassanmoses/Mobile_Subscribers","sub_path":"Mobile_Subscriber_BE/subscriber/models/MobileSubscriberModel.py","file_name":"MobileSubscriberModel.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"9492365865","text":"#!/usr/bin/python\n\"\"\"\nThis is the most simple example to showcase Fogbed.\n\"\"\"\nfrom src.mininet.net import Fogbed\nfrom src.mininet.node import Controller\nfrom src.mininet.cli import CLI\nfrom src.mininet.link import TCLink\nfrom src.mininet.log import info, setLogLevel\nsetLogLevel('info')\n\nSENSORS_PER_FOG = 8\n\nFOG_NODES = [\"f{}\".format(x+1) for x in range(4)]\nSENSOR_NODES = [\"h{}\".format(x+1) for x in range(SENSORS_PER_FOG*len(FOG_NODES))]\n\nfogs = []\nsensors = []\n\nMANAGER_ADDR = '10.0.0.1:2000'\nMANAGER_ENV = {\n \"CLOUDS\": 1,\n \"FOGS\": len(FOG_NODES),\n \"SENSORS_PER_FOG\": SENSORS_PER_FOG\n}\n\nnet = Fogbed(controller=Controller)\ninfo('**** Adding Virtual Instances\\n')\nmngr = net.addDocker('mngr', ip=MANAGER_ADDR.split(':')[0], dimage=\"manager:latest\", environment=MANAGER_ENV)\nvc = net.addVirtualInstance(\"vc\")\nvfs = [net.addVirtualInstance(\"vf{}\".format(x+1)) for x in range(len(FOG_NODES))]\nvss = [net.addVirtualInstance(\"vs{}\".format(x+1)) for x in range(len(FOG_NODES))]\ninfo('*** Adding controller\\n')\nnet.addController('c0')\ninfo('*** Adding docker containers\\n')\ninfo('*** Cloud\\n')\nnet.addLink(vc, mngr)\nc1 = vc.addDocker('c1', ip='10.0.0.2', dimage=\"cloud:latest\", environment={\"MANAGER_ADDR\":MANAGER_ADDR})\ninfo('*** Fogs\\n')\nfor idx, vf in enumerate(vfs):\n f = vf.addDocker(FOG_NODES[idx], ip='10.0.1.{}'.format(idx+1), dimage=\"fog:latest\", environment={\"MANAGER_ADDR\":MANAGER_ADDR})\n fogs.append(f)\n net.addLink(vf, vc, cls=TCLink, delay='100ms')\ninfo('*** Sensors\\n')\nvid = 1\nfor idx, vs in enumerate(vss):\n net.addLink(vs, vfs[idx], cls=TCLink, delay='10ms')\n for x in range(SENSORS_PER_FOG):\n se = vs.addDocker(SENSOR_NODES[vid-1], ip='10.0.2.{}'.format(vid), dimage=\"sensor:latest\", environment={\"MANAGER_ADDR\":MANAGER_ADDR})\n vid += 1\n sensors.append(se)\n\ninfo('*** Starting network\\n')\nnet.start()\ninfo('*** Testing connectivity\\n')\nnet.ping([fogs[0], sensors[0], c1, mngr])\ninfo('*** Running CLI\\n')\nCLI(net)\ninfo('*** Stopping network\\n')\nnet.stop()\n\n","repo_name":"heitorgo1/myfog","sub_path":"examples/health_care.py","file_name":"health_care.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"71758895530","text":"import sys\n\nDEBUG = False\nlog = lambda *a, **k: print(*a, **k, file=sys.stderr) if DEBUG else None\n\nN = int(input())\n\n\ndef array_cost(array_to_sort):\n # log('[computing cost]', array_to_sort)\n\n length = len(array_to_sort)\n\n total_cost = 0\n for index in range(0, length - 1):\n min_value = index + 1\n min_value_index = array_to_sort.index(min_value)\n total_cost += min_value_index + 1\n array_to_sort = array_to_sort[0:min_value_index][::-1] + array_to_sort[min_value_index + 1:]\n # log('[computing cost] +', min_value_index + 1, ':', array_to_sort)\n return total_cost\n\n\nfor case_id in range(1, N + 1):\n target_length, target_cost = map(int, input().split())\n min_cost = target_length - 1\n max_cost = (target_length + 2) * (target_length - 1) // 2\n\n if not (min_cost <= target_cost <= max_cost):\n print('Case #{}: IMPOSSIBLE'.format(case_id))\n continue\n\n array_prefix = []\n array_suffix = []\n\n cumulated_cost = 0\n is_array_inverted = False\n for number_to_place in range(1, target_length):\n min_cost_of_following_numbers = target_length - number_to_place - 1\n max_cost_induced_by_number = target_length - number_to_place + 1\n\n use_max_cost = cumulated_cost + max_cost_induced_by_number + min_cost_of_following_numbers <= target_cost\n # log('[building] placing {} ({}<=cost<={}+{}) with {} to add: {}({})'.format(\n # number_to_place,\n # min_cost_of_following_numbers + 1,\n # min_cost_of_following_numbers,\n # max_cost_induced_by_number, target_cost - cumulated_cost,\n # 'after' if use_max_cost else 'before',\n # max_cost_induced_by_number if use_max_cost else 1,\n # ))\n\n if use_max_cost == is_array_inverted:\n array_prefix.append(number_to_place)\n else:\n array_suffix.insert(0, number_to_place)\n cumulated_cost += max_cost_induced_by_number if use_max_cost else 1\n if use_max_cost:\n is_array_inverted = not is_array_inverted\n\n unsorted_array = array_prefix + [target_length] + array_suffix\n\n log('[case] goal:', target_cost, target_length,\n 'answer:', array_cost(unsorted_array), len(unsorted_array), unsorted_array)\n print('Case #{}: {}'.format(case_id, ' '.join(map(str, unsorted_array))))\n\n# length 3:\n# 2: 1 2 3 (1+1)\n# 3: (1+2) [autre : (2+1)]\n# 4: (3+1) ou\n# 5: 2 3 1 (3+2)\n","repo_name":"AmauryLiet/CodeJam","sub_path":"2021/0_qualif/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"35491468723","text":"import streamlit as st\nimport joblib\nimport pandas as pd\n\n# Load the pre-trained pipeline\npipeline = joblib.load('pipeline_example.joblib')\n\n# Create a Streamlit web interface\nst.title(\"Predictive Model with Streamlit\")\n\n# Create input widgets for user input\nage = st.slider(\"Age:\", min_value=18, max_value=100, value=30)\nincome = st.slider(\"Income:\", min_value=0, max_value=200000, value=50000)\neducation = st.selectbox(\"Education:\", ['Bachelors', 'Masters', 'PhD', 'High School'])\ngender = st.radio(\"Gender:\", ['Male', 'Female'])\n\n# Create a DataFrame from user input\nuser_data = pd.DataFrame({\n 'age': [age],\n 'income': [income],\n 'education': [education],\n 'gender': [gender]\n})\n\n# Make predictions using the loaded pipeline\nif st.button(\"Predict\"):\n prediction = pipeline.predict(user_data)\n prediction_proba = pipeline.predict_proba(user_data)[:, 1]\n\n st.subheader(\"Prediction:\")\n if prediction[0] == 1:\n st.write(\"The model predicts that the target is 1 (Positive).\")\n else:\n st.write(\"The model predicts that the target is 0 (Negative).\")\n\n st.subheader(\"Prediction Probability:\")\n st.write(f\"The probability of the positive class is: {prediction_proba[0]:.2f}\")\n\n# Add some additional information or explanations if needed\nst.write(\"\"\"\nThis is a simple Streamlit app that uses a pre-trained scikit-learn pipeline to make predictions based on user input. Adjust the sliders and select options, then click the 'Predict' button to see the model's prediction and prediction probability.\n\"\"\")\n","repo_name":"arslan-enes/streamlit_pipeline_test","sub_path":"streamlit_test.py","file_name":"streamlit_test.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"71952130093","text":"# encoding: utf-8\n\"\"\"\n @project:data_structure_and_algorithm\n @author: Jiang Hui\n @language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. on linux\n @time: 2019/8/19 11:24\n @desc: 第160题\n\"\"\"\n\n\"\"\"\n 解题思路:\n 定义两个指针p,q,p指向headA,q指向headB\n 两个指针同时走,如果同时走向空,说明不相交,否则走向空的指针,再从另一个链表的头节点开始走\n 如果存在交点,那么两个指针必定会在交点相遇\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if not headA or not headB:\n return None\n p = headA\n q = headB\n while p != q:\n p = p.next\n q = q.next\n if not p and not q:\n return None\n if not p:\n p = headB\n if not q:\n q = headA\n return p\n","repo_name":"jh0905/data_structure_and_algorithm","sub_path":"leetcode/2_链表专题/09_两个链表的交点.py","file_name":"09_两个链表的交点.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"5756959113","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nimport matplotlib.pyplot as plt\n\nstates={\"M\": 1,\"B\": 2}\n\nstates_rev = {value: key for key, value in states.items()}\n\ndef get_data():\n\ttry:\n\t\tdata_path = sys.argv[1]\n\t\treturn (pd.read_csv(data_path))\n\texcept IndexError:\n\t\tsys.exit(\"usage: python describe.py [your_dataset].csv\")\n\texcept IOError:\n\t\tsys.exit(\"could not read data file\")\n\texcept:\n\t\tsys.exit(\"Error\")\n\t\nif __name__ == \"__main__\":\n\tdata = get_data()\n\tcolumns = [f\"p{i}\" for i in range(1, 32)]\n\tcolumns.insert(1, \"state\")\n\tdata.columns = columns\n\tdata[\"state\"].replace(states, inplace=True)\n\tdata = data.select_dtypes('number')\n\tdata[\"state\"].replace(states_rev, inplace=True)\n\tfor i in range(1,20):\n\t\tdata.drop(f\"p{i}\", axis=1, inplace=True)\n\tprint(data)\n\tsb.pairplot(data, hue='state', markers='.')\n\tplt.show()","repo_name":"Kelias-42/Multilayer_Perceptron","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"32709123657","text":"'''\nProblem Challenge 2\nTarget Sum (hard) \nYou are given a set of positive numbers and a target sum ‘S’. Each number should be assigned either a ‘+’ or ‘-’ sign. \nWe need to find the total ways to assign symbols to make the sum of the numbers equal to the target ‘S’.\n\nExample 1: \nInput: {1, 1, 2, 3}, S=1\nOutput: 3\nExplanation: The given set has '3' ways to make a sum of '1': {+1-1-2+3} & {-1+1-2+3} & {+1+1+2-3}\n\nExample 2: \nInput: {1, 2, 7, 1}, S=9\nOutput: 2\nExplanation: The given set has '2' ways to make a sum of '9': {+1+2+7-1} & {-1+2+7+1}\n'''\n\n\n#mycode\ndef find_target_subsets(num, s):\n dp = [[0 for x in range(2*sum(num)+1)] for y in range(len(num))]\n len_s = sum(num) - s\n\n for i in range(2*sum(num)+1):\n if i - len_s == num[0]:\n dp[0][i] = 1\n if i - len_s == -num[0]:\n dp[0][i] = 1\n \n for i in range(1,len(num)):\n for j in range(1,2*sum(num)+1):\n if j - num[i] >= 0:\n dp[i][j] += dp[i-1][j - num[i]]\n if j + num[i] <= 2*sum(num):\n dp[i][j] += dp[i-1][j + num[i]]\n return dp[len(num)-1][s+len_s]\n\n\n\n\n#answer\ndef find_target_subsets(num, s):\n totalSum = sum(num)\n\n # if 's + totalSum' is odd, we can't find a subset with sum equal to '(s + totalSum) / 2'\n if totalSum < s or (s + totalSum) % 2 == 1:\n return 0\n\n return count_subsets(num, (s + totalSum) // 2)\n\n\n# this function is exactly similar to what we have in 'Count of Subset Sum' problem.\ndef count_subsets(num, s):\n n = len(num)\n dp = [[0 for x in range(s+1)] for y in range(n)]\n\n # populate the sum = 0 columns, as we will always have an empty set for zero sum\n for i in range(0, n):\n dp[i][0] = 1\n\n # with only one number, we can form a subset only when the required sum is\n # equal to the number\n for s in range(1, s+1):\n dp[0][s] = 1 if num[0] == s else 0\n\n # process all subsets for all sums\n for i in range(1, n):\n for s in range(1, s+1):\n dp[i][s] = dp[i - 1][s]\n if s >= num[i]:\n dp[i][s] += dp[i - 1][s - num[i]]\n\n # the bottom-right corner will have our answer.\n return dp[n - 1][s]\n\n\ndef main():\n print(\"Total ways: \" + str(find_target_subsets([1, 1, 2, 3], 1)))\n print(\"Total ways: \" + str(find_target_subsets([1, 2, 7, 1], 9)))\n\n\nmain()\n\n\n'''\nTime and Space complexity \nThe above solution has time and space complexity of O(N*S), where ‘N’ represents total numbers and ‘S’ is the desired sum.\n\nWe can further improve the solution to use only O(S) space.\n'''\n\n\n\ndef find_target_subsets(num, s):\n totalSum = sum(num)\n\n # if 's + totalSum' is odd, we can't find a subset with sum equal to '(s +totalSum) / 2'\n if totalSum < s or (s + totalSum) % 2 == 1:\n return 0\n\n return count_subsets(num, (s + totalSum) // 2)\n\n\n# this function is exactly similar to what we have in 'Count of Subset Sum' problem\ndef count_subsets(num, sum):\n n = len(num)\n dp = [0 for x in range(sum+1)]\n dp[0] = 1\n\n # with only one number, we can form a subset only when the required sum is equal to the number\n for s in range(1, sum+1):\n dp[s] = 1 if num[0] == s else 0\n\n # process all subsets for all sums\n for i in range(1, n):\n for s in range(sum, -1, -1):\n if s >= num[i]:\n dp[s] += dp[s - num[i]]\n\n return dp[sum]\n\n\ndef main():\n print(\"Total ways: \" + str(find_target_subsets([1, 1, 2, 3], 1)))\n print(\"Total ways: \" + str(find_target_subsets([1, 2, 7, 1], 9)))\n\n\nmain()\n","repo_name":"alishsuper/Grokking-the-Coding-Interview","sub_path":"15. 
Pattern 01 Knapsack (Dynamic Programming)/Problem Challenge 2 - Target Sum (hard).py","file_name":"Problem Challenge 2 - Target Sum (hard).py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"55"}
+{"seq_id":"37527260729","text":"import json\r\n\r\nfrom django.test import TestCase\r\nfrom django.urls import reverse\r\n\r\nfrom apis.scanners.hosts.models import Host\r\nfrom apis.scanners.wafw00f.models import WafWoof\r\n\r\n\r\nclass WafW00fScannerTest(TestCase):\r\n\r\n def setUp(self) -> None:\r\n self.host = '193.122.75.144'\r\n\r\n def test_host_key_in_query_params(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:scan\")}?')\r\n self.assertEqual(response.status_code, 400)\r\n\r\n def test_host_key_value_not_specified_in_query_params(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:scan\")}?host=')\r\n self.assertEqual(response.status_code, 400)\r\n\r\n def test_wafw00f_scan_is_in_progress(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:scan\")}?host={self.host}')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n\r\nclass WafW00fScanResultTest(TestCase):\r\n data = \"\"\"\r\n [{\r\n \"url\": \"https://193.122.75.144\",\r\n \"detected\": false,\r\n \"firewall\": \"None\",\r\n \"manufacturer\": \"None\"\r\n }]\r\n \"\"\"\r\n\r\n def setUp(self) -> None:\r\n Host.create_host('193.122.75.144')\r\n Host.create_host('193.122.66.53')\r\n\r\n self.found_host_with_result = Host.get_host('193.122.75.144')\r\n self.found_host_with_no_result = Host.get_host('193.122.66.53')\r\n self.not_found_host = Host.get_host('122.121.33.45')\r\n\r\n self.create_wafw00f_scan = WafWoof.create_wafwoof_scan(self.found_host_with_result, json.loads(self.data))\r\n\r\n self.get_wafw00f_scan_with_result = WafWoof.get_wafw00f_scan_by_host(self.found_host_with_result)\r\n self.get_wafw00f_scan_with_no_result = WafWoof.get_wafw00f_scan_by_host(self.found_host_with_no_result)\r\n\r\n def test_host_key_in_query_params(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:result\")}?')\r\n self.assertEqual(response.status_code, 400)\r\n\r\n def test_host_key_value_not_specified_in_query_params(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:result\")}?host=')\r\n self.assertEqual(response.status_code, 400)\r\n\r\n def test_host_not_found(self):\r\n # test if the host is not found\r\n self.assertIsNone(self.not_found_host)\r\n response = self.client.get(f'{reverse(\"wafwoof:result\")}?host={self.not_found_host}')\r\n self.assertEqual(response.status_code, 404)\r\n\r\n def test_wafw00f_scan_result_does_not_exist_for_host(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:result\")}?host={self.found_host_with_no_result}')\r\n self.assertEqual(response.status_code, 404)\r\n\r\n def test_wafw00f_scan_result_exist_for_host(self):\r\n response = self.client.get(f'{reverse(\"wafwoof:result\")}?host={self.found_host_with_result}')\r\n self.assertEqual(response.status_code, 200)","repo_name":"Numostanley/NetworkScanner","sub_path":"src/apis/scanners/wafw00f/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"2114613089","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def widthOfBinaryTree(self, root: Optional[TreeNode]) -> int: \n max_width = 1\n curr_level = [(root, 1)] #root, pos\n while curr_level != []:\n next_level = []\n for roots, pos in curr_level:\n if roots.left:\n next_level.append((roots.left, pos*2))\n if roots.right:\n next_level.append((roots.right, pos*2+1))\n if next_level != []:\n max_width = max(max_width, next_level[-1][1]-next_level[0][1]+1)\n curr_level = next_level\n return max_width\n","repo_name":"deepti-talesra/LeetCode","sub_path":"Maximum_Width_of_Binary_Tree.py","file_name":"Maximum_Width_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"55"}
+{"seq_id":"23912320684","text":"import os\nimport csv\nimport datetime\n\ndef get_percentage(numerator, denominator):\n value = (numerator/denominator)*100\n value = round(value, 2)\n return (str(value) + \"%\")\n\ninput_file = os.path.join('election_data_1.csv')\n\noutput_file = os.path.join('results.txt')\nmaster_dict = {}\ntotal_votes = 0\nwinner = \"\"\ntemp = 0\n\nwith open(input_file, 'r', newline='') as csvfile:\n data_file = csv.reader(csvfile, delimiter=',')\n #skip the first row of the csv becuase it's a header line\n next(data_file, None)\n\n for row in data_file:\n voter_id = row[0]\n county = row[1]\n candidate = row[2]\n total_votes = total_votes + 1\n #dictionary of candidate and their vote count\n if candidate in master_dict:\n x = master_dict[candidate]\n master_dict[candidate] = x+1\n x = 0\n else:\n master_dict[candidate] = 1\n\nwith open(output_file, 'w', newline='') as file:\n # Write the first row (column headers)\n file.write(\"Election Results\\n\")\n print(\"Election Results\")\n file.write(\"-------------------------\\n\")\n print(\"-------------------------\")\n file.write(\"Total Votes: \" + str(total_votes) +\"\\n\")\n print(\"Total Votes: \" + str(total_votes))\n file.write(\"-------------------------\\n\")\n print(\"-------------------------\")\n for politican in master_dict:\n file.write(politican + \": \" + get_percentage(master_dict[politican],total_votes)\n + \" (\"+str(master_dict[politican])+\")\\n\")\n print(politican + \": \" + get_percentage(master_dict[politican],total_votes)\n + \" (\"+str(master_dict[politican])+\")\")\n if master_dict[politican] > temp:\n temp = master_dict[politican]\n winner = politican\n file.write(\"-------------------------\\n\")\n print(\"-------------------------\")\n file.write(\"Winner: \" + winner+\"\\n\")\n print(\"Winner: \" + winner)\n file.write(\"-------------------------\\n\")\n print(\"-------------------------\")\n","repo_name":"dipesh267/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"69967210413","text":"liczba = 2315\r\ncyfry = len(str(liczba))\r\n\r\nilosc = 0\r\n\r\nfor maska in range (1, 2**cyfry):\r\n sprawdzana = 0\r\n liczba1 = liczba\r\n pow10 = 1\r\n\r\n while maska > 0:\r\n #1 - cyfra zostaje\r\n #0 - cyfra usuwana\r\n if maska % 2 == 1:\r\n cyfra = liczba1 % 10\r\n sprawdzana += cyfra * pow10\r\n pow10 *= 10\r\n maska //= 2\r\n liczba1 //= 10\r\n\r\n if sprawdzana % 7 == 0:\r\n ilosc += 1\r\n\r\nprint(ilosc)","repo_name":"JakubMlocek/Introduction_to_Computer_Science","sub_path":"Cwiczenia2/zad5.py","file_name":"zad5.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"3487793470","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2022/11/10 15:30\r\n\r\nimport numpy as np\r\nfrom itertools import product\r\n\r\ndef pr2_f1score(precision, recall):\r\n if precision == 0 and recall == 0:\r\n return 0.0\r\n return 2 * precision * recall / (precision + recall)\r\n\r\n\r\ndef precision_recall_f1(true_bkps, my_bkps, margin_percent=5):\r\n\r\n assert margin_percent >= 0, \"margin_percent of error must be non-negative (magin_percent={})\".format(margin_percent)\r\n assert len(true_bkps) > 0, \"currently onlg assume at least one element in true_bkps\"\r\n\r\n if len(my_bkps) == 0:\r\n return 0.0, 0.0, 0.0\r\n\r\n used=set()\r\n true_pos=set(\r\n true_b for true_b, my_b in product(true_bkps,my_bkps)\r\n if true_b*(1-margin_percent/100.0)<=my_b<=true_b*\r\n (1+margin_percent/100.0) and not (my_b in used or used.add(my_b))\r\n )\r\n\r\n tp_=len(true_pos)\r\n precision=tp_/len(my_bkps)\r\n recall=tp_/len(true_bkps)\r\n f1score=pr2_f1score(precision,recall)\r\n return precision,recall,f1score\r\n\r\n\r\n","repo_name":"GoForit-007/EMD-PERIOD","sub_path":"utils/precision_recall_f1.py","file_name":"precision_recall_f1.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"72573382250","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('employees', '0012_auto_20160403_1030'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='employeetype',\n options={'ordering': ['name'], 'verbose_name': 'Tipo de empleado', 'verbose_name_plural': 'Tipos de empleados'},\n ),\n migrations.AlterField(\n model_name='employeetype',\n name='name',\n field=models.CharField(unique=True, max_length=45, verbose_name='nombre'),\n ),\n ]\n","repo_name":"emilioferreyra/iris","sub_path":"iris/employees/migrations/0013_auto_20160611_1735.py","file_name":"0013_auto_20160611_1735.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"55"}
+{"seq_id":"27017446736","text":"from bs4 import BeautifulSoup\nimport lxml\nimport re\nimport os\n\n\ndef parse(path, file):\n # Когда есть список страниц, из них нужно вытащить данные и вернуть их\n out = {}\n\n with open(\"{}{}\".format(path, file), encoding='utf8') as data:\n soup = BeautifulSoup(data, \"lxml\")\n\n # print(soup.prettify())\n # links = soup.find(id='bodyContent').find_all(True)\n # print('\\n'.join([str(link) for link in links]))\n\n print('\\t=========================\\n\\n')\n # find all tags a\n # find all parrents of these tags, and gather to the list\n # find first a tag of a parent\n # find all siblings (not only a)\n # count continuous a's\n parents_set = set()\n [parents_set.add(tag.find_parent()) for tag in soup.find('body').find_all('a') if tag]\n\n max_links_len = 1 if parents_set else 0\n\n # print('\\n\\t==PARENTS_SET==')\n # print('\\n='.join(map(str, parents_set)))\n\n siblings_list = [parent.find('a', recursive=False).find_next_siblings() for parent in parents_set]\n link_re = re.compile(r'^]+href=\"([^\"]+)', file))\n#\n# print([a['href'] for a in soup('a')])\n#\n# print([a['href'] for a in soup.select('a[href]')])\n\n\nif __name__ == '__main__':\n path = './'\n file = 'Python.html'\n parse(path, file)\n","repo_name":"shamanengine/coursera_python_webservices","sub_path":"soup_sample/links_test.py","file_name":"links_test.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"24256191397","text":"# Standard library\nfrom typing import List\nfrom datetime import datetime\n\n# Internal modules\nfrom article_service.app.repository import analytics_repo\nfrom article_service.app.models.dto import MonthStatsDTO\nfrom article_service.app.models import MonthStats\n\n# 3rd party modules\nfrom werkzeug.exceptions import BadRequest, NotFound, Forbidden\n\n\ndef most_frequent_words(YYYYmm: str) -> List:\n _assert_valid_yearmonth(YYYYmm)\n pass\n\n\ndef monthly_statistics(YYYYmm: str) -> MonthStatsDTO:\n \"\"\" Returns number of articles on a given month,\n average number of words in the articles, and median number\n of words in the articles. \"\"\"\n _assert_valid_yearmonth(YYYYmm)\n data: MonthStats = analytics_repo.find(YYYYmm)\n if data:\n dto: MonthStatsDTO = MonthStatsDTO(\n yearmonth=data.yearmonth,\n article_count=data.article_count,\n word_mean=data.word_mean,\n word_median=data.word_median\n )\n return dto\n else:\n raise NotFound(\n \"No monthly statistics available for provided yearmonth\"\n )\n\n\ndef create_monthstat_record(dto: MonthStatsDTO) -> None:\n _assert_valid_yearmonth(dto.yearmonth)\n _assert_record_doesnt_exist(dto.yearmonth)\n monthstat = MonthStats(\n yearmonth=dto.yearmonth,\n article_count=dto.article_count,\n word_mean=dto.word_mean,\n word_median=dto.word_median\n )\n analytics_repo.save(monthstat)\n\n\ndef update_monthstat_record(dto: MonthStatsDTO) -> None:\n _assert_valid_yearmonth(dto.yearmonth)\n monthstat_record = _assert_record_exist(dto.yearmonth)\n monthstat_record.article_count = dto.article_count\n monthstat_record.word_mean = dto.word_mean\n monthstat_record.word_median = dto.word_median\n analytics_repo.save(monthstat_record)\n\n\ndef _assert_valid_yearmonth(YYYYmm: str) -> None:\n if len(YYYYmm) != 6:\n raise BadRequest(\n \"Yearmonth value must be 6 characters length in format YYYYMM\"\n )\n year: int = int(YYYYmm[0:4])\n month: int = int(YYYYmm[4:])\n if year > datetime.utcnow().year:\n raise BadRequest(\"Year value cannot be in the future\")\n if year < 2000:\n raise BadRequest(\"Year value cannot be earlier than 2000\")\n if month not in range(1, 13):\n raise BadRequest(\"Value must be in interval 1-12\")\n\n\ndef _assert_record_doesnt_exist(YYYYmm: str) -> None:\n if analytics_repo.find(YYYYmm) is not None:\n raise Forbidden(\n f\"Monthstat record already exist. \"\n f\"Use PUT request if wanting to update.\"\n )\n\n\ndef _assert_record_exist(YYYYmm: str) -> MonthStats:\n monthstat: MonthStats = analytics_repo.find(YYYYmm)\n if monthstat is None:\n raise NotFound(\"No record available for yearmonth value\")\n return monthstat\n","repo_name":"tbjorch/WordAnalytics","sub_path":"article_service/app/service/yearmonth.py","file_name":"yearmonth.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"30156216877","text":"from django.core.management import BaseCommand, CommandError\nfrom fields.models import MyFields\nfrom random import randint, choice, random\nimport datetime\nimport string\nfrom django.utils import timezone\n\nfrom sport.models import Sportsman\nfrom user.models import User\n\n\nclass Command(BaseCommand):\n help = 'Create a field object with random values'\n\n @staticmethod\n def random_str(length=5):\n character_set = string.ascii_letters\n return ''.join(choice(character_set) for i in range(length))\n\n def add_arguments(self, parser):\n parser.add_argument('number', type=int)\n parser.add_argument('name', type=str)\n\n def handle(self, *args, **options):\n a = options['number']\n name = options['name']\n for item in range(a):\n MyFields.objects.create(\n sportsman=Sportsman.objects.get(name=name),\n is_superfield=randint(0, 1),\n field_name=self.random_str(),\n datetime=timezone.now() - datetime.timedelta(minutes=randint(100, 10000000)),\n field_price=round(random() + randint(1, 1000), 2),\n email=self.random_str(randint(5, 15)) + '@' + self.random_str(randint(3, 5)) + '.com',\n field_year=randint(1, 100),\n field_description=self.random_str(randint(5, 20)) + ', ' + self.random_str(10) + '!',\n field_time=datetime.time(randint(0, 23), randint(0, 59), randint(0, 59)),\n )\n return 'Done'\n","repo_name":"VitaliiFisenko/DjTestShop","sub_path":"fields/management/commands/create_rnd_field.py","file_name":"create_rnd_field.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"4708900137","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rcParams[\"font.family\"] = \"Times New Roman\"\nplt.style.use('seaborn-darkgrid')\npalette = plt.get_cmap('Set1')\nplt.figure(figsize=(8, 6))\n\nsame_result = np.load('matching_result_same.npy')\ndiff_result = np.load('matching_result_diff.npy')\n\nresult = [same_result, diff_result]\nindicator = ['same', 'diff']\nmarker_shape = ['o', 'v', 's']\n\nfor i in range(len(indicator)):\n for j in range(len(marker_shape)):\n plt.plot(np.arange(len(result[i][:, j])) + 1, \n result[i][:, j], marker=marker_shape[j],\n color=palette(i), \n label='{}-layer{}'.format(indicator[i],j + 1))\nplt.xlabel('number of epoches')\nplt.ylabel('mean matched l2 distance')\nplt.legend(frameon=True)\n#plt.show()\nplt.savefig('result.pdf')\nplt.close()\nplt.clf()","repo_name":"wmyw96/convex-cnn-tf-synthetic","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"15155087494","text":"\"\"\"\n 1 4 2\n 3 - 5\n 6 7 8 \n \n 1 - 2 \n 3 4 5\n 6 7 8 \n \n - 1 2 \n 3 4 5\n 6 7 8 \n \n \n 1 2 5\n 3 8 0\n 4 7 6\n \n \n state[n][n] \n\"\"\"\nLEFT = 0\nRIGHT = 1\nUP = 2\nDOWN = 3\nn = 0\nstep = 0\nstate_used = []\ndef move(dir, state):\n if dir == 0:\n newState = left(state)\n if newState == 0:\n return [[0,0],[]]\n return [checkSTF(newState), newState]\n elif dir == 1:\n newState = right(state)\n if newState == 0:\n return [[0,0],[]]\n return [checkSTF(newState), newState]\n elif dir == 2:\n newState = up(state)\n if newState == 0:\n return [[0,0],[]]\n return [checkSTF(newState), newState]\n else:\n newState = down(state)\n if newState == 0:\n return [[0,0],[]]\n return [checkSTF(newState), newState]\n \ndef left(state):\n if state == []:\n return 0\n curState = state\n global n\n for i in range(0, n):\n for j in range(0, n):\n if curState[i][j] == 0:\n if j < n-1:\n curState[i][j] = curState[i][j+1]\n curState[i][j+1] = 0\n return curState\n return 0\n\ndef right(state):\n if state == []:\n return 0\n curState = state\n global n\n for i in range(0, n):\n for j in range(0, n):\n if curState[i][j] == 0:\n if j > 0:\n curState[i][j] = curState[i][j-1]\n curState[i][j-1] = 0\n return curState\n return 0\n\ndef up(state):\n if state == []:\n return 0\n curState = state\n global n\n for i in range(0, n):\n for j in range(0, n):\n if curState[i][j] == 0:\n if i < n-1:\n curState[i][j] = curState[i+1][j]\n curState[i+1][j] = 0\n return curState\n return 0\n\ndef down(state):\n if state == []:\n return 0\n curState = state\n global n\n for i in range(0, n):\n for j in range(0, n):\n if curState[i][j] == 0:\n if i > 0:\n curState[i][j] = curState[i-1][j]\n curState[i-1][j] = 0\n return curState\n return 0\n\ndef checkSTF(state):\n temp = 0\n ST = 0\n SF = 0\n global n\n for i in range(0, n):\n for j in range(0, n):\n if temp == state[i][j]:\n ST = ST + 1\n else:\n SF = SF + 1\n temp = temp + 1\n return [ST, SF]\n\ndef doubleState(state):\n global n\n temp = [[None] * n for x in range(n)]\n for i in range(0, n):\n for j in range(0, n):\n temp[i][j] = state[i][j]\n return temp\n\ndef solveDFS(state):\n print(state)\n print(\"=====================\")\n cloneState = doubleState(state)\n getState = move(0, cloneState)\n STMax = getState[0][0]\n curState = getState[1]\n save_state = []\n global state_used\n if not state in state_used:\n state_used.insert(len(state_used), state)\n global step \n if (step == 1000): \n return 0\n step = step + 1\n print(\"Step: \", step, \"[dir=\",0,\", st=\",STMax,\"] :\", curState)\n for x in range(1,4):\n cloneState = doubleState(state)\n getState = move(x, cloneState)\n print(\"Step: \", step, \"[dir=\",x,\", st=\",getState[0][0],\"] :\", getState[1])\n if getState[0][0] >= STMax and not getState[1] in state_used:\n STMax = getState[0][0]\n curState = getState[1]\n elif getState[1] != [] and getState[0][0] < STMax:\n save_state.append(getState[1])\n \n # if getState[1] != [] and getState[1] not in state_used:\n # save_state.append(getState[1])\n # if curState == [] and save_state != []:\n # curState = save_state[0]\n # print(state_used)\n print(\"=> Choose, ST = \", STMax)\n print(state,\"=>\",curState)\n if not curState in state_used:\n state_used.insert(len(state_used),curState)\n print(\"=====================\")\n # print(curState)\n if STMax == n*n:\n return curState\n elif curState == []:\n # print(\"Can't solve!\")\n print(\"call back!\")\n return -1\n else:\n getSolveDFS = solveDFS(curState)\n # return getSolveDFS\n if 
getSolveDFS == -1:\n if save_state == []:\n return -1\n else:\n temp = save_state[0]\n save_state.pop(0)\n getSolveDFS = solveDFS(temp)\n\ndef main():\n print(\"Enter n (n > 2): \")\n global n\n # n = int(input())\n print(\"Initial state: \")\n n = 3\n # state = [[None] * n for x in range(n)]\n # state = [[1,4,2],[3,0,5],[6,7,8]]\n state = [[3,1,2],[4,7,0],[6,8,5]]\n # state = [[1, 0, 2], [3, 8, 5], [4, 7, 6]]\n # state = [[2, 8, 5], [3, 1, 6], [0, 4, 7]]\n # state = [[2, 6, 5], [3, 8, 1], [4, 0, 7]]\n # state = [[7, 3, 1], [8, 4, 2], [6, 5, 0]]\n # state = [[6, 5, 1], [3, 8, 4], [2, 0, 7]]\n # state = [[8, 2, 1], [3, 4, 5], [6, 7, 0]]\n # state = [[8, 4, 2], [3, 1, 5], [6, 7, 0]]\n # print(state)\n # for i in range(0, n):\n # for j in range(0, n):\n # print(\"S[\",i+1,\"][\",j+1,\"] = \")\n # state[i][j] = int(input())\n print(state)\n solveDFS(state)\n return 0\n\nif __name__ == \"__main__\":\n main()","repo_name":"linhdev99/AI_Assignment1","sub_path":"Test/n_puzzle.py","file_name":"n_puzzle.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"14588557416","text":"import cv2 as cv\nimport time\nimport PoseEstimationModule as pm\n\ncap = cv.VideoCapture(0)\npTime = 0\ndetector = pm.poseDetector()\n\nwhile True:\n success, img = cap.read()\n img = detector.findPose(img)\n\n cTime = time.time()\n fps = 1 / (cTime - pTime)\n pTime = cTime\n\n cv.putText(img, str(int(fps)), (20, 50), cv.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 3)\n cv.imshow(\"Pose Estimation\", img)\n cv.waitKey(1)","repo_name":"BurakKaragol/Python_Image_Processing","sub_path":"BodyDetection/PoseEstimationExample.py","file_name":"PoseEstimationExample.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"8124016542","text":"import requests\r\nimport csv\r\n\r\ndef fetch_all_products():\r\n base_url = \"ADD YOUR STORE URL/wp-json/wc/v3/\"\r\n product_url = base_url + \"products\"\r\n params = {\r\n \"consumer_key\": \"ADD API KEY\",\r\n \"consumer_secret\": \"ADD API SECRET\",\r\n \"per_page\": 100,\r\n \"page\": 1\r\n }\r\n \r\n all_products = []\r\n batch_size = 10\r\n batch_counter = 0\r\n \r\n print(\"Fetching products...\")\r\n \r\n while True:\r\n response = requests.get(product_url, params=params)\r\n products = response.json()\r\n \r\n # Check if there's no more products to fetch\r\n if not products:\r\n break\r\n\r\n for product in products:\r\n if product['type'] == 'simple':\r\n sku = product['sku']\r\n price = product['price']\r\n stock_quantity = product['stock_quantity']\r\n all_products.append([sku, price, stock_quantity])\r\n elif product['type'] == 'variable':\r\n # Fetch variations for this product\r\n variations_url = product_url + f\"/{product['id']}/variations\"\r\n variations = requests.get(variations_url, params=params).json()\r\n for variation in variations:\r\n sku = variation['sku']\r\n price = variation['price']\r\n stock_quantity = variation['stock_quantity']\r\n all_products.append([sku, price, stock_quantity])\r\n \r\n batch_counter += 1\r\n if batch_counter % batch_size == 0:\r\n print(f\"Fetched {batch_counter} products...\")\r\n \r\n # Prepare to fetch the next page\r\n params['page'] += 1\r\n\r\n print(f\"Total products fetched: {len(all_products)}.\")\r\n \r\n # Write to CSV\r\n print(\"Writing to CSV...\")\r\n with open('woocommerce_products.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(['Product Code', 'Price', 'Quantity'])\r\n writer.writerows(all_products)\r\n \r\n print(\"CSV generation complete.\")\r\n\r\n# Call the function\r\nfetch_all_products()\r\n","repo_name":"ouzifeng/woocommerce_products_to_csv_over_api","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"30758868275","text":"from concurrent.futures import ThreadPoolExecutor\r\nimport time\r\nimport numpy as np\r\n\r\n\r\ndef calc_tasks_row(start_stop):\r\n global mat_size\r\n start = start_stop[0]\r\n stop = start_stop[1]\r\n total = 0\r\n i = start[0]\r\n j = start[1]\r\n while j < mat_size:\r\n if total <= stop:\r\n result_mat_row[i][j] = calc_one_elem(i, j)\r\n total += 1\r\n j += 1\r\n else:\r\n return\r\n for i in range(start[0] + 1, mat_size):\r\n for j in range(mat_size):\r\n if total <= stop:\r\n result_mat_row[i][j] = calc_one_elem(i, j)\r\n total += 1\r\n else:\r\n return\r\n\r\n\r\ndef calc_tasks_col(start_stop):\r\n global mat_size\r\n start = start_stop[0]\r\n stop = start_stop[1]\r\n total = 0\r\n i = start[0]\r\n j = start[1]\r\n while i < mat_size:\r\n if total <= stop:\r\n result_mat_col[i][j] = calc_one_elem(i, j)\r\n total += 1\r\n i += 1\r\n else:\r\n return\r\n for j in range(start[0] + 1, mat_size):\r\n for i in range(mat_size):\r\n if total <= stop:\r\n result_mat_col[i][j] = calc_one_elem(i, j)\r\n total += 1\r\n else:\r\n return\r\n\r\n\r\ndef calc_tasks_kth(task):\r\n global mat_size, nr_tasks\r\n total = nr_tasks - 1\r\n start = 0\r\n for i in range(mat_size):\r\n for j in range(mat_size):\r\n if start < task:\r\n start += 1\r\n elif total == nr_tasks - 1:\r\n total = 0\r\n result_mat_kth[i][j] = calc_one_elem(i, j)\r\n else:\r\n total += 1\r\n\r\n\r\n\r\ndef calc_one_elem(row_mat1, col_mat2):\r\n global mat1, mat2\r\n result = 0\r\n for i in range(len(mat1[row_mat1])):\r\n result += mat1[row_mat1][i] * mat2[i][col_mat2]\r\n return result\r\n\r\n\r\ndef calc_ss_point_row():\r\n global nr_tasks, mat_size\r\n elems_per_task = int((mat_size * mat_size) / nr_tasks)\r\n start_stop = []\r\n total = 0\r\n pair = []\r\n for i in range(mat_size):\r\n for j in range(mat_size):\r\n if total == 0:\r\n pair = [(i, j), 0]\r\n total += 1\r\n elif total == elems_per_task - 1:\r\n total = 0\r\n pair[1] = elems_per_task\r\n start_stop.append(pair)\r\n else:\r\n total += 1\r\n start_stop[len(start_stop) - 1][1] = (mat_size * mat_size) - (nr_tasks - 1) * elems_per_task\r\n return start_stop\r\n\r\n\r\ndef calc_ss_point_col():\r\n global nr_tasks, mat_size\r\n elems_per_task = int((mat_size * mat_size) / nr_tasks)\r\n start_stop = []\r\n total = 0\r\n pair = []\r\n for j in range(mat_size):\r\n for i in range(mat_size):\r\n if total == 0:\r\n pair = [(i, j), 0]\r\n total += 1\r\n elif total == elems_per_task - 1:\r\n total = 0\r\n pair[1] = elems_per_task\r\n start_stop.append(pair)\r\n else:\r\n total += 1\r\n start_stop[len(start_stop) - 1][1] = (mat_size * mat_size) - (nr_tasks - 1) * elems_per_task\r\n return start_stop\r\n\r\n\r\ndef generate_mat(n):\r\n global mat1, mat2, result_mat_row, result_mat_col, result_mat_kth\r\n mat1 = np.random.randint(1, 9, size=(n, n))\r\n mat2 = np.random.randint(1, 9, size=(n, n))\r\n result_mat_row = np.random.randint(0, 1, size=(n, n))\r\n result_mat_col = np.random.randint(0, 1, size=(n, n))\r\n result_mat_kth = np.random.randint(0, 1, size=(n, n))\r\n\r\n\r\nmat1 = []\r\nmat2 = []\r\nresult_mat_row = []\r\nresult_mat_col = []\r\nresult_mat_kth = []\r\nnr_tasks = 0\r\nmat_size = 0\r\n\r\nif __name__ == '__main__':\r\n mat_size = int(input(\"matrix size >>> \"))\r\n nr_tasks = int(input(\"nr of tasks >>> \"))\r\n nr_threads = int(input(\"nr of threads for the pool >>> \"))\r\n generate_mat(mat_size)\r\n\r\n start_stop_row = calc_ss_point_row()\r\n start_row = time.time()\r\n executor_row = ThreadPoolExecutor(nr_threads)\r\n 
list(executor_row.map(calc_tasks_row, start_stop_row))  # consume the iterator so all tasks finish before timing\r\n    end_row = time.time()\r\n    print(\"For row:\", end_row - start_row)\r\n\r\n    start_stop_col = calc_ss_point_col()\r\n    start_col = time.time()\r\n    executor_col = ThreadPoolExecutor(nr_threads)\r\n    list(executor_col.map(calc_tasks_col, start_stop_col))  # wait for all column tasks\r\n    end_col = time.time()\r\n    print(\"For col:\", end_col - start_col)\r\n\r\n    task_kth = list(range(0, nr_tasks))\r\n    start_kth = time.time()\r\n    executor_kth = ThreadPoolExecutor(nr_threads)\r\n    list(executor_kth.map(calc_tasks_kth, task_kth))  # wait for all kth tasks\r\n    end_kth = time.time()\r\n    print(\"For kth:\", end_kth - start_kth)\r\n\r\n    print(mat1)\r\n    print(mat2)\r\n    print(\"--------------------------------\")\r\n    print(result_mat_row)\r\n    print(\"--------------------------------\")\r\n    print(result_mat_col)\r\n    print(\"--------------------------------\")\r\n    print(result_mat_kth)\r\n","repo_name":"nicolagutanu/Uni","sub_path":"semester 5/Parallel and Distributed Programming/lab3/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"1583787390","text":"# -*- coding:utf-8 -*-\nimport time\nimport requests\nimport json\n\nfrom datetime import datetime\nfrom tb_logger import logger\nfrom config import global_config\n\n\nclass Timer(object):\n def __init__(self, sleep_interval=0.5):\n\n self.buy_time = datetime.strptime(global_config.getRaw('config','buy_time'), \"%Y-%m-%d %H:%M:%S.%f\")\n self.buy_time_ms = int(time.mktime(self.buy_time.timetuple()) * 1000.0 + self.buy_time.microsecond / 1000)\n self.sleep_interval = sleep_interval\n\n self.diff_time = self.local_jd_time_diff()\n\n def jd_time(self):\n \"\"\"\n 从淘宝服务器获取时间毫秒\n :return:\n \"\"\"\n\n url = 'http://api.m.taobao.com/rest/api3.do?api=mtop.common.getTimestamp'\n ret = requests.get(url).text\n js = json.loads(ret)\n v=js[\"data\"]\n a=dict(v).values()\n b=list(a)\n return int(b[0])\n\n def local_time(self):\n \"\"\"\n 获取本地毫秒时间\n :return:\n \"\"\"\n return int(round(time.time() * 1000))\n\n def local_jd_time_diff(self):\n \"\"\"\n 计算本地与淘宝服务器时间差\n :return:\n \"\"\"\n return self.local_time() - self.jd_time()\n\n def start(self):\n logger.info('正在等待到达设定时间:{},检测本地时间与淘宝服务器时间误差为【{}】毫秒'.format(self.buy_time, self.diff_time))\n while True:\n\n\n if self.local_time() - self.diff_time >= self.buy_time_ms:\n logger.info('时间到达,开始执行……')\n break\n else:\n time.sleep(self.sleep_interval)\n","repo_name":"1181691792/TM_maotai","sub_path":"taobao_maotai1/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"55"}
+{"seq_id":"4257154990","text":"# encoding=utf8\nimport selenium as se\nfrom selenium.webdriver.common.by import By\nimport pymongo\nfrom pprint import pprint\nimport sys\nimport time\n\nCITY = \"San Jose\"\n\n\n\ndef weather(city):\n options = se.webdriver.ChromeOptions()\n options.add_argument('headless')\n driver = se.webdriver.Chrome(chrome_options=options)\n url = 'https://www.google.com/search?hl=en&authuser=0&ei=M5YkXM_tOYzBjwTu47jABA&q='+city+'weather&oq=zhuozhou+weather&gs_l=psy-ab.3..35i39.1104929.1106580..1106843...0.0..0.97.684.9......0....1..gws-wiz.......0i71j0i7i30j0i7i5i10i30j0i13j0i13i30j0i8i13i30j35i304i39.kcZOPDwMDfs'\n driver.get(url)\n city = driver.find_element(By.XPATH,'//div[@class=\"vk_gy vk_h\"][@id=\"wob_loc\"]')\n #time = driver.find_element(By.XPATH,'//div[@class=\"vk_gy vk_sh\"][@id=\"wob_dts\"]')\n weather = driver.find_element(By.XPATH,'//img[@style=\"margin:1px 4px 0;height:48px;width:48px\"]')\n tempMax = driver.find_element(By.XPATH,'//div[@class=\"vk_gy\"]/span[@class=\"wob_t\"]')\n tempMin = driver.find_element(By.XPATH,'//div[@class=\"vk_lgy\"]/span[@class=\"wob_t\"]')\n degree_sign= u'°'\n strOutput = city.text+'\\n'+weather.get_attribute('alt')+'\\n'+\"Tempature: \"+str(FtoC(tempMax.text))+degree_sign+' -- '+str(FtoC(tempMin.text))+degree_sign+'\\n'\n aveTemp = (FtoC(tempMax.text)-FtoC(tempMin.text))/2+FtoC(tempMin.text)\n if aveTemp<=10:\n \tstrOutput = strOutput+\"温度低,多穿点儿哈\"\n print(strOutput)\n return strOutput\n\ndef FtoC(F):\n C = int((int(F)-32)*5/9+0.5)\n return C\n\ndef main():\n mongoConnectString = \"mongodb://gaoyuan0702:Gw295459784!@ds213229.mlab.com:13229/react_node_express\"\n myclient = pymongo.MongoClient(mongoConnectString)\n\n db = myclient[\"react_node_express\"]\n col = db[\"weather\"]\n\n query = {\"City\":CITY}\n col.delete_one(query)\n newValue = {\"City\":CITY,\"Weather\":weather(CITY)}\n col.insert_one(newValue)\n\n for x in col.find():\n pprint(x)\n\nwhile(True):\n main()\n time.sleep(1800)\n","repo_name":"YuanGao0702/DataCrawling","sub_path":"Python_Connect_To_MongoDB/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"27413059727","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n#from bs4 import BeautifulSoup\n\n\nclass BiqugebookSpider(scrapy.Spider):\n name = 'biqugebook'\n# allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/3/3322']\n\n def parse(self, response):\n \n for href in response.css('a::attr(href)').extract():\n pattern = re.compile(r\"/3/3322/\\d+\")\n try:\n chapter = re.findall(pattern, href)[0]\n url = 'http://www.xbiquge.la/' + chapter + '.html' \n yield scrapy.Request(url, callback=self.parse_chapter)\n except:\n continue\n \n# pattern = re.compile(\"(.*?) \")\n# \n# try:\n# chapter = re.findall(pattern, response.extract())[0]\n# url = 'http://www.xbiquge.la/' + chapter\n# yield scrapy.Request(url, callback=self.parse_chapter)\n# except:\n# pass\n \n \n \n def parse_chapter(self, response):\n \n# soup = BeautifulSoup(response, 'lxml')\n# \n# content = soup.find(\"div\",{\"id\":\"content\"}).get_text()\n# \n# title = soup.find('title').string\n \n title = response.css('h1').extract()\n title = re.findall(r'>(.*?)',title[0])\n# title = title[5:]\n# content = response.css('div','id:content::text')\n contents = response.xpath('//*[@id=\"content\"]/text()')\n content = ''\n \n for each in contents:\n if len(each.re('\\S+')) > 0:\n content += each.re('\\S+')[0]\n \n# content = re.findall(r'>(.*?)', content)\n \n# book_content = []\n# book_content.append(title)\n# book_content.append('\\n')\n# book_content.append(content)\n\n \n infoDict = {}\n \n infoDict['章名'] = title\n infoDict['内容'] = content\n \n yield infoDict\n# yield book_content","repo_name":"gitpNser/learndata","sub_path":"biquge/biquge/spiders/biqugebook.py","file_name":"biqugebook.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"30093617889","text":"\"\"\"urls for the pugorugh app\"\"\"\nfrom django.conf.urls import url\nfrom django.views.generic import TemplateView\nfrom django.views.generic.base import RedirectView\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n\n# API endpoints\nurlpatterns = format_suffix_patterns([\n\n # favicon\n url(r'^favicon\\.ico$',\n RedirectView.as_view(\n url='/static/icons/favicon.ico',\n permanent=True\n )),\n\n # application\n url(r'^$', TemplateView.as_view(template_name='index.html')),\n\n # -------- Api ------------------------\n\n # change userdog status\n url(r'^api/dog/(?P\\d+)/liked/$',\n views.userdog_update_status_liked_view,\n name='userdog_update_like'),\n url(r'^api/dog/(?P\\d+)/disliked/$',\n views.userdog_update_status_disliked_view,\n name='userdog_update_dislike'),\n url(r'^api/dog/(?P\\d+)/undecided/$',\n views.userdog_update_status_undecided_view,\n name='userdog_update_undecided'),\n\n # get next dog with same status from pk\n url(r'^api/dog/(?P-?\\d+)/liked/next/$',\n views.userdog_retrieve_next_liked_view,\n name='userdog_retrieve_next_liked'),\n url(r'^api/dog/(?P-?\\d+)/disliked/next/$',\n views.userdog_retrieve_next_disliked_view,\n name='userdog_retrieve_next_disliked'),\n url(r'^api/dog/(?P-?\\d+)/undecided/next/$',\n views.userdog_retrieve_next_undecided_view,\n name='userdog_retrieve_next_undecided'),\n\n])\n","repo_name":"sabinem/treehouse-python-techdegree","sub_path":"project11_djangoapi_react_dogsite/pugorugh/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"55"}
+{"seq_id":"72702858732","text":"import threading\nimport requests\nimport urllib.request\nimport os\nimport subprocess\nfrom subprocess import Popen\nimport time\nfrom omxplayer.player import OMXPlayer\n\ndef screen():\n\tglobal screen_status\n\turl= 'http://143.198.132.112/smarthh/pantalla.php'\n\tstatus_code = 200\n\ttry:\n\t\tstatus_code = requests.get(url,timeout = 10)\n\t\tprint(status_code)\n\n\texcept requests.exceptions.ConnectTimeout:\n\t\tstatus_code = 3\n\texcept requests.exceptions.ConnectionError:\n\t\tstatus_code = 3\n\tif status_code == 200 or \"[200]\":\n\t\tquery = {'lat':'45','lon':'180'}\n\t\tr = requests.post('http://143.198.132.112/smarthh/pantalla.php')\n\t\tscreen_status = r.text\n#\t\tprint(r.text)\n\t\tprint('request realizado')\n\telse:print('no hubo request')\n \nglobal screen_status\nscreen_status = ''\nflag = 1\nvideo = (\"/home/pi/Downloads/hnd/camalion.mp4\")\nvideo2 = (\"/home/pi/Downloads/AnimatedSF.mp4\")\na = 'no video'\n\nwhile True:\n screen()\n if((screen_status == '2') and (a != 'omx1')):\n omx1 = Popen(['omxplayer','-b', video])\n a = 'omx1'\n elif((screen_status == '1') and (a != 'omx2')):\n omx1.quit()\n omx2 = Popen(['omxplayer','-b', video2])\n a = 'omx2'\n elif(screen_status == '3'):\n os.system('sudo killall omxplayer.bin')\n a = 'no video'\n else:\n pass\n \n","repo_name":"ns1202-j/videoshnd","sub_path":"Downloads/vdd/vd2.py","file_name":"vd2.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"19591118006","text":"from django.db.models import F\nfrom .models import *\n\n#!What Django Middleware\n# In Django, middleware is a lightweight plugin that processes during request and response execution. \n# Middleware is used to perform a function in the application.\n# The functions can be a security, session, csrf protection, authentication etc\n\n\nclass DemoMiddleware(object):#*”object” is a kind of placeholder, letting Python know you don't want to inherit the properties of some other class.\n def __init__(self,get_response):#*This function is passed to our middleware by the Django framework, and its purpose is to pass the request object over to the next middleware, and the get the value of the response.\n print('Working __init__')\n self.get_response = get_response\n self.num_requests = 0\n self.num_exceptions = 0\n self.context_message = {\n \"message\":{\n \"info\":\"Please request again this page\"\n }\n }\n \n \n def listOperatingSystemData(self,os_info):\n if 'Windows' in os_info:\n print('noldu amk buna')\n NewStats.objects.all().update(win=F('win')+1)\n if 'Mac' in os_info:\n NewStats.objects.all().update(mac=F('mac')+1)\n if 'Iphone' in os_info:\n NewStats.objects.all().update(iph=F('iph')+1)\n if 'Android' in os_info:\n NewStats.objects.all().update(android=F('android')+1)\n else:\n #this block each request working and save to database keeped own data\n NewStats.objects.all().update(oth=F('oth')+1)\n\n def __call__(self,request):#*This is where we put our actual middleware logic. This method is called by the Django framework to invoke our middleware.__class__ is an attribute on the object that refers to the class from which the object was created.\n # Code that is executed in each request the view is called,when we called view,first get_response(request) and request data send to view and then return response data line 17\n print('__call__ working')\n self.num_requests+=1\n response = self.get_response(request)\n \n \n a = NewStats.objects.all().values('win','mac','iph','android','oth')\n b = NewStats.objects.all().annotate().values('win','mac','iph','android','oth')\n \n if 'admin' not in request.path:\n #?call listOperatingSystemData function each request\n self.listOperatingSystemData(str(request.META['HTTP_USER_AGENT']))\n \n # print('Request path : ', request.path)\n # print('Request headers host : ', request.headers['HOST'])\n # print('Request headers accept-language : ', request.headers['Accept-Language'])\n # print('Request headers method : ', request.META['REQUEST_METHOD'])\n # print('Request domain adress ', request.META['REMOTE_ADDR'])REMOTE_ADDR refers to the IP address of the client.\n \n \n print('return response __call__ attribute')\n # Code that is executed in each request the view is called\n return response\n\n def process_view(self,request,view_func,view_args,view_kwargs):\n #This code is executed just before the view is called\n #This method is called each time Django receives a request and routes it to a view\n #Logic execute before a call to view\n #Gives access to the view itself & arguments\n #process_view allow to us connect to view before request,working before request to view\n print('Worked view_name ', view_func.__name__)\n print('View value name ', view_kwargs)\n print('Working process_view function')\n \n \n def process_exception(self,request,exception):\n # This code is executed if an exception is raised\n #This method is called whenever a view raises an exception that isn't caught within the view itself. 
\n        # Hence, process_exception is invoked after the request has reached and returned from the view.\n        print('process_exception run...')\n        self.num_exceptions+=1\n        print('Summary of exceptions since the Django server started ', self.num_exceptions)\n    \n    \n    def process_template_response(self,request,response):\n        #This method is also invoked after the view has finished executing. \n        # It is only called if the resultant response contains a render() method, which indicates a template is being rendered. \n        # You can use this method to alter the content of the template, including its context data, if required.\n        # This method starts working when the view returns a response to the client\n        print('Working process_template_response view')\n        print('response... ', response)\n        print('response context_data... ', response.context_data) \n        response.context_data['new_message'] = self.context_message\n        return response","repo_name":"riadelimemmedov/Django-Middleware","sub_path":"demo_middleware/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"8410805640","text":"import re\nimport sys\n\nfrom securesystemslib import exceptions\n\n\nclass Schema:\n \"\"\"\n \n A schema matches a set of possible Python objects, of types\n that are encodable in JSON. 'Schema' is the base class for\n the other classes defined in this module. All derived classes\n should implement check_match().\n \"\"\"\n\n def matches(self, object): # pylint: disable=redefined-builtin\n \"\"\"\n \n Return True if 'object' matches this schema, False if it doesn't.\n If the caller wishes to signal an error on a failed match, check_match()\n should be called, which will raise a 'exceptions.FormatError' exception.\n \"\"\"\n\n try:\n self.check_match(object)\n except exceptions.FormatError:\n return False\n\n return True\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n \"\"\"\n \n Abstract method. Classes that inherit from 'Schema' must\n implement check_match(). If 'object' matches the schema, check_match()\n should simply return. If 'object' does not match the schema,\n 'exceptions.FormatError' should be raised.\n \"\"\"\n\n raise NotImplementedError()\n\n\nclass Any(Schema):\n \"\"\"\n \n Matches any single object. Whereas other schemas explicitly state\n the required type of its argument, Any() does not. It simply does a\n 'pass' when 'check_match()' is called and at the point where the schema\n is instantiated.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): passed\n\n \n\n >>> schema = Any()\n >>> schema.matches('A String')\n True\n >>> schema.matches([1, 'list'])\n True\n \"\"\"\n\n def __init__(self):\n pass\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n pass\n\n\nclass String(Schema):\n \"\"\"\n \n Matches a particular string. The argument object must be a string and be\n equal to a specific string value. At instantiation, the string is set and\n any future comparisons are checked against this internal string value.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = String('Hi')\n >>> schema.matches('Hi')\n True\n >>> schema.matches('Not hi')\n False\n \"\"\"\n\n def __init__(self, string):\n if not isinstance(string, str):\n raise exceptions.FormatError(\n \"Expected a string but\" \" got \" + repr(string)\n )\n\n self._string = string\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if self._string != object:\n raise exceptions.FormatError(\n \"Expected \" + repr(self._string) + \" got \" + repr(object)\n )\n\n\nclass AnyString(Schema):\n \"\"\"\n \n Matches any string, but not a non-string object. 
This schema\n can be viewed as the Any() schema applied to Strings, but an\n additional check is performed to ensure only strings are considered.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = AnyString()\n >>> schema.matches('')\n True\n >>> schema.matches('a string')\n True\n >>> schema.matches(['a'])\n False\n >>> schema.matches(3)\n False\n >>> schema.matches(u'a unicode string')\n True\n >>> schema.matches({})\n False\n \"\"\"\n\n def __init__(self):\n pass\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, str):\n raise exceptions.FormatError(\n \"Expected a string\" \" but got \" + repr(object)\n )\n\n\nclass AnyNonemptyString(AnyString):\n \"\"\"\n \n Matches any string with one or more characters.\n This schema can be viewed as the Any() schema applied to Strings, but an\n additional check is performed to ensure only strings are considered and\n that said strings have at least one character.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = AnyNonemptyString()\n >>> schema.matches('')\n False\n >>> schema.matches('a string')\n True\n >>> schema.matches(['a'])\n False\n >>> schema.matches(3)\n False\n >>> schema.matches(u'a unicode string')\n True\n >>> schema.matches({})\n False\n \"\"\"\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n AnyString.check_match(self, object)\n\n if object == \"\":\n raise exceptions.FormatError(\n \"Expected a string\"\n \" with at least one character but got \" + repr(object)\n )\n\n\nclass AnyBytes(Schema):\n \"\"\"\n \n Matches any byte string, but not a non-byte object. This schema can be\n viewed as the Any() schema applied to byte strings, but an additional check\n is performed to ensure only strings are considered. Supported methods\n include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = AnyBytes()\n >>> schema.matches(b'')\n True\n >>> schema.matches(b'a string')\n True\n >>> schema.matches(['a'])\n False\n >>> schema.matches(3)\n False\n >>> schema.matches({})\n False\n \"\"\"\n\n def __init__(self):\n pass\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, bytes):\n raise exceptions.FormatError(\n \"Expected a byte string\" \" but got \" + repr(object)\n )\n\n\nclass LengthString(Schema):\n \"\"\"\n \n Matches any string of a specified length. The argument object must be a\n string. 
At instantiation, the string length is set and any future\n comparisons are checked against this internal string value length.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = LengthString(5)\n >>> schema.matches('Hello')\n True\n >>> schema.matches('Hi')\n False\n \"\"\"\n\n def __init__(self, length):\n if isinstance(length, bool) or not isinstance(length, int):\n # We need to check for bool as a special case, since bool\n # is for historical reasons a subtype of int.\n raise exceptions.FormatError(\n \"Got \" + repr(length) + \" instead of an integer.\"\n )\n\n self._string_length = length\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, str):\n raise exceptions.FormatError(\n \"Expected a string but\" \" got \" + repr(object)\n )\n\n if len(object) != self._string_length:\n raise exceptions.FormatError(\n \"Expected a string of\" \" length \" + repr(self._string_length)\n )\n\n\nclass LengthBytes(Schema):\n \"\"\"\n \n Matches any Bytes of a specified length. The argument object must be either\n a str() in Python 2, or bytes() in Python 3. At instantiation, the bytes\n length is set and any future comparisons are checked against this internal\n bytes value length.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n\n >>> schema = LengthBytes(5)\n >>> schema.matches(b'Hello')\n True\n >>> schema.matches(b'Hi')\n False\n \"\"\"\n\n def __init__(self, length):\n if isinstance(length, bool) or not isinstance(length, int):\n # We need to check for bool as a special case, since bool\n # is for historical reasons a subtype of int.\n raise exceptions.FormatError(\n \"Got \" + repr(length) + \" instead of an integer.\"\n )\n\n self._bytes_length = length\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, bytes):\n raise exceptions.FormatError(\n \"Expected a byte but\" \" got \" + repr(object)\n )\n\n if len(object) != self._bytes_length:\n raise exceptions.FormatError(\n \"Expected a byte of\" \" length \" + repr(self._bytes_length)\n )\n\n\nclass OneOf(Schema):\n \"\"\"\n \n Matches an object that matches any one of several schemas. OneOf() returns\n a result as soon as one of its recognized sub-schemas is encountered in the\n object argument. When OneOf() is instantiated, its supported sub-schemas\n are specified by a sequence type (e.g., a list, tuple, etc.). 
A mismatch\n is returned after checking all sub-schemas and not finding a supported\n type.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = OneOf([ListOf(Integer()), String('Hello'), String('bye')])\n >>> schema.matches(3)\n False\n >>> schema.matches('bye')\n True\n >>> schema.matches([])\n True\n >>> schema.matches([1,2])\n True\n >>> schema.matches(['Hi'])\n False\n \"\"\"\n\n def __init__(self, alternatives):\n # Ensure each item of the list contains the expected object type.\n if not isinstance(alternatives, list):\n raise exceptions.FormatError(\n \"Expected a list but\" \" got \" + repr(alternatives)\n )\n\n for alternative in alternatives:\n if not isinstance(alternative, Schema):\n raise exceptions.FormatError(\n \"List contains an\" \" invalid item \" + repr(alternative)\n )\n\n self._alternatives = alternatives\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n # Simply return as soon as we find a match.\n # Raise 'exceptions.FormatError' if no matches are found.\n for alternative in self._alternatives:\n if alternative.matches(object):\n return\n raise exceptions.FormatError(\n \"Object did not match a\"\n \" recognized alternative.\" # pylint: disable=implicit-str-concat\n )\n\n\nclass AllOf(Schema):\n \"\"\"\n \n Matches the intersection of a list of schemas. The object being tested\n must match all of the required sub-schemas. Unlike OneOf(), which can\n return a result as soon as a match is found in one of its supported\n sub-schemas, AllOf() must verify each sub-schema before returning a result.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = AllOf([Any(), AnyString(), String('a')])\n >>> schema.matches('b')\n False\n >>> schema.matches('a')\n True\n \"\"\"\n\n def __init__(self, required_schemas):\n # Ensure each item of the list contains the expected object type.\n if not isinstance(required_schemas, list):\n raise exceptions.FormatError(\n \"Expected a list but\" \" got\" + repr(required_schemas)\n )\n\n for schema in required_schemas:\n if not isinstance(schema, Schema):\n raise exceptions.FormatError(\n \"List contains an\" \" invalid item \" + repr(schema)\n )\n\n self._required_schemas = required_schemas[:]\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n for required_schema in self._required_schemas:\n required_schema.check_match(object)\n\n\nclass Boolean(Schema):\n \"\"\"\n \n Matches a boolean. The object argument must be one of True or False. All\n other types are flagged as mismatches.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = Boolean()\n >>> schema.matches(True) and schema.matches(False)\n True\n >>> schema.matches(11)\n False\n \"\"\"\n\n def __init__(self):\n pass\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, bool):\n raise exceptions.FormatError(\n \"Got \" + repr(object) + \" instead of a boolean.\"\n )\n\n\nclass ListOf(Schema):\n \"\"\"\n \n Matches a homogeneous list of some sub-schema. That is, all the sub-schema\n must be of the same type. The object argument must be a sequence type\n (e.g., a list, tuple, etc.). When ListOf() is instantiated, a minimum and\n maximum count can be specified for the homogeneous sub-schema list. 
If\n min_count is set to 'n', the object argument sequence must contain 'n'\n items. See ListOf()'s __init__ method for the expected arguments.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = ListOf(RegularExpression('(?:..)*'))\n >>> schema.matches('hi')\n False\n >>> schema.matches([])\n True\n >>> schema.matches({})\n False\n >>> schema.matches(['Hi', 'this', 'list', 'is', 'full', 'of', 'even', 'strs'])\n True\n >>> schema.matches(['This', 'one', 'is not'])\n False\n >>> schema = ListOf(Integer(), min_count=3, max_count=10)\n >>> schema.matches([3]*2)\n False\n >>> schema.matches([3]*3)\n True\n >>> schema.matches([3]*10)\n True\n >>> schema.matches([3]*11)\n False\n \"\"\"\n\n def __init__(\n self, schema, min_count=0, max_count=sys.maxsize, list_name=\"list\"\n ):\n \"\"\"\n \n Create a new ListOf schema.\n\n \n schema: The pattern to match.\n min_count: The minimum number of sub-schema in 'schema'.\n max_count: The maximum number of sub-schema in 'schema'.\n list_name: A string identifier for the ListOf object.\n \"\"\"\n\n if not isinstance(schema, Schema):\n message = \"Expected Schema type but got \" + repr(schema)\n raise exceptions.FormatError(message)\n\n self._schema = schema\n self._min_count = min_count\n self._max_count = max_count\n self._list_name = list_name\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, (list, tuple)):\n raise exceptions.FormatError(\n \"Expected object of type {} but got type {}\".format( # pylint: disable=consider-using-f-string\n self._list_name, type(object).__name__\n )\n )\n\n # Check if all the items in the 'object' list\n # match 'schema'.\n for item in object:\n try:\n self._schema.check_match(item)\n\n except exceptions.FormatError as e:\n raise exceptions.FormatError(\n str(e) + \" in \" + repr(self._list_name)\n )\n\n # Raise exception if the number of items in the list is\n # not within the expected range.\n if not (self._min_count <= len(object) <= self._max_count):\n raise exceptions.FormatError(\n \"Length of \" + repr(self._list_name) + \" out of range.\"\n )\n\n\nclass Integer(Schema):\n \"\"\"\n \n Matches an integer. A range can be specified. For example, only integers\n between 8 and 42 can be set as a requirement. 
The object argument is also\n checked against a Boolean type, since booleans have historically been\n considered a sub-type of integer.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = Integer()\n >>> schema.matches(99)\n True\n >>> schema.matches(False)\n False\n >>> schema.matches('a string')\n False\n >>> Integer(lo=10, hi=30).matches(25)\n True\n >>> Integer(lo=10, hi=30).matches(5)\n False\n \"\"\"\n\n def __init__(self, lo=-2147483648, hi=2147483647):\n \"\"\"\n \n Create a new Integer schema.\n\n \n lo: The minimum value the int object argument can be.\n hi: The maximum value the int object argument can be.\n \"\"\"\n\n self._lo = lo\n self._hi = hi\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if isinstance(object, bool) or not isinstance(object, int):\n # We need to check for bool as a special case, since bool\n # is for historical reasons a subtype of int.\n raise exceptions.FormatError(\n \"Got \" + repr(object) + \" instead of an integer.\"\n )\n\n if not (self._lo <= object <= self._hi):\n int_range = \"[\" + repr(self._lo) + \", \" + repr(self._hi) + \"].\"\n raise exceptions.FormatError(\n repr(object) + \" not in range \" + int_range\n )\n\n\nclass DictOf(Schema):\n \"\"\"\n \n Matches a mapping from items matching a particular key-schema to items\n matching a value-schema (i.e., the object being checked must be a dict).\n Note that in JSON, keys must be strings. In the example below, the keys of\n the dict must be one of the letters contained in 'aeiou' and the value must\n be a structure containing any two strings.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = DictOf(RegularExpression(r'[aeiou]+'), Struct([AnyString(), AnyString()]))\n >>> schema.matches('')\n False\n >>> schema.matches({})\n True\n >>> schema.matches({'a': ['x', 'y'], 'e' : ['', '']})\n True\n >>> schema.matches({'a': ['x', 3], 'e' : ['', '']})\n False\n >>> schema.matches({'a': ['x', 'y'], 'e' : ['', ''], 'd' : ['a', 'b']})\n False\n \"\"\"\n\n def __init__(self, key_schema, value_schema):\n \"\"\"\n \n Create a new DictOf schema.\n\n \n key_schema: The dictionary's key.\n value_schema: The dictionary's value.\n \"\"\"\n\n if not isinstance(key_schema, Schema):\n raise exceptions.FormatError(\n \"Expected Schema but\" \" got \" + repr(key_schema)\n )\n\n if not isinstance(value_schema, Schema):\n raise exceptions.FormatError(\n \"Expected Schema but\" \" got \" + repr(value_schema)\n )\n\n self._key_schema = key_schema\n self._value_schema = value_schema\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, dict):\n raise exceptions.FormatError(\n \"Expected a dict but\" \" got \" + repr(object)\n )\n\n for key, value in object.items():\n self._key_schema.check_match(key)\n self._value_schema.check_match(value)\n\n\nclass Optional(Schema):\n \"\"\"\n \n Provide a way for the Object() schema to accept optional dictionary keys.\n The Object() schema outlines how a dictionary should look, such as the\n names for dict keys and the object type of the dict values. Optional()'s\n intended use is as a sub-schema to Object(). Object() flags an object as a\n mismatch if a required key is not encountered, however, dictionary keys\n labeled Optional() are not required to appear in the object's list of\n required keys. 
If an Optional() key IS found, Optional()'s sub-schemas are\n then verified.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = Object(k1=String('X'), k2=Optional(String('Y')))\n >>> schema.matches({'k1': 'X', 'k2': 'Y'})\n True\n >>> schema.matches({'k1': 'X', 'k2': 'Z'})\n False\n >>> schema.matches({'k1': 'X'})\n True\n \"\"\"\n\n def __init__(self, schema):\n if not isinstance(schema, Schema):\n raise exceptions.FormatError(\n \"Expected Schema, but\" \" got \" + repr(schema)\n )\n self._schema = schema\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n self._schema.check_match(object)\n\n\nclass Object(Schema):\n \"\"\"\n \n Matches a dict from specified keys to key-specific types. Unrecognized\n keys are allowed. The Object() schema outlines how a dictionary should\n look, such as the names for dict keys and the object type of the dict\n values. See schema.Optional() to learn how Object() incorporates optional\n sub-schemas.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = Object(a=AnyString(), bc=Struct([Integer(), Integer()]))\n >>> schema.matches({'a':'ZYYY', 'bc':[5,9]})\n True\n >>> schema.matches({'a':'ZYYY', 'bc':[5,9], 'xx':5})\n True\n >>> schema.matches({'a':'ZYYY', 'bc':[5,9,3]})\n False\n >>> schema.matches({'a':'ZYYY'})\n False\n \"\"\"\n\n def __init__(self, object_name=\"object\", **required):\n \"\"\"\n \n Create a new Object schema.\n\n \n object_name: A string identifier for the object argument.\n\n A variable number of keyword arguments is accepted.\n \"\"\"\n\n # Ensure valid arguments.\n for schema in required.values():\n if not isinstance(schema, Schema):\n raise exceptions.FormatError(\n \"Expected Schema but\" \" got \" + repr(schema)\n )\n\n self._object_name = object_name\n self._required = list(required.items())\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, dict):\n raise exceptions.FormatError(\n \"Wanted a \" + repr(self._object_name) + \".\"\n )\n\n # (key, schema) = (a, AnyString()) = (a=AnyString())\n for key, schema in self._required:\n # Check if 'object' has all the required dict keys. If not one of the\n # required keys, check if it is an Optional().\n try:\n item = object[key]\n\n except KeyError:\n # If not an Optional schema, raise an exception.\n if not isinstance(schema, Optional):\n raise exceptions.FormatError( # pylint: disable=raise-missing-from\n \"Missing key \"\n + repr(key)\n + \" in \"\n + repr(self._object_name)\n )\n\n # Check that 'object's schema matches Object()'s schema for this\n # particular 'key'.\n else:\n try:\n schema.check_match(item)\n\n except exceptions.FormatError as e:\n raise exceptions.FormatError(\n str(e) + \" in \" + self._object_name + \".\" + key\n )\n\n\nclass Struct(Schema):\n \"\"\"\n \n Matches a non-homogeneous list of items. The sub-schemas are allowed to\n vary. The object argument must be a sequence type (e.g., a list, tuple,\n etc.). There is also an option to specify that additional schemas not\n explicitly defined at instantiation are allowed. 
See __init__() for the\n complete list of arguments accepted.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = Struct([ListOf(AnyString()), AnyString(), String('X')])\n >>> schema.matches(False)\n False\n >>> schema.matches('Foo')\n False\n >>> schema.matches([[], 'Q', 'X'])\n True\n >>> schema.matches([[], 'Q', 'D'])\n False\n >>> schema.matches([[3], 'Q', 'X'])\n False\n >>> schema.matches([[], 'Q', 'X', 'Y'])\n False\n >>> schema = Struct([String('X')], allow_more=True)\n >>> schema.matches([])\n False\n >>> schema.matches(['X'])\n True\n >>> schema.matches(['X', 'Y'])\n True\n >>> schema.matches(['X', ['Y', 'Z']])\n True\n >>> schema.matches([['X']])\n False\n >>> schema = Struct([String('X'), Integer()], [Integer()])\n >>> schema.matches([])\n False\n >>> schema.matches({})\n False\n >>> schema.matches(['X'])\n False\n >>> schema.matches(['X', 3])\n True\n >>> schema.matches(['X', 3, 9])\n True\n >>> schema.matches(['X', 3, 9, 11])\n False\n >>> schema.matches(['X', 3, 'A'])\n False\n \"\"\"\n\n def __init__(\n self,\n sub_schemas,\n optional_schemas=None,\n allow_more=False,\n struct_name=\"list\",\n ):\n \"\"\"\n \n Create a new Struct schema.\n\n \n sub_schemas: The sub-schemas recognized.\n optional_schemas: Optional list. If none is given, it will be \"[]\".\n allow_more: Specifies that an optional list of types is allowed.\n struct_name: A string identifier for the Struct object.\n \"\"\"\n\n if optional_schemas is None:\n optional_schemas = []\n\n # Ensure each item of the list contains the expected object type.\n if not isinstance(sub_schemas, (list, tuple)):\n raise exceptions.FormatError(\n \"Expected Schema but got \" + repr(sub_schemas)\n )\n\n for schema in sub_schemas:\n if not isinstance(schema, Schema):\n raise exceptions.FormatError(\n \"Expected Schema but\" \" got \" + repr(schema)\n )\n\n self._sub_schemas = sub_schemas + optional_schemas\n self._min = len(sub_schemas)\n self._allow_more = allow_more\n self._struct_name = struct_name\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, (list, tuple)):\n raise exceptions.FormatError(\n \"Expected \"\n + repr(self._struct_name)\n + \"; but got \"\n + repr(object)\n )\n\n if len(object) < self._min:\n raise exceptions.FormatError(\n \"Too few fields in \" + self._struct_name\n )\n\n if len(object) > len(self._sub_schemas) and not self._allow_more:\n raise exceptions.FormatError(\n \"Too many fields in \" + self._struct_name\n )\n\n # Iterate through the items of 'object', checking against each schema in\n # the list of schemas allowed (i.e., the sub-schemas and also any optional\n # schemas. The lenth of 'object' must be less than the length of the\n # required schemas + the optional schemas. However, 'object' is allowed to\n # be only as large as the length of the required schemas. In the while\n # loop below, we check against these two cases.\n index = 0\n while index < len(object) and index < len(self._sub_schemas):\n item = object[index]\n schema = self._sub_schemas[index]\n schema.check_match(item)\n index = index + 1\n\n\nclass RegularExpression(Schema):\n \"\"\"\n \n Matches any string that matches a given regular expression. The RE pattern\n set when RegularExpression is instantiated must not be None. 
See\n __init__() for a complete list of accepted arguments.\n\n Supported methods include:\n matches(): returns a Boolean result.\n check_match(): raises 'exceptions.FormatError' on a mismatch.\n\n \n >>> schema = RegularExpression('h.*d')\n >>> schema.matches('hello world')\n True\n >>> schema.matches('Hello World')\n False\n >>> schema.matches('hello world!')\n False\n >>> schema.matches([33, 'Hello'])\n False\n \"\"\"\n\n def __init__(self, pattern=None, modifiers=0, re_object=None, re_name=None):\n \"\"\"\n \n Create a new regular expression schema.\n\n \n pattern: The pattern to match, or None if re_object is provided.\n modifiers: Flags to use when compiling the pattern.\n re_object: A compiled regular expression object.\n re_name: Identifier for the regular expression object.\n \"\"\"\n\n if not isinstance(pattern, str):\n if pattern is not None:\n raise exceptions.FormatError(\n repr(pattern) + \" is not a string.\"\n )\n\n if re_object is None:\n if pattern is None:\n raise exceptions.FormatError(\n \"Cannot compare against an unset regular expression\"\n )\n\n if not pattern.endswith(\"$\"):\n pattern += \"$\"\n re_object = re.compile(pattern, modifiers)\n self._re_object = re_object\n\n if re_name is None:\n if pattern is not None:\n re_name = \"pattern /\" + pattern + \"/\"\n\n else:\n re_name = \"pattern\"\n self._re_name = re_name\n\n def check_match(self, object): # pylint: disable=redefined-builtin\n if not isinstance(object, str) or not self._re_object.match(object):\n raise exceptions.FormatError(\n repr(object) + \" did not match \" + repr(self._re_name)\n )\n\n\nif __name__ == \"__main__\":\n # The interactive sessions of the documentation strings can\n # be tested by running schema.py as a standalone module.\n # python -B schema.py.\n import doctest\n\n doctest.testmod()\n","repo_name":"secure-systems-lab/securesystemslib","sub_path":"securesystemslib/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":30667,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"55"}
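The schema classes in the record above all follow the same two-method contract: `check_match()` raises `exceptions.FormatError` on a mismatch and `matches()` reports a boolean. The `Schema` base class itself falls outside this excerpt, so here is a minimal self-contained sketch of that contract; the `FormatError` stand-in and the trimmed `Integer` are illustrative, not the library's own code.

```python
# Minimal sketch of the matches()/check_match() contract the classes above
# implement. FormatError stands in for securesystemslib.exceptions.FormatError.
class FormatError(Exception):
    pass


class Schema:
    def matches(self, obj):
        # Boolean front-end: delegate to check_match() and swallow the error.
        try:
            self.check_match(obj)
            return True
        except FormatError:
            return False

    def check_match(self, obj):
        raise NotImplementedError


class Integer(Schema):
    """Trimmed-down version of the Integer schema shown above."""

    def __init__(self, lo=-(2**31), hi=2**31 - 1):
        self._lo, self._hi = lo, hi

    def check_match(self, obj):
        # bool is a subtype of int, so reject it explicitly.
        if isinstance(obj, bool) or not isinstance(obj, int):
            raise FormatError("Got " + repr(obj) + " instead of an integer.")
        if not (self._lo <= obj <= self._hi):
            raise FormatError(repr(obj) + " out of range.")


assert Integer().matches(99) is True
assert Integer().matches(True) is False
```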
+{"seq_id":"72141938093","text":"#Game to guess a random number\n\ndef numbergame():\n\tmagic_number = 7\n\tuser_number = input(\"Pick a number between 1-10: \")\n\tif int(user_number)== magic_number:\n\t\tprint(\"You won! You picked {}\".format(user_number))\n\telse:\n\t\tprint(\"You picked {}, you lost! Play again\".format(user_number))\n # Run again if user lost\n\t\tnumbergame()\n\nnumbergame()\n","repo_name":"franciscorafart/guessing_game_python","sub_path":"guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"72469840490","text":"import asyncio\nimport random\nimport time\nimport discord\nfrom discord.ext import commands\nimport json\nimport os\nimport openai\nimport requests\n\nfrom promt_creation import get_prompt, get_art_styles\nfrom receipt_creation import image_creation\nfrom api_secrets import *\nfrom logger import Logger\n\nclass ConversationHandler():\n \n \n def __init__(self, user, bot_name , init_prompt = None, conversation = None, author = None):\n self.user = user\n self.bot_name = bot_name\n self.dir_path = f\"{self.bot_name}_conversations\"\n self.file_path = os.path.join(self.dir_path, f\"{self.user}.json\")\n self.init_prompt = init_prompt\n self.author = author\n self.base_prompt = {\"role\": \"system\", \"content\": self.init_prompt}\n \n if not conversation is None:\n self.conversation = conversation\n else:\n try:\n self.checkDir()\n self.fetchConversation()\n except FileNotFoundError:\n self.conversation = [self.base_prompt]\n \n \n def awaitingResponse(self):\n return self.conversation[-1][\"role\"] == \"user\"\n \n def updateGPT(self, message):\n self.conversation.append({\"role\": \"assistant\", \"content\": message})\n \n def updateUser(self, message):\n self.conversation.append({\"role\": \"user\", \"content\": message})\n \n def appendUserMessage(self, message:str):\n self.conversation[-1][\"content\"] + \"\\n\" + message\n \n def checkDir(self):\n try:\n os.mkdir(self.dir_path)\n except FileExistsError:\n return\n \n def writeConversation(self):\n with open(self.file_path, \"w\") as f:\n f.write(json.dumps(self.conversation))\n \n def saveConversation(self):\n for i in range(100):\n if os.path.exists(os.path.join(self.dir_path, f\"{self.user}_{i}.json\")):\n continue\n else:\n with open(os.path.join(self.dir_path, f\"{self.user}_{i}.json\"), \"w\") as f:\n f.write(json.dumps(self.conversation))\n break\n \n def fetchConversation(self):\n if os.path.exists(self.file_path):\n with open(self.file_path, \"r\") as f:\n self.conversation = json.loads(f.read())\n else: \n raise FileNotFoundError\n\n def deleteConversation(self):\n self.saveConversation()\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n else: raise FileNotFoundError\n \n def listConversations(bot_name : str):\n return os.listdir(f\"{bot_name}_conversations\")\n\n def loadConversation(name : str, number, bot_name):\n dir_path = f\"{bot_name}_conversations\"\n if number == None:\n if os.path.exists(os.path.join(dir_path, f\"{name}.json\")):\n with open(os.path.join(dir_path, f\"{name}.json\"), \"r\") as f:\n return json.loads(f.read())\n else: \n raise FileNotFoundError\n else:\n if os.path.exists(os.path.join(dir_path, f\"{name}_{number}.json\")):\n with open(os.path.join(dir_path, f\"{name}_{number}.json\"), \"r\") as f:\n return json.loads(f.read())\n else: \n raise FileNotFoundError\n \n def saveMedia(name : str, medias):\n dir_path = f\"{name}_media\"\n try:\n os.makedirs(dir_path, exist_ok=True) # Create directory if it doesn't exist\n except OSError as e:\n print(f\"Error creating directory: {e}\")\n return\n\n for media in medias:\n file_path = os.path.join(dir_path, media.filename)\n if not os.path.exists(file_path):\n try:\n r = requests.get(media.url, allow_redirects=True)\n with open(file_path, 'wb') as file:\n file.write(r.content)\n except Exception as e:\n print(f\"Error saving media: {e}\")\n else:\n i = 0\n while True:\n filename :str = media.filename\n filename_split = filename.split(\".\")\n new_name = \"\"\n for j in range(len(filename_split)-1):\n new_name 
+= filename_split[j]\n new_name += f\"_{i}.{filename_split[-1]}\"\n file_path = os.path.join(dir_path, new_name)\n if not os.path.exists(file_path):\n try:\n r = requests.get(media.url, allow_redirects=True)\n with open(file_path, 'wb') as file:\n file.write(r.content)\n break\n except Exception as e:\n print(f\"Error saving media: {e}\")\n break\n i+=1\n \n\nclass QueueItem():\n message : discord.Message\n timestamp : float\n \n def __init__(self, message: discord.Message) -> None:\n self.message = message\n self.timestamp = time.time()\n\nclass GPTBot():\n 'TODO get type defs going'\n queue: asyncio.Queue\n \n \n def __init__(self, bot_token = None, gpt_api_key = None, bot_name = None,\n channel_id = None, guild_id = None,\n streamer_name = None, timer_duration = 300, art_styles = None, \n test_mode = False, temperature = 0.7, max_tokens = 256, \n use_test_prompt = False, commands_enabled = True, admin_pw = None,\n stream_link = None, debug = False, model = \"gpt-3.5-turbo\"):\n self.conversations = []\n self.channel_id = channel_id\n self.guild_id = guild_id\n self.commands_enabled = commands_enabled\n self.__admin_pw = admin_pw\n self.bot = None\n self.debug = debug\n self.commands = {\n \"!delete_conv\": {\n \"perm\": 5,\n \"help\": \"!delete_conv: Deletes this Conversation from bot Memory\",\n \"value_type\": None,\n \"func\": self.del_conv\n },\n \"!load_conv\": {\n \"perm\": 10,\n \"help\": '!load_conv \"user\" [\"number\"]: Loads specific Conversation',\n \"value_type\": [str,int],\n \"func\": self.load_conv\n },\n \"!list_conv\": {\n \"perm\": 10,\n \"help\": \"!list_conv: Lists availabe conversations\",\n \"value_type\": None,\n \"func\": self.list_conv\n },\n \"!get_config\": {\n \"perm\": 5,\n \"help\": \"!get_config: returns current configuration\",\n \"value_type\": None,\n \"func\": self.get_config\n },\n \"!repeat_conv\": {\n \"perm\": 5,\n \"help\": \"!repeat_conv: repeats current conversation WARNING: might be a lot! 
will return nothing when conversation is not in memory\",\n \"value_type\": None,\n \"func\": self.repeat_conv\n },\n \"!toggle_testmode\":{\n \"perm\": 10,\n \"help\": \"!toggle_testmode: toggles testmode for shorter response time.\",\n \"value_type\": None,\n \"func\": self.toggle_test_mode\n }, \n \"!set_temperature\": {\n \"perm\": 10,\n \"help\": \"!set_temperature value: Changes temperature\",\n \"value_type\": float,\n \"func\": self.set_temp\n },\n \"!set_max_token\": {\n \"perm\": 10,\n \"help\": \"!set_max_token value: sets the maximal Amount of Tokens used\",\n \"value_type\": int,\n \"func\": self.set_max_tokens\n },\n \"!set_delay\": {\n \"perm\": 10,\n \"help\": \"!set_delay value: will set minimum reply delay\",\n \"value_type\": int,\n \"func\": self.set_delay\n },\n \"!toggle_test_prompt\": {\n \"perm\": 10,\n \"help\": \"!toggle_test_prompt: toggles usage of a test prompt\",\n \"value_type\": None,\n \"func\": self.toggle_test_prompt\n },\n \"!get_init_prompt\": {\n \"perm\": 15,\n \"help\": \"!get_init_prompt: returns initial prompt of this conversation\",\n \"value_type\": None,\n \"func\": self.get_init_prompt\n },\n \"!command_help\":{\n \"perm\": 1,\n \"help\": \"!command_help: returns all available commands\",\n \"value_type\": None,\n \"func\": self.help\n },\n \"!disable_commands\":{\n \"perm\": 15,\n \"help\": \"!disable_commands passwort: disables all commands until restart, passwort is set in api_secrets.py\",\n \"value_type\": str,\n \"func\": self.disable_commands\n },\n \"!del_specific\":{\n \"perm\":10,\n \"help\":'!del_specific \"user\": deletes conversation log of specific user from memory',\n \"value_type\": str,\n \"func\": self.del_specific_conv\n },\n \"!shutdown\":{\n \"perm\":15,\n \"help\":\"!shutdown: shutsdown this bot\",\n \"value_type\": None,\n \"func\": self.shutdown\n },\n \"!save_all\":{\n \"perm\":10,\n \"help\":\"!save_all: saves all on going conversations\",\n \"value_type\": None,\n \"func\": self.save_all\n },\n \"!get_msg_log\":{\n \"perm\": 10,\n \"help\": \"!get_msg_log user_id: Returns all DMs by user\",\n \"value_type\": int,\n \"func\": self.getMessageLog\n },\n \"!force_resend\":{\n \"perm\": 10,\n \"help\": '!force_resend \"name\" [\"message\"]: Tries to send last message or specified message',\n \"value_type\": str,\n \"func\": self.resendMsg\n },\n \"!load_author\":{\n \"perm\": 10,\n \"help\": \"!load_author user_id: Tries to load author by ID, load conversation first!\",\n \"value_type\": int,\n \"func\": self.loadAuthor\n },\n \"!clear_memory\":{\n \"perm\": 10,\n \"help\": \"!clear_memory: clears conversations from memory, while retaining .jsons unchanged\",\n \"value_type\": None,\n \"func\": self.clearMemory\n },\n \"!ban\":{\n \"perm\": 15,\n \"help\": \"!ban user_id: Bans user_id from interacting with bot\",\n \"value_type\": int,\n \"func\": self.ban\n },\n \"!unban\":{\n \"perm\": 15,\n \"help\": \"!unban user_id: Unbans user_id from interacting with bot\",\n \"value_type\": int,\n \"func\": self.unban\n },\n \"!whitelist\":{\n \"perm\": 15,\n \"help\": '!whitelist \"user\" \"value\": whitelists user with permission value 1-15, to deactivate commands set value to 0',\n \"value_type\": [str, int],\n \"func\": self.whitelist\n },\n \"!reload_whitelist\":{\n \"perm\": 15,\n \"help\": \"!reload_whitelist: reloads whitelist from disk\",\n \"value_type\": None,\n \"func\": self.reload_whitelist\n },\n \"!reload_blacklist\":{\n \"perm\": 15,\n \"help\": \"!reload_blacklist: reloads blacklist from disk\",\n \"value_type\": 
None,\n \"func\": self.reload_blacklist\n },\n \"!init_conv\":{\n \"perm\": 10,\n \"help\": '!init_conv \"user\" \"id\" \"message\": Initializes conversation with message to user with id',\n \"value_type\": [str, int, str],\n \"func\": self.init_conv\n },\n \"!fake_receipt\":{\n \"perm\": 10,\n \"help\": '!fake_receipt \"user\" \"id\" \"store name\" \"amount\": Fakes a PayPal receipt for the given Store name and Amount (Currently only in german)',\n \"value_type\": [str, int, str, int],\n \"func\": self.fake_receipt\n }\n \n \n }\n self.__bot_token = bot_token\n self.authors = []\n self.logger = Logger(True, True)\n if self.__admin_pw == None:\n self.logger.error(\"No admin password provided, you will not be able to disable commands!\")\n openai.api_key = gpt_api_key\n self.MODEL_NAME = model\n self.use_test_prompt = use_test_prompt \n self.streamer_name = streamer_name\n if art_styles == None:\n art_styles = get_art_styles()\n self.art_styles = art_styles\n self.init_prompt = get_prompt(bot_name, streamer_name, self.art_styles, use_test_prompt, stream_link)\n self.conv_data = \"something\"\n self.test_mode = test_mode \n self.temperature = temperature \n self.max_tokens = max_tokens\n self.bot_name = bot_name\n self.timer_duration = timer_duration\n loaded_black = self.load_blacklist()\n self.black_list = loaded_black\n loaded_white = self.load_whitelist()\n self.white_list = loaded_white\n self.threads = self.loadThreads()\n self.tasks = {} \n self.queue = asyncio.Queue()\n \n \n '''Utility methods'''\n \n \n async def unpackMessage(self, message_object):\n files = []\n attachments = message_object.attachments\n \n for a in attachments:\n files.append(await a.to_file())\n return message_object.content, message_object.author, files\n \n async def handleThread(self, author):\n user = author.name\n thread_id= None\n for thread in self.threads:\n if user in thread.keys():\n thread_id = thread[user][\"thread_id\"]\n if thread_id is None: \n thread = await self.createThread(author)\n thread_id = thread.id\n return thread_id\n \n async def collectMessage(self,message, author, sender, files = None):\n user = author.name\n for conversation in self.conversations:\n if conversation.user == user:\n if conversation.author == None:\n conversation.author = author\n if sender == \"gpt\":\n self.logger.chatReply(user, self.bot_name, message)\n thread_id = await self.handleThread(author)\n await self.replyToThread(thread_id, message, files, sender)\n conversation.updateGPT(message)\n conversation.writeConversation()\n return\n else:\n self.logger.userReply(user, message)\n thread_id = await self.handleThread(author)\n await self.replyToThread(thread_id, message, files, author)\n conversation.updateUser(message)\n conversation.writeConversation()\n return\n if sender == \"gpt\":\n newConv = ConversationHandler(user, self.bot_name, init_prompt=self.init_prompt, author = author)\n self.conversations.append(newConv)\n self.logger.chatReply(user, self.bot_name, message)\n thread_id = await self.handleThread(author)\n await self.replyToThread(thread_id, message, files, author)\n newConv.updateGPT(message)\n newConv.writeConversation()\n return\n newConv = ConversationHandler(user, self.bot_name, init_prompt=self.init_prompt, author = author)\n newConv.updateUser(message)\n newConv.writeConversation()\n self.conversations.append(newConv)\n self.logger.userReply(user, message)\n thread_id = await self.handleThread(author)\n await self.replyToThread(thread_id, message, files, author)\n \n async def 
messageHandler(self, message):\n user_prompt, author, files = await self.unpackMessage(message)\n name = author.name\n if f\"{author.id}\" in self.black_list:\n await author.send(\"You have no power here!\")\n return\n if await self.check_command(message):\n return\n media_amount = len(files)\n if media_amount > 0:\n ConversationHandler.saveMedia(name,message.attachments)\n filenames = \"\"\n for m in files:\n filenames += m.filename +\", \"\n user_prompt = f\"[{media_amount} amazing Media Attachments, namely:{filenames}]\\n\" + user_prompt\n await self.collectMessage(user_prompt, author, \"user\", files)\n for conversation in self.conversations:\n if conversation.user == name:\n conversation.appendUserMessage(user_prompt)\n if not conversation.awaitingResponse():\n return\n\n await self.queue.put(QueueItem(message))\n await self.gpt_sending()\n\n async def gpt_sending(self):\n \n q= await self.queue.get()\n author = q.message.author\n for conversation in self.conversations:\n if conversation.user == author.name:\n \n messages = [] #Kinda useless but also nice\n message = \"\"\n \n reversed_conv = conversation.conversation.copy()\n reversed_conv.reverse()\n for c in reversed_conv:\n if c[\"role\"] == \"user\":\n messages.append(c[\"content\"])\n else:\n break\n messages.reverse()\n for m in messages:\n message += f\"\\n{m}\"\n message_lenght = len(message)\n age = int(time.time()-q.timestamp)\n \n \n if not self.test_mode:\n async with author.typing():\n if not age > self.timer_duration:\n await asyncio.sleep(random.randint(self.timer_duration - age,self.timer_duration + message_lenght - age)) #wait for further messages\n else:\n async with author.typing():\n await asyncio.sleep(5)\n else: \n async with author.typing():\n await asyncio.sleep(5)\n messages= conversation.conversation\n if len(messages) > 20:\n old = messages\n messages = [old[0]]\n for m in old[-20:]:\n messages.append(m)\n if conversation.awaitingResponse():\n response = openai.ChatCompletion.create(\n model=self.MODEL_NAME,\n messages= messages,\n max_tokens=self.max_tokens, # maximal amout of tokens, one token roughly equates to 4 chars\n temperature=self.temperature, # control over creativity\n n=1, # amount of answers\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0 \n )\n\n # Die Antwort aus der response extrahieren\n response_message = response['choices'][0]['message']\n reply = response_message['content']\n # Die Antwort an den Absender der DM zurückschicken\n if conversation.awaitingResponse():\n await self.collectMessage(reply,author ,\"gpt\")\n if self.debug:\n self.logger.info(f\"Reply: {reply}\")\n else:\n await author.send(reply)\n self.queue.task_done()\n return\n \n async def gpt_sending_user(self,author):\n user = author.name\n for conversation in self.conversations:\n if conversation.user == user:\n if not conversation.awaitingResponse():\n return\n messages= conversation.conversation\n if len(messages) > 20:\n old = messages\n messages = [old[0]]\n for m in old[-20:]:\n messages.append(m)\n \n response = openai.ChatCompletion.create(\n model=self.MODEL_NAME,\n messages= messages,\n max_tokens=self.max_tokens, # maximal amout of tokens, one token roughly equates to 4 chars\n temperature=self.temperature, # control over creativity\n n=1, # amount of answers\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0 \n ) \n # Die Antwort aus der response extrahieren\n response_message = response['choices'][0]['message']\n reply = response_message['content']\n # Die Antwort an den Absender der DM 
zurückschicken\n await self.collectMessage(reply,author ,\"gpt\")\n if self.debug:\n self.logger.info(f\"Reply: {reply}\")\n else:\n await author.send(reply)\n \n async def replyToThread(self, thread_id, message, files = None, sender = None):\n channel = self.bot.get_channel(self.channel_id)\n if channel:\n thread = None\n for existing_thread in channel.threads:\n if existing_thread.id == thread_id:\n thread = existing_thread\n break\n\n if thread:\n reply = \"\"\n if sender == \"gpt\":\n reply = f\"{self.bot_name} replys: {message}\"\n else:\n reply = f\"{sender.name} says: {message}\"\n if files is None:\n len_files = 0\n else:\n len_files = len(files)\n self.logger.passing(f'Send reply: \"{reply}\" with {len_files} files')\n await thread.send(reply, files=files)\n else:\n self.logger.fail(f\"Thread with {thread_id} not found in channel {self.channel_id} of guild {self.guild_id}\")\n \n async def check_command(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n \n if not self.commands_enabled:\n return False\n reply = None\n try:\n for command, value in self.commands.items():\n if message.startswith(command) and int(self.white_list[author.name]) > 15:\n self.logger.warning(f\"{author.name} invoked {command} with too much permissions\")\n reply = \"Bruh\"\n elif message.startswith(command) and int(self.white_list[author.name]) >= value[\"perm\"]:\n reply = await value[\"func\"](message_object)\n elif message.startswith(command) and int(self.white_list[author.name]) < value[\"perm\"]:\n reply = f\"I'm sorry {author.name}. I'm afraid can't do that.\"\n self.logger.warning(f\"{author.name} invoked {command} without neccessary permissions\")\n elif self.white_list[author.name] == \"0\":\n self.logger.warning(f\"{author.name} invoked {command} with 0 permissions\")\n return True\n if message.startswith(\"!\") and reply == None:\n reply = \"Unknown Command.\"\n except KeyError:\n return False\n if not reply == None:\n await author.send(reply)\n return True\n \n \n return False\n \n def handleArgs(self, message:str):\n message_splits = message.split(sep=\" \")\n handling_name = False\n handling_value = False\n name = \"\"\n value = \"\"\n values = []\n for s in message_splits:\n if s.endswith('\"') and handling_value:\n value +=\" \" + s.replace('\"', '')\n values.append(value)\n value = \"\"\n continue\n elif s.startswith('\"') and handling_name:\n handling_value = True\n value += s.replace('\"', '')\n if s.endswith('\"'):\n values.append(value)\n value = \"\"\n continue\n elif handling_value:\n value += \" \"+s\n elif handling_name:\n if s.endswith('\"'):\n name +=\" \" + s.replace('\"', '')\n continue\n name += \" \"+s\n elif s.startswith('\"') and not handling_value:\n handling_name = True\n if s.endswith('\"'):\n name = s.replace('\"', '')\n continue\n name += s.replace('\"', '')\n continue\n \n \n return name, values\n \n def load_blacklist(self):\n if os.path.exists(f\"blacklist_{self.bot_name}.json\"):\n with open(f\"blacklist_{self.bot_name}.json\", \"r\") as f:\n return json.loads(f.read())\n else: \n self.logger.fail(\"Couldn't load Blacklist\")\n return {}\n \n def write_blacklist(self):\n with open(f\"blacklist_{self.bot_name}.json\", \"w\") as f:\n f.write(json.dumps(self.black_list))\n \n def load_whitelist(self):\n if os.path.exists(f\"whitelist_{self.bot_name}.json\"):\n with open(f\"whitelist_{self.bot_name}.json\", \"r\") as f:\n return json.loads(f.read())\n else: \n self.logger.fail(\"Couldn't load Whitelist\")\n return {}\n \n def 
write_whitelist(self):\n with open(f\"whitelist_{self.bot_name}.json\", \"w\") as f:\n f.write(json.dumps(self.white_list))\n\n def clcMem(self):\n for c in self.conversations:\n del self.conversations[self.conversations.index(c)]\n self.logger.error(\"Memory cleared\") \n \n def loadThreads(self):\n if os.path.exists(f\"threads_{self.bot_name}.json\"):\n with open(f\"threads_{self.bot_name}.json\", \"r\") as f:\n return json.loads(f.read())\n else: \n return []\n \n def writeThreads(self):\n with open(f\"threads_{self.bot_name}.json\", \"w\") as f:\n f.write(json.dumps(self.threads))\n \n def runBot(self):\n intents = discord.Intents.default()\n intents.messages = True\n intents.guilds = True\n bot = discord.Client(intents=intents) \n \n self.bot = bot\n \n @bot.event\n async def on_ready():\n self.logger.passing(f\"Logged in as {bot.user.name}, given name: {self.bot_name}\")\n \n\n @bot.event\n async def on_message(message):\n if isinstance(message.channel, discord.DMChannel) and message.author != bot.user:\n await self.messageHandler(message)\n bot.run(self.__bot_token)\n \n async def createThread(self, author):\n channel = self.bot.get_channel(self.channel_id)\n if channel:\n name = author.name\n id = author.id\n thread = await channel.create_thread(name=f\"{name}\", type= discord.ChannelType.public_thread)\n self.threads.append({\n name : {\n \"author_id\":id,\n \"thread_id\":thread.id\n }\n })\n self.writeThreads()\n return thread \n \n '''Command Methods'''\n \n \n async def help(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n self.logger.warning(f\"{author.name} asked for help.\")\n reply = \"Available Commands: \\n\"\n for command, value in self.commands.items():\n if int(self.white_list[author.name]) >= value[\"perm\"]:\n reply += value[\"help\"] + \"\\n\"\n self.logger.info(reply)\n return reply\n \n async def ban(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n splits = message.split(\" \")\n if len(splits) <2:\n reply = \"no user_id provided\"\n else:\n self.black_list.append(splits[1])\n self.write_blacklist()\n self.logger.warning(f\"{author.name} banned user with id {splits[1]}\")\n reply = f\"user with id {splits[1]} is now banned\"\n \n return reply\n \n async def unban(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n splits = message.split(\" \")\n if len(splits) <2:\n reply = \"no user_id provided\"\n else:\n del self.black_list[self.black_list.index(splits[1])]\n self.write_blacklist()\n self.logger.warning(f\"{author.name} unbanned user with id {splits[1]}\")\n reply = f\"user with id {splits[1]} is now unbanned\"\n \n return reply\n \n async def reload_blacklist(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n \n self.black_list = self.load_blacklist()\n self.logger.warning(f\"{author.name} reloaded blacklist with values {self.black_list}\")\n return f\"Blacklist loaded with values {self.black_list}\"\n \n async def whitelist(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n splits = message.split(\" \")\n if len(splits) <3:\n reply = \"no user and/or value provided\"\n else:\n name, value = self.handleArgs(message)\n self.white_list.update({name: int(value[0])})\n self.write_whitelist()\n self.logger.warning(f\"{author.name} whitelisted {name} with {value}\")\n reply = f\"{name} is now whitelisted with 
{value}"\n\n \n return reply\n \n async def reload_whitelist(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n \n self.white_list = self.load_whitelist()\n self.logger.warning(f\"{author.name} reloaded whitelist with values {self.white_list}\")\n return f\"Whitelist loaded with values {self.white_list}\"\n \n async def init_conv(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n name, values = self.handleArgs(message)\n self.logger.warning(f\"{author.name} tries to initialize conversation\")\n \n if len(values) >= 2:\n await self.load_conv(message_object, force=True)\n await self.loadAuthor(message_object, id = values[0])\n send = values[1]\n await self.resendMsg(message_object)\n reply = f\"Initialized conversation with {name} with id {values[0]}.\\nMessage is: {send}\"\n else:\n reply = \"Too little information to initialize conversation\"\n self.logger.info(reply)\n return reply\n \n async def del_conv(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n found_conv = False\n for conversation in self.conversations:\n if conversation.user == author.name:\n found_conv = True\n self.logger.warning(f\"Clearing Message Log for {author.name}\")\n conversation.saveConversation()\n conversation.deleteConversation()\n del self.conversations[self.conversations.index(conversation)]\n reply = \"Conversation deleted\"\n break\n if not found_conv:\n conversation = ConversationHandler(author.name, self.bot_name, init_prompt=self.init_prompt) \n self.logger.warning(f\"Clearing Message Log for {author.name}\")\n try:\n conversation.deleteConversation()\n reply = \"Conversation deleted\"\n except FileNotFoundError:\n reply = \"Conversation does not exist\"\n self.logger.info(reply)\n return reply\n \n async def del_specific_conv(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n found_conv = False\n name, values = self.handleArgs(message)\n for conversation in self.conversations:\n if conversation.user == name:\n found_conv = True\n self.logger.warning(f\"Clearing Message Log for {name}, requested by: {author.name}\")\n conversation.deleteConversation()\n del self.conversations[self.conversations.index(conversation)]\n reply = \"Conversation deleted\"\n break\n if not found_conv:\n conversation = ConversationHandler(name, self.bot_name, init_prompt=self.init_prompt) \n self.logger.warning(f\"Clearing Message Log for {name}, requested by: {author.name}\")\n try:\n conversation.deleteConversation()\n reply = \"Conversation deleted\"\n except FileNotFoundError:\n reply = \"Conversation does not exist\"\n self.logger.info(reply)\n return reply\n \n async def load_conv(self, message_object, force = False):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n name, values = self.handleArgs(message)\n if len(values) == 0 and int(self.white_list[author.name]) >= self.commands[\"!load_conv\"][\"perm\"]:\n self.logger.warning(f\"{author.name} loading conversation {name}\")\n try:\n for conversation in self.conversations:\n if conversation.user == name:\n conversation.saveConversation()\n del self.conversations[self.conversations.index(conversation)]\n loadedConv = ConversationHandler.loadConversation(name, None, self.bot_name)\n newConv = ConversationHandler(author.name, self.bot_name, conversation = loadedConv)\n self.conversations.append(newConv)\n loadedConv = ConversationHandler(name, self.bot_name, conversation = loadedConv)\n self.conversations.append(loadedConv)\n reply = \"Loaded conversation\"\n except FileNotFoundError:\n if force:\n loadedConv = ConversationHandler(name, self.bot_name, init_prompt=self.init_prompt)\n self.conversations.append(loadedConv)\n reply = f\"Fake loaded conversation {name}\"\n else:\n reply = f\"Conversation {name} not found\"\n elif len(values) == 0 and int(self.white_list[author.name]) < self.commands[\"!load_conv\"][\"perm\"]:\n self.logger.warning(f\"{author.name} tried loading conversation {name}, without necessary permission\")\n reply = \"Please provide a conversation number.\"\n elif len(values) >= 1:\n self.logger.warning(f\"{author.name} loading conversation {name}_{values[0]}\")\n try:\n for conversation in self.conversations:\n if conversation.user == author.name:\n conversation.saveConversation()\n del self.conversations[self.conversations.index(conversation)]\n loadedConv = ConversationHandler.loadConversation(name, values[0], self.bot_name)\n newConv = ConversationHandler(author.name, self.bot_name, conversation = loadedConv)\n self.conversations.append(newConv)\n loadedConv = ConversationHandler(name, self.bot_name, conversation = loadedConv)\n self.conversations.append(loadedConv)\n reply = \"Loaded conversation\"\n except FileNotFoundError:\n if force:\n loadedConv = ConversationHandler(name, self.bot_name, init_prompt=self.init_prompt)\n self.conversations.append(loadedConv)\n reply = f\"Fake loaded conversation {name}\"\n else:\n reply = f\"Conversation {name}_{values[0]} not found\"\n \n else:\n reply = 'Command usage is !load_conv \"user\" \"number\"'\n self.logger.info(reply)\n \n return reply\n \n async def list_conv(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n self.logger.warning(f\"{author.name} listed all conversations\")\n reply = ConversationHandler.listConversations(self.bot_name)\n if reply is None:\n reply = \"No conversations Found\"\n self.logger.info(reply)\n return reply\n \n async def repeat_conv(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n splits = message.split(\" \")\n name = author.name\n if len(splits) >= 2:\n name = splits[1]\n for conv in self.conversations:\n self.logger.warning(f\"{author.name} asked to get the conversation.\")\n if conv.user == author.name:\n replys = []\n for c in conv.conversation:\n if c[\"role\"] == \"system\":\n continue\n elif c[\"role\"] == \"user\":\n replys.append(f\"{name}: {c['content']}\")\n else:\n replys.append(f\"{self.bot_name}: {c['content']}\")\n for r in replys[:-1]:\n self.logger.info(r)\n await author.send(r)\n await asyncio.sleep(1)\n reply = replys[-1]\n self.logger.info(reply)\n if reply == \"\":\n reply = \"Found trailing data, report to Admin\"\n self.logger.error(reply)\n if reply is None:\n reply = \"No conversation found\"\n self.logger.warning(reply)\n return reply\n \n async def loadAuthor(self, message_object, id = None):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n found_author = False\n parts = message.split(\" \")\n if id is not None:\n parts = [parts[0], id]\n if len(parts) >= 2:\n self.logger.warning(f\"{author.name} requested to load author with ID {parts[1]}\")\n target_user = await self.bot.fetch_user(int(parts[1]))\n if target_user is None:\n reply = \"Loading author failed\"\n else:\n for c in self.conversations:\n if c.user == target_user.name:\n c.author = target_user\n found_author = True\n break\n if not found_author:\n reply = \"Author not found/Conversation not found\"\n else: reply = \"Author found and loaded\"\n \n elif len(parts) < 2:\n 
reply = \"No ID provided\"\n self.logger.warning(reply)\n return reply\n \n async def toggle_test_mode(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n self.logger.warning(f\"{author.name} toggled test_mode\")\n self.test_mode= not self.test_mode\n reply = f\"Test Mode is now: {self.test_mode}\"\n self.logger.info(reply)\n return reply\n \n async def toggle_test_prompt(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n self.logger.warning(f\"{author.name} toggled test_test_prompt\")\n self.use_test_prompt= not self.use_test_prompt\n self.init_prompt = get_prompt(self.bot_name, self.streamer_name, self.art_styles, self.use_test_prompt)\n reply = f\"use_test_prompt is now: {self.use_test_prompt}\"\n self.logger.info(reply)\n return reply\n\n async def set_temp(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n parts = message.split(sep=\" \")\n reply = None\n self.logger.warning(f\"{author.name} changed Temperature\")\n self.temperature = float(parts[1])\n reply = f\"Temparature is now: {self.temperature}\"\n self.logger.info(reply)\n return reply\n \n async def set_max_tokens(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n parts = message.split(sep=\" \")\n reply = None\n self.logger.warning(f\"{author.name} changed Max Tokens\")\n self.max_tokens = int(parts[1])\n reply = f\"Max_tokends is now: {self.max_tokens}\"\n self.logger.info(reply)\n return reply\n \n async def set_delay(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n parts = message.split(sep=\" \")\n reply = None\n self.logger.warning(f\"{author.name} changed delay\")\n self.timer_duration = int(parts[1])\n reply = f\"Minimum delay is now: {self.timer_duration}\"\n self.logger.info(reply)\n return reply\n \n async def get_config(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n \n reply = None\n self.logger.warning(f\"{author.name} requested settings\")\n replys = []\n replys.append(f\"Bot Name is: {self.bot_name}\")\n replys.append(f\"Model name is: {self.MODEL_NAME}\")\n replys.append(f\"Streamer name is: {self.streamer_name}\")\n replys.append(f\"Art Styles are: {self.art_styles}\")\n replys.append(f\"Temparature is: {self.temperature}\")\n replys.append(f\"min Delay is: {self.timer_duration}s\")\n replys.append(f\"Max Tokens is: {self.max_tokens}\")\n replys.append(f\"Test Mode is: {self.test_mode}\")\n replys.append(f\"use_test_prompt is: {self.use_test_prompt}\")\n for r in replys[:-1]:\n self.logger.info(r)\n await author.send(r)\n reply = replys[-1]\n self.logger.info(reply)\n return reply\n \n async def clearMemory(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = f\"{author.name} cleared memory\"\n self.logger.warning(f\"{author.name} cleared memory\")\n self.clcMem()\n return reply\n \n async def get_init_prompt(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\t\n self.logger.warning(f\"{author.name} asked for the prompt.\")\n for conv in self.conversations:\n if conv.user == author.name:\n prompt = conv.init_prompt\n splits = prompt.split(\"\\n\")\n for l in splits[:-1]:\n self.logger.info(l)\n await author.send(l)\n reply = splits[-1]\n if reply == None:\n reply = \"No prompt found\"\n self.logger.info(reply)\n return reply\n 
\n async def disable_commands(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n parts = message.split(sep=\" \")\n reply = None\n \n if len(parts) > 1:\n if parts[1] == self.__admin_pw:\n reply = \"DISABLED COMMANDS; THIS CAN NOT BE REVERTED WITHOUT A RESTART\"\n self.logger.error(reply)\n self.commands_enabled = False\n else:\n reply = \"The Password provided does not match, this event will be reported!\"\n self.logger.error(reply)\n else:\n reply = \"No Password provided, this event will be reported!\"\n self.logger.error(reply)\n return reply\n \n async def save_all(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n self.logger.warning(f\"{author.name} requested to save all conversations.\")\n if len(self.conversations) > 0:\n for c in self.conversations:\n c.saveConversation()\n reply = \"Saved all conversations\"\n else:\n reply = \"No conversations in memory\"\n self.logger.info(reply)\n return reply\n \n async def shutdown(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n self.logger.error(f\"{author.name} initiated shutdown, saving conversations.\")\n await self.save_all(message_object)\n self.logger.error(\"Saved conversations.\\nShutting down.\")\n \n exit()\n \n async def getMessageLog(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n reply = None\n splits = message.split(\" \")\n if self.bot is not None:\n \n target_user = self.bot.get_user(int(splits[1]))\n if target_user:\n self.logger.warning(f'Fetching DMs from {target_user.name} ({target_user.id}), requested by {author.name}')\n self.logger.info('------')\n # Fetch the DM channel between the bot and the target user\n dm_channel = target_user.dm_channel or await target_user.create_dm()\n # Fetch all messages from the DM channel\n messages = []\n async for message in dm_channel.history(limit=None):\n messages.append(message)\n for m in messages:\n reply = (f\"{m.author.name} ({m.author.id}): {m.content}\")\n self.logger.info(reply)\n await author.send(reply)\n else:\n reply = f'Unable to find user with ID {splits[1]}'\n self.logger.warning(reply)\n if reply is None:\n reply = \"Bot not initialized, How did we get here?\"\n self.logger.warning(reply)\n return reply\n \n async def resendMsg(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n if len(files) == 0:\n files = None\n fetch_last_message = False\n name, values = self.handleArgs(message)\n reply = None\n if len(values) > 0:\n reply = \"\"\n self.logger.warning(\"Sending User defined Message\")\n if len(values) > 1:\n reply = values[1]\n else:\n reply = values[0]\n for c in self.conversations:\n if c.user == name:\n await self.collectMessage(reply, c.author, \"gpt\", files)\n await c.author.send(reply, files=files)\n return \"Sending User defined Message\"\n elif len(values) == 0:\n fetch_last_message = True\n else:\n reply = \"No Arguments given!\"\n return reply\n self.logger.warning(f\"{author.name} requested resend to {name}\")\n for c in self.conversations:\n if c.user == name:\n if fetch_last_message:\n last_conv = c.conversation[-1]\n if last_conv[\"role\"] == \"user\":\n await self.gpt_sending_user(c.author)\n return \"Requested new Message from GPT\"\n else:\n reply = last_conv[\"content\"]\n \n \n if c.author is not None:\n self.logger.warning(\"Resending Message\")\n await self.collectMessage(reply, c.author, \"user\")\n await c.author.send(reply)\n for u,t in 
self.tasks.items():\n t.cancel()\n return \"Resending Message\"\n \n if c.author == None:\n reply = \"User has no Author.\"\n self.logger.warning(reply)\n return reply\n reply = \"Conversation not found.\"\n self.logger.warning(reply)\n return reply\n \n async def fake_receipt(self, message_object):\n message, author, files = await self.unpackMessage(message_object)\n name, values = self.handleArgs(message)\n user_id = values[0]\n store_name = values[1]\n amount = values[2]\n target_user = await self.bot.fetch_user(user_id)\n file = image_creation(amount,store_name)\n files = [file,]\n file_size=file.fp.__sizeof__()\n self.logger.warning(f\"Sending Fake receipt to {name}\\n store name: {store_name}, amount: {amount}, file_size: {file_size}\")\n chat_reply = f\"{self.streamer_name} shared the receipt with me, please check that the addressee ({store_name}) and the amount ({amount}.00$) are indeed correct:\"\n await target_user.send(chat_reply, files=files)\n file = image_creation(amount,store_name)\n files = [file,]\n await self.collectMessage(chat_reply, target_user, \"gpt\", files=files)\n return \"Send faked receipt\"\n\n \n \n \n#Not actually Used \ndef test_all(bot : GPTBot):\n l = Logger(True, False)\n message = discord.Message()\n message.author = \"Test\"\n message.content = \"\"\n message.channel = discord.DMChannel\n \n for c,v in bot.commands.items():\n message.content = c\n if not v[\"value_type\"] == None:\n if v[\"value_type\"] is str:\n bot.runBot().on_message(message)\n message.content += ADMIN_PASSWORT\n l.info(message.content)\n bot.runBot().on_message(message)\n message.content = c\n message.content += \"justsomerandomshit\"\n if v[\"value_type\"] is [str,int]:\n message.content += \" caesar 0\"\n l.info(message.content)\n bot.runBot().on_message(message)\n message.content = c\n message.content += \" caesar 100\"\n l.info(message.content)\n bot.runBot().on_message(message)\n if v[\"value_type\"] is int:\n l.info(message.content)\n bot.runBot().on_message(message)\n message.content += f\" {random.randint(1,20)}\"\n l.info(message.content)\n bot.runBot().on_message(message)\n if v[\"value_type\"] is float:\n l.info(message.content)\n bot.runBot().on_message(message)\n message.content += f\" {random.random()}\"\n l.info(message.content)\n bot.runBot().on_message(message)\n else: \n l.info(message.content)\n bot.runBot().on_message(message)\n \nif __name__ == '__main__':\n bot = GPTBot(bot_token=DISCORD_TOKEN_ALEX, gpt_api_key=OPENAI_API_KEY, bot_name=\"Alex\", channel_id=1129125304993067191,guild_id=877203185700339742, streamer_name=\"Caesar\", test_mode=True, admin_pw=ADMIN_PASSWORT, debug=True)\n \n bot.runBot()","repo_name":"caesarakalaeii/dc-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":51753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
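Both `gpt_sending()` and `gpt_sending_user()` in the record above inline the same context-window trim: when the log exceeds 20 entries, keep the system prompt plus the 20 most recent messages. A pure-function sketch of that logic; `trim_history` and the sample data are mine, not part of the bot:

```python
# Keep the system prompt (entry 0) plus the 'window' newest messages,
# mirroring the inline trimming in gpt_sending()/gpt_sending_user().
def trim_history(conversation, window=20):
    if len(conversation) <= window:
        return list(conversation)
    return [conversation[0]] + conversation[-window:]


history = [{"role": "system", "content": "init prompt"}]
history += [{"role": "user", "content": str(i)} for i in range(30)]

trimmed = trim_history(history)
assert len(trimmed) == 21               # system prompt + 20 newest messages
assert trimmed[0]["role"] == "system"
assert trimmed[-1]["content"] == "29"
```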
+{"seq_id":"39897599677","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport arrow\nfrom bs4 import BeautifulSoup\nimport re\nimport dateutil\nimport requests\nimport json\n\n# RU-1: European and Uralian Market Zone (Zone 1)\n# RU-2: Siberian Market Zone (Zone 2)\n# RU-AS: Russian Far East\n\n\nMAP_GENERATION = {\n 'P_AES': 'nuclear',\n 'P_GES': 'hydro',\n 'P_GRES': 'unknown',\n 'P_TES': 'unknown',\n 'P_BS': 'unknown',\n 'P_REN': 'solar'\n}\n\nexchange_ids = {'CN->RU-AS': \"764\",\n 'MN->RU': \"276\",\n 'MN->RU-2': \"276\",\n 'KZ->RU': \"785\",\n 'KZ->RU-1': \"2394\",\n 'KZ->RU-2': \"344\",\n 'RU-1->RU-2': \"139\",\n 'GE->RU': \"752\",\n 'GE->RU-1': \"752\",\n 'AZ->RU': \"598\",\n 'AZ->RU-1': \"598\",\n 'BY->RU': \"321\",\n 'BY->RU-1': \"321\",\n 'RU->UA': \"880\",\n 'RU-1->UA':\"880\"}\n\n# Each exchange is contained in a div tag with a \"data-id\" attribute that is unique.\n\n\ntz = 'Europe/Moscow'\n\n\ndef fetch_production(zone_key='RU', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n r = session or requests.session()\n today = arrow.now(tz=tz).format('YYYY.MM.DD')\n\n if zone_key == 'RU':\n url = 'http://br.so-ups.ru/webapi/api/CommonInfo/PowerGeneration?priceZone[]=-1&startDate={date}&endDate={date}'.format(\n date=today)\n elif zone_key == 'RU-1':\n url = 'http://br.so-ups.ru/webapi/api/CommonInfo/PowerGeneration?priceZone[]=1&startDate={date}&endDate={date}'.format(\n date=today)\n elif zone_key == 'RU-2':\n url = 'http://br.so-ups.ru/webapi/api/CommonInfo/PowerGeneration?priceZone[]=2&startDate={date}&endDate={date}'.format(\n date=today)\n else:\n raise NotImplementedError('This parser is not able to parse given zone')\n\n response = r.get(url)\n json_content = json.loads(response.text)\n dataset = json_content[0]['m_Item2']\n\n data = []\n for datapoint in dataset:\n row = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'so-ups.ru'\n }\n\n for k, production_type in MAP_GENERATION.items():\n if k in datapoint:\n gen_value = float(datapoint[k]) if datapoint[k] else 0.0\n row['production'][production_type] = row['production'].get(production_type,\n 0.0) + gen_value\n else:\n row['production']['unknown'] = row['production'].get('unknown', 0.0) + gen_value\n\n # Date\n hour = '%02d' % int(datapoint['INTERVAL'])\n date = arrow.get('%s %s' % (today, hour), 'YYYY.MM.DD HH')\n\n row['datetime'] = date.replace(tzinfo=dateutil.tz.gettz(tz)).datetime\n\n current_dt = arrow.now(tz).datetime\n\n # Drop datapoints in the future\n if row['datetime'] > current_dt:\n continue\n\n # Default values\n row['production']['biomass'] = None\n row['production']['geothermal'] = None\n\n data.append(row)\n\n return data\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known 
power exchange (in MW) between two zones\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n today = arrow.now(tz=tz)\n date = today.format('YYYY-MM-DD')\n hour = today.format('HH')\n exchanges_url = 'http://br.so-ups.ru/webapi/api/flowDiagramm/GetData?Date={}&Hour={}'.format(date, hour)\n\n r = session or requests.session()\n response = r.get(exchanges_url)\n json_content = json.loads(response.text)\n\n sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))\n\n if sortedcodes not in exchange_ids.keys():\n raise NotImplementedError('This exchange pair is not implemented.')\n\n current_dt = arrow.now('Europe/Moscow').datetime\n exchange_id = int(exchange_ids[sortedcodes])\n\n try:\n exchange = [item for item in json_content['Flows'] if item['Id'] == exchange_id][0]\n except:\n raise NotImplementedError('The exchange {} is not implemented'.format(sortedcodes))\n\n exchange = {\n 'sortedZoneKeys': sortedcodes,\n 'datetime': current_dt,\n 'netFlow': exchange.get('NumValue'),\n 'source': 'so-ups.ru'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_production(RU-1) ->')\n print(fetch_production('RU-1'))\n print('fetch_production(RU-2) ->')\n print(fetch_production('RU-2'))\n print('fetch_exchange(CN, RU-AS) ->')\n print(fetch_exchange('CN', 'RU-AS'))\n print('fetch_exchange(MN, RU) ->')\n print(fetch_exchange('MN', 'RU'))\n print('fetch_exchange(MN, RU-2) ->')\n print(fetch_exchange('MN', 'RU-2'))\n print('fetch_exchange(KZ, RU) ->')\n print(fetch_exchange('KZ', 'RU'))\n print('fetch_exchange(KZ, RU-1) ->')\n print(fetch_exchange('KZ', 'RU-1'))\n print('fetch_exchange(KZ, RU-2) ->')\n print(fetch_exchange('KZ', 'RU-2'))\n print('fetch_exchange(GE, RU) ->')\n print(fetch_exchange('GE', 'RU'))\n print('fetch_exchange(GE, RU-1) ->')\n print(fetch_exchange('GE', 'RU-1'))\n print('fetch_exchange(AZ, RU) ->')\n print(fetch_exchange('AZ', 'RU'))\n print('fetch_exchange(AZ, RU-1) ->')\n print(fetch_exchange('AZ', 'RU-1'))\n print('fetch_exchange(BY, RU) ->')\n print(fetch_exchange('BY', 'RU'))\n print('fetch_exchange(BY, RU-1) ->')\n print(fetch_exchange('BY', 'RU-1'))\n print('fetch_exchange(RU, UA) ->')\n print(fetch_exchange('RU', 'UA'))\n print('fetch_exchange(RU-1, UA) ->')\n print(fetch_exchange('RU-1', 'UA'))\n print('fetch_exchange(RU-1, RU-2) ->')\n print(fetch_exchange('RU-1', 'RU-2'))\n","repo_name":"mhilmiasyrofi/carbonmap","sub_path":"parsers/RU.py","file_name":"RU.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"31347380961","text":"# Strongly recommend to use this piece of code in a sub-process.\n# Cause decoding an online video stream consumes about 100M memory and 40% CPU.\n\nimport os\nimport pdb\n\nimport pyFFmpeg\n\n\nstream_path = \"rtmp://live.demo.uavcmlc.com:1935/live/DEV02001270?token=03c7986c15e0\"\n\nstream_state = False # to control whether to restart the stream context\nwhile True:\n\n # initialize the stream context\n while True:\n print('init ... ')\n stream_obj = pyFFmpeg.StreamParser(stream_path)\n if stream_obj.stream_state is True:\n # it means that the stream context has been opened successfully.\n # otherwise, the stream can not be reached,\n # probably the path is wrong or stream is empty\n stream_state = stream_obj.stream_state\n break\n\n # to get frames in a loop until encountering an error\n while True:\n frame = stream_obj.get_one_frame(image_format='numpy')\n if type(frame) is int:\n if frame == -5:\n stream_state = stream_obj.stream_state\n break\n elif frame == -4:\n stream_state = stream_obj.stream_state\n break\n\n else:\n pass\n # process this frame according to your needs.\n # rgb_image.save(os.path.join(dir_path, 'rgb_file_{}.jpg'.format(i)))\n\n # then reconnect to the stream and rebuild a stream context in the next loop.\n # you have to release the memory containing stream context manually\n stream_obj.release_memory()\n print('successfully release memory of stream context.')\n","repo_name":"dongrixinyu/pyFFmpeg","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"51"}
+{"seq_id":"3637002366","text":"__version__ = '0.6.1'\n\nimport sys\nimport logging\n\nfrom SocketServer import ThreadingMixIn\nfrom wsgiref.simple_server import WSGIServer, make_server\n\nimport flask\nimport pymongo\n\nfrom regenwolken import views, mongonic\n\n\nclass ThreadedWSGIServer(ThreadingMixIn, WSGIServer):\n pass\n\n\nclass Regenwolken(flask.Flask):\n\n def __init__(self):\n\n flask.Flask.__init__(self, __name__)\n self.config.from_object('regenwolken.utils.conf')\n self.config.from_envvar('REGENWOLKEN_SETTINGS', silent=True)\n\n if len(sys.argv) > 1:\n path = sys.argv[1] if sys.argv[1].startswith('/') else '../' + sys.argv[1]\n self.config.from_pyfile(path, silent=True)\n\n self.setup_routes()\n self.setup_mongodb()\n self.setup_extensions()\n\n if '--debug' in sys.argv:\n self.config['DEBUG'] = True\n\n if not self.config['DEBUG']:\n self.setup_logger() # this circumvents issues with cram BDT\n\n def setup_routes(self):\n\n for endpoint, rule, methods in [\n ('index', '/', ['GET', 'POST']),\n ('items_view', '/', ['GET']),\n\n ('account', '/account', ['PUT', 'GET']),\n ('account_stats', '/account/stats', ['GET', ]),\n\n ('register', '/register', ['POST', ]),\n ('domains', '/domains/', ['GET', ]),\n\n ('items', '/items', ['GET', ]),\n ('bookmark', '/items', ['POST', ]),\n ('items_new', '/items/new', ['HEAD', 'GET']),\n ('items_edit', '/items/', ['HEAD', 'PUT', 'DELETE']),\n\n ('trash', '/items/trash', ['POST', ]),\n\n ('blob', '/items//', ['GET']),\n ('blob', '//', ['GET']),\n\n ('thumb', '/thumb/', ['GET', ])\n\n ]:\n self.add_url_rule(rule, endpoint, view_func=getattr(views, endpoint), methods=methods)\n\n def setup_mongodb(self):\n\n con = pymongo.Connection(\n self.config['MONGODB_HOST'],\n self.config['MONGODB_PORT']\n )[self.config['MONGODB_NAME']]\n\n con.items.create_index('short_id')\n con.accounts.create_index('email')\n\n self.db = con\n self.fs = mongonic.GridFS(con)\n self.sessions = mongonic.Sessions(con, size=self.config['MONGODB_SESSION_SIZE'])\n\n def setup_logger(self):\n\n path = self.config.get('LOGFILE', '/var/log/regenwolken.log')\n file_handler = logging.FileHandler(path)\n file_handler.setLevel(logging.WARNING)\n file_handler.setFormatter(logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n ))\n\n self.logger.addHandler(file_handler)\n\n def setup_extensions(self):\n\n try:\n import pygments\n except ImportError:\n if self.config['SYNTAX_HIGHLIGHTING']:\n print >> sys.stderr, \"'pygments' not found, syntax highlighting disabled\"\n self.config['SYNTAX_HIGHLIGHTING'] = False\n try:\n import markdown\n except ImportError:\n if self.config['MARKDOWN_FORMATTING']:\n print >> sys.stderr, \"'markdown' not found, markdown formatting disabled\"\n self.config['MARKDOWN_FORMATTING'] = False\n\n try:\n import PIL\n except ImportError:\n if self.config['THUMBNAILS']:\n print >> sys.stderr, \"'PIL' not found, thumbnails disabled\"\n self.config['THUMBNAILS'] = False\n\n\ndef main():\n\n app = Regenwolken()\n httpd = make_server(\n app.config['BIND_ADDRESS'], app.config['PORT'],\n app, server_class=ThreadedWSGIServer)\n httpd.serve_forever()\n\n\ntry:\n import uwsgi\nexcept ImportError:\n pass\nelse:\n application = Regenwolken()\n","repo_name":"posativ/regenwolken","sub_path":"regenwolken/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"51"}
+{"seq_id":"70066486560","text":"from django.shortcuts import render\r\nfrom myapp.forms import StudentReg\r\n\r\ndef req_val(request):\r\n if request.method ==\"POST\":\r\n data = StudentReg(request.POST)\r\n #print(data) thsi is the first type\r\n #print(request.method)\r\n if data.is_valid():\r\n print(\"validated\")\r\n name =data.cleaned_data['name']\r\n email =data.cleaned_data['email']\r\n print(name+email)\r\n else:\r\n data = StudentReg()\r\n print(request.method,data)\r\n return render(request,\"index.html\",{\"form\":data})\r\n","repo_name":"Ramachandra-2096/Django_validations","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"26304879038","text":"import os\n\nimport horizons.main # this import needs to stay to avoid errors with BuildTab\nfrom horizons.gui.tabs.buildtabs import BuildTab\nfrom horizons.util.yamlcache import YamlCache\n\nROOT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../')\n\n\ndef _get_referenced_buildings(yaml_data):\n\t\"\"\"\n\tParse referenced building from yaml data.\n\t\"\"\"\n\tfor section in yaml_data.values():\n\t\tfor value in section:\n\t\t\tif type(value) != list:\n\t\t\t\tcontinue\n\n\t\t\tfor text in value:\n\t\t\t\tif text.startswith('BUILDING'):\n\t\t\t\t\tyield text\n\n\ndef test_build_menu_consistency():\n\t\"\"\"\n\tCheck that the same buildings are referenced in both configurations of the build menu.\n\t\"\"\"\n\tassert len(BuildTab.build_menus) == 2, 'Expected 2 build menu configs'\n\n\tbuildings = []\n\tfor filename in BuildTab.build_menus:\n\t\twith open(os.path.join(ROOT_DIR, filename)) as f:\n\t\t\tdata = YamlCache.load_yaml_data(f)\n\t\t\tbuildings.append(sorted(list(_get_referenced_buildings(data))))\n\n\tassert buildings[0] == buildings[1]\n","repo_name":"unknown-horizons/unknown-horizons","sub_path":"tests/misc/test_build_menu_consistency.py","file_name":"test_build_menu_consistency.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":1376,"dataset":"github-code","pt":"51"}
+{"seq_id":"29614754566","text":"#!/bin/env python3\n\n# AUTHORS:\n# Hakan Ozadam\n#\n# Moore Laboratory\n# UMASS Medical School / HHMI\n# RNA Therapeutics Institute\n# Albert Sherman Center, ASC4-1009\n# 368 Plantation Street\n# Worcester, MA 01605\n# USA\n#\n####################################################################\n\nimport argparse\nimport os\nimport sys\nfrom collections import defaultdict\n\n#####################################################################\n\ndef handle_arguments():\n parser = argparse.ArgumentParser(description=\n '''\n Given a bed file of branchpoints, it filters out those\n ''')\n parser.add_argument(\"-i\" ,\n help = \"Input bed file\" ,\n required = True ,\n metavar = \"input_bed_file\" ,\n type = str)\n parser.add_argument(\"-d\" ,\n help = \"Distance Threshold\" ,\n required = True ,\n metavar = \"radius\" ,\n type = int)\n parser.add_argument(\"-o\" ,\n help = \"Output file\" ,\n required = True ,\n metavar = \"output_fastq_file\" ,\n type = str)\n parser.add_argument(\"-g\" ,\n help = \"Output distances strictly GREATER than d (defualt is False).\"\n \"That is, in the absence of this option, it reports BP's\"\n \"with distance d or less.\" ,\n action = \"store_true\",\n dest = \"greater_than\")\n\n arguments = parser.parse_args()\n return arguments\n\n#####################################################################\n\ndef main():\n arguments = handle_arguments()\n distance_index = 5\n\n with open(arguments.i, 'r') as input_stream,\\\n open(arguments.o, 'w') as output_stream:\n\n if arguments.greater_than:\n print(\"Reporting branchpoints with distance strictly greater than \" + str( arguments.d ))\n for line in input_stream:\n contents = line.strip().split('__')\n if len(contents) < 6:\n continue\n if int(contents[distance_index]) > int(arguments.d):\n print(line.strip(), file = output_stream)\n else:\n print(\"Reporting branchpoints with distance less than \" + str( arguments.d ))\n for line in input_stream:\n contents = line.strip().split('__')\n if len(contents) < 6:\n continue\n if int(contents[distance_index]) <= int(arguments.d):\n print(line.strip(), file = output_stream)\n\n#####################################################################ß\n\nif __name__ == '__main__':\n main()\nelse:\n exit(1)\n\n####################################################################\n","repo_name":"hakanozadam/bal","sub_path":"bal/bin/scripts/filter_bp_by_distance_from_bed.py","file_name":"filter_bp_by_distance_from_bed.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"24428719786","text":"import pygame \r\nfrom pygame.locals import *\r\nfrom pygame import sprite, image, Rect\r\nfrom random import choice,randint\r\n\r\nclass Enemy(sprite.Sprite):\r\n\r\n def __init__(self, imagelist, x = 0, y = 0, width = 64, height = 64):\r\n super().__init__()\r\n self.imagelist = imagelist\r\n self.rect = Rect(x, y, width, height)\r\n self.setcount(0)\r\n self.setimage(0)\r\n self.setspeed(0)\r\n \r\n def setcount(self, count):\r\n self.count = count\r\n \r\n def setimage(self,num):\r\n self.num = num\r\n self.image = image.load(self.imagelist[self.num])\r\n \r\n def setspeed(self,speed):\r\n self.speed = speed\r\n\r\n def setshot(self, shot):\r\n self.shot = shot\r\n\r\n def update(self):\r\n\r\n self.count += 1\r\n if self.count == 20:\r\n self.setimage(1)\r\n if self.count == 40:\r\n self.setimage(0)\r\n self.setcount(0)\r\n speed = [-4,+4]\r\n self.speed = choice(speed)\r\n\r\n if self.shot.rect.y >= 640 and randint(0,3) == 0:\r\n self.shot.setplace(self.rect.x, self.rect.y)\r\n self.shot.setimage(1)\r\n self.shot.setspeed(8)\r\n\r\n if self.rect.x > 0 and self.speed < 0: \r\n self.rect.move_ip(self.speed, 0)\r\n if self.rect.x < 576 and self.speed > 0:\r\n self.rect.move_ip(self.speed,0)\r\n","repo_name":"merl2801/Coin-Game","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"38933710807","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport pickle #to load a saved modelimport base64 #to open .gif files in streamlit app\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n# from bokeh.plotting import figure\n\ninput_size = 8\nhidden_size = 32\noutput_size = 1\n\nclass DiabetesModel(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(DiabetesModel, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size)\n self.relu = nn.ReLU()\n self.fc2 = nn.Linear(hidden_size, output_size)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.relu(out)\n out = self.fc2(out)\n out = self.sigmoid(out)\n return out\n\n@st.cache(suppress_st_warning=True)\ndef get_fvalue(val): \n\tfeature_dict = {\"No\":1,\"Yes\":2} \n\tfor key,value in feature_dict.items(): \n\t\tif val == key: \n\t\t\treturn value\n\ndef get_value(val,my_dict): \n\tfor key,value in my_dict.items(): \n\t\tif val == key: \n\t\t\treturn value\napp_mode = st.sidebar.selectbox('选择页面',['主页','预测']) #two pages\n\n\nif app_mode=='主页': \n\tst.title('糖尿病:') \n\t# st.image('loan_image.jpg') \n\tst.markdown('数据:') \n\tdata=pd.read_csv('diabetes.csv') \n\tst.write(data.head(15)) \n\tst.markdown('图表')\n\t# st.bokeh_chart(data[['Glucose','BloodPressure']].head(20))\n\t# fig = plt.plots()\n\t# st.bar_chart(data[['Glucose','BloodPressure']].head(20))\n\t# ax1 = data.plot.scatter(x='Glucose',\n # y='BMI')\n\t# plt.scatter(data.Glucose, data.BMI ) \n\t# st.pyplot(fig)\n\t# st.bar_chart(data[['Glucose','BMI']].head(20))\n\tfig=plt.figure()\n\tax=fig.add_axes([0,0,1,1])\n\tax.scatter(data['Glucose'], data['BloodPressure'], color='r')\n\t# ax.scatter(grades_range, boys_grades, color='b')\n\tax.set_xlabel('Glucose')\n\tax.set_ylabel('Blood Pressure')\n\tax.set_title('Glucose vs Blood Pressure')\n\tst.pyplot(fig)\n\n\tfig1=plt.figure()\n\tax1=fig1.add_axes([0,0,1,1])\n\tax1.scatter(data['Age'], data['BloodPressure'], color='r')\n\t# ax.scatter(grades_range, boys_grades, color='b')\n\tax1.set_xlabel('Age')\n\tax1.set_ylabel('Blood Pressure')\n\tax1.set_title('Age vs Glucose')\n\tst.pyplot(fig1)\n\n\tfig2=plt.figure()\n\tax2=fig2.add_axes([0,0,1,1])\n\tax2.scatter(data['BloodPressure'], data['Outcome'], color='r')\n\t# ax.scatter(grades_range, boys_grades, color='b')\n\tax2.set_xlabel('BloodPressure')\n\tax2.set_ylabel('Outcome,1 indicating have diabetes')\n\tax2.set_title('Blood Pressure vs Outcome')\n\tst.pyplot(fig2)\n\nelif app_mode == '预测':\n\t# st.image('slider-short-3.jpg') \n\tst.subheader('先生/女士,您需要填写所有必要的信息,以便得到对您的诊断请求的答复(模型精度--0.71)!')\n\tst.sidebar.header(\"Informations about the client :\") \n\t# gender_dict = {\"Male\":1,\"Female\":2} \n\t# feature_dict = {\"No\":1,\"Yes\":2} \n\t# edu={'Graduate':1,'Not Graduate':2} \n\t# prop={'Rural':1,'Urban':2,'Semiurban':3} \n\tpreg=st.sidebar.number_input('怀孕次数',0,20,0,) \n\tgluc=st.sidebar.number_input('葡萄糖(md/dl)',0.00,250.00,0.00,) \n\tbloodpr=st.sidebar.number_input('舒张性血压(mm hg)', 0.00,150.00,0.00) \n\tskinthic=st.sidebar.number_input('表皮厚度', 0.00,100.00,0.00) \n\tinsulin = st.sidebar.number_input('胰岛素', 0.00,850.00,0.00) \n\tbmi = st.sidebar.number_input('暴模指数', 0.00,68.00,0.00) \n\tdiapedig = st.sidebar.number_input('糖尿病谱系功能', 0.00,3.00,0.00) \n\tage = st.sidebar.number_input('年龄', 0.00,90.00,0.00) \n\n\t# data1={ \n\t# \t'Gender':Gender, \n\t# \t'Married':Married, \n\t# \t'Dependents':[class_0,class_1,class_2,class_3], 
\n\t# \t'Education':Education, \n\t# \t'ApplicantIncome':ApplicantIncome, \n\t# \t'CoapplicantIncome':CoapplicantIncome, \n\t# \t'Self Employed':Self_Employed, \n\t# \t'LoanAmount':LoanAmount, \n\t# \t'Loan_Amount_Term':Loan_Amount_Term, \n\t# \t'Credit_History':Credit_History, \n\t# \t'Property_Area':[Rural,Urban,Semiurban], \n\t# \t} \n\t# feature_list=[\n\t# \tApplicantIncome,\n\t# \tCoapplicantIncome,\n\t# \tLoanAmount,\n\t# \tLoan_Amount_Term,\n\t# \tCredit_History,\n\t# \tget_value(Gender,gender_dict),\n\t# \tget_fvalue(Married),\n\t# \tdata1['Dependents'][0],\n\t# \tdata1['Dependents'][1],\n\t# \tdata1['Dependents'][2],\n\t# \tdata1['Dependents'][3],\n\t# \tget_value(Education,edu),\n\t# \tget_fvalue(Self_Employed),\n\t# \tdata1['Property_Area'][0],\n\t# \tdata1['Property_Area'][1],\n\t# \tdata1['Property_Area'][2]\n\t# \t] \n\t# single_sample = np.array(feature_list).reshape(1,-1)\n\tfeature_list=[\n\t\tpreg,\n\t\tgluc,\n\t\tbloodpr,\n\t\tskinthic,\n\t\tinsulin,\n\t\tbmi,\n\t\tdiapedig,\n\t\tage\n\t \t] \n\t# single_sample = np.array(feature_list).reshape(1,-1)\n\n\tif st.button(\"预测\"): \n\t\t# file_ = open(\"6m-rain.gif\", \"rb\") \n\t\t# contents = file_.read() \n\t\t# data_url = base64.b64encode(contents).decode(\"utf-8\") \n\t\t# file_.close() \n\t\t# file = open(\"green-cola-no.gif\", \"rb\") \n\t\t# contents = file.read() \n\t\t# data_url_no = base64.b64encode(contents).decode(\"utf-8\") \n\t\t# file.close() \n\t\t# Load the model\n\t\tmodel = DiabetesModel(input_size, hidden_size, output_size)\n\t\tmodel.load_state_dict(torch.load(\"diabetes_model.pt\"))\n\t\t# Set the model to evaluation mode\n\t\tmodel.eval()\n\t\t# loaded_model = pickle.load(open('Random_Forest.sav', 'rb')) \n\t\t# prediction = loaded_model.predict(single_sample) \n # Make predictions on a sample input\n\t\tsample_input = torch.tensor([[feature_list]])\n\t\twith torch.no_grad():\n\t\t predicted_output = model(sample_input.float())\n\t\t # print(f\"Predicted output: {predicted_output.item()}\")\n\t\t st.success( f\"患糖尿病的概率:: {predicted_output.item()}\" )\n\n\t\t# if prediction[0] == 0 : \n\t\t# \tst.error( 'According to our Calculations, you will not get the loan from Bank' ) \n\t\t# \t# st.markdown( f' ', unsafe_allow_html=True,) \n\t\t# elif prediction[0] == 1 : \n\t\t# \tst.success( 'Congratulations!! you will get the loan from Bank' ) \n\t\t# \tst.markdown( f' ', unsafe_allow_html=True, )\n\t\n\n\n","repo_name":"daguang123456/diabetes_classify","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"38532099193","text":"import networkx as nx\nimport numpy as np\nfrom loren_frank_data_processing.track_segment_classification import (\n get_track_segments_from_graph, project_points_to_segment)\nimport pandas as pd\nimport xarray as xr\n\n\ndef _get_MAP_estimate_2d_position_edges(posterior, track_graph, decoder):\n # Get 2D position on track from decoder MAP estimate\n map_position_ind = (\n posterior.where(decoder.is_track_interior_).argmax(\n \"position\", skipna=True).values\n )\n map_position_2d = decoder.place_bin_center_2D_position_[\n map_position_ind]\n\n # Figure out which track segment it belongs to\n track_segment_id = decoder.place_bin_center_ind_to_edge_id_[\n map_position_ind]\n map_edges = np.array(list(track_graph.edges))[track_segment_id]\n\n return map_position_2d, map_edges\n\n\ndef _get_animal_2d_projected_position_edges(\n track_graph, position_2D, track_segment_id):\n # Get animal's 2D position projected onto track\n track_segments = get_track_segments_from_graph(track_graph)\n projected_track_positions = project_points_to_segment(\n track_segments, position_2D)\n n_time = projected_track_positions.shape[0]\n actual_projected_position = projected_track_positions[(\n np.arange(n_time), track_segment_id)]\n\n # Add animal's position at time to track graph\n actual_edges = np.array(list(track_graph.edges))[track_segment_id]\n\n return actual_projected_position, actual_edges\n\n\ndef add_node(pos, edge, graph, node_name):\n node1, node2 = edge\n x3, y3 = pos\n\n x1, y1 = graph.nodes[node1]['pos']\n left_distance = np.sqrt((x3 - x1)**2 + (y3 - y1)**2)\n nx.add_path(graph, [node1, node_name], distance=left_distance)\n\n x2, y2 = graph.nodes[node2]['pos']\n right_distance = np.sqrt((x3 - x2)**2 + (y3 - y2)**2)\n nx.add_path(\n graph, [node_name, node2], distance=right_distance)\n\n#\n\n\ndef calculate_replay_distance(\n posterior, track_graph, decoder, position_2D, track_segment_id):\n\n track_segment_id = np.asarray(track_segment_id).astype(int).squeeze()\n position_2D = np.asarray(position_2D)\n map_position_2d, map_edges = _get_MAP_estimate_2d_position_edges(\n posterior, track_graph, decoder)\n (actual_projected_position,\n actual_edges) = _get_animal_2d_projected_position_edges(\n track_graph, position_2D, track_segment_id)\n\n copy_graph = track_graph.copy()\n replay_distance_from_animal_position = []\n\n for actual_pos, actual_edge, map_pos, map_edge in zip(\n actual_projected_position, actual_edges, map_position_2d,\n map_edges):\n\n # Add actual position node\n add_node(actual_pos, actual_edge, copy_graph, 'actual_position')\n add_node(map_pos, map_edge, copy_graph, 'map_position')\n if np.all(actual_edge == map_edge):\n (x1, y1), (x2, y2) = actual_pos, map_pos\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n nx.add_path(\n copy_graph, ['actual_position', 'map_position'],\n distance=distance)\n\n replay_distance_from_animal_position.append(\n nx.shortest_path_length(copy_graph, source='actual_position',\n target='map_position', weight='distance'))\n copy_graph.remove_node('actual_position')\n copy_graph.remove_node('map_position')\n\n return np.asarray(replay_distance_from_animal_position)\n\n\ndef get_place_field_max(classifier):\n try:\n max_ind = classifier.place_fields_.argmax('position')\n return np.asarray(\n classifier.place_fields_.position[max_ind].values.tolist())\n except AttributeError:\n return np.asarray(\n [classifier.place_bin_centers_[gpi.argmax()]\n for gpi in classifier.ground_process_intensities_])\n\n\ndef 
maximum_a_posteriori_estimate(posterior_density):\n '''\n\n Parameters\n ----------\n posterior_density : xarray.DataArray, shape (n_time, n_x_bins, n_y_bins)\n\n Returns\n -------\n map_estimate : ndarray, shape (n_time,)\n\n '''\n try:\n stacked_posterior = np.log(posterior_density.stack(\n z=['x_position', 'y_position']))\n map_estimate = stacked_posterior.z[stacked_posterior.argmax('z')]\n map_estimate = np.asarray(map_estimate.values.tolist())\n except KeyError:\n map_estimate = posterior_density.position[\n np.log(posterior_density).argmax('position')]\n map_estimate = np.asarray(map_estimate)[:, np.newaxis]\n return map_estimate\n\n\ndef get_probability_of_state(results, posterior_type='acausal_posterior'):\n fragmented = (results[posterior_type]\n .sel(state=['Inbound-Fragmented', 'Outbound-Fragmented'])\n .sum(['state', 'position'])\n .assign_coords({'state': 'Fragmented'}))\n probability = (results[posterior_type]\n .sum('position')\n .drop_sel(state=['Inbound-Fragmented', 'Outbound-Fragmented']))\n return xr.concat((probability, fragmented), dim='state')\n\n\ndef classify_states(probability, probability_threshold=0.8, sampling_frequency=500):\n is_classified = (probability > probability_threshold).sum('state').astype(bool)\n max_state = probability.idxmax('state')\n classified_states_by_time = max_state.isel(time=is_classified)\n\n indexes = np.unique(classified_states_by_time.values, return_index=True)[1]\n classified_states = classified_states_by_time.values[sorted(indexes)]\n is_state = (probability > probability_threshold).sum('time') > 0\n state_duration = (probability > probability_threshold).sum('time') / sampling_frequency\n return classified_states_by_time, classified_states, is_state.values, state_duration.values\n\n\ndef get_replay_info(data, results, epoch_key):\n classified_states = []\n is_state = []\n state_duration = []\n\n for ripple_number in data['ripple_times'].index:\n ripple = data['ripple_times'].loc[ripple_number]\n\n start_time = ripple.start_time\n end_time = ripple.end_time\n\n probability = get_probability_of_state(\n results.sel(time=slice(start_time / np.timedelta64(1, 's'),\n end_time / np.timedelta64(1, 's'))))\n _, classified_states_temp, is_state_temp, state_duration_temp = classify_states(probability)\n classified_states.append(classified_states_temp)\n is_state.append(is_state_temp)\n state_duration.append(state_duration_temp)\n \n is_state = pd.DataFrame(np.stack(is_state),\n columns=probability.state,\n index=data['ripple_times'].index)\n is_state['Animal'] = epoch_key[0]\n is_state['Day'] = epoch_key[1]\n is_state['Epoch'] = epoch_key[2]\n \n state_duration = pd.DataFrame(np.stack(state_duration),\n columns=probability.state + '_duration',\n index=data['ripple_times'].index)\n replay_info = pd.concat((is_state, state_duration), axis=1)\n replay_info = replay_info.reset_index().set_index(['Animal', 'Day', 'Epoch', 'replay_number'])\n \n return replay_info, classified_states","repo_name":"edeno/pose_analysis","sub_path":"src/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"11002362200","text":"from django.http import HttpResponse\n\nfrom shop.models import Shop , Category , sepet , Slider\nfrom django.shortcuts import get_object_or_404,render\nfrom django.core.paginator import Paginator\n\n# Create your views here.\ndef index(request):\n #list comphension alt satırda yapılan şeyin adı\n urunler = Shop.objects.filter(isActive=True).order_by(\"urunismi\")\n kategoriler = Category.objects.all()\n sliders= Slider.objects.filter(is_active=True)\n\n paginator = Paginator(urunler,5)\n page = request.GET.get('page', 1)\n page_obj = paginator.page(page)\n\n return render(request, 'pages/index.html' , {\n 'categories': kategoriler,\n 'urunler' : urunler,\n 'sliders' : sliders\n })\ndef detaylar(request , slug):\n shop = get_object_or_404(Shop , slug=slug)\n context = {\n 'urun': shop\n }\n return render(request, 'pages/detaylar.html', context)\n\ndef iletisim(request):\n return render(request, 'pages/iletisim.html')","repo_name":"zehrakaragul/care-shop-web-site-Django","sub_path":"careshop/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"29333800334","text":"import requests\nfrom DBCon import DBCon\nimport mysql.connector\ndef not_allowed(insert_statement):\n not_allowed_list = ['SELECT','DROP','DELETE','UPDATE']\n for i in not_allowed_list:\n if i.upper() in insert_statement.upper():\n insert_statement = \"\"\n return insert_statement \n \ndef split_txt_to_insert(podaj_wyciag, tabela, a1nazwa):\n lista = podaj_wyciag.split(\"\\n\")\n if lista[-1] == \"\":\n del lista[-1] \n lista_fix = []\n liczba_przecinkow = lista[0].count(\",\")\n a_wyc = podaj_wyciag\n a_insert_into = tabela\n a_kolumna1 = a1nazwa\n a_1 = \"\"\n a_data = \"\"\n a_otwarcie = 0\n a_maks = 0\n a_min = 0\n a_zamkniecie = 0\n a_insert = \"insert into notowania.{} ({}, DATA_KURSU, OTWARCIE, MAKSIMUM, MINIMUM, ZAMKNIECIE) values\".format(a_insert_into, a_kolumna1)\n \n with open(\"notowania_omitted.txt\",\"r\") as f:\n o_text = f.read()\n omitted = o_text.split(\"\\n\")\n \n with open(\"lista_wig.txt\",\"r\") as e:\n l_text = e.read()\n wigi = l_text.split(\"\\n\")\n\n if a_wyc == notowania:\n if a_insert_into == \"NOTOWANIA\":\n for a in lista:\n if a[0:a.find(\",\")] not in omitted:\n lista_fix.append(a)\n ### ogarnac indeksy wykluczone \n elif tabela == \"WIG\":\n for a in lista:\n if a[0:a.find(\",\")] in wigi:\n lista_fix.append(a)\n\n else:\n lista_fix = lista.copy()\n\n dbcon_read = open(\"dbconfig.txt\",\"r\")\n db_list = dbcon_read.read().split(\"\\n\")\n dbconfig = {}\n for i in db_list:\n dbconfig[str(i[0:i.find(\":\")])] = str(i[i.find(\":\")+1:])\n\n conn = mysql.connector.connect(**dbconfig)\n cursor = conn.cursor()\n sql = \"select concat(skrot,'@',id) from notowania.dict_spolki\"\n cursor.execute(sql)\n spolki = []\n for row, in cursor.fetchall():\n spolki.append(row) \n conn.close()\n\n dict_spolki = {}\n for i in spolki:\n od = str(i[0:i.find(\"@\")])\n do = i[i.find(\"@\")+1:]\n dict_spolki[od] = do\n \n for i in lista_fix:\n przecinek = i.find(\",\")\n a_1 = i[0:przecinek]\n a_2 = dict_spolki[a_1]\n przecinek = i.find(\",\",przecinek)\n a_data = i[przecinek+1:i.find(\",\",przecinek+1)]\n przecinek = i.find(\",\",przecinek+1)\n a_otwarcie = i[przecinek+1:i.find(\",\",przecinek+1)]\n przecinek = i.find(\",\",przecinek+1)\n a_maks = i[przecinek+1:i.find(\",\",przecinek+1)]\n przecinek = i.find(\",\",przecinek+1)\n a_min = i[przecinek+1:i.find(\",\",przecinek+1)]\n przecinek = i.find(\",\",przecinek+1)\n a_zamkniecie = i[przecinek+1:i.find(\",\",przecinek+1)]\n\n a_insert += (\"\\n ('{}',str_to_date('{}','%Y%m%d'), {}, {}, {}, {})\").format(a_2,\n a_data,\n a_otwarcie,\n a_maks,\n a_min,\n a_zamkniecie)\n if i != lista_fix[-1]:\n a_insert += \",\"\n\n a_insert = not_allowed(a_insert)\n \n return a_insert\n \n#część odpowiadająca za pobranie danych z polskiej gieldy i przerobienie na sql\nnotowania = requests.get(\"http://bossa.pl/pub/ciagle/omega/cgl/ndohlcv.txt\").text\nsql_notowania = split_txt_to_insert(notowania, \"NOTOWANIA\", \"ID_SPOLKA\")\nsql_wig = split_txt_to_insert(notowania,\"WIG\",\"INDEKS\")\n#część odpowiadająca za pobranie danych z giełd światowych i przerobienie na sql\nstrona_zagranica = requests.get(\"http://bossa.pl/pub/indzagr/mstock/sesjazgr/sesjazgr.prn\").text\nsql_zagranica = split_txt_to_insert(strona_zagranica, \"GIELDY\", \"GIELDA\")\n#część odpowiadająca za zaciągnięcie do bazy danych walut \nstrona_waluty = requests.get(\"http://bossa.pl/pub/waluty/mstock/sesjanbp/sesjanbp.prn\").text\nsql_waluty = split_txt_to_insert(strona_waluty, \"WALUTY\", \"WALUTA\")\n#część odpowiadająca za zaciągnięcie do 
bazy danych NC\nstrona_nc = requests.get(\"http://bossa.pl/pub/newconnect/mstock/sesjancn/sesjancn.prn\").text\nsql_nc = split_txt_to_insert(strona_nc, \"NC\", \"ID_SPOLKA\")\n#pakowanie do bazy\n\ndbconfig = {'host' : 'localhost',\n 'user' : 'root',\n 'password' : 'kzc1@3',\n 'database' : 'notowania'}\n\nwith DBCon(dbconfig) as cursor:\n cursor.execute(sql_notowania)\n cursor.execute(sql_wig)\n cursor.execute(sql_waluty)\n cursor.execute(sql_zagranica)\n cursor.execute(sql_nc)\n","repo_name":"mericetsdagger/notowania","sub_path":"notowania.py","file_name":"notowania.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"17668104112","text":"#!/usr/bin/env python\nimport socket\nimport select\n\nHOST = ''\nPORT = 1234\nBUFF = 1024\n\nsockets = {}\n\ndef chat_server():\n srv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n srv_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n srv_sock.bind((HOST, PORT))\n srv_sock.listen(5)\n\n _poll = select.poll()\n _poll.register(srv_sock.fileno(), select.POLLIN)\n\n while True:\n pollrst = _poll.poll(1000)\n for fd, event in pollrst:\n if fd == srv_sock.fileno():\n cli_socket, cli_addr = srv_sock.accept()\n print(\"%s,%s has connected!\" % cli_addr)\n sockets[cli_socket.fileno()] = cli_socket\n _poll.register(cli_socket.fileno())\n print(\"Total cnt:%d\" % len(sockets))\n elif (event & select.POLLHUP):\n cli_socket = sockets[fd]\n cli_socket.close()\n _poll.unregister(fd)\n sockets.pop(fd)\n print(\"total socket:%d\" % len(sockets))\n elif event & select.POLLIN:\n try:\n cli_socket = sockets[fd]\n received = cli_socket.recv(BUFF).decode()\n if received:\n print(\"%s received!\" % received)\n except:\n sockets[fd].close()\n sockets.pop(fd)\n _poll.unregister(fd)\n\n\nchat_server()\n","repo_name":"shaotao1988/python","sub_path":"socket_chat/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41762590212","text":"import asyncore\nimport getopt\nimport logging\nimport sys\nimport time\n\nif __name__ == '__main__':\n sys.path.insert(0, '.')\n\nif sys.version_info[0] == 3:\n import http.client as lib_http\nelse:\n import httplib as lib_http\n\nfrom neubot.compat import json\n\ndef main(args):\n\n ''' Monitor Neubot state via command line '''\n\n try:\n options, arguments = getopt.getopt(args[1:], 'D:v')\n except getopt.error:\n sys.exit('Usage: neubot api.client [-v] [-D property=value]')\n if arguments:\n sys.exit('Usage: neubot api.client [-v] [-D property=value]')\n\n address, port, verbosity = '127.0.0.1', '9774', 0\n for name, value in options:\n if name == '-D':\n name, value = value.split('=', 1)\n if name == 'address':\n address = value\n elif name == 'port':\n port = value\n elif name == '-v':\n verbosity += 1\n\n timestamp = 0\n while True:\n try:\n\n connection = lib_http.HTTPConnection(address, port)\n connection.set_debuglevel(verbosity)\n connection.request('GET', '/api/state?t=%d' % timestamp)\n\n response = connection.getresponse()\n if response.status != 200:\n raise RuntimeError('Bad HTTP status: %d' % response.status)\n\n if response.getheader(\"content-type\") != \"application/json\":\n raise RuntimeError(\"Unexpected contenty type\")\n\n octets = response.read()\n dictionary = json.loads(octets)\n\n logging.info(\"APIStateTracker: received JSON: %s\",\n json.dumps(dictionary, ensure_ascii=True))\n\n if not \"events\" in dictionary:\n continue\n if not \"current\" in dictionary:\n raise RuntimeError(\"Incomplete dictionary\")\n\n timestamp = max(0, int(dictionary[\"t\"]))\n json.dumps(dictionary, sys.stdout)\n\n except KeyboardInterrupt:\n break\n except:\n error = asyncore.compact_traceback()\n logging.error('Exception: %s', str(error))\n time.sleep(5)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"neubot/neubot","sub_path":"neubot/api/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"51"}
+{"seq_id":"40879541145","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ## Read Dataset \n\n# COMMAND ----------\n\nimport pandas as pd\n\n# Read data into pandas data frame because Databricks can only read from local file\n# Not ideal solution but works for demo\ncancer_pd =pd.read_csv(os.path.join(os.getcwd(),\"cancer_dataset.csv\"))\ncancer_df=spark.createDataFrame(cancer_pd) \n\nprint(f\"Total Rows = {cancer_df.count()}\")\n\n# COMMAND ----------\n\ndisplay(cancer_df)\n\n# COMMAND ----------\n\n# Display schema\nfor x in cancer_df.schema:\n print(f\"File name : {x.name} | Data Type : {x.dataType}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Data Cleaning\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ###### Dropping columns\n\n# COMMAND ----------\n\n# Remove id column and the last column \nprint(f\"Total columns before = {len(cancer_df.columns)}\")\ncancer_df= cancer_df.drop(\"id\", \"Unnamed: 32\")\nprint(f\"Total columns after = {len(cancer_df.columns)}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ###### Change columns data types\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import DoubleType, StringType\nfrom pyspark.sql.functions import col\n\n# Change data types of all columns to double except the diagnosis column\n# diagnosis is the label column\nfor x in cancer_df.schema:\n if x.name!=\"diagnosis\":\n if x.dataType == StringType():\n cancer_df = cancer_df.withColumn(x.name, col(x.name).castTo(DoubleType()))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Train Model\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ###### Create String indexer and Vector assembler\n\n# COMMAND ----------\n\nfrom pyspark.ml.feature import StringIndexer, VectorAssembler\n\n# split dataset \ntrain_df, test_df = cancer_df.randomSplit([.8,.2], seed = 10)\n\n# convert label to integeres - LabelEncoder in sklearn\nstring_indexer = StringIndexer(inputCol = \"diagnosis\", outputCol =\"label\")\n\n# create vector indexer for mean features only - ten real valued features\nnumerical_cols = [x.name for x in train_df.schema if ((x.name!=\"diagnosis\") & (x.name!=\"label\") & (\"_mean\" in x.name))]\nvector_assembler = VectorAssembler(inputCols =numerical_cols , outputCol =\"features\")\n\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ###### Create pipeline and fit decision tree\n\n# COMMAND ----------\n\nfrom pyspark.ml.classification import DecisionTreeClassifier\nfrom pyspark.ml import Pipeline\n\n# create a decision tree\ndtree = DecisionTreeClassifier(featuresCol = \"features\", labelCol = \"label\")\n\n# create pipeline\nstages= [string_indexer, vector_assembler, dtree ]\npipeline = Pipeline(stages = stages)\n\n# fit pipeline - model\npipeline_model = pipeline.fit(train_df)\n\n# COMMAND ----------\n\n# display the tree\ndt_model = pipeline_model.stages[-1]\ndisplay(dt_model)\n\n# COMMAND ----------\n\n# display feature importance\nfeatures_df = pd.DataFrame(list(zip(vector_assembler.getInputCols(), dt_model.featureImportances)), columns=[\"feature\", \"importance\"])\nfeatures_df.sort_values(\"importance\", ascending =False)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ###### Generate predictions on test dataset\n\n# COMMAND ----------\n\n# transform the test data set\nresults_df = pipeline_model.transform(test_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ######Binary Classifier metrics\n\n# COMMAND ----------\n\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\n\n# Tranform test data\noutput_df = pipeline_model.transform(test_df)\n\n# Generate 
metrics\nbce = BinaryClassificationEvaluator(rawPredictionCol = \"prediction\" , labelCol= \"label\", metricName= \"areaUnderPR\")\nprint(f\"Area Under PR = {bce.evaluate(output_df)}\")\nbce.setMetricName(\"areaUnderROC\")\nprint(f\"Area Under ROC = {bce.evaluate(output_df)}\")\n\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ######Multiclass Classification Evaluation metrics\n\n# COMMAND ----------\n\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\n\nbce = MulticlassClassificationEvaluator(predictionCol = \"prediction\" , labelCol= \"label\", metricName= \"accuracy\")\nprint(f\"Accuracy = {bce.evaluate(output_df)}\")\nbce.setMetricName(\"f1\")\nprint(f\"F1 score = {bce.evaluate(output_df)}\")\nbce.setMetricName(\"precisionByLabel\")\nprint(f\"Precision = {bce.evaluate(output_df)}\")\nbce.setMetricName(\"recallByLabel\")\nprint(f\"Recall = {bce.evaluate(output_df)}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ######Confuison matrix\n\n# COMMAND ----------\n\nfrom pyspark.mllib.evaluation import MulticlassMetrics\nfrom pyspark.sql.types import DoubleType\nimport seaborn as sns\nimport matplotlib.pyplot as plt \n\n# stats about labels of test data set \nprint(f\"Total Malignant = {test_df.filter(col('diagnosis')=='M').count()}\")\nprint(f\"Total Bening = {test_df.filter(col('diagnosis')=='B').count()}\")\n\n# Compute raw scores on the test set\npredictionAndLabels = output_df[[\"prediction\", \"label\"]]\npredictionAndLabels= predictionAndLabels.withColumn(\"prediction\", col(\"prediction\").cast(DoubleType()))\npredictionAndLabels= predictionAndLabels.withColumn(\"label\", col(\"label\").cast(DoubleType()))\n\n# instantiate metrics object\nmetrics = MulticlassMetrics(predictionAndLabels.rdd)\n\n# generate a confusion matrix\nconfusion_matrix = metrics.confusionMatrix().toArray()\n\n# plot the matrix\nax= plt.subplot()\nsns.heatmap(confusion_matrix, annot=True, fmt='g', ax=ax, cmap= \"Blues\"); \n\n# labels, title and ticks\nax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \nax.set_title('Confusion Matrix'); \nax.xaxis.set_ticklabels(['Benign', 'Malignant']); ax.yaxis.set_ticklabels(['Benign', 'Malignant']);\n\n# COMMAND ----------\n\n\n","repo_name":"nyk2001/decision_tree_pyspark_classification","sub_path":"binary_classification.py","file_name":"binary_classification.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"75072351518","text":"class Queue:\n def __init__(self,size):\n self.items=[0]*size\n self.max_size=size\n self.head,self.tail,self.size=0,0,0\n \n def enqueue(self,item):\n if self.is_full():\n print(\"queue is full\")\n return\n\n print(\"Inserting Item\",item)\n self.items[self.tail]=item\n self.tail=(self.tail+1)%self.max_size\n self.size+=1\n \n def dqueue(self):\n item=self.items[self.head]\n self.head=(self.head+1)%self.max_size\n\n return item\n \n def is_empty(self):\n if self.size==0:\n return(\"Queue is empty\")\n return(\"no empty\")\n \n def is_full(self):\n if self.size==self.max_size:\n return True\n return False\n\n def remove(self):\n return self.items.pop(0)\n\n\nq=Queue(10)\nq.enqueue(9)\nq.enqueue(8)\nq.enqueue(5)\nq.enqueue(4)\nq.remove()\nq.remove()\n\nwhile not q.is_empty():\n itm=q.dequeue()\n print(itm)\n\nprint(q.items)\nprint(\"head\",q.head)\nprint(\"tail\",q.tail)\n\n\n \n ","repo_name":"Fahad2021/OPP-python-","sub_path":"data structure/Stack/cirque.py","file_name":"cirque.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"31186416659","text":"from django.contrib import admin\nfrom django.http import HttpRequest\n\nfrom course.actions.add_students_from_csv import add_students_from_csv\nfrom course.models import Course, CourseWindowRecord\n\n\n@admin.action(permissions=['change'])\ndef populate_students_list_from_csv(modeladmin, request, queryset):\n\n for course in queryset:\n add_students_from_csv(course, request)\n\n\nclass CourseAdmin(admin.ModelAdmin):\n list_display = ('id', 'course_code', 'course_name', 'section',\n 'semester', 'academic_year', 'is_active',)\n search_fields = ['id', 'course_code', 'course_name', 'section']\n actions = [populate_students_list_from_csv]\n\n def get_queryset(self, request: HttpRequest):\n user = request.user\n query_set = super().get_queryset(request)\n\n is_faculty = user.groups.filter(name=\"Faculty\").exists()\n is_student = user.groups.filter(name=\"Student\").exists()\n\n if is_faculty:\n filtered_data = query_set.filter(instructors=user)\n\n elif is_student:\n filtered_data = query_set.filter(students=user)\n\n else:\n filtered_data = query_set\n\n return filtered_data\n\n\nclass CourseWindowRecordAdmin(admin.ModelAdmin):\n list_display = ('id', 'course', 'start_timestamp',\n 'attendance_duration_in_minutes',)\n search_fields = ['id', 'course__course_name',\n 'course__course_code', 'course__section']\n raw_id_fields = ('course',)\n\n\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(CourseWindowRecord, CourseWindowRecordAdmin)\n","repo_name":"thecodepapaya/fase-backend","sub_path":"course/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"74859387358","text":"import json\nimport os\nimport asyncio\nimport discord\nfrom discord.ext import commands\n\n\nintents = discord.Intents.default()\nintents.message_content = True\nbot = commands.Bot(command_prefix=commands.when_mentioned_or(\">\"),description=\"Radio bot\",intents = intents)\n\n@bot.command()\nasync def h(ctx):\n\thelpEmbed = discord.Embed(title=\"Commands\")\n\thelpEmbed.add_field(name=\">play *radio id*\",value=\"Plays the requested radio stations (check >stations for available stations)\",inline=False)\n\thelpEmbed.add_field(name=\">stop\",value=\"Stops playing the current radio station and disconnects\",inline=False)\n\thelpEmbed.add_field(name=\">stations\",value=\"Lists available radio stations and their radio id\",inline=False)\n\tawait ctx.send(embed=helpEmbed)\n\nclass Radio(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.bot = bot\n\n\t\tradioids = open(\"radioid.json\")\n\t\tself.radioids = json.load(radioids)\n\t\tradioids.close()\n\n\t\tstations = open(\"stations.json\")\n\t\tself.stations = json.load(stations)\n\t\tstations.close()\n\n\t\tself.ffmpegPath = \"ffmpeg\\\\bin\\\\ffmpeg.exe\"\n\t\tself.ffmpegOptions = {'before_options':'-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5','options':'-vn'}\n\t\tself.currstation = None\n\n\t@commands.command()\n\tasync def play(self,ctx):\n\t\t\"\"\"join a voice channel and start playing requested radio station\"\"\"\n\t\tradioid = str(ctx.message.content).split()[1]\n\t\t#print(\"play\")\n\t\tif radioid in self.radioids.keys(): #validate\n\t\t\tif radioid != self.currstation:\n\t\t\t\tself.currstation = radioid\n\t\t\t\tvoice = ctx.voice_client\n\t\t\t\tif voice.is_playing():\n\t\t\t\t\tvoice.stop()\n\n\t\t\t\tvoice.play(discord.FFmpegPCMAudio(source=self.stations[radioid],options=self.ffmpegOptions,executable=self.ffmpegPath))\n\t\t\t\t#print(f\"playing {self.radioids[radioid]}\")\n\t\t\t\tawait ctx.send(f\"Playing {self.radioids[radioid]}\")\n\t\t\telse:\n\t\t\t\tawait ctx.send(f\"Already playing {self.radioids[radioid]}!\")\n\t\telse:\n\t\t\t\t#print(\"unknown radio id\")\n\t\t\t\tawait ctx.send(f\"Unknown radio id: {self.radioids[radioid]}\")\n\n\n\t@commands.command()\n\tasync def stop(self,ctx):\n\t\t\"\"\"check_voice then dc bot\"\"\"\n\t\t#print(\"disconnecting\")\n\t\tself.currstation = None\n\t\tawait ctx.voice_client.disconnect()\n\n\t@commands.command()\n\tasync def stations(self,ctx):\n\t\t\"\"\"show available radio stations\"\"\"\n\t\tf = open(\"stations.json\")\n\t\turls = json.load(f)\n\t\tf.close()\n\t\tstationsEmbed = discord.Embed(title=\"Available stations\")\n\t\tfor key in urls.keys():\n\t\t\tid = key.lower().replace(\" \",\"\")\n\t\t\tstationsEmbed.add_field(name=key,value=f\"type: >play {id}\",inline=\"false\")\n\t\tawait ctx.send(embed=stationsEmbed)\n\n\t@play.before_invoke\n\t@stop.before_invoke\n\tasync def check_voice(self,ctx):\n\t\t#print(\"check_voice\")\n\t\t\"\"\"check if author is in a voice channel\"\"\"\n\t\t\n\t\t\"\"\"if no voice_client or there is voice client but diff voice channel\"\"\"\n\t\tif ctx.voice_client is None:\n\t\t\tif ctx.author.voice: #if message author in vc\n\t\t\t\tawait ctx.author.voice.channel.connect() #connect the bot\n\t\t\telse:\n\t\t\t\tawait ctx.send(\"Connect to a voice channel!\") #message author not in the channel\n\t\telif ctx.voice_client.is_playing(): #if bot is currently playing something, stop. 
(for stop command)\n\t\t\tif len(str(ctx.message.content).split()) > 1: #there is message content so switch radio stations\n\t\t\t\t#switch radio stations\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tctx.voice_client.stop()\t\n\n@bot.event\nasync def on_ready():\n print(f\"{bot.user} has connected to discord\")\n\nasync def main():\n\tasync with bot:\n\t\t#with open(\"token.txt\",\"r\") as f:\n\t\t\t#token = f.readline()\n\t\t\n\t\tawait bot.add_cog(Radio(bot))\n\t\tawait bot.start(os.environ.get(\"BOT_KEY\"))\n\nif __name__ == \"__main__\":\n\tasyncio.run(main())\n","repo_name":"tobyL05/discord-radio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"29829182537","text":"import boto3\nimport csv\n\n# Initialize a DynamoDB client\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n\n# Select your DynamoDB table\ntable = dynamodb.Table('american-chip-cloud-instances-compare')\n\n# Initialize scan operation\nresponse = table.scan()\n\n# Get all unique headers\nheaders = set()\nfor item in response['Items']:\n for key in item.keys():\n headers.add(key)\n\n# Create CSV file\nwith open('dynamo_download.csv', 'w', newline='') as f:\n writer = csv.DictWriter(f, fieldnames=headers)\n writer.writeheader()\n for item in response['Items']:\n writer.writerow({header: item.get(header) for header in headers})\n \n # Check if there are more items to fetch\n while 'LastEvaluatedKey' in response:\n response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])\n for item in response['Items']:\n writer.writerow({header: item.get(header) for header in headers})\n","repo_name":"Jungas80/testacpublic","sub_path":"carbon/dynamo_downloader.py","file_name":"dynamo_downloader.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41466682063","text":"import os\nimport yaml\n\n\nclass Error(Exception):\n pass\n\n\nclass FileNotFoundError(Error):\n def __init__(self, filename, paths):\n self._filename = filename\n self._paths = paths\n\n def __str__(self):\n message = [\n '%s: file not found' % self._filename,\n 'search paths:',\n ] + self._paths\n return os.linesep.join(message)\n\ndef find_file(filename, paths=['.', ]):\n for path in paths:\n path_to_file = os.path.join(path, filename)\n if os.path.isfile(path_to_file):\n return path_to_file\n return None\n\n\ndef find_file_in_search_path(filename, envvar):\n paths = os.environ.get(envvar, os.getcwd()).split(os.pathsep)\n found = find_file(filename, paths=paths)\n if found:\n return found\n else:\n raise FileNotFoundError(filename, paths)\n\n\ndef find_simulation_input_file(filename):\n return find_file_in_search_path(filename, 'WMT_INPUT_FILE_PATH')\n\n\ndef yaml_dump(filename, envvar):\n with open(filename, 'w') as fp:\n yaml.safe_dump(envvar, fp, default_flow_style=False)\n","repo_name":"csdms/wmt","sub_path":"wmt/utils/hook.py","file_name":"hook.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"30498303901","text":"kleuren = ['oranje', 'blauw', 'groen', 'bruin']\nkleurendic = {\n \"groen\" : 2,\n \"blauw\" : 4,\n \"oranje\" : 5,\n \"bruin\" : 10\n}\n\nimport random\nfrom random import randrange\nhvl = input('Hoeveel kleuren moeten er toegevoegd worden? ')\ndef kleurm(hvl):\n sav = 0\n for i in range(0,int(hvl)):\n num = random.randrange(0,4)\n kleurendic[kleuren[num]] += 1\n sav = kleurendic\n print(\"--------------\")\n print(\"Gesorteerde M&M's:\")\n print(\"\")\n print(\"Groen: \"+ str(sav[\"groen\"]))\n print(\"Blauw: \"+ str(sav[\"blauw\"]))\n print(\"Oranje: \"+str(sav[\"oranje\"]))\n print(\"Bruin: \"+str(sav[\"bruin\"]))\n\nkleurm(hvl)","repo_name":"TobiasKooijman/Collections","sub_path":"M&M2.py","file_name":"M&M2.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"35011357253","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\"\"\"\nPay attention when you need to return \n\"\"\"\n\n\nclass Solution:\n # @param A : root node of tree\n # @param B : integer\n # @return an integer\n def findSum(self, node, Sum, B):\n if not node:\n return\n if not node.left and not node.right:\n if Sum == B:\n return True\n if node.left:\n if self.find_sum(node.left, Sum + node.left.val, B):\n return True\n if node.right:\n if self.find_sum(node.right, Sum + node.right.val, B):\n return True\n\n def hasPathSum(self, A, B):\n Sum = A.val\n k = self.find_sum(A, Sum, B)\n if k:\n return 1\n else:\n return 0\n","repo_name":"amirkashi/interviewBit","sub_path":"Tree_Data_Structure/Path_Sum.py","file_name":"Path_Sum.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"22643907526","text":"#\n# @lc app=leetcode id=283 lang=python3\n#\n# [283] Move Zeroes\n#\n\n# @lc code=start\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n# solution 1:\n# remove all 0s, and then append to the end.\n '''\n length_previous = len(nums)\n while True:\n try:\n nums.remove(0)\n except ValueError:\n break\n length_next = len(nums)\n nums = nums + [0 for _ in range(length_previous - length_next)]\n '''\n# solution 2:\n# swap the first zero and the first non-zero.\n count_zero = 0\n for i in range(len(nums)):\n if nums[i] == 0:\n count_zero += 1\n else:\n temp = nums[i]\n nums[i] = 0\n nums[i - count_zero] = temp\n# @lc code=end\n\n","repo_name":"shhqq/leetcodeTe","sub_path":"283.move-zeroes.py","file_name":"283.move-zeroes.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"16021364202","text":"#MQTT Client\n\nimport paho.mqtt.client as mqtt\n\ndef on_connect(client, userdata, flags, rc):\n\tprint(\"Connected with result code \" + str(rc))\n\t\n\tclient.subscribe(\"Kaiyuan/jidinghe\")\n\tclient.subscribe(\"Kaiyuan/topic\")\n\t\ndef on_message(client, userdata, msg):\n\tprint(msg.topic + \" \" + str(msg.payload))\n\t\n\tif msg.payload == \"电源\":\n\t\tprint(\"电源\")\n\t\tos.system('irsend SEND_ONCE kaiyuanTV KEY_POWER')\n\t\t\n\t\t\n# Create and MQTT client\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"test.mosquitto.org\", 1883, 60)\n\nclient.loop_forever()","repo_name":"hybian/HomeRemote","sub_path":"RaspberryPi/Python Scripts/mqtt_client.py","file_name":"mqtt_client.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"12342832281","text":"# --------------\nimport pandas as pd \nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')\n# Load the dataset and create column `year` which stores the year in which match was played\ndata_ipl = pd.read_csv(path)\ndata_ipl['year'] = data_ipl['date'].str[:4]\n\n# Plot the wins gained by teams across all seasons\nmatch_wise_data = data_ipl.drop_duplicates(subset = 'match_code')\ntotal_wins = match_wise_data['winner'].value_counts().sort_values()\ntotal_wins.plot(kind='barh',title = 'The No. of Wins accross all Seasons')\nplt.show()\n# Plot Number of matches played by each team through all seasons\ntemp_data = pd.melt(match_wise_data, id_vars=['match_code', 'year'], value_vars= ['team1', 'team2'])\nmatches_played = temp_data['value'].value_counts().sort_values()\nmatches_played.plot(kind = 'barh',title = 'The Total No of Matches Played by Each in all Seasons are')\nplt.show()\n# Top bowlers through all seasons\nwickets = data_ipl[(data_ipl['wicket_kind']=='bowled')|(data_ipl['wicket_kind']=='caught')|(data_ipl['wicket_kind']=='lbw')|(data_ipl['wicket_kind']=='caught and bowled')]\nbowlers_wickets = wickets.groupby(['bowler'])['wicket_kind'].count().sort_values(ascending=False)\nbowlers_wickets[:10].plot(kind='barh',title = 'The Total No of Wickets per bowler is')\nplt.show()\n# How did the different pitches behave? What was the average score for each stadium?\nscore_per_venue = data_ipl.loc[:,['match_code','inning','venue','total']]\naverage_score_per_venue = score_per_venue.groupby(['match_code','venue','inning'])[['total']].sum()\naverage_score_per_venue = average_score_per_venue.groupby(['venue'])[['total']].mean().sort_values(by='total',ascending=False)\naverage_score_per_venue[:10].plot(kind = 'barh',title = 'Average Score Per Venue ')\nplt.show()\n\n# Types of Dismissal and how often they occur\ndismissed = data_ipl.groupby(['wicket_kind']).count().reset_index()\ndismissed = dismissed[['wicket_kind','delivery']]\ndismissed = dismissed.rename(columns = {'delivery':'counts'})\ndismissed = dismissed.sort_values(by='counts',ascending=False)\ndismissed = dismissed.set_index('wicket_kind')\ndismissed[:5].plot(kind='barh',title = 'Top 5 Dismissal')\nplt.show()\n# Plot no. 
of boundaries across IPL seasons\nboundaries_data = data_ipl.loc[:,['runs','year']]\nboundaries_four = boundaries_data[boundaries_data['runs']==4]\nfours = boundaries_four.groupby(['year'])[['runs']].count().sort_values(by='runs',ascending=False)\nfours.plot(kind='barh',title = 'No of Boundaries Across all Seasons')\nplt.show()\n# Average statistics across all seasons\nper_match_data = data_ipl.drop_duplicates(subset='match_code', keep='first').reset_index(drop=True)\n\ntotal_runs_per_season = data_ipl.groupby('year')['total'].sum()\nballs_delivered_per_season = data_ipl.groupby('year')['delivery'].count()\nno_of_match_played_per_season = per_match_data.groupby('year')['match_code'].count()\navg_balls_per_match = balls_delivered_per_season/no_of_match_played_per_season\navg_runs_per_match = total_runs_per_season/no_of_match_played_per_season\navg_runs_per_ball = total_runs_per_season/balls_delivered_per_season\navg_data = pd.DataFrame([no_of_match_played_per_season, avg_runs_per_match, avg_balls_per_match, avg_runs_per_ball])\navg_data.index =['No.of Matches', 'Average Runs per Match', 'Average balls bowled per match', 'Average runs per ball']\navg_data.T.plot(kind='bar', figsize = (12,10), colormap = 'coolwarm')\nplt.xlabel('Season')\nplt.ylabel('Average')\nplt.legend(loc=9,ncol=4);\n\n\n\n","repo_name":"niketkaria/data-visualization-matplotlib-practice","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
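The record tallies only fours; a short sketch under the same column-name assumptions ('runs', 'year') that counts fours and sixes per season in one pass:

```python
# Boundaries (4s and 6s) per season, one column per boundary type.
boundaries = data_ipl[data_ipl['runs'].isin([4, 6])]
per_season = boundaries.groupby(['year', 'runs']).size().unstack(fill_value=0)
print(per_season)
```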
+{"seq_id":"41168950173","text":"# This migration has no effect in practice. It exists to stop\n# Django from autodetecting migrations in taggit when users\n# update to Django 4.0.\n# See https://docs.djangoproject.com/en/stable/releases/4.0/#migrations-autodetector-changes\nimport django.db.models.deletion\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"contenttypes\", \"0002_remove_content_type_name\"),\n (\"taggit\", \"0003_taggeditem_add_unique_index\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"taggeditem\",\n name=\"content_type\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"%(app_label)s_%(class)s_tagged_items\",\n to=\"contenttypes.contenttype\",\n verbose_name=\"content type\",\n ),\n ),\n migrations.AlterField(\n model_name=\"taggeditem\",\n name=\"tag\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"%(app_label)s_%(class)s_items\",\n to=\"taggit.tag\",\n ),\n ),\n ]\n","repo_name":"jazzband/django-taggit","sub_path":"taggit/migrations/0004_alter_taggeditem_content_type_alter_taggeditem_tag.py","file_name":"0004_alter_taggeditem_content_type_alter_taggeditem_tag.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":3143,"dataset":"github-code","pt":"51"}
+{"seq_id":"13435179288","text":"import copy\nimport sys\nfrom datetime import datetime\n\nimport numpy\n\nfrom algorithms.Perceptron import MultiPerceptron\nfrom utils.Config_p import Config\nfrom utils.Graph import graph, graph_multi\nfrom utils.PerceptronParameters import PerceptronParameters\n\n\ndef __main__():\n print('Argument List:', str(sys.argv))\n assert len(sys.argv) == 4, 'Missing arguments'\n f = open(sys.argv[1])\n config: Config = Config(f.read())\n f.close()\n\n x = []\n with open(sys.argv[2], 'r') as inputs_file:\n for line in inputs_file:\n values = line.split()\n aux = []\n for v in values:\n aux.append(float(v))\n aux.append(float(1))\n x.append(aux)\n x = numpy.array(x)\n\n k = config.k\n if len(x) % k != 0:\n print(\"length of training set is not divisible by k-fold parameter.\")\n return 0\n\n y: [] = []\n with open(sys.argv[3], 'r') as expected_outputs_file:\n for line in expected_outputs_file:\n values = line.split()\n aux = []\n for v in values:\n aux.append(float(v))\n y.append(numpy.array(aux))\n\n y = numpy.array(y)\n\n perceptron_parameters: PerceptronParameters = PerceptronParameters(config)\n\n perceptron: MultiPerceptron = MultiPerceptron(perceptron_parameters, len(x[0]), len(y[0]))\n\n # Seleccionar cuál es el mejor betha para entrenar a la red\n bethas = [0.1, 0.2, 0.5, 0.8, 1, 1.2, 1.5, 2]\n aux_parameters = copy.deepcopy(perceptron_parameters)\n errors_logistic = []\n errors_tanh = []\n\n for i in range(len(bethas)):\n train_aux(perceptron, x, y, bethas[i], 'tanh', errors_tanh, aux_parameters)\n train_aux(perceptron, x, y, bethas[i], 'logistic', errors_logistic, aux_parameters)\n\n graph(bethas, errors_logistic, 'Betha', 'Error', 'Errores para distintos bethas (logistic)')\n graph(bethas, errors_tanh, 'Betha', 'Error', 'Errores para distintos bethas (tanh)')\n\n print('Running ' + config.perceptron_algorithm + '...')\n perceptron.__init__(perceptron_parameters, len(x[0]), len(y[0]))\n results = perceptron.train(x, y)\n print(config.perceptron_algorithm + ' finished.')\n output_dir = './errors_' + config.perceptron_algorithm + '_' + datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") + '.png'\n graph(range(results.iterations), results.errors, 'x', 'y', 'Errores por Iteración', output_dir=output_dir)\n\n # Cuál es la mejor cantidad de capas y unidades\n layers = [[2], [2, 2], [3, 3], [6, 6], [3, 2, 3]]\n aux_parameters = copy.deepcopy(perceptron_parameters)\n\n x_graph = []\n y_graph = []\n labels = []\n for i in range(len(layers)):\n aux_parameters.layers = layers[i]\n perceptron.__init__(aux_parameters, len(x[0]), len(y[0]))\n results = perceptron.train(x, y)\n x_graph.append(range(results.iterations))\n y_graph.append(results.errors)\n labels.append(str(layers[i]))\n\n graph_multi(x_graph, y_graph, 'x', 'y', 'Errores por Iteración usando distinta cantidad de capas', labels)\n\n\ndef train_aux(perceptron, x, y, betha, function, errors, parameters):\n parameters.betha = betha\n parameters.function = function\n perceptron.__init__(parameters, len(x[0]), len(y[0]))\n r_train = perceptron.train(x, y)\n errors.append(r_train.errors[-1])\n\n\nif __name__ == \"__main__\":\n __main__()\n","repo_name":"JuArce/SIA_TPs","sub_path":"TP3/src/ej3_1.py","file_name":"ej3_1.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"16470744761","text":"class Node:\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n\nclass BST:\n def __init__(self):\n self.root = None\n\n def insert(self, key):\n self.root = self._insert(self.root, key)\n\n def _insert(self, root, key):\n if root is None:\n return Node(key)\n\n if key < root.key:\n root.left = self._insert(root.left, key)\n elif key > root.key:\n root.right = self._insert(root.right, key)\n return root\n\n def search(self, key):\n return self._search(self.root, key)\n\n def _search(self, root, key):\n if root is None or root.key == key:\n return root\n\n if key < root.key:\n return self._search(root.left, key)\n else:\n return self._search(root.right, key)\n\n def inorder_traversal(self):\n result = []\n self._inorder_traversal(self.root, result)\n return result\n\n def _inorder_traversal(self, root, result):\n if root:\n self._inorder_traversal(root.left, result)\n result.append(root.key)\n self._inorder_traversal(root.right, result)\n\n def delete(self, key):\n self.root = self._delete(self.root, key)\n\n def _delete(self, root, key):\n if root is None:\n return root\n\n if key < root.key:\n root.left = self._delete(root.left, key)\n elif key > root.key:\n root.right = self._delete(root.right, key)\n else:\n # node with only one child or no child\n if root.left is None:\n return root.right\n elif root.right is None:\n return root.left\n\n # Node with two children, get the inorder successor\n # (smallest in the right subtree)\n root.key = self._min_value_node(root.right).key\n\n # delete the inorder successor\n root.right = self._delete(root.right, root.key)\n return root\n\n def _min_value_node(self, node):\n current = node\n\n while current.left is not None:\n current = current.left\n return current\n\n\nif __name__ == '__main__':\n bst = BST()\n\n bst.insert(5)\n bst.insert(3)\n bst.insert(7)\n bst.insert(2)\n bst.insert(4)\n bst.insert(6)\n bst.insert(8)\n\n print(f\"Inorder traversal: {bst.inorder_traversal()}\")\n\n # search for a key\n search_key = 4\n result = bst.search(search_key)\n\n if result:\n print(f\"Key {search_key} found in the tree\")\n else:\n print(f\"Key {search_key} not found in the tree\")\n\n # delete a key\n delete_key = 3\n bst.delete(delete_key)\n print(f\"Key {delete_key} deleted from the tree.\")\n\n print(f\"Inroder traversal after deletion: {bst.inorder_traversal()}\")\n","repo_name":"ralphmarondev/PythonPractice","sub_path":"src/dsa/trees/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"14925902047","text":"import sys\r\nimport time\r\nimport numpy as np\r\nimport cv2\r\nfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\r\nfrom tqdm import tqdm\r\n\r\nclass Video(object):\r\n MIN_MATCHES = 15\r\n\r\n def __init__(self, ref_path, tem_path, video_path):\r\n self.ref_path = ref_path\r\n self.tem_path = tem_path\r\n self.video_path = video_path\r\n\r\n self.ref_img, \\\r\n self.tem_img, \\\r\n self.video, \\\r\n self.videowriter = self._getdata()\r\n self.frames = self._getframe()\r\n\r\n self.surf = cv2.xfeatures2d.SURF_create()\r\n self.bf = cv2.BFMatcher_create()\r\n\r\n self.kp_tem, self.des_tem = self.surf.detectAndCompute(self.tem_img, None)\r\n \r\n def _getdata(self):\r\n tem_image = cv2.imread(self.tem_path)\r\n ref_image = cv2.imread(self.ref_path)\r\n video = cv2.VideoCapture(self.video_path)\r\n\r\n ref_h, ref_w = tem_image.shape[0], tem_image.shape[1]\r\n ref_image = cv2.resize(ref_image, (ref_h, ref_w))\r\n \r\n film_h, film_w = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n film_fps = video.get(cv2.CAP_PROP_FPS)\r\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\r\n videowriter = cv2.VideoWriter(\"./output/ar_video.mp4\", fourcc, film_fps, (film_w, film_h))\r\n\r\n return ref_image, tem_image, video, videowriter\r\n \r\n def _getframe(self):\r\n frames = []\r\n\r\n while(self.video.isOpened()):\r\n ret, frame = self.video.read()\r\n\r\n if not ret:\r\n break\r\n \r\n frames.append(frame)\r\n \r\n return frames\r\n\r\n def process_frame(self, frame_id):\r\n kp_f, des_f = self.surf.detectAndCompute(self.frames[frame_id], None)\r\n knn_matches = self.bf.knnMatch(self.des_tem, des_f, k=2)\r\n\r\n ratio_thresh = 0.75\r\n good_matches = []\r\n for m,n in knn_matches:\r\n if m.distance < ratio_thresh * n.distance:\r\n good_matches.append(m)\r\n\r\n if len(good_matches) > Video.MIN_MATCHES:\r\n p_template = np.array([self.kp_tem[m.queryIdx].pt for m in good_matches], dtype=np.float64).reshape(-1, 1, 2)\r\n p_frame = np.array([kp_f[m.trainIdx].pt for m in good_matches], dtype=np.float64).reshape(-1, 1, 2)\r\n homography, _ = cv2.findHomography(p_template, p_frame, cv2.RANSAC, 5.0)\r\n\r\n h, w, _ = self.ref_img.shape\r\n self.frames[frame_id] = cv2.warpPerspective(self.ref_img, homography,\r\n (self.frames[frame_id].shape[1], self.frames[frame_id].shape[0]),\r\n dst=self.frames[frame_id],\r\n borderMode=cv2.BORDER_TRANSPARENT)\r\n \r\n def run(self):\r\n ts = time.time()\r\n\r\n for idx in range(len(self.frames)):\r\n print(idx)\r\n self.videowriter.write(self.process_frame(idx))\r\n\r\n te = time.time()\r\n print(te-ts)\r\n \r\n for frame in self.frames:\r\n self.videowriter.write(frame)\r\n\r\n self._clean()\r\n\r\n def _clean(self):\r\n self.video.release()\r\n self.videowriter.release()\r\n cv2.destroyAllWindows()\r\n\r\ndef main(ref_image,template,video):\r\n AR_video = Video(ref_image, template, video)\r\n AR_video.run()\r\n\r\nif __name__ == '__main__':\r\n ref_path = './input/sychien.jpg'\r\n template_path = './input/marker.png'\r\n video_path = sys.argv[1]\r\n main(ref_path,template_path,video_path)","repo_name":"Bonen0209/Computer_Vision","sub_path":"HW3/part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"13410560489","text":"#coding=utf-8\n\n'''\n\ngc的demo\n搞清gc的垃圾回收机制,引用计数 零代回收\n不用自己去定义新的回收机制\n\n在类中定义__del__() 方法后,要在内部添加object的gc回收功能,否则gc无法自动删除占用的内存空间\n\n'''\n\nimport gc\n\nclass Demo(object):\n def __init__(self):\n print(\"Object is born,id:%s\"%str(hex(id(self))))\n\n\ndef f2():\n while True:\n a = Demo()\n b = Demo()\n a.t1 = b\n b.t1 = a\n del a\n del b\n gc.collect()\n\ngc.disable()\nf2()\n","repo_name":"Thorjezar/PythonCore","sub_path":"第三日/01.垃圾回收机制-3.py","file_name":"01.垃圾回收机制-3.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"33399910139","text":"import math\nimport sys\n\nclass Transformations():\n def __init__(self, moveX, moveY, alterSizeX, alterSizeY=0, rotation=0):\n self.moveX = moveX\n self.moveY = moveY\n self.alterSizeX = alterSizeX\n self.alterSizeY = alterSizeY\n self.rotation = rotation\n\nclass Shape():\n def __init__(self, x, y, color, sizeX, sizeY, transformations):\n self.x = x\n self.y = y\n self.color = color\n self.transformations = transformations\n\n def move(self, addX, addY):\n self.x = self.x + addX\n self.y = self.y + addY\n\nclass Polygon(Shape):\n def __init__(self, x, y, color, transformations):\n self.x = x\n self.y = y\n self.color = color\n self.points = []\n self.origPoints = []\n self.rotation = 0\n self.transformations = transformations\n\n def contains(self, x, y):\n inf = sys.float_info.max\n tiny = 0.00001\n inside = False\n for edge in self.edges():\n Ax, Ay = edge[0][0], edge[0][1]\n Bx, By = edge[1][0], edge[1][1]\n if Ay > By:\n Ax, Ay, Bx, By = Bx, By, Ax, Ay\n if y == Ay or y == By:\n y += tiny\n if (y > By or y < Ay or x > max(Ax, Bx)):\n continue\n if x < min(Ax, Bx):\n inside = not inside\n continue\n try:\n m_edge = (By - Ay) / (Bx - Ax)\n except ZeroDivisionError:\n m_edge = inf\n try:\n m_point = (y - Ay) / (x - Ax)\n except ZeroDivisionError:\n m_point = inf\n if m_point >= m_edge:\n inside = not inside\n continue\n return inside\n\n def move(self, x, y):\n for p in self.origPoints:\n p[0] = p[0] + x\n p[1] = p[1] + y\n for p in self.points:\n p[0] = p[0] + x\n p[1] = p[1] + y \n\n def isOffscreen(self, screenX, screenY):\n xVals = [p[0] for p in self.points]\n yVals = [p[1] for p in self.points]\n right= max(xVals)\n left = min(xVals)\n top = min(yVals)\n bottom = max(yVals)\n return (top > screenY or bottom < 0 or right < 25 or left > 75)\n\n def rotate(self, degrees):\n self.rotation = (self.rotation + degrees) % 360\n rads = math.radians(self.rotation)\n cosang, sinang = math.cos(rads), math.sin(rads)\n for i in range(0, len(self.points)):\n x = self.origPoints[i][0]\n y = self.origPoints[i][1]\n tx, ty = x-self.x, y-self.y\n self.points[i][0] = (tx*cosang + ty*sinang) + self.x\n self.points[i][1] = (-tx*sinang +ty*cosang) + self.y\n\n def transform(self):\n self.move(self.transformations.moveX, self.transformations.moveY)\n self.rotate(self.transformations.rotation)\n\n def edges(self):\n edgeList = []\n for i, point in enumerate(self.points):\n p1 = point\n p2 = self.points[(i+1) % len(self.points)]\n edgeList.append([p1,p2])\n return edgeList\n\nclass Circle(Shape):\n def __init__(self, x, y, color, radius, transformations):\n self.x = x\n self.y = y\n self.color = color\n self.radius = radius\n self.transformations = transformations\n\n def alterSize(self, size):\n self.radius = self.radius + size\n \n def doTransform(self, moveX, moveY, alterSize):\n self.move(moveX, moveY)\n self.alterSize(alterSize)\n\n def transform(self):\n self.doTransform(self.transformations.moveX, self.transformations.moveY, self.transformations.alterSizeX)\n\n def contains(self, x, y):\n return (x-self.x)**2 + (y - self.y)**2 < self.radius**2\n\n def containsInBorder(self, x, y, borderWidth):\n hyp = (x-self.x)**2 + (y - self.y)**2 \n return hyp < self.radius**2 and hyp > max(0, (self.radius-borderWidth)**2)\n\n def isOffscreen(self, screenX, screenY):\n right= self.x + self.radius\n left = self.x - self.radius\n top = self.y - self.radius\n bottom = self.y + self.radius\n return (top > screenY or bottom < 0 or right < 25 or left > 75)\n\nclass Rectangle(Polygon):\n 
def __init__(self, x, y, color, sizeX, sizeY, transformations):\n        self.x = x\n        self.y = y\n        self.color = color\n        self.sizeX = sizeX\n        self.sizeY = sizeY\n        self.transformations = transformations\n        self.points = [[x-sizeX/2, y-sizeY/2],[x-sizeX/2, y+sizeY/2],[x+sizeX/2, y+sizeY/2],[x+sizeX/2, y-sizeY/2]]\n        self.origPoints = [[x-sizeX/2, y-sizeY/2],[x-sizeX/2, y+sizeY/2],[x+sizeX/2, y+sizeY/2],[x+sizeX/2, y-sizeY/2]]\n        self.rotation = 0\n\n    def containsWithoutRotation(self, x, y):\n        centerXDist = self.sizeX / 2\n        centerYDist = self.sizeY / 2\n        return (x<(self.x + centerXDist) and x>(self.x-centerXDist) and y<(self.y + centerYDist) and y>(self.y-centerYDist))\n\n    def isOffscreenWithoutRotation(self, screenX, screenY):\n        centerXDist = self.sizeX / 2\n        centerYDist = self.sizeY / 2\n        right= self.x + centerXDist\n        left = self.x - centerXDist\n        top = self.y - centerYDist\n        bottom = self.y + centerYDist\n        # offscreen once the rectangle is fully past any single edge\n        return (top > screenY or bottom < 0 or right < 0 or left > screenX)\n\n\n    def transform(self):\n        self.sizeX = self.sizeX + self.transformations.alterSizeX\n        self.sizeY = self.sizeY + self.transformations.alterSizeY\n        self.origPoints = [[self.x-self.sizeX/2, self.y-self.sizeY/2],[self.x-self.sizeX/2, self.y+self.sizeY/2],[self.x+self.sizeX/2, self.y+self.sizeY/2],[self.x+self.sizeX/2, self.y-self.sizeY/2]]\n        Polygon.transform(self)","repo_name":"peterbatridge/ennea-LED","sub_path":"shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":5765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
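A small usage sketch for the classes above (coordinates and sizes are illustrative): a rectangle rotated 45 degrees, probed with the ray-casting contains():

```python
t = Transformations(moveX=0, moveY=0, alterSizeX=0)
r = Rectangle(50, 50, None, 20, 10, t)
r.rotate(45)
print(r.contains(50, 50))  # True: the center stays inside under rotation
print(r.contains(0, 0))    # False: far outside the polygon
```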
+{"seq_id":"10039710772","text":"from collections import Counter\nclass Solution(object):\n def removeDuplicates(self, nums):\n i=0\n while i обработчик генератор\n self.storages = defaultdict(HandMadeStorage) # id чата -> хранилище\n logging.basicConfig(filename='bot.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\n def start(self):\n self.updater.start_polling()\n self.updater.idle()\n\n def handle_message(self, bot, update):\n chat_id = update.message.chat_id\n logging.info(f'chat_id: {chat_id}, received: {update.message}')\n\n if not chat_id in self.handlers:\n next(self.handlers[chat_id]) # запускаем генератор\n\n # обработка\n answer = ''\n try:\n answer = self.handlers[chat_id].send((update.message, self.storages[chat_id]))\n except StopIteration:\n # если при этом генератор закончился, начинаем общение с начала\n del self.handlers[chat_id]\n # повторно вызванный, этот метод запустит заново цикл обработки сообщений\n return self.handle_message(bot, update)\n except Exception as e:\n if hasattr(e, 'message'):\n logging.error(e.message)\n else:\n logging.error(e)\n\n logging.info(f'chat_id: {chat_id}, answer: {answer}')\n bot.sendMessage(chat_id=chat_id, text=answer)\n","repo_name":"RomanSteinberg/FirstBot","sub_path":"people_bot/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"70261314718","text":"#encoding:UTF-8\nimport urllib.request\nimport re\nimport os\nimport sqlite3\n\n\"\"\"\n程序名称:luowang_crapy.py\n设计者:Dash\n对落网(http://www.luoo.net/)上所有期刊推荐音乐的歌手,专辑及歌名进行爬取\n\"\"\"\n\ndef get_index(i):\n index = []\n url = r'http://www.luoo.net/tag/?p='+str(i)\n response = urllib.request.urlopen(url)\n data = response.read().decode('utf-8')\n pattern_index = re.compile(r'(.*?)')\n vol = re.findall(pattern_vol, data)\n #每期期刊名称\n pattern_title = re.compile('(.*?) ')\n title = re.findall(pattern_title, data)\n #歌名\n pattern_name = re.compile('(.*?)
')\n name = re.findall(pattern_name, data)\n #歌手\n pattern_artist = re.compile('Artist: (.*?)
')\n artist = re.findall(pattern_artist, data)\n #专辑名\n pattern_album = re.compile('Album: (.*?)
')\n album = re.findall(pattern_album, data)\n #专辑图片链接\n # pattern_img = re.compile(r' = court.width - 5:\r\n winner = \"red\"\r\n ball.stop_ball()\r\ndef score():\r\n global green_score\r\n global red_score\r\n global winner\r\n if winner == \"red\":\r\n red_score += 1\r\n winner = ''\r\n elif winner == \"green\":\r\n green_score += 1\r\n winner = ''\r\n court.draw_score(red_score, green_score)\r\nplay_game()\r\nwin.mainloop()","repo_name":"StarN2uk/python_S","sub_path":"python_S_12 연습/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
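The HTML-tag halves of the regex patterns above were evidently stripped during extraction, leaving bare '(.*?)' groups with nothing to anchor to. For illustration only (the real luoo.net markup may differ, and the class name below is an assumption), such a scraping pattern normally keeps the surrounding tags:

```python
import re

# Hypothetical markup; "vol-title" is a guessed class name, not a recovered value.
pattern_title = re.compile(r'<a class="vol-title" href=".*?">(.*?)</a>')
```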
+{"seq_id":"2571826880","text":"import time\nimport logging\nfrom selenium.common.exceptions import JavascriptException, TimeoutException\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\nclass ConnectAndExecuteScript:\n driver: WebDriver\n\n def __init__(self, driver):\n self.driver = driver\n\n def run(self, url, script=None, max_repeat=1, wait_element=None):\n for i in range(max_repeat):\n try:\n self.driver.get(url)\n if script is not None:\n self.driver.execute_script(script)\n\n if wait_element is not None:\n WebDriverWait(self.driver, 10).until(\n EC.presence_of_element_located((By.TAG_NAME, wait_element))\n )\n break\n except JavascriptException:\n if i == 3:\n logging.info('fail!1 {0}'.format(script))\n return None\n time.sleep(8)\n except IndexError:\n if i == 3:\n print('fail! {0}'.format(script))\n return None\n time.sleep(8)\n except TimeoutException:\n if i == 3:\n print('fail! {0}'.format(script))\n return None\n time.sleep(8)\n return self.driver.page_source\n","repo_name":"limdongjin/smart-crawler","sub_path":"crawl/connect_and_execute_script.py","file_name":"connect_and_execute_script.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"8909665983","text":"import asyncio\nimport functools\nimport socket\nfrom contextlib import contextmanager\nfrom random import choice\n\nimport aiohttp\nfrom termcolor import colored, COLORS\nfrom tqdm import tqdm\n\ncolors = list(COLORS)\n\n\ndef print_colored_kv(k, v):\n tqdm.write(\n colored(' ' + k + ': ', color=choice(colors), attrs=['bold']) +\n colored(v, color='white', attrs=['bold'])\n )\n\n\nclass ClosedRange:\n def __init__(self, begin, end):\n self.begin = begin\n self.end = end\n\n def __iter__(self):\n yield self.begin\n yield self.end\n\n def __str__(self):\n return '[{0.begin}, {0.end}]'.format(self)\n\n @property\n def size(self):\n return self.end - self.begin + 1\n\n\ndef retry(coro_func):\n @functools.wraps(coro_func)\n async def wrapper(self, *args, **kwargs):\n tried = 0\n while True:\n tried += 1\n try:\n return await coro_func(self, *args, **kwargs)\n except (aiohttp.ClientError, socket.gaierror) as exc:\n try:\n if 400 <= exc.code < 500:\n raise exc\n except AttributeError:\n pass\n if tried <= self.max_tries:\n sec = tried / 2\n await asyncio.sleep(sec)\n else:\n raise exc\n except asyncio.TimeoutError:\n await asyncio.sleep(1)\n\n return wrapper\n\n\n@contextmanager\ndef connecting(msg='Connecting'):\n length = len(msg)\n tqdm.write(colored(msg, 'grey', attrs=['bold']),end='')\n\n async def print_dots():\n while True:\n try:\n await asyncio.sleep(1)\n except asyncio.CancelledError:\n break\n tqdm.write(colored('.', 'grey', attrs=['bold']), end='')\n nonlocal length\n length += 1\n\n fut = asyncio.ensure_future(print_dots())\n try:\n yield\n finally:\n fut.cancel()\n print('\\r' + ' ' * length)\n","repo_name":"Zxilly/wire-size","sub_path":"wire_size/downloader/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"12091322616","text":"#!/usr/bin/python\n# Question 9\n# Write a program to read first n lines of a file\n\nfil = 'biglist.txt'\n\n# Again with my personal preference for opening files.\n#with open(fil) as f:\n# text = f.readlines()\n\nf = open(fil)\nrealtext = f.readlines()\n\ndef readXlines(x,text=realtext): # define a function that takes X as how many lines, and text for what text to scan (I have it set by default to our other text variable)\n for line in text:\n print(line)\n x-=1 # This is a weird way to iterate through a for loop x amounts of times, but it works. A while loop would work too, but that would make iterating through the lines more annoying.\n if x<=0:\n break\n\nreadXlines(12) # We don't have to define text since the default works with our variable\n","repo_name":"Sacmanxman2/Shared","sub_path":"Final-Q9.py","file_name":"Final-Q9.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"13663490431","text":"\"\"\"\nThis file starts the application.\n\"\"\"\n\nfrom base64 import b64decode\nimport os\nfrom io import BytesIO\n\nimport numpy as np\nfrom PIL import Image\nfrom PIL.ImageOps import invert\nfrom dash import callback_context, Input, Output, State\n\nfrom layout import app, prediction_elements\nfrom train_neural_network import NeuralNetwork\n\n\n# Load the pre-trained neural network\nmnist_test = np.genfromtxt(\"mnist_test.csv\",\n delimiter=',',\n skip_header=1,\n dtype='uint8')\nnp.random.shuffle(mnist_test)\npixel_values, labels = mnist_test.T[1:].T, mnist_test.T[0]\nneural_network = NeuralNetwork.load('network.pickle')\n\n\n# Updates the image shown to the screen\n@app.callback(Output('image', 'src'),\n Input('current-image', 'data'))\ndef handle_image(current_image):\n return current_image\n\n\n# Updates image caption\n@app.callback(Output('image-caption', 'children'),\n Input('current-image', 'data'),\n State('image-num', 'data'),\n State('using-upload', 'data'))\ndef handle_caption(current_image, image_num, using_upload):\n if using_upload:\n return \"Custom image\"\n return f\"{image_num + 1} of {len(labels)}\"\n\n\n# Disables unusable buttons, greys them out and changes their cursor type\n# Triggers when the image changes\n@app.callback(Output('next-button', 'disabled'),\n Output('back-button', 'disabled'),\n Output('reset-button', 'disabled'),\n Output('classify-all-button', 'disabled'),\n Output('next-button', 'style'),\n Output('back-button', 'style'),\n Output('reset-button', 'style'),\n Output('classify-all-button', 'style'),\n Input('current-image', 'data'),\n State('image-num', 'data'),\n State('n-predictions', 'data'),\n State('using-upload', 'data'),\n State('next-button', 'style'),\n State('back-button', 'style'),\n State('reset-button', 'style'),\n State('classify-all-button', 'style'))\ndef disable_unusable_buttons(current_image,\n image_num,\n n_predictions,\n using_upload,\n next_style,\n back_style,\n reset_style,\n classify_all_style):\n # Reset all buttons to their default state\n # Enabled, white background, pointer cursor\n next_, back, reset, classify_all = False, False, False, False\n next_style['background-color'] = 'white'\n back_style['background-color'] = 'white'\n reset_style['background-color'] = 'white'\n classify_all_style['background-color'] = 'white'\n back_style['cursor'] = 'pointer'\n next_style['cursor'] = 'pointer'\n reset_style['cursor'] = 'pointer'\n classify_all_style['cursor'] = 'pointer'\n\n # When buttons are disabled, their background colour is changed to light grey and the cursor is not-allowed\n\n # First image or using upload: disable back button\n if not image_num or using_upload:\n back = True\n back_style['background-color'] = '#d3d3d3'\n back_style['cursor'] = 'not-allowed'\n # Last image or using upload: disable next button\n if image_num == len(mnist_test) - 1 or using_upload:\n next_ = True\n next_style['background-color'] = '#d3d3d3'\n next_style['cursor'] = 'not-allowed'\n # Only one prediction seen and not using upload: disable reset button\n if n_predictions == 1 and not using_upload:\n reset = True\n reset_style['background-color'] = '#d3d3d3'\n reset_style['cursor'] = 'not-allowed'\n # All predictions seen or using upload: disable classify all button\n if n_predictions == len(mnist_test) or using_upload:\n classify_all = True\n classify_all_style['background-color'] = '#d3d3d3'\n classify_all_style['cursor'] = 'not-allowed'\n return (next_, back, reset, classify_all,\n next_style, 
back_style, reset_style, classify_all_style)\n\n\n# Handles the current prediction, accuracy counters and the current image\n@app.callback(Output('max-certainty-prediction', 'children'),\n *[Output(prediction_element.id, 'children')\n for prediction_element in prediction_elements],\n Output('prediction-accuracy', 'children'),\n Output('n-predictions', 'data'),\n Output('n-correct-predictions', 'data'),\n Output('current-image', 'data'),\n Output('image-num', 'data'),\n Output('using-upload', 'data'),\n Output('upload', 'contents'),\n Input('next-button', 'n_clicks'),\n Input('back-button', 'n_clicks'),\n Input('reset-button', 'n_clicks'),\n Input('classify-all-button', 'n_clicks'),\n Input('upload', 'contents'),\n State('n-predictions', 'data'),\n State('n-correct-predictions', 'data'),\n State('image-num', 'data'),\n State('using-upload', 'data'))\ndef handle_prediction_accuracy_current_image(\n next_,\n back,\n reset,\n classify_all,\n upload_contents,\n n_predictions,\n n_correct_predictions,\n image_num,\n using_upload):\n\n if callback_context.triggered_id == 'reset-button':\n # Disable using upload if in using upload mode\n if using_upload:\n using_upload = False\n # Else, reset accuracy counters and image counter\n else:\n n_predictions, n_correct_predictions, image_num = 0, 0, 0\n\n # Handle forward/back buttons\n elif callback_context.triggered_id == 'next-button':\n image_num += 1\n elif callback_context.triggered_id == 'back-button':\n image_num -= 1\n\n # Classify all sets the image_num to the last, classifies all images and changes the accuracy counter accordingly\n elif callback_context.triggered_id == 'classify-all-button':\n image_num = len(mnist_test) - 1\n n_predictions = len(mnist_test)\n certainties = neural_network.predict(pixel_values)\n max_certainty_predictions = np.argmax(certainties, axis=1)\n n_correct_predictions = np.sum(max_certainty_predictions == labels)\n\n # If a new image is uploaded, change using_upload to true\n elif callback_context.triggered_id == 'upload':\n using_upload = True\n\n # If using uploaded image, convert it to an array first\n if using_upload:\n *_, encoded = upload_contents.split(\",\")\n current_image = Image.open(BytesIO(b64decode(encoded)))\n current_image = current_image.resize((28, 28))\n current_image = current_image.convert('L')\n # Invert just for generating the array, since the model was trained on white text on black\n X = np.asarray(invert(current_image)).flatten()\n else:\n X = pixel_values[image_num]\n\n # Make prediction and generate the text for the GUI\n certainties, = neural_network.predict(X)\n prediction_texts = [f\"{i} - {certainty:.3f} certainty\"\n for i, certainty in enumerate(certainties)]\n # Find the prediction with the highest certainty, which is the prediction made\n max_certainty_prediction = max(enumerate(certainties),\n key=lambda x: x[1])\n max_certainty_text = (f\"{max_certainty_prediction[0]} with \"\n f\"{max_certainty_prediction[1]:.3f} certainty\")\n\n # If using an uploaded file, remove accuracy field\n if using_upload:\n accuracy_text = \"\"\n else:\n is_new_prediction = image_num + 1 > n_predictions\n is_correct = max_certainty_prediction[0] == labels[image_num]\n if is_new_prediction:\n # Only increment n_prediction or n_correct_predictions if this is a new prediction\n n_predictions = image_num + 1\n if is_correct:\n accuracy_text = \"Prediction was correct. \"\n n_correct_predictions += 1\n else:\n accuracy_text = \"Prediction was incorrect. 
\"\n else:\n if is_correct:\n accuracy_text = \"Prediction was correct. \"\n else:\n accuracy_text = \"Prediction was incorrect. \"\n accuracy_text += (f\"{n_correct_predictions}/{n_predictions} correct. \"\n f\"{n_correct_predictions/n_predictions*100:.2f}% accuracy.\")\n\n # If using upload, current image has already been generated\n if not using_upload:\n # Split 784 pixels into a into a 28x28 pixel grid\n pixels = np.array_split(pixel_values[image_num], 28)\n # Invert from white text on black to black text on white\n current_image = invert(Image.fromarray(np.array(pixels)))\n\n # Clear uploaded image, so that if the same image is uploaded again it will detect the change\n upload_contents = None\n return [max_certainty_text,\n *prediction_texts,\n accuracy_text,\n n_predictions,\n n_correct_predictions,\n current_image,\n image_num,\n using_upload,\n upload_contents]\n\n\n# If this is being run on Google Colab, display GUI inline\ndef main():\n if 'CLOUDSDK_PYTHON' in os.environ:\n app.run_server(mode='inline')\n else:\n app.run_server(debug=True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"quezec/mnist-major-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"23500839178","text":"from django.urls import path, include\r\nfrom . import views\r\nfrom .views import RegisterUser, LoginUser, logout_user, create_event1, userprofile, usercards\r\n\r\nurlpatterns = [\r\n path('home', views.index, name='home'),\r\n path('events', views.events, name='events'),\r\n path('people', views.people, name='people'),\r\n path('create_event', create_event1.as_view(), name='create_event'),\r\n path('register', RegisterUser.as_view(), name='register'),\r\n path('enter', LoginUser.as_view(), name='login'),\r\n path('logout', logout_user, name='logout'),\r\n path('event_info//', views.event_info, name='event_info'),\r\n path('success', views.success, name='success'),\r\n path('success2', views.success2, name='success2'),\r\n path('userprofile', userprofile.as_view(), name='userprofile'),\r\n path('moderpage', views.moderpage, name='moderpage'),\r\n]\r\n","repo_name":"dvblx/educational-practice-mgd","sub_path":"practice/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"864387083","text":"import pygame\nfrom ..utils import load_image\nimport numpy as np\n\n\nclass Fist(pygame.sprite.Sprite):\n \"\"\"moves a clenched fist on the screen, following the mouse\"\"\"\n\n def __init__(self, camera):\n pygame.sprite.Sprite.__init__(self) # call Sprite initializer\n self.image, self.rect = load_image('fist.bmp')\n self.punching = 0\n self.camera = camera\n\n def update(self):\n \"move the fist based on the mouse position\"\n pos = np.array(pygame.mouse.get_pos()) - np.array(self.camera.blit_position)\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10)\n\n def punch(self, target):\n \"returns true if the fist collides with the target\"\n if not self.punching:\n self.punching = 1\n hitbox = self.rect.inflate(-5, -5)\n return hitbox.colliderect(target.rect)\n\n def unpunch(self):\n \"called to pull the fist back\"\n self.punching = 0\n","repo_name":"randomgamers/saving-crying-bryan","sub_path":"saving_crying_bryan/sprites/fist.py","file_name":"fist.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"21110900736","text":"from plotting import plot_scatter, plot_samples, counterfactual_projection\nfrom src.models.model import DSVAE_prior_MNIST\nfrom torch.utils.data import DataLoader, TensorDataset\nimport torch \nimport argparse\nimport numpy as np\nimport yaml\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-input', type=str)\n parser.add_argument('-batch_size', type=int, default=64)\n\n args = parser.parse_args()\n\n input_path = args.input\n\n with open(f\"{input_path}/.hydra/config.yaml\", \"r\") as f:\n cfg = yaml.safe_load(f)\n\n with open(f\"{input_path}/.hydra/hydra.yaml\", \"r\") as f:\n cfg_hydra = yaml.safe_load(f)\n\n hparams = cfg\n color = hparams['color']\n x_dim = 392 if color else 784\n data = 'mnist' if not color else 'cmnist'\n e_in = hparams['e']\n in_data = str(int(100*e_in))\n n = hparams['n']\n if n != 0:\n in_data += f'_{int(n*100)}'\n x_dim = 392\n data_path = './data'\n dataset_train = torch.load(f'{data_path}/{data}_train_{in_data}.pt')\n dataset_val = torch.load(f'{data_path}/{data}_valid_{in_data}.pt')\n dset_train = TensorDataset(dataset_train['images'], dataset_train['labels'], dataset_train['colors'])\n dset_val = TensorDataset(dataset_val['images'], dataset_val['labels'], dataset_val['colors'])\n batch_size = hparams['batch_size']\n train_loader = DataLoader(dset_train, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dset_val, batch_size=batch_size, shuffle=True)\n\n torch.manual_seed(hparams[\"seed\"])\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n csvae = DSVAE_prior_MNIST(x_dim)\n csvae_state = torch.load(f'{input_path}/csvae.pt', map_location=device)\n csvae.load_state_dict(csvae_state)\n csvae = csvae.to(device)\n\n x, y, c = next(iter(val_loader))\n w = csvae.posteriorW(x).sample()\n z = csvae.posteriorZ(x).sample()\n perm = torch.cat((torch.arange(196, 392), torch.arange(0, 196)), dim=0)\n x_CCF = x[:, perm]\n w_CCF = csvae.posteriorW(x_CCF).sample()\n # z_CCF = csvae.posteriorZ(x_CCF).sample()\n\n # x_CCF_rec = csvae.decode(w_CCF, z_CCF)\n # x_rec = csvae.decode(w, z)\n\n # fig, axes = plt.subplots(2, 2)\n # plot_samples(axes[0, 0], x, color=True)\n # plot_samples(axes[0, 1], x_rec, color=True)\n # plot_samples(axes[1, 0], x_CCF, color=True)\n # plot_samples(axes[1, 1], x_CCF_rec, color=True)\n # axes[0, 0].set_title('Original')\n # axes[0, 1].set_title('Reconstruction')\n # plt.savefig(f'{input_path}/reconstructions_valid.png')\n # plt.close(fig)\n\n dw = w_CCF - w\n fig, ax = plt.subplots()\n plot_scatter(ax, w, y)\n for i in range(batch_size):\n if y[i] == 0:\n ax.arrow(w[i, 0], w[i, 1], dw[i, 0], dw[i, 1], head_width=0.1)\n plt.savefig(f'{input_path}/flip_color.png')\n plt.close(fig)\n\n # counterfactual_projection(train_loader, val_loader, 0, csvae, 2, input_path)","repo_name":"fontaluc/master-thesis-results","sub_path":"src/debugging/inspect_color_encoding.py","file_name":"inspect_color_encoding.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"73247952479","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 12 14:18:52 2017\r\n\r\n@author: libing\r\n\"\"\"\r\n\r\nimport Tkinter\r\n\r\nimport numpy as np\r\nimport matplotlib\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nfrom matplotlib.figure import Figure\r\n\r\nimport regTree\r\n\r\n\r\nmatplotlib.use('TkAgg')\r\n\r\n\r\ndef reDraw(tolS, tolN):\r\n reDraw.f.clf()\r\n reDraw.a = reDraw.f.add_subplot(111)\r\n if chkBtnVar.get():\r\n if tolN < 2:\r\n tolN = 2\r\n myTree = regTree.createTree(reDraw.rawDat, regTree.modelLeaf,\r\n regTree.modelErr, (tolS, tolN))\r\n yHat = regTree.createForecast(myTree, reDraw.testDat,\r\n regTree.modelTreeEval)\r\n else:\r\n myTree = regTree.createTree(reDraw.rawDat, ops=(tolS, tolN))\r\n yHat = regTree.createForecast(myTree, reDraw.testDat)\r\n reDraw.a.scatter(reDraw.raw[:, 0], reDraw.rawDat[:, 1], s=5)\r\n reDraw.a.plot(reDraw.testDat, yHat, linewidth=2.0)\r\n reDraw.canvas.show()\r\n\r\n\r\ndef getInputs():\r\n try:\r\n tolN = int(tolNentry.get())\r\n except:\r\n tolN = 10\r\n print(\"enter Integer for tolN\")\r\n tolNentry.delete(0, END)\r\n tolNentry.insert(0, '10')\r\n try:\r\n tolS = float(tolSentry.get())\r\n except:\r\n tolS = 1.0\r\n print(\"enter Float for tolS\")\r\n tolSentry.delete(0, END)\r\n tolNentry.insert(0, '1.0')\r\n return tolN, tolS\r\n\r\n\r\ndef drawNewTree():\r\n tolN, tolS = getInputs()\r\n reDraw(tolS, tolN)\r\n\r\n\r\n# This creates a toplevel widget of Tk which usually is the main window\r\n# of an application\r\nroot = Tkinter.Tk()\r\n\r\nreDraw.f = Figure(figsize=(5, 4), dpi=100)\r\nreDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)\r\nreDraw.canvas.show()\r\nreDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)\r\n\r\nTkinter.Label(root, text=\"Plot Place Holder\").grid(row=0, columnspan=3)\r\n\r\nTkinter.Label(root, text=\"tolN\").grid(row=1, column=0)\r\ntolNentry = Tkinter.Entry(root)\r\ntolNentry.grid(row=1, column=1)\r\ntolNentry.insert(0, '10')\r\n\r\nTkinter.Label(root, text=\"tolS\").grid(row=2, column=0)\r\ntolSentry = Tkinter.Entry(root)\r\ntolSentry.grid(row=2, column=1)\r\ntolSentry.insert(0, '1.0')\r\nTkinter.Button(root, text='ReDraw', command=drawNewTree). \\\r\n grid(row=1, column=2, rowspan=3)\r\n\r\nchkBtnVar = Tkinter.IntVar()\r\nchkBtnVar = Tkinter.Checkbutton(root, text=\"Model Tree\", variable=chkBtnVar)\r\nchkBtnVar.grid(row=3, column=0, columnspan=2)\r\n\r\n\r\nreDraw.rawDat = np.mat(regTree.loadDataSet('sine.txt'))\r\nreDraw.rawData = np.arange(min(reDraw.rawDat[:, 0]),\r\n max(reDraw.rawDat[:, 0]), 0.01)\r\n\r\nreDraw(1.0, 10)\r\n\r\nroot.mainloop()\r\n","repo_name":"libingallin/MLinAction","sub_path":"regressionTree/TreeExplore.py","file_name":"TreeExplore.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"3007711831","text":"\"\"\"Miscellaneous support code shared by some of the tool scripts.\n\nThis includes option parsing code, HTML formatting code, and a couple of\nuseful helpers.\n\n\"\"\"\n__version__ = '$Revision$'\n\n\nimport getopt\nimport sys\n\n\nclass Options:\n __short_args = \"a:c:ho:\"\n __long_args = [\n # script controls\n \"columns=\", \"help\", \"output=\",\n\n # content components\n \"address=\", \"iconserver=\",\n \"title=\", \"uplink=\", \"uptitle=\"]\n\n outputfile = \"-\"\n columns = 1\n letters = 0\n uplink = \"./\"\n uptitle = \"Python Documentation Index\"\n\n def __init__(self):\n self.args = []\n self.variables = {\"address\": \"\",\n \"iconserver\": \"icons\",\n \"imgtype\": \"gif\",\n \"title\": \"Global Module Index\",\n }\n\n def add_args(self, short=None, long=None):\n if short:\n self.__short_args += short\n if long:\n self.__long_args += long\n\n def parse(self, args):\n try:\n opts, args = getopt.getopt(args, self.__short_args,\n self.__long_args)\n except getopt.error:\n sys.stdout = sys.stderr\n self.usage()\n sys.exit(2)\n self.args += args\n for opt, val in opts:\n if opt in (\"-a\", \"--address\"):\n val = val.strip()\n if val:\n val = \"\\n%s\\n \\n\" % val\n self.variables[\"address\"] = val\n elif opt in (\"-h\", \"--help\"):\n self.usage()\n sys.exit()\n elif opt in (\"-o\", \"--output\"):\n self.outputfile = val\n elif opt in (\"-c\", \"--columns\"):\n self.columns = int(val)\n elif opt == \"--title\":\n self.variables[\"title\"] = val.strip()\n elif opt == \"--uplink\":\n self.uplink = val.strip()\n elif opt == \"--uptitle\":\n self.uptitle = val.strip()\n elif opt == \"--iconserver\":\n self.variables[\"iconserver\"] = val.strip() or \".\"\n else:\n self.handle_option(opt, val)\n if self.uplink and self.uptitle:\n self.variables[\"uplinkalt\"] = \"up\"\n self.variables[\"uplinkicon\"] = \"up\"\n else:\n self.variables[\"uplinkalt\"] = \"\"\n self.variables[\"uplinkicon\"] = \"blank\"\n self.variables[\"uplink\"] = self.uplink\n self.variables[\"uptitle\"] = self.uptitle\n\n def handle_option(self, opt, val):\n raise getopt.error(\"option %s not recognized\" % opt)\n\n def get_header(self):\n return HEAD % self.variables\n\n def get_footer(self):\n return TAIL % self.variables\n\n def get_output_file(self, filename=None):\n if filename is None:\n filename = self.outputfile\n if filename == \"-\":\n return sys.stdout\n else:\n return open(filename, \"w\")\n\n\nNAVIGATION = '''\\\n\n'''\n\nHEAD = '''\\\n\n\n\n %(title)s \n \n \n \n\n\n''' + NAVIGATION + '''\\\n \n\n%(title)s \n\n'''\n\nTAIL = \" \\n\" + NAVIGATION + '''\\\n%(address)s\n\n'''\n","repo_name":"seanpm2001/WacOS","sub_path":"Python/CPython/2.0/Doc/tools/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"51"}
+{"seq_id":"2696933400","text":"# Name: Josh Garcia \n# Section: C\n# Description: The loan calculator tells you how much money you have to repay over the life of a loan.\n\nprint(\"\\n Welcome! to the Loan Calculator!\")\nprint(\" Please enter information below\")\nprint(\"----------------------------------\")\n#Creating my variables that I will grab input from user and then use to calculate the loan\nprincipal = (input(' How much money, do you still have left in loan? : '))\nyears = int(input(' How many years do you have left required to repay the loan? : '))\nrate = (input(' What is the intrest rate on the loan? : '))\n\n#print user inputs\nprint('\\n Principle: {}\\n Rate: {}\\n Years: {}'.format(principal,rate,years))\n\n#created two if statements that will check if there is a $ or %, and then removing it, so we can preform calculations\nif principal[0] == \"$\":\n principal = float(principal[1:])\nelse:\n principal = float(principal)\n\nif rate[-1] == \"%\":\n rate = float(rate[:-1])/100\nelse:\n rate = float(rate)\n\n#This will calculate the input given from the user\npayment = float( (((1+rate)**years)*principal*rate)/(((1+rate)**years)-1) )\n\n#displaying the results using the print() method\nprint('\\n Annual payment = ${:,.2f}'.format(payment))\n\n#calculating and print() the results of monthly payment\nmonthly = float(payment/12)\nprint(' Monthly payment = ${:,.2f}'.format(monthly))\n\n#Calculating and print() the results of total paid of loan\ntotal_payment = float(payment*years)\nprint(' Total paid for the life of the loan = ${:,.2f}'.format(total_payment))\n\n#Here I ask user for annual income, so I can compare user monthly income with monthly payment.\nuser_salary = input(\"\\n Please enter your annual income: \")\n\n#if user uses $ sign, this if statement will remove it\nif user_salary[0] == \"$\":\n user_salary = float(user_salary[1:])\nelse:\n user_salary = float(user_salary)\n\n#if statement will spit out whether user needs to refinance or they are on track to pay off loan \nif (user_salary/12) < monthly:\n if rate > .05:\n print(\"\\n Yikes! You should refinance! Please seek financial counseling.\\n\")\nelse:\n print(\"\\n Awesome! If you make all your payments, \\n you will get your loan paid off in time!\\n\")\n","repo_name":"J8sh/Loan_Calculator","sub_path":"garcia_joshua_hw1.py","file_name":"garcia_joshua_hw1.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"12408605425","text":"from fastapi import APIRouter, HTTPException\nfrom pydantic import BaseModel\nimport os\nimport stripe\nfrom be.api.v1.templates.non_auth_route import create_non_auth_router\n\nstripe.api_key = os.environ[\"STRIPE_SECRET_KEY\"]\n\nrouter = APIRouter(prefix=\"/payments/intent\", tags=[\"checkout\"])\n\nclass RequestBody(BaseModel):\n amount: int\n\n\n@router.post(\"\")\n# Create a stripe payment intent\nasync def post_payment_intent(body: RequestBody):\n try:\n payment_intent = stripe.PaymentIntent.create(\n payment_method_types=[\"paynow\"],\n payment_method_data={\"type\": \"paynow\"},\n amount=body.amount,\n currency=\"sgd\"\n )\n\n return {\n \"client_secret\": payment_intent.client_secret\n }\n except Exception as e:\n raise HTTPException(status_code=500, detail=e)\n\nhandler = create_non_auth_router(router)\n","repo_name":"ntuscse/be","sub_path":"be/api/v1/endpoints/payments/intent/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"73743410077","text":"from hashlib import md5\n\ndef doors(h,p):\n x,y = p\n if y-1 >= 0 and h[0] in \"bcdef\": yield 'U',(x,y-1)\n if y+1 <= 3 and h[1] in \"bcdef\": yield 'D',(x,y+1)\n if x-1 >= 0 and h[2] in \"bcdef\": yield 'L',(x-1,y)\n if x+1 <= 3 and h[3] in \"bcdef\": yield 'R',(x+1,y)\n\ndef bfs():\n global longest\n s = [('',(0,0))]\n while s:\n ns = []\n for path,pos in s:\n if pos == (3,3): #return path\n longest = len(path)\n continue\n h = md5((\"gdjjyniy\"+path).encode()).hexdigest()\n for d,p in doors(h,pos): ns.append((path+d, p))\n s = ns\n\ndef dfs(path,pos):\n global longest\n if pos == (3,3):\n if len(path) > longest: longest = len(path)\n return\n h = md5((\"gdjjyniy\"+path).encode()).hexdigest()\n for d,p in doors(h,pos): dfs(path+d, p)\n\nlongest = 0\nprint(bfs())\ndfs('',(0,0))\nprint(longest)\n","repo_name":"fnuttplus/advent","sub_path":"2016/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"38411420533","text":"import sys\nsys.stdin = open('input.txt')\n# 주어진 미로의 출발점으로부터 도착지점까지 갈 수 있는 길이 있는지 판단하는 프로그램\nT = 10\n\nfor tc in range(1, T+1):\n case_number = int(input())\n maze = [list(map(int, input())) for _ in range(16)]\n q = []\n visited = []\n\n dx = [-1, +1, 0, 0]\n dy = [0, 0, -1, +1]\n\n # 시작점 (1,1) 끝점 (end_x, end_y)\n for i in range(16):\n for j in range(16):\n if maze[i][j] == 3:\n end_x = i\n end_y = j\n\n q.append([1, 1])\n visited.append([1, 1])\n result = 0\n while q:\n point = q.pop(0)\n x = point[0]\n y = point[1]\n\n if maze[x][y] == 3:\n result = 1\n print(\"#{} 1\".format(tc))\n\n for i in range(4):\n new_x = x + dx[i]\n new_y = y + dy[i]\n\n if 0 <= new_x < 16 and 0 <= new_y < 16:\n if [new_x, new_y] not in visited:\n if maze[new_x][new_y] != 1:\n q.append([new_x, new_y])\n visited.append([new_x, new_y])\n if result == 0:\n print(\"#{} 0\".format(tc))","repo_name":"Hyojeong721/TIL","sub_path":"algorithm/SWA/date/0826/ase0574/1226_미로1/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"22225717429","text":"def my_max(numbers):\n max_num = numbers[0]\n index = 0\n for i in range(1, len(numbers)):\n if max_num < numbers[i]:\n max_num = numbers[i]\n index = i\n return max_num, index\n\ndef my_min(numbers):\n min_num = numbers[0]\n index = 0\n for i in range(1, len(numbers)):\n if min_num > numbers[i]:\n min_num = numbers[i]\n index = i\n return min_num, index\n\nfor i in range(1, 11):\n dump = int(input())\n boxes = list(map(int, input().split()))\n\n for d in range(dump):\n index_max = my_max(boxes)[1]\n boxes[index_max] -= 1\n index_min = my_min(boxes)[1]\n boxes[index_min] += 1\n \n print('#{} {}'.format(i, my_max(boxes)[0] - my_min(boxes)[0]))","repo_name":"seoul-ssafy-class-2-studyclub/heecheol","sub_path":"SWEA/d2/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"3017710565","text":"'''-------------------------------------------------Alert Control-----------------------------------------------------------------------------------'''\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nDriver = webdriver.Chrome()\n\nDriver.get('https://demoqa.com/alerts')\n\nAlert_Button = Driver.find_element(By.ID,'alertButton').click()\n\nDriver.switch_to.alert.accept()\n\ntime.sleep(20)\nprint('1st alert confirmed successfully')\n\n'''----------------------------------------------------------3rd Button >> Confirm Box-------------------------------------------------------------------------------------- '''\n\nConfirm_Button = Driver.find_element(By.ID,'confirmButton').click()\nDriver.switch_to.alert.accept() #To select OK\n# Driver.switch_to.alert.dismiss() #To select OK\ntime.sleep(2)\nprint('3rd alert confirmed successfully')\n\n\n'''----------------------------------------------------------4th Button >> Promt box-------------------------------------------------------------------------------------- '''\n\nPromt_box = Driver.find_element(By.ID,'promtButton') #Button is not clicking\ntime.sleep(2)\nA = Promt_box.click()\nprint('3rd alert clicked successfully')\ntime.sleep(2)\nPromt_box.Driver.switch_to.alert.send_keys('Amit')\nprint('keys to prompt box sent successfully')\ntime.sleep(5)\n\n","repo_name":"Shekharmehta02/Test","sub_path":"Selenium/AlertControl/alert_control.py","file_name":"alert_control.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"26724720793","text":"import os\nimport json\nfrom subprocess import check_call\n\n\nclass OBPEvent:\n def __init__(self, webhook_url=None, role_arn=None, obp_token=None):\n if webhook_url is not None:\n os.environ[\"METAFLOW_ARGO_EVENTS_WEBHOOK_URL\"] = webhook_url\n os.environ[\"METAFLOW_ARGO_EVENTS_WEBHOOK_AUTH\"] = \"service\"\n env = os.environ.copy()\n env.update(self._assume_role(role_arn))\n check_call([\"outerbounds\", \"configure\", \"-f\", obp_token], env=env)\n\n def _assume_role(self, role_arn):\n import boto3\n\n sts_client = boto3.client(\"sts\")\n\n assumed_role_object = sts_client.assume_role(\n RoleArn=role_arn, RoleSessionName=\"send_event\"\n )\n\n credentials = assumed_role_object[\"Credentials\"]\n return {\n \"AWS_ACCESS_KEY_ID\": credentials[\"AccessKeyId\"],\n \"AWS_SECRET_ACCESS_KEY\": credentials[\"SecretAccessKey\"],\n \"AWS_SESSION_TOKEN\": credentials[\"SessionToken\"],\n }\n\n def submit(self, event_name, payload=None):\n from metaflow.integrations import ArgoEvent\n\n ArgoEvent(name=event_name).publish(payload=payload)\n","repo_name":"outerbounds/obp-external-event","sub_path":"obpevent/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"18916266802","text":"from tkinter import *\r\nimport webbrowser\r\n\r\n#Code for opening the online database\r\ndef btn_clicked():\r\n webbrowser.open(\"https://www.mywot.com\")\r\n\r\n\r\n#Code for the GUI\r\nwebchck = Tk()\r\nwebchck.geometry(\"906x505\")\r\nwebchck.configure(bg = \"#ffffff\")\r\nwebchck.title(\"Website safety checker\")\r\n\r\ncanvas = Canvas(webchck, bg = \"#ffffff\", height = 505, width = 906, bd = 0, highlightthickness = 0, relief = \"ridge\")\r\ncanvas.place(x = 0, y = 0)\r\n\r\nbackground_img = PhotoImage(file = f\"background.png\")\r\nbackground = canvas.create_image(453.0, 258.0,image=background_img)\r\n\r\nimg0 = PhotoImage(file = f\"img0.png\")\r\nb0 = Button(image = img0, borderwidth = 0, highlightthickness = 0, command = btn_clicked, relief = \"flat\")\r\nb0.place(x = 1, y = 455, width = 76, height = 37)\r\n\r\nimg1 = PhotoImage(file = f\"img1.png\")\r\nb1 = Button(image = img1, borderwidth = 0, highlightthickness = 0, command = btn_clicked, relief = \"flat\")\r\nb1.place(x = 174, y = 388, width = 319, height = 54)\r\n\r\nwebchck.resizable(False, False)\r\nwebchck.mainloop()","repo_name":"RoobusBoi/VaulTech-V2.0","sub_path":"Website Checker.py","file_name":"Website Checker.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"45182459764","text":"from django.contrib import admin\nfrom .models import Publisher, Book, Member, Order, Review\n\n\n# Register your models here.\n#admin.site.register(Publisher)\n#admin.site.register(Book)\n#admin.site.register(Member)\n#admin.site.register(Order)\nadmin.site.register(Review)\n\ndef make_available(modeladmin, request, queryset ):\n\n for a in queryset:\n a.price += 10\n a.save()\n return\nmake_available.short_description = 'Update this fields'\n\nclass BookAdmin(admin.ModelAdmin):\n fields = [('title', 'category', 'publisher'), ('num_pages', 'price', 'num_reviews')]\n list_display = ('title', 'category', 'price')\n actions = [make_available]\n\n\nadmin.site.register(Book,BookAdmin)\n\nclass OrderAdmin(admin.ModelAdmin):\n fields = [('books'), ('member', 'order_type', 'order_date')]\n list_display = ('id','member','order_type','order_date','total_items')\n\nadmin.site.register(Order,OrderAdmin)\n\nclass PublisherAdmin(admin.ModelAdmin):\n fields = [('name','website','city','country')]\n list_display = ('name', 'website', 'city')\nadmin.site.register(Publisher,PublisherAdmin)\n\nclass MemberAdmin(admin.ModelAdmin):\n fields = ['first_name','last_name', 'status']\n list_display = ('first_name','last_name', 'status', 'books')\n\n def books(self,obj):\n return \"\\n\".join([b.title for b in obj.borrowed_books.all()])\n\nadmin.site.register(Member, MemberAdmin)","repo_name":"Shivani1304/e-Book-Library","sub_path":"myapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40233914365","text":"import pysrt\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as lines\nfrom matplotlib.widgets import Slider, Button\nimport tkinter as tk\nfrom tkinter import Label, ttk\nfrom tkinter import filedialog as fd\nfrom tkinter import messagebox\nimport sys\n\nroot = tk.Tk()\nroot.title('Appilyzer')\nroot.resizable(False, False)\nroot.geometry('300x150')\n\ndef select_video_file():\n    filetypes = (\n        ('video files', '*.mp4'),\n    )\n\n    filename = fd.askopenfilename(\n        title='Select Video File',\n        initialdir='/',\n        filetypes=filetypes)\n\n    video_label.config(text=filename)\n\ndef select_srt_file():\n    filetypes = (\n        ('srt files', '*.srt'),\n    )\n\n    filename = fd.askopenfilename(\n        title='Select SRT File',\n        initialdir='/',\n        filetypes=filetypes)\n\n    srt_label.config(text=filename)\n\n# open button\nopen_video = ttk.Button(\n    root,\n    text='Select Video File',\n    command=select_video_file\n)\n\nopen_video.pack(expand=True)\nvideo_label = Label(root, text=\"No File selected\")\nvideo_label.pack()\n\n# open button\nopen_srt = ttk.Button(\n    root,\n    text='Select SRT File',\n    command=select_srt_file\n)\n\nopen_srt.pack(expand=True)\nsrt_label = Label(root, text=\"No File selected\")\nsrt_label.pack()\n\n\n\n\ndef runProgram(videolabel, srtlabel):\n    subs = pysrt.open(srtlabel)\n    input_name = videolabel\n    time = [0]\n    height = []\n\n\n    for sub in subs:\n        he = sub.text.find('rel_alt')\n        height.append(float(sub.text[he:].split(']')[0].split(\" \")[1]))\n        time.append(time[-1] + 0.042)\n\n    fig = plt.figure(figsize=(8, 6))\n    fig.subplots_adjust(bottom=0.25)\n    ax = fig.add_subplot(111)\n    ax.plot(time[1:], height)\n    highLine = ax.axhline(max(height), ls=\"--\", c=\"r\")\n    axfreq = fig.add_axes([0.25, 0.15, 0.65, 0.03])\n    highSlider = Slider(\n        ax=axfreq,\n        label='High Threshold',\n        valmin=-10,\n        valmax=10,\n        valinit=max(height),\n    )\n    def updateHigh(val):\n        highLine.set_ydata(highSlider.val)\n\n    highSlider.on_changed(updateHigh)\n\n    lowLine = ax.axhline(min(height), ls=\"--\", c=\"r\")\n    axfreq2 = fig.add_axes([0.25, 0.1, 0.65, 0.03])\n    lowSlider = Slider(\n        ax=axfreq2,\n        label='Lower Threshold',\n        valmin=-10,\n        valmax=10,\n        valinit=min(height),\n    )\n    def update(val):\n        lowLine.set_ydata(lowSlider.val)\n\n    lowSlider.on_changed(update)\n\n    resetax = fig.add_axes([0.25, 0.05, 0.65, 0.03])\n    button = Button(resetax, 'Confirm', hovercolor='0.975')\n    def reset(event):\n        plt.close()\n    button.on_clicked(reset)\n\n    plt.show()\n\n    high = highSlider.val\n    low = lowSlider.val\n\n    clips = []\n    current = []\n    allC = []\n    start = 0\n    for sub in subs:\n        he = sub.text.find('rel_alt')\n        h = float(sub.text[he:].split(']')[0].split(\" \")[1])\n        if h > low and h < high:\n            if start == 0:\n                start = sub.start\n            current.append(h)\n            allC.append(h)\n        else:\n            if len(current) > 0:\n                clips.append([start,sub.end])\n            start = 0\n            current = []\n\n    # fig = plt.figure(figsize=(8, 6))\n    # ax = fig.add_subplot(111)\n    # ax.plot(allC)\n    # plt.show()\n\n    i = 0\n    with open(\"videos.txt\", \"w\") as file:\n        for c in clips:\n            start = c[0].to_time()\n            end = c[1].to_time()\n            os.system('ffmpeg -i {0} -vcodec copy -acodec copy -ss {1} -to {2} ./output/out{3}.mp4'.format(input_name,start,end,i))\n            file.write(\"file './output/out{0}.mp4' \\n\".format(i))\n            i += 1\n    os.system('ffmpeg -f concat -safe 0 -i videos.txt -c copy ./output/{0}.mp4'.format(video_label['text'].split(\"/\")[-1]+\"cut\"))\n    os.remove(\"videos.txt\")\n    i = 0\n    for c in clips:\n        
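# Remove the per-clip temporary files now that the concatenated output video exists.\n        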
os.remove(\"./output/out{0}.mp4\".format(i))\n        i += 1\n\n\ndef run():\n    runProgram(video_label['text'],srt_label['text'])\n\n# open button\nrun_button = ttk.Button(\n    root,\n    text='Run',\n    command=run\n)\n\nrun_button.pack()\ndef on_closing():\n    if messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n        root.destroy()\n        sys.exit()\n\nroot.protocol(\"WM_DELETE_WINDOW\", on_closing)\nroot.mainloop()\n\n","repo_name":"Deischox/VideoSRTCutter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"30554266739","text":"from math import inf\nimport torch\nimport tqdm\nimport torch.nn as nn\nfrom timm.scheduler import CosineLRScheduler\n\n\ndef get_optimizer(net, lr, wd, momentum):\n    optimizer = torch.optim.SGD(net.parameters(), lr=lr, weight_decay=wd, momentum=momentum)\n    #optimizer = torch.optim.AdamW(net.parameters(), lr=lr, weight_decay=wd)\n    return optimizer \n\ndef get_loss_function():\n    loss_function = nn.CrossEntropyLoss()\n    return loss_function\n\ndef train(net, data_loader, optimizer, loss_function, device):\n    \n    samples=0\n    cumulative_loss=0\n    cumulative_accuracy=0\n    \n    net.train()\n\n\n    with tqdm.tqdm(total=len(data_loader)) as pbar:\n\n        for batch_idx, (inputs, targets) in enumerate(data_loader):\n\n            # Move the batch to the selected device (CPU or GPU).\n            inputs, targets = inputs.to(device), targets.to(device)\n            \n            optimizer.zero_grad(set_to_none=True) # reset the optimizer\n\n            with torch.autocast(device_type='cuda', dtype=torch.float16):\n                outputs = net(inputs) # Forward pass\n                loss = loss_function(outputs, targets) # Apply the loss\n            \n            loss.backward()\n            optimizer.step()\n\n\n\n            samples += inputs.shape[0]\n            cumulative_loss += loss.item()\n            _, predicted = outputs.max(1)\n            cumulative_accuracy += predicted.eq(targets).sum().item()\n            pbar.set_postfix_str(\"training with Current loss: {:.4f}, Accuracy: {:.4f}, at iteration: {:.1f}\".format(cumulative_loss/ samples, cumulative_accuracy / samples*100, float(batch_idx)))\n            pbar.update()\n    return cumulative_loss/samples, cumulative_accuracy/samples*100\n\n# we define a test function\ndef test(net, data_loader, loss_function, device='cuda:0'):\n    samples = 0.\n    cumulative_loss = 0.\n    cumulative_accuracy = 0.\n    net.eval() # Strictly needed if network contains layers which have different behaviours between train and test\n    \n    with tqdm.tqdm(total=len(data_loader)) as pbar: \n        with torch.no_grad():\n            for batch_idx, (inputs, targets) in enumerate(data_loader):\n                \n                # Move the batch to the selected device (CPU or GPU).\n                inputs, targets = inputs.to(device), targets.to(device)\n                \n\n                with torch.autocast(device_type='cuda', dtype=torch.float16):\n                    outputs = net(inputs) # Forward pass\n                    loss = loss_function(outputs, targets) # Apply the loss\n\n                _, predicted = outputs.max(1)\n                \n                samples += inputs.shape[0]\n                cumulative_loss += loss.item()\n                cumulative_accuracy += predicted.eq(targets).sum().item()\n                pbar.set_postfix_str(\"validation with Current loss: {:.4f}, Accuracy: {:.4f}, at iteration: {:.1f}\".format(cumulative_loss/ samples, cumulative_accuracy / samples*100, float(batch_idx)))\n                pbar.update()\n    return cumulative_loss/samples, cumulative_accuracy/samples*100\n\n\n\ndef trainer(\n    # lets define the basic hyperparameters\n    train_loader,\n    val_loader,\n    test_loader,\n    learning_rate=0.01,\n    weight_decay=0.000001,\n    momentum=0.9,\n    epochs=2,\n    model=None,\n    device=\"cuda:0\",\n    early_stopping=False):\n    \n    \n    model = model.to(device)\n    # defining the optimizer\n    optimizer = get_optimizer(model, learning_rate,wd=weight_decay, momentum=momentum)\n\n    # defining the loss function\n    loss_function = get_loss_function()\n\n\n    # In order to save the accuracy and loss we use a list to save them in each epoch \n    val_loss_list = []\n    val_accuracy_list = []\n    train_loss_list = []\n    train_accuracy_list = []\n\n    last_loss = inf\n    
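# Early-stopping bookkeeping: stop when validation loss fails to improve for 'patience' epochs in a row.\n    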
trigger_times = 0\n    patience = 4\n\n    start_epoch = 0  # set to a saved epoch number to resume from its checkpoint\n    ## Resume from a checkpoint when start_epoch > 0\n    if start_epoch > 0:\n        PATH = f\"check_MobileNetV3_{start_epoch}\"\n        checkpoint = torch.load(PATH) \n        model.load_state_dict(checkpoint['model_state_dict'])\n        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n        start_epoch = checkpoint['epoch']\n        train_loss = checkpoint['loss']\n        model.train()\n\n    for e in range(start_epoch,epochs):\n        print('training epoch number {:d} of total epochs of {:d}'.format(e, epochs))\n        train_loss, train_accuracy = train(model, train_loader, optimizer, loss_function,device)\n        val_loss, val_accuracy = test(model, val_loader, loss_function,device)\n        val_loss_list.append(val_loss)\n        val_accuracy_list.append(val_accuracy)\n        train_loss_list.append(train_loss)\n        train_accuracy_list.append(train_accuracy)\n\n\n        current_loss = val_loss \n        print('Epoch: {:d}'.format(e+1))\n        print('\\t Training loss {:.5f}, Training accuracy {:.2f}'.format(train_loss,\n                                                                      train_accuracy))\n        print('\\t Validation loss {:.5f}, Validation accuracy {:.2f}'.format(val_loss,\n                                                                          val_accuracy))\n\n\n        ## Early stopping\n        if early_stopping:\n            if current_loss > last_loss:\n                trigger_times += 1\n                print(\"Trigger times: \", trigger_times)\n\n                if trigger_times >= patience:\n                    print(\"Early stopping!\\n Terminate training\")\n                    break\n            else:\n                trigger_times = 0\n            \n            last_loss = current_loss\n\n        ## store checkpoint training\n        if e % 5 == 0 or e == epochs-1:\n            PATH = f\"check_MobileNetV3_{e}\"\n            torch.save({\n                'epoch': e,\n                'model_state_dict': model.state_dict(),\n                'optimizer_state_dict': optimizer.state_dict(),\n                'loss': train_loss,\n                }, PATH)\n    \n    with open(\"result_random_search.txt\",\"a+\") as f:\n        f.write(f\"\\nEpochs: {e}\")\n        f.write(f\"\\nTrain loss: {train_loss_list}\")\n        f.write(f\"\\nTrain accuracy: {train_accuracy_list}\")\n        f.write(f\"\\nValidation loss: {val_loss_list}\")\n        f.write(f\"\\nValidation accuracy: {val_accuracy_list}\")\n\n\n    print('-----------------------------------------------------')\n    print('After training:')\n    train_loss, train_accuracy = test(model, train_loader, loss_function,device)\n    val_loss, val_accuracy = test(model, val_loader, loss_function,device)\n    test_loss, test_accuracy = test(model, test_loader, loss_function,device)\n    print('\\t Training loss {:.5f}, Training accuracy {:.2f}'.format(train_loss,\n                                                                  train_accuracy))\n    print('\\t Validation loss {:.5f}, Validation accuracy {:.2f}'.format(val_loss,\n                                                                      val_accuracy))\n    print('\\t Test loss {:.5f}, Test accuracy {:.2f}'.format(test_loss, test_accuracy))\n    print('-----------------------------------------------------')\n    return val_loss_list, val_accuracy_list, train_loss_list, train_accuracy_list\n","repo_name":"suyash444/MLDL_Tiny_Visual_Wake_Words","sub_path":"MLDL_Tiny_Visual_Wake_Words/MLDL_Tiny_Visual_Wake_Words/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"41135693821","text":"\nn = int(input())\narr = [64]\n\nwhile sum(arr) > n:\n    t = arr.pop()//2\n    arr.append(t)\n    if sum(arr) < n:\n        arr.append(t)\n\nprint(len(arr))\n\n\n# Alternative: convert to binary and count only the 1 bits\na = int(input())\nprint(str(bin(a)).count('1'))","repo_name":"taepd/study","sub_path":"Algorithm/BOJ/silver5_1094_막대기.py","file_name":"silver5_1094_막대기.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"9586343556","text":"import requests\nimport re\n\n\ndef is_interesting_subdomain(subdomain):\n # Define a list of common services/protocols\n common_services = ['mail', 'ftp', 'admin', 'vpn', 'test', 'dev', 'secure',\n 'portal', 'backup', 'internal', 'legacy', 'api', 'intranet', 'owa', 'git']\n\n # Check if any part of the subdomain contains a common service\n if any(service in subdomain for service in common_services):\n return True\n return False\n\n\ndef get_country_code_from_ip(ip_address):\n url = f'https://ipinfo.io/{ip_address}'\n response = requests.get(url)\n data = response.json()\n return data.get('country')\n\n\ndef extract_ip_addresses(line):\n # Define a regex pattern to find IP addresses\n ip_pattern = r'\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b|\\b[0-9a-fA-F:]+\\b'\n # Find all IP addresses in the line\n ip_addresses = re.findall(ip_pattern, line)\n if not len(ip_addresses):\n return None\n return \".\".join(ip_addresses)\n\n\ndef extract_subdomains(main_domain, line):\n # Define a regex pattern to find subdomains\n subdomain_pattern = r'\\b(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\\.)+' + re.escape(\n main_domain) + r'\\b'\n # Find all subdomains in the line\n subdomains = re.findall(subdomain_pattern, line)\n if not len(subdomains):\n return None\n # Remove \"92m\" from subdomains\n subdomains = [subdomain.replace('92m', '') for subdomain in subdomains]\n return \".\".join(subdomains)\n","repo_name":"EliottElek/recon-ui","sub_path":"home/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"39691343927","text":"class Solution:\n    def isMonotonic(self, A):\n        \"\"\"\n        :type A: List[int]\n        :rtype: bool\n        \"\"\"\n        judge = []\n        for i in range(len(A)-1):\n            judge.append(A[i] - A[i+1])\n        # The array is monotonic if the pairwise differences never change sign.\n        return all(d >= 0 for d in judge) or all(d <= 0 for d in judge)\n\n\nif __name__ == \"__main__\":\n\n    s = Solution()\n    a = [6,5,4,4,7]\n    a = [2,2,2,1,4,5]\n    r = s.isMonotonic(a)\n    print(r)","repo_name":"siddharthcurious/Pythonic3-Feel","sub_path":"LeetCode/896-1.py","file_name":"896-1.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"30124130355","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nimport collections\nfrom typing import Optional\n\nclass Solution:\n    def minDepth(self, root: Optional[TreeNode]) -> int:\n        \n        if root == None:\n            return 0\n        \n        depth = 1\n        dq = collections.deque([])\n        dq.append(root)\n        \n        while dq:\n            n = len(dq)\n            \n            for _ in range(n):\n                node = dq.popleft()\n\n                if node.left == None and node.right == None:\n                    return depth\n                if node.left != None:\n                    dq.append(node.left)\n                if node.right != None:\n                    dq.append(node.right)\n            \n            depth += 1\n","repo_name":"djeong20/FA22-Daily-LeetCode-Challenge","sub_path":"0111-minimum-depth-of-binary-tree/0111-minimum-depth-of-binary-tree.py","file_name":"0111-minimum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40843390234","text":"# Tanner Sirota 61932813, Steven Le 51942618\n\nfrom os import listdir\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nfrom math import log10\nfrom ast import literal_eval\nfrom json import dumps\nimport re\nfrom tkinter import *\n\ndef Index():\n    docCount = 0\n    indexes = defaultdict(dict)\n    bkd = eval(open('bookkeeping.json', 'r').read())\n    for direct in listdir('WEBPAGES_RAW'):\n        if direct != '.DS_Store':\n            for docs in listdir('WEBPAGES_RAW/' + direct):\n                if docs != '.DS_Store':\n                    if bkd[direct + '/' + docs].endswith(\".txt\") or bkd[direct + '/' + docs].endswith(\".java\") or bkd[direct + '/' + docs].endswith(\".py\"):\n                        continue\n                    docCount += 1\n                    file = open('WEBPAGES_RAW/' + direct + \"/\" + docs, 'r', encoding=\"utf-8\")\n                    soup = BeautifulSoup(file.read(), 'html.parser')\n                    tf = defaultdict(int)\n                    for words in soup.get_text().split():\n                        if re.match(\"^[A-Za-z]+$\", words) or re.match(\"^[0-9]+$\", words):\n                            if len(words) > 1:\n                                tf[words.lower()] += 1\n                    for words in tf:\n                        indexes[words][direct + '/' + docs] = 1 + log10(tf[words])\n                        if soup.title is not None:\n                            if words in soup.title:\n                                indexes[words][direct + '/' + docs] = indexes[words][direct + '/' + docs] + .3\n                    file.close()\n    for unique in indexes:\n        for docID in indexes[unique]:\n            indexes[unique][docID] = indexes[unique][docID] * log10(docCount / len(indexes[unique]))\n    \n    jsonString = dumps(dict(indexes))\n    newFile = open(\"IndexDict.json\", \"w\")\n    newFile.write(jsonString)\n    newFile.close()\n    \n    \ndef search(fileDictIndex):\n    for i in list(searchGUI.children.values()):\n        if type(i) != Entry and type(i) != Button:\n            i.destroy()\n    bkd = eval(open('bookkeeping.json', 'r').read())\n    try:\n        userSearchTerm = searchInput.get()\n        searchList = userSearchTerm.lower().split()\n        rankDict = defaultdict(int)\n        for term in searchList:\n            for docID in fileDictIndex[term]:\n                rankDict[docID] += fileDictIndex[term][docID]\n        counter = 0\n        for k, v in sorted(rankDict.items(), key=lambda item: item[1], reverse=True):\n            if counter == 5:\n                break\n            Label(searchGUI, text=bkd[k]).pack()\n            print(bkd[k])\n            counter += 1\n    except KeyError:\n        # a query term is missing from the index\n        print(\"No matches found.\")\n    \n    \nif __name__ == '__main__':\n    FDI = eval(open(\"IndexDict.json\", 'r').read())\n    searchGUI = Tk()\n    searchGUI.title(\"CS 121 Project 3 - Search Engine\")\n    searchGUI.geometry(\"500x500\")\n    searchInput = StringVar()\n    searchEntry = Entry(searchGUI, textvariable=searchInput).pack()\n    searchButton = Button(searchGUI, text=\"Search\", command=lambda: search(FDI)).pack()\n    searchGUI.mainloop()\n    \n    \n","repo_name":"Tanner12/SearchParser","sub_path":"SearchParser.py","file_name":"SearchParser.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"21718394191","text":"import datetime\nimport json\nimport logging\n\nfrom yee.core.httputils import RequestUtils\nfrom yee.core.stringutils import StringUtils\nfrom yee.notify.notify import Notify\n\n\"\"\"\nWeChat Work (enterprise WeChat) notification\n\"\"\"\nclass QywechatNotify(Notify):\n    req = RequestUtils(request_interval_mode=False)\n\n    def __init__(self, **args):\n        args.setdefault('touser', '@all')\n        self.args = args\n        self.message_template = args['message_template']\n        self.token_cache = None\n        self.token_expires_time = None\n\n    def get_access_token(self):\n        if self.token_expires_time is not None and self.token_expires_time >= datetime.datetime.now():\n            return self.token_cache\n        res = self.req.get(\n            'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s' % (\n                self.args[\"corpid\"], self.args[\"corpsecret\"]))\n        data = res.json()  # do not shadow the imported json module\n        if data['errcode'] == 0:\n            self.token_expires_time = datetime.datetime.now() + datetime.timedelta(seconds=data['expires_in'] - 500)\n            self.token_cache = data['access_token']\n            return self.token_cache\n        else:\n            return None\n\n    def send(self, message_template: str, context: dict):\n        access_token = self.get_access_token()\n        if access_token is None:\n            logging.error('Failed to get the WeChat Work access_token, please check your corpid and corpsecret settings')\n            return\n        if message_template not in self.message_template:\n            logging.error('Notification message template not found: %s' % (message_template))\n            return\n        mt = self.message_template[message_template]\n        title = mt['title']\n        message_pattern = mt['message']\n        url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + access_token\n        res = self.req.post_res(url, params=json.dumps({\n            'touser': self.args['touser'],\n            'agentid': self.args['agentid'],\n            'msgtype': 'news',\n            'news': {\n                \"articles\": [\n                    {\n                        \"title\": StringUtils.replace_var(title, context),\n                        \"description\": StringUtils.replace_var(message_pattern, context),\n                        \"url\": context['url'],\n                        \"picurl\": context['cover']\n                    }\n                ]\n            }\n        }))\n        if res.json()['errcode'] != 0:\n            logging.error('WeChat Work push failed: %s' % res.json())\n","repo_name":"eehong1986/movie_robot","sub_path":"yee/notify/qywechat.py","file_name":"qywechat.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"}
+{"seq_id":"14821161564","text":"#-*- coding: utf-8 -*-\nimport os\nimport json\nimport cv2\n\n\n\ndef load_all_face_pic_list(alignFaceDir):\n    imgType = [\"jpg\", \"jpeg\", \"png\"]\n    facePathList = []\n    for picName in os.listdir(alignFaceDir):\n        # compare the file extension, not the whole file name\n        if picName.split(\".\")[-1].lower() in imgType:\n            facePathList.append(os.path.join(alignFaceDir, picName))\n    return facePathList\n\n\ndef load_pose_data(jsonfilePath):\n    with open(jsonfilePath, \"r\") as fr:\n        pose_data = json.load(fr)\n    return pose_data \n\n\ndef load_det_data(detfilePath):\n    with open(detfilePath, \"r\") as fr:\n        det_data = json.load(fr)\n    return det_data \n\n\n\ndef get_pose_json_list(josnPath):\n    poseList = []\n    detList = []\n    for jsonfilename in os.listdir(josnPath):\n        if jsonfilename.startswith(\"pose\") and jsonfilename.endswith(\".json\"):\n            poseList.append(os.path.join(josnPath, jsonfilename))\n    return poseList\n\n    \n\n\n\ndef draw_result(imgPath, saveDir, det_dict, pose_dict, genderList, ageList, landmark_flag=0):\n    # if landmark_flag is 0, do not draw landmarks on the image; otherwise draw them\n    if not os.path.exists(saveDir):\n        os.mkdir(saveDir)\n    #frame = cv2.imread(imgPath)\n    img = cv2.imread(imgPath)\n\n    pose_result = pose_dict[\"result\"][\"landmarks\"] \n    det_result = det_dict[\"result\"][\"detections\"]\n\n    nums = 0\n    for det in det_result:\n        if det[\"class\"] == \"face\":\n            topleft = (int(det[\"pts\"][0][0]), int(det[\"pts\"][0][1]))\n            bottomright = (int(det[\"pts\"][2][0]), int(det[\"pts\"][2][1]))\n            cv2.rectangle(img, topleft, bottomright, (255, 255, 0), 2)\n            \n            if genderList[nums] == 0:\n                gender = \"Male\"\n            else:\n                gender = \"Female\"\n\n            text = gender + \",\" + str(ageList[nums])\n            # Arguments: image, text, position, font, scale, color (B, G, R), thickness\n            # cv2.putText(img, text, (x,y), Font, Size, (B,G,R), Thickness)\n            cv2.putText(img, text, (topleft[0], topleft[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) \n            nums += 1\n\n    if landmark_flag:\n        for landmark in pose_result:\n            topleftpoint = [99999, 99999]\n            for point in landmark[\"landmark\"]:\n                point = (int(point[0]), int(point[1]))\n                cv2.circle(img, point, 1, (0, 255, 255), 1)\n                if point[0] < topleftpoint[0]:\n                    topleftpoint[0] = point[0]\n                if point[1] < topleftpoint[1]:\n                    topleftpoint[1] = point[1]\n    \n    cv2.imwrite(os.path.join(saveDir, os.path.basename(imgPath)), img)\n    \n","repo_name":"binzh93/face_gender_age_api","sub_path":"script/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40456016781","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport os\nimport sys\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.stats as stats\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.preprocessing import normalize\n\nfrom sklearn import manifold\nfrom scipy.special import expit\n\nfrom optimizer import OptimizerVAE\nfrom input_data import *\nfrom model import *\nfrom preprocessing import *\n\n# Settings\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('dataset', 'cora', 'Dataset string.')\nflags.DEFINE_integer('features', 0, 'Whether to use features (1) or not (0).')\nflags.DEFINE_integer('seeded', 1, 'Set numpy random seed')\nflags.DEFINE_integer('test_count', 10, 'Set num tests')\nflags.DEFINE_integer('emb_size', 128, 'Number of eigenvectors for embedding')\nflags.DEFINE_integer('connected_split', 1, 'use split with training set always connected')\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\nif FLAGS.seeded:\n    np.random.seed(1)\n\ndataset_str = FLAGS.dataset\n\n# Load data\nadj, features = load_data(dataset_str)\n\nadj_def = adj\n\n# Store original adjacency matrix (without diagonal entries) for later\nadj_orig = adj\nadj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)\nadj_orig.eliminate_zeros()\n\nif FLAGS.features == 0:\n    features = sp.identity(features.shape[0])  # featureless\n\nfeatures = sparse_to_tuple(features.tocoo())\nnum_features = features[2][1]\nfeatures_nonzero = features[1].shape[0]\n\nrocs = np.zeros(FLAGS.test_count)\naps = np.zeros(FLAGS.test_count)\n\nfor k in range(FLAGS.test_count):\n\n    func = get_test_edges\n    if FLAGS.connected_split == 0:\n        func = mask_test_edges\n\n    adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = func(adj_def)\n    val_edges = tuple(zip(*val_edges))\n    val_edges_false = tuple(zip(*val_edges_false))\n    test_edges = tuple(zip(*test_edges))\n    test_edges_false = tuple(zip(*test_edges_false))\n    adj = adj_train\n\n    z = manifold.spectral_embedding(adj, n_components=FLAGS.emb_size, random_state=k)\n    adj_rec = np.dot(z, z.T)\n\n    preds = sigmoid(adj_rec[test_edges])\n    preds_neg = sigmoid(adj_rec[test_edges_false])\n\n    preds_all = np.hstack([preds, preds_neg])\n    labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])\n    roc_score = roc_auc_score(labels_all, preds_all)\n    ap_score = average_precision_score(labels_all, preds_all)\n    rocs[k] = roc_score\n    aps[k] = ap_score\n\nprint((np.mean(rocs), stats.sem(rocs)))\nprint((np.mean(aps), stats.sem(aps))) \n","repo_name":"aaronzweig/gae_single","sub_path":"gae/gae/spectral_clustering.py","file_name":"spectral_clustering.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"34240642664","text":"import heapq\nimport sys\nfrom collections import deque\n\nclass Graph(object):\n    def __init__(self, cnt):\n        self.count = cnt\n        self.adj = [[] for _ in range(cnt)]\n    \n    def add_directed_edge(self, source, destination, cost=1):\n        edge = (destination, cost)\n        self.adj[source].append(edge)\n    \n    def add_undirected_edge(self, source, destination, cost=1):\n        self.add_directed_edge(source, destination, cost)\n        self.add_directed_edge(destination, source, cost)\n    \n    def print(self):\n        for i in range(self.count):\n            print(f\"Vertex {i} is connected to:\", end=' ')\n            for edge in self.adj[i]:\n                print(f\"{edge[0]}(cost:{edge[1]})\", end=' ')\n            print()\n\n    def dfs_util(self, index, visited):\n        visited[index] = True\n        for edge in self.adj[index]:\n            destination = edge[0]\n            if visited[destination] == False:\n                self.dfs_util(destination, visited)\n\n    def dfs(self, source, target):\n        visited = [False] * self.count\n        self.dfs_util(source, visited)\n        return visited[target]\n\n    def dfs_stack(self, source, target):\n        visited = [False] * self.count\n        stk = []\n        stk.append(source)\n        visited[source] = True\n        while len(stk) != 0:\n            curr = stk.pop()\n            for edge in self.adj[curr]:\n                destination = edge[0]\n                if visited[destination] == False:\n                    stk.append(destination)\n                    visited[destination] = True\n        return visited[target]\n    \n    def bfs(self, source, target):\n        visited = [False] * self.count\n        visited[source] = True\n        que = deque([])\n        que.append(source)\n        while len(que) != 0:\n            curr = que.popleft()\n            for edge in self.adj[curr]:\n                destination = edge[0]\n                if visited[destination] == False:\n                    que.append(destination)\n                    visited[destination] = True\n        return visited[target]\n\n    def topological_sort_dfs(self, index, visited, stk):\n        visited[index] = True\n        for edge in self.adj[index]:\n            destination = edge[0]\n            if visited[destination] == False:\n                self.topological_sort_dfs(destination, visited, stk)\n        stk.append(index)\n\n    def topological_sort(self):\n        count = self.count\n        visited = [False] * count\n        stk = []\n        for i in range(count):\n            if visited[i] == False:\n                self.topological_sort_dfs(i, visited, stk)\n        \n        print(\"topological_sort::\", end=' ')\n        while len(stk) != 0:\n            print(stk.pop(), end=' ') \n        print(\"\")\n\n    def path_exist(self, source, destination):\n        count = self.count\n        visited = [False] * count\n        self.dfs_util(source, visited)\n        return visited[destination]\n\n    def count_all_path_dfs(self, visited, source, dest):\n        if source == dest:\n            return 1\n\n        count = 0\n        visited[source] = 1\n        for edge in self.adj[source]:\n            if visited[edge[0]] == 0:\n                count += self.count_all_path_dfs(visited, edge[0], dest)\n        visited[source] = 0\n        return count\n\n    def count_all_path(self, src, dest):\n        visited = [0]*self.count\n        return self.count_all_path_dfs(visited, src, dest)\n\n    def print_all_path_dfs(self, visited, source, dest, path):\n        path.append(source)\n        if source == dest:\n            print(path)\n            path.pop()\n            return\n\n        visited[source] = 1\n        for edge in self.adj[source]:\n            if visited[edge[0]] == 0:\n                self.print_all_path_dfs(visited, edge[0], dest, path)\n        visited[source] = 0\n        path.pop()\n\n    def print_all_path(self, src, dest):\n        visited = [0]*self.count\n        path = []\n        self.print_all_path_dfs(visited, src, dest, path)\n\n    def root_vertex(self):\n        count = self.count\n        visited = [False] * count\n        ret_val = -1\n        for i in range(count):\n            if visited[i] == False:\n                self.dfs_util(i, visited)\n                ret_val = i\n        \n        # ret_val may be the root vertex.\n        visited = [False] * count\n        self.dfs_util(ret_val, visited)\n        for i in range(count):\n            
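# Verify that every vertex is reachable from the candidate root; if not, there is no root vertex.\n            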
if visited[i] == False:\n print(\"Disconnected graph!\")\n return -1 \n print(\"Root vertex is ::\", ret_val)\n return ret_val\n\n def transitive_closure_util(self, source ,index, tc):\n if tc[source][index] == 1:\n return\n tc[source][index] = 1\n for edge in self.adj[index]:\n self.transitive_closure_util(source, edge[0], tc)\n\n def transitive_closure(self):\n count = self.count\n tc = [[0 for _ in range(count)] for _ in range(count)]\n for source in range(count):\n self.transitive_closure_util(source, source, tc)\n for row in tc:\n print(row)\n return tc\n\n def bfs_distance(self, source, dest):\n count = self.count\n visited = [False] * count\n visited[source] = True\n que = deque([])\n que.append((source, 0))\n while len(que) != 0:\n node = que.popleft()\n curr = node[0]\n depth = node[1]\n for edge in self.adj[curr]:\n if edge[0] == dest:\n return depth+1\n if visited[edge[0]] == False:\n que.append((edge[0], depth+1))\n visited[edge[0]] = True\n return -1\n\n def bfs_level_node(self, source):\n count = self.count\n visited = [False] * count\n visited[source] = True\n que = deque([])\n que.append((source, 0))\n print(\"\\nNode - Level\")\n while len(que) != 0:\n node = que.popleft()\n curr = node[0]\n depth = node[1]\n print(curr ,\" - \", depth)\n for edge in self.adj[curr]:\n destination = edge[0]\n if visited[destination] == False:\n que.append((destination, depth+1))\n visited[destination] = True\n\n def is_cycle_present_dfs(self, index, visited, marked):\n visited[index] = True\n marked[index] = True\n\n for node in self.adj[index]:\n dest = node[0]\n if marked[dest] == True:\n return True\n\n if visited[dest] == False:\n if self.is_cycle_present_dfs(dest, visited, marked) :\n return True\n \n marked[index] = False\n return False\n\n def is_cycle_present(self):\n count = self.count\n visited = [False] * count\n marked = [False] * count\n for index in range(count):\n if visited[index] == False:\n if self.is_cycle_present_dfs(index, visited, marked) :\n return True\n return False\n\n def is_cycle_present_color_dfs(self, index, visited):\n visited[index] = \"Grey\"\n for node in self.adj[index]:\n dest = node[0]\n if visited[dest] == \"Grey\":\n return True\n \n if visited[dest] == \"White\":\n if self.is_cycle_present_color_dfs(dest, visited) :\n return True \n visited[index] = \"Black\"\n return False\n\n def is_cycle_present_color(self):\n count = self.count\n visited = [\"White\"] * count\n for index in range(count):\n if visited[index] == \"White\":\n if self.is_cycle_present_color_dfs(index, visited) :\n return True\n return False\n\n def is_cycle_present_undirected_dfs(self, index, parentIndex, visited):\n visited[index] = True\n for node in self.adj[index]:\n dest = node[0]\n if visited[dest] == False:\n if self.is_cycle_present_undirected_dfs(dest, index, visited) :\n return True\n elif parentIndex != dest :\n return True\n return False\n\n def is_cycle_present_undirected(self):\n count = self.count\n visited = [False] * count\n for index in range(count):\n if visited[index] == False:\n if self.is_cycle_present_undirected_dfs(index, -1, visited) :\n return True\n return False\n\n def find2(self, parent, index) :\n p = parent[index]\n while (p != -1) :\n index = p\n p = parent[index]\n return index\n\n def union2(self, parent, x, y) :\n parent[y] = x\n\n def is_cycle_present_undirected2(self) :\n count = self.count\n parent = [-1] * count\n edge = []\n flags = [[False] * count for _ in range(count)]\n\n for i in range(count) :\n ad = self.adj[i]\n for adn in ad :\n # Using 
flags[][] list, if considered edge x to y, then ignore edge y to x.\n if (flags[adn[0]][i] == False) :\n edge.append((i, adn[0]))\n flags[i][adn[0]] = True\n\n for e in edge:\n x = self.find2(parent, e[0])\n y = self.find2(parent, e[1])\n if (x == y) :\n return True\n self.union2(parent, x, y)\n return False\n\n class Sets :\n def __init__(self, p, r) :\n self.parent = p\n self.rank = r\n\n def find(self, sets, index) :\n p = sets[index].parent\n while (p != index) :\n index = p\n p = sets[index].parent\n return index\n\n # consider x and y are roots of sets.\n def union(self, sets, x, y) :\n if (sets[x].rank < sets[y].rank) : \n sets[x].parent = y\n elif (sets[y].rank < sets[x].rank) : \n sets[y].parent = x\n else :\n sets[x].parent = y\n sets[y].rank += 1\n\n def is_cycle_present_undirected3(self) :\n count = self.count\n # Different subsets are created.\n sets = [None] * count\n for i in range(count) :\n sets[i] =self.Sets(i, 0) \n\n edge = []\n flags = [[False] * count for _ in range(count)]\n for i in range(count) :\n ad = self.adj[i]\n for adn in ad:\n # Using flags[][] list, if considered edge x to y, \n # then ignore edge y to x.\n if (flags[adn[0]][i] == False) :\n edge.append((i, adn[0]))\n flags[i][adn[0]] = True\n\n for e in edge:\n x = self.find(sets, e[0])\n y = self.find(sets, e[1])\n if (x == y) :\n return True\n self.union(sets, x, y)\n return False\n\n def transpose_graph(self):\n count = self.count\n gph = Graph(count)\n for i in range(count):\n for edge in self.adj[i]:\n destination = edge[0]\n gph.add_directed_edge(destination, i)\n return gph\n\n def is_connected_undirected(self):\n count = self.count\n visited = [False] * count\n self.dfs_util(0, visited)\n for i in range(count):\n if visited[i] == False:\n return False\n return True\n\n def is_strongly_connected(self):\n count = self.count\n visited = [False] * count\n self.dfs_util(0, visited)\n for i in range(count):\n if visited[i] == False:\n return False\n graph_reversed = self.transpose_graph()\n visited = [False] * count\n graph_reversed.dfs_util(0, visited)\n for i in range(count):\n if visited[i] == False:\n return False\n return True\n\n def dfs_util2(self, index, visited, stk):\n visited[index] = True\n for edge in self.adj[index]:\n destination = edge[0]\n if visited[destination] == False:\n self.dfs_util2(destination, visited, stk)\n stk.append(index)\n\n def strongly_connected_component(self):\n count = self.count\n visited = [False] * count\n stk = []\n for i in range(count):\n if visited[i] == False:\n self.dfs_util2(i, visited, stk)\n \n graph_reversed = self.transpose_graph()\n visited = [False] * count\n while len(stk) != 0:\n index = stk.pop()\n if visited[index] == False:\n stk2 = []\n graph_reversed.dfs_util2(index, visited, stk2)\n print(stk2)\n\n def prims_mst(self):\n previous = [-1] * self.count\n distance = [sys.maxsize] * self.count\n visited = [False] * self.count\n \n source = 0\n distance[source] = 0\n previous[source] = source\n\n pq = PriorityQueue()\n for i in range(self.count):\n pq.add(sys.maxsize, i)\n pq.update_key(0, source)\n \n while pq.size() != 0:\n val = pq.pop()\n src = val[1]\n visited[src] = True\n\n for edge in self.adj[src]:\n destination = edge[0]\n cost = edge[1]\n if cost < distance[destination] and visited[destination] == False:\n distance[destination] = cost\n previous[destination] = src\n pq.update_key(cost, destination)\n \n total_cost = 0\n print(\"Edges are : \", end=\"\")\n for i in range(self.count):\n if distance[i] == sys.maxsize:\n 
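# distance[i] was never updated, so vertex i is unreachable from the spanning tree.\n                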
print(f\"({previous[i]}->{i} @ Unreachable)\", end=\"\")\n            elif i != previous[i]:\n                print(f\"({previous[i]}->{i} @ {distance[i]}) \", end=\"\")\n                total_cost += distance[i]\n\n        print(\"\\nTotal MST cost:\", total_cost)\n\n    def kruskalMST(self) :\n        count = self.count\n        # Different subsets are created.\n        sets = []\n        for i in range(count) :\n            sets.append(self.Sets(i, 0)) \n        \n        # Edges are added to list and sorted.\n        edges = []\n\n        for i in range(count) :\n            for adn in self.adj[i]:\n                edges.append((i, adn[0], adn[1]))\n        E = len(edges)\n\n        edges.sort(key=lambda edge : edge[2])\n        sum = 0\n        print(\"Edges are : \", end=\"\")\n        for i in range(E) :\n            x = self.find(sets, edges[i][0])\n            y = self.find(sets, edges[i][1])\n            if (x != y) :\n                print(f\"({edges[i][0]}->{edges[i][1]} @ {edges[i][2]}) \", end =\"\")\n                sum += edges[i][2]\n                self.union(sets, x, y)\n        print(\"\\nTotal MST cost:\", sum)\n\n    def print_path_util(self, previous, source, dest) :\n        if (dest == source) :\n            print(source, end=\"\")\n        else :\n            self.print_path_util(previous, source, previous[dest])\n            print(f\"->{dest}\", end=\"\")\n\n    def print_path(self, previous, dist, count, source) :\n        print(\"Shortest Paths : \", end=\"\")\n        for i in range(count) :\n            if (dist[i] == sys.maxsize) :\n                print(f\"({source}->{i} @ Unreachable)\", end=\"\")\n            elif(i != previous[i]) :\n                print(\"(\", end=\"\")\n                self.print_path_util(previous, source, i)\n                print(f\" @ {dist[i]}) \", end=\"\")\n        print()\n\n    def shortest_path(self, source):\n        count = self.count\n        distance = [sys.maxsize] * count\n        previous = [-1] * count\n\n        que = deque([])\n        que.append(source)\n        distance[source] = 0\n        previous[source] = source\n\n        while len(que) != 0:\n            curr = que.popleft()\n            for edge in self.adj[curr]:\n                destination = edge[0]\n                if distance[destination] == sys.maxsize:\n                    distance[destination] = distance[curr] + 1\n                    previous[destination] = curr\n                    que.append(destination)\n\n        self.print_path(previous, distance, count, source)\n\n    def dijkstra(self, source):\n        previous = [-1] * self.count\n        distance = [sys.maxsize] * self.count\n        visited = [False] * self.count\n\n        distance[source] = 0\n        previous[source] = source  # the source is its own predecessor\n        pq = PriorityQueue()\n        for i in range(self.count):\n            pq.add(sys.maxsize, i)\n        pq.update_key(0, source)\n        \n        while pq.size() != 0:\n            val = pq.pop()\n            src = val[1]\n            visited[src] = True\n            for edge in self.adj[src]:\n                destination = edge[0]\n                cost = edge[1]\n                alt = cost + distance[src]\n                if alt < distance[destination] and visited[destination] == False:\n                    distance[destination] = alt\n                    previous[destination] = src\n                    pq.update_key(alt, destination)\n\n        self.print_path(previous, distance, self.count, source)\n\n    def bellman_ford_shortest_path(self, source):\n        count = self.count\n        distance = [sys.maxsize] * count\n        previous = [-1] * count\n\n        distance[source] = 0\n        previous[source] = source\n\n        # The outer loop runs (V-1) times; the inner two loops together\n        # relax every edge once, so the total complexity is O(V*E).\n        for _ in range(count - 1):\n            for j in range(count):\n                for edge in self.adj[j]:\n                    newDistance = distance[j] + edge[1]\n                    if distance[edge[0]] > newDistance:\n                        distance[edge[0]] = newDistance\n                        previous[edge[0]] = j\n        \n        self.print_path(previous, distance, count, source)\n\n    def floyd_warshall(self) :\n        V = self.count\n        distance = [[sys.maxsize] * (V) for _ in range(V)]\n        previous = [[-1] * (V) for _ in range(V)]\n        \n        for i in range(V) : \n            previous[i][i] = i\n        \n        for i in range(V) :\n            adl = self.adj[i]\n            for adn in adl:\n                previous[i][adn[0]] = i\n                distance[i][adn[0]] = adn[1]\n\n        # Pick intermediate vertices.\n        for k in range(V) :\n            # Pick source vertices one by one.\n            for i in range(V) :\n                # Pick destination vertices.\n                for j in range(V) :\n                    # If we have a shorter path from i to j via k,\n                    # then update dist[i][j] and path[i][j].\n                    if (distance[i][k] + distance[k][j] < distance[i][j]) :\n                        distance[i][j] = distance[i][k] + distance[k][j]\n                        previous[i][j] = previous[k][j]\n            # dist[i][i] is 0 at the start.\n            # If a path from i back to i becomes negative, the graph has a negative-weight cycle.\n            if (distance[i][i] < 0) :\n                print(\"Negative-weight cycle found.\")\n                return\n        self.print_solution(distance, previous, V)\n\n    def print_solution(self, distance, previous, V) :\n        print(\"Shortest Paths : \", end=\"\")\n        for u in range(V) :\n            for v in range(V) :\n                if (u != v and previous[u][v] != -1) :\n                    print(\"(\", end=\"\")\n                    self.print_path2(previous, u, v)\n                    print(f\" @ {distance[u][v]} ) \", end =\"\")\n        print()\n\n    def print_path2(self, previous, u, v) :\n        if (previous[u][v] == u) :\n            print(f\"{u}->{v}\", end =\"\")\n            return\n        self.print_path2(previous, u, previous[u][v])\n        print(f\"->{v}\", end =\"\")\n\n\n    def is_connected(self):\n        count = self.count\n        visited = [False] * count\n\n        # Find a vertex with non-zero degree\n        for i in range(count):\n            if len(self.adj[i]) > 0:\n                # dfs traversal of graph from a vertex with non-zero degree\n                self.dfs_util(i, visited)\n                break\n\n        # Check if all non-zero degree vertices are visited\n        for i in range(count):\n            if visited[i]==False and len(self.adj[i]) > 0:\n                return False\n        \n        return True\n\n    def is_eulerian(self):\n        count = self.count\n        # Check if all non-zero degree vertices are connected\n        if self.is_connected() == False:\n            print(\"graph is not Eulerian\")\n            return 0\n        else:\n            # Count vertices with odd degree\n            odd = 0\n            for i in range(count):\n                if len(self.adj[i]) % 2 != 0:\n                    odd += 1\n\n            if odd > 2:\n                print(\"graph is not Eulerian\")\n                return 0\n            elif odd == 2:\n                print(\"graph is Semi-Eulerian\")\n                return 1\n            elif odd == 0:\n                print(\"graph is Eulerian\")\n                return 2\n\n    def is_strongly_connected2(self):\n        count = self.count\n        visited = [False] * count\n        \n        # Find a vertex with non-zero degree\n        for index in range(count):\n            if len(self.adj[index]) > 0:\n                break\n        # dfs traversal of graph from a vertex with non-zero degree\n        self.dfs_util(index, visited)\n        \n        for i in range(count):\n            if visited[i] == False and len(self.adj[i]) > 0:\n                return False\n\n        graph_reversed = self.transpose_graph()\n        visited = [False] * count\n        graph_reversed.dfs_util(index, visited)\n        \n        for i in range(count):\n            if visited[i] == False and len(self.adj[i]) > 0:\n                return False\n        return True\n\n    def is_eulerian_cycle(self):\n        # Check if all non-zero degree vertices \n        # are connected\n        if self.is_strongly_connected2() == False:\n            return False\n        count = 
self.count\n inDegree = [0] * count\n outDegree = [0] * count\n\n # Check if in degree and out degree of \n # every vertex is same\n for i in range(count):\n outDegree[i] = len(self.adj[i])\n for j in self.adj[i]:\n inDegree[j[0]] += 1\n \n for i in range(count):\n if inDegree[i] != outDegree[i]:\n return False\n return True\n\ndef height_tree_parent_arr(arr):\n count = len(arr)\n gph = Graph(count)\n for i in range(len(arr)):\n if arr[i] != -1 :\n gph.add_directed_edge(arr[i], i)\n else:\n source = i\n\n visited = [False] * count\n visited[source] = True\n que = deque([])\n que.append((source, 0))\n maxHeight = 0\n while len(que) != 0:\n node = que.popleft()\n curr = node[0]\n height = node[1]\n if height > maxHeight:\n maxHeight = height\n for edge in gph.adj[curr]:\n destination = edge[0]\n if visited[destination] == False:\n que.append((destination, height+1))\n visited[destination] = True\n return maxHeight\n\ndef get_height(arr, height, index):\n if arr[index] == -1:\n return 0\n else:\n return get_height(arr, height, arr[index]) + 1\n\ndef height_tree_parent_arr2(arr):\n count = len(arr)\n height = [-1] * count\n maxHeight = -1\n for i in range(len(arr)):\n height[i] = get_height(arr, height, i)\n maxHeight = max(maxHeight, height[i])\n return maxHeight\n\nclass PriorityQueue(object):\n def __init__(self):\n self.que = []\n self.count = 0\n \n def add(self, key, value):\n heapq.heappush(self.que, (key, value))\n \n def update_key(self, key, value):\n heapq.heappush(self.que, (key, value))\n\n def pop(self):\n val = heapq.heappop(self.que)\n return val\n\n def size(self):\n return len(self.que)\n\n# Testing code\ndef test1():\n gph = Graph(4)\n gph.add_undirected_edge(0, 1)\n gph.add_undirected_edge(0, 2)\n gph.add_undirected_edge(1, 2)\n gph.add_undirected_edge(2, 3)\n gph.print()\n\n\"\"\"\nVertex 0 is connected to: 1(cost:1) 2(cost:1) \nVertex 1 is connected to: 0(cost:1) 2(cost:1) \nVertex 2 is connected to: 0(cost:1) 1(cost:1) 3(cost:1) \nVertex 3 is connected to: 2(cost:1) \n\"\"\"\n\n# Testing code\ndef test2():\n gph = Graph(8)\n gph.add_undirected_edge(0, 1)\n gph.add_undirected_edge(0, 2)\n gph.add_undirected_edge(0, 3)\n gph.add_undirected_edge(1, 4)\n gph.add_undirected_edge(2, 5)\n gph.add_undirected_edge(3, 6)\n gph.add_undirected_edge(4, 7)\n gph.add_undirected_edge(5, 7)\n gph.add_undirected_edge(6, 7)\n print(\"Path between 0 & 6:\", gph.dfs(0, 6))\n print(\"Path between 0 & 6:\", gph.bfs(0, 6))\n print(\"Path between 0 & 6:\", gph.dfs_stack(0, 6))\n\n\"\"\"\nPath between 0 & 6: True\nPath between 0 & 6: True\nPath between 0 & 6: True\n\"\"\"\n\n# Testing code\ndef test3():\n gph = Graph(9)\n gph.add_directed_edge(0, 2)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(1, 3)\n gph.add_directed_edge(1, 4)\n gph.add_directed_edge(3, 2)\n gph.add_directed_edge(3, 5)\n gph.add_directed_edge(4, 5)\n gph.add_directed_edge(4, 6)\n gph.add_directed_edge(5, 7)\n gph.add_directed_edge(6, 7)\n gph.add_directed_edge(7, 8)\n gph.topological_sort()\n\n# topological_sort:: 1 4 6 3 5 7 8 0 2 \n\n# Testing code\ndef test4():\n gph = Graph(5)\n gph.add_directed_edge(0, 1, 1)\n gph.add_directed_edge(0, 2, 1)\n gph.add_directed_edge(2, 3, 1)\n gph.add_directed_edge(1, 3, 1)\n gph.add_directed_edge(3, 4, 1)\n gph.add_directed_edge(1, 4, 1)\n print(\"path_exist::\", gph.path_exist(0, 4))\n print(\"Path Count::\", gph.count_all_path(0, 4))\n gph.print_all_path(0, 4)\n\n\"\"\"\npath_exist :: True\nPath Count :: 3\n[0, 1, 3, 4]\n[0, 1, 4]\n[0, 2, 3, 4]\n\"\"\"\n\n# Testing code\ndef 
test5():\n gph = Graph(7)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(0, 2)\n gph.add_directed_edge(1, 3)\n gph.add_directed_edge(4, 1)\n gph.add_directed_edge(6, 4)\n gph.add_directed_edge(5, 6)\n gph.add_directed_edge(5, 2)\n gph.add_directed_edge(6, 0)\n gph.root_vertex()\n\n# Root vertex is :: 5\n\n# Testing code\ndef test6():\n gph = Graph(4)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(0, 2)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(2, 0)\n gph.add_directed_edge(2, 3)\n gph.add_directed_edge(3, 3)\n gph.transitive_closure()\n\n\"\"\"\n[1, 1, 1, 1]\n[1, 1, 1, 1]\n[1, 1, 1, 1]\n[0, 0, 0, 1]\n\"\"\"\n\n# Testing code\ndef test7():\n gph = Graph(7)\n gph.add_undirected_edge(0, 1)\n gph.add_undirected_edge(0, 2)\n gph.add_undirected_edge(0, 4)\n gph.add_undirected_edge(1, 2)\n gph.add_undirected_edge(2, 5)\n gph.add_undirected_edge(3, 4)\n gph.add_undirected_edge(4, 5)\n gph.add_undirected_edge(4, 6)\n print(gph.bfs_distance(1, 6))\n gph.bfs_level_node(1)\n\n# 3\n\"\"\"\nNode - Level\n1 - 0\n0 - 1\n2 - 1\n4 - 2\n5 - 2\n3 - 3\n6 - 3\n\"\"\"\n\n# Testing code\ndef test8():\n gph = Graph(6)\n gph.add_undirected_edge(0, 1)\n gph.add_undirected_edge(1, 2)\n gph.add_undirected_edge(3, 4)\n gph.add_undirected_edge(4, 2)\n gph.add_undirected_edge(2, 5)\n print(\"Is cycle present:\", gph.is_cycle_present_undirected())\n print(\"Is cycle present:\", gph.is_cycle_present_undirected2())\n print(\"Is cycle present:\", gph.is_cycle_present_undirected3())\n\n gph.add_undirected_edge(3, 5)\n print(\"Is cycle present:\", gph.is_cycle_present_undirected())\n print(\"Is cycle present:\", gph.is_cycle_present_undirected2())\n print(\"Is cycle present:\", gph.is_cycle_present_undirected3())\n\n print(\"is_connected_undirected :\", gph.is_connected_undirected())\n\n\"\"\"\nIs cycle present: False\nIs cycle present: False\nIs cycle present: False\nIs cycle present: True\nIs cycle present: True\nIs cycle present: True\n\nis_connected_undirected : True\n\"\"\"\n\n# Testing code\ndef test9():\n gph = Graph(5)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(0, 2)\n gph.add_directed_edge(1, 3)\n gph.add_directed_edge(2, 3)\n gph.add_directed_edge(3, 4)\n print(\"Cycle present:\", gph.is_cycle_present())\n print(\"Cycle present:\", gph.is_cycle_present_color())\n gph.add_directed_edge(4, 1)\n print(\"Cycle present:\", gph.is_cycle_present())\n print(\"Cycle present:\", gph.is_cycle_present_color())\n\n\"\"\"\nCycle present: False\nCycle present: False\nCycle present: True\nCycle present: True\n\"\"\"\n\n# Testing code\ndef test10():\n gph = Graph(4)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(0, 2)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(2, 3)\n gph2 = gph.transpose_graph()\n gph2.print()\n\n\"\"\"\nVertex 0 is connected to: \nVertex 1 is connected to: 0(cost:1) \nVertex 2 is connected to: 0(cost:1) 1(cost:1) \nVertex 3 is connected to: 2(cost:1) \n\"\"\"\n\n# Testing code\ndef test11():\n # Create a graph given in the above diagram\n gph = Graph(5)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(2, 3)\n gph.add_directed_edge(3, 0)\n gph.add_directed_edge(2, 4)\n gph.add_directed_edge(4, 2)\n print(\"is_strongly_connected :\", gph.is_strongly_connected())\n \n g2 = Graph(4)\n g2.add_directed_edge(0, 1)\n g2.add_directed_edge(1, 2)\n g2.add_directed_edge(2, 3)\n print(\"is_strongly_connected :\", g2.is_strongly_connected())\n\n\"\"\"\nis_strongly_connected : True\nis_strongly_connected : False\n\"\"\"\n\n# Testing 
code\ndef test12():\n gph = Graph(7)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(2, 0)\n gph.add_directed_edge(2, 3)\n gph.add_directed_edge(3, 4)\n gph.add_directed_edge(4, 5)\n gph.add_directed_edge(5, 3)\n gph.add_directed_edge(5, 6)\n gph.strongly_connected_component()\n\n\"\"\"\n[1, 2, 0]\n[4, 5, 3]\n[6]\n\"\"\"\n\n# Testing code\ndef test13():\n gph = Graph(9)\n gph.add_undirected_edge(0, 1, 4)\n gph.add_undirected_edge(0, 7, 8)\n gph.add_undirected_edge(1, 2, 8)\n gph.add_undirected_edge(1, 7, 11)\n gph.add_undirected_edge(2, 3, 7)\n gph.add_undirected_edge(2, 8, 2)\n gph.add_undirected_edge(2, 5, 4)\n gph.add_undirected_edge(3, 4, 9)\n gph.add_undirected_edge(3, 5, 14)\n gph.add_undirected_edge(4, 5, 10)\n gph.add_undirected_edge(5, 6, 2)\n gph.add_undirected_edge(6, 7, 1)\n gph.add_undirected_edge(6, 8, 6)\n gph.add_undirected_edge(7, 8, 7)\n gph.prims_mst()\n gph.kruskalMST()\n gph.dijkstra(0)\n\n\"\"\" prims_mst\nEdges are : (0->1 @ 4) (1->2 @ 8) (2->3 @ 7) (3->4 @ 9) (2->5 @ 4) (5->6 @ 2) (6->7 @ 1) (2->8 @ 2) \nTotal MST cost: 37\n\"\"\"\n\"\"\" kruskalMST\nEdges are : (6->7 @ 1) (2->8 @ 2) (5->6 @ 2) (0->1 @ 4) (2->5 @ 4) (2->3 @ 7) (0->7 @ 8) (3->4 @ 9) \nTotal MST cost: 37\n\"\"\"\n\"\"\"dijkstra\nShortest Paths : (0->1 @ 4) (0->1->2 @ 12) (0->1->2->3 @ 19) \n (0->7->6->5->4 @ 21) (0->7->6->5 @ 11) \n (0->7->6 @ 9) (0->7 @ 8) (0->1->2->8 @ 14) \n\"\"\"\n\n# Testing code\ndef test14():\n gph = Graph(9)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(0, 7)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(1, 7)\n gph.add_directed_edge(2, 3)\n gph.add_directed_edge(2, 8)\n gph.add_directed_edge(2, 5)\n gph.add_directed_edge(3, 4)\n gph.add_directed_edge(3, 5)\n gph.add_directed_edge(4, 5)\n gph.add_directed_edge(5, 6)\n gph.add_directed_edge(6, 7)\n gph.add_directed_edge(6, 8)\n gph.add_directed_edge(7, 8)\n gph.shortest_path(0)\n\n\"\"\"\nShortest Paths : (0->1 @ 1) (0->1->2 @ 2) (0->1->2->3 @ 3) \n (0->1->2->3->4 @ 4) (0->1->2->5 @ 3) \n (0->1->2->5->6 @ 4) (0->7 @ 1) (0->7->8 @ 2) \n\"\"\"\n\n# Testing code\ndef test15():\n gph = Graph(5)\n gph.add_directed_edge(0, 1, 3)\n gph.add_directed_edge(0, 4, 2)\n gph.add_directed_edge(1, 2, 1)\n gph.add_directed_edge(2, 3, 1)\n gph.add_directed_edge(4, 1, -2)\n gph.add_directed_edge(4, 3, 1)\n gph.bellman_ford_shortest_path(0)\n\n\"\"\"\nShortest Paths : (0->4->1 @ 0) (0->4->1->2 @ 1) \n (0->4->1->2->3 @ 2) (0->4 @ 2) \n\"\"\"\n\n# Testing code\ndef test16():\n parentArray = [-1, 0, 1, 2, 3]\n print(\"Height :\", height_tree_parent_arr(parentArray))\n print(\"Height :\", height_tree_parent_arr2(parentArray))\n parentArray = [-1, 0, 0, 0, 3, 1, 1, 2]\n print(\"Height :\", height_tree_parent_arr(parentArray))\n print(\"Height :\", height_tree_parent_arr2(parentArray))\n\n\"\"\"\nHeight : 4\nHeight : 4\nHeight : 2\nHeight : 2\n\"\"\"\n\n# Testing code\ndef test17():\n gph = Graph(5)\n gph.add_undirected_edge(1, 0)\n gph.add_undirected_edge(0, 2)\n gph.add_undirected_edge(2, 1)\n gph.add_undirected_edge(0, 3)\n gph.add_undirected_edge(3, 4)\n gph.is_eulerian()\n gph.add_undirected_edge(4, 0)\n gph.is_eulerian()\n\n\"\"\"\ngraph is Semi-Eulerian\ngraph is Eulerian\n\"\"\"\n\n# Testing code\ndef test18():\n gph = Graph(5)\n gph.add_directed_edge(0, 1)\n gph.add_directed_edge(1, 2)\n gph.add_directed_edge(2, 0)\n gph.add_directed_edge(0, 4)\n gph.add_directed_edge(4, 3)\n gph.add_directed_edge(3, 0)\n print(gph.is_eulerian_cycle())\n\n# True\n\ndef test19() :\n gph = Graph(4)\n 
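# Zero-cost self-loops seed distance[i][i] = 0 for floyd_warshall.\n    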
gph.add_directed_edge(0, 0, 0)\n gph.add_directed_edge(1, 1, 0)\n gph.add_directed_edge(2, 2, 0)\n gph.add_directed_edge(3, 3, 0)\n gph.add_directed_edge(0, 1, 5)\n gph.add_directed_edge(0, 3, 10)\n gph.add_directed_edge(1, 2, 3)\n gph.add_directed_edge(2, 3, 1)\n gph.floyd_warshall()\n\n\"\"\"\nShortest Paths : (0->1 @ 5 ) (0->1->2 @ 8 ) (0->1->2->3 @ 9 ) \n (1->2 @ 3 ) (1->2->3 @ 4 ) (2->3 @ 1 ) \n\"\"\"\n\n\ntest1()\ntest2()\ntest3()\ntest4()\ntest5()\ntest6()\ntest7()\ntest8()\ntest9()\ntest10()\ntest11()\ntest12()\ntest13()\ntest14()\ntest15()\ntest16()\ntest17()\ntest18()\ntest19()\n","repo_name":"Hemant-Jain-Author/Problem-Solving-in-Data-Structures-Algorithms-using-Python","sub_path":"Graph/GraphAdjList.py","file_name":"GraphAdjList.py","file_ext":"py","file_size_in_byte":33791,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"51"}
+{"seq_id":"32120468097","text":"from src.database_handler import Database\nfrom datetime import datetime\nimport json\n\nconfig = json.load(open('config.json'))\ncollect_db = config['databases']['collect_acessos']\nprocessed_db = config['databases']['processed_acessos']\ndatabase = Database()\n\ndef printMenu():\n    print('Employee clocked in (1)')\n    print('Employee clocked out (2)')\n    print('View accesses (3)')\n    print('View processed records (4)')\n    op = input('Option: ')\n    return op\n\ndef novaSaida():\n    sql = 'SELECT * FROM acessos WHERE data_saida is NULL ORDER BY data_entrada, hora_entrada'\n    data = database.collect.run(sql)\n    if data:\n        item = data.pop(0)\n        now = datetime.now()\n        data_saida = now.date()\n        hora_saida = now.time()\n        sql = f'UPDATE acessos SET data_saida = \"{data_saida}\", hora_saida = \"{hora_saida}\" WHERE id = {item[0]}'\n        database.collect.run(sql, commit = True)\n        print(f'exit recorded for id {item[0]}')\n    else:\n        print('no open entry')\n\ndef novaEntrada():\n    id = len(database.collect.fetchTable(0, 'acessos'))\n    now = datetime.now()\n    data_entrada = now.date()\n    hora_entrada = now.time()\n    sql = f'INSERT INTO acessos (id, nome, cpf, data_entrada, hora_entrada, data_saida, hora_saida) VALUES ({id}, \"Fernando Burgos\", \"02576698506\", \"{data_entrada}\", \"{hora_entrada}\", NULL, NULL)'\n    database.collect.run(sql, commit = True)\n    print(f'entry recorded with id {id}')\n    \ndef visualizarProcessados():\n    data = database.processed.fetchTable(5, 'processados', ordered='id DESC')\n    for item in data:\n        print(item)\n\ndef visualizarAcessos():\n    data = database.collect.fetchTable(5, 'acessos', ordered = 'id DESC')\n    for item in data:\n        print(item)\n\ndef start():\n    database.collect.connect(collect_db)\n    database.processed.connect(processed_db)\n    try:\n        while True:\n            print()\n            op = printMenu()\n            if op == '1':\n                novaEntrada()\n            elif op == '2':\n                novaSaida()\n            elif op == '3':\n                visualizarAcessos()\n            elif op == '4':\n                visualizarProcessados()\n            \n    except KeyboardInterrupt:\n        print('End')\n        \n    \nstart()","repo_name":"nandobfer/agesbec","sub_path":"do_database.py","file_name":"do_database.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"35074365087","text":"from collections import deque\n\nn, m, k, x = map(int, input().split())\n\ngraph = [[] for _ in range(n + 1)]\nfor _ in range(m):\n a, b = map(int,input().split())\n graph[a].append(b)\n\ndistance = [-1] * (n + 1)\ndistance[x] = 0\n\nqueue = deque([x])\n\nwhile queue:\n now = queue.popleft()\n\n for next in graph[now]:\n if distance[next] == -1:\n distance[next] = distance[now] + 1\n queue.append(next)\n\nanswer = []\n\nfor i in range(len(distance)):\n if distance[i] == k:\n answer.append(i)\n\nif len(answer) == 0:\n print(-1)\nelse:\n for i in range(len(answer)):\n print(answer[i])\n","repo_name":"moong94/python_team_note","sub_path":"DFS & BFS/특정 거리의 도시 찾기.py","file_name":"특정 거리의 도시 찾기.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"13595667342","text":"# creates dataframe with columns for date and for sum of steps for that date\ndef stepsBYdate (df):\n import pandas as pd\n df['date/time'] = pd.to_datetime(df['date/time'].dt.date)\n grouped = df.groupby(['date/time']).sum()\n return(grouped)\n\n# creates dataframe with days of the week columns containing sum of steps for each date.\n# rows are weeks labeled by that week's Monday date\ndef stepsBYweekday (df):\n import pandas as pd\n\n # select only date from date/time column and add day of the week column\n df['date/time'] = pd.to_datetime(df['date/time'].dt.date)\n df['weekday'] = df['date/time'].dt.dayofweek\n\n # creates pivot table which sums steps by date and day of week\n pvt_stepsBYweekday = pd.pivot_table(df, index= ['date/time','weekday'], values= ['steps'], aggfunc= [sum])\n\n # selects step sums for each day of the week\n Mon = pvt_stepsBYweekday.query('weekday == [0]')\n Tue = pvt_stepsBYweekday.query('weekday == [1]')\n Wed = pvt_stepsBYweekday.query('weekday == [2]')\n Thu = pvt_stepsBYweekday.query('weekday == [3]')\n Fri = pvt_stepsBYweekday.query('weekday == [4]')\n Sat = pvt_stepsBYweekday.query('weekday == [5]')\n Sun = pvt_stepsBYweekday.query('weekday == [6]')\n\n # converts step sums for each day of the week to list\n MonSteps = pd.Series.tolist(Mon['sum'])\n TueSteps = pd.Series.tolist(Tue['sum'])\n WedSteps = pd.Series.tolist(Wed['sum'])\n ThuSteps = pd.Series.tolist(Thu['sum'])\n FriSteps = pd.Series.tolist(Fri['sum'])\n SatSteps = pd.Series.tolist(Sat['sum'])\n SunSteps = pd.Series.tolist(Sun['sum'])\n\n # creates dataframe with columns of steps by day of the week\n df_stepsBYweekday = pd.DataFrame(\n [MonSteps,\n TueSteps,\n WedSteps,\n ThuSteps,\n FriSteps,\n SatSteps,\n SunSteps]\n )\n df_stepsBYweekday_trans = df_stepsBYweekday.transpose()\n\n # adds day names for columns\n df_stepsBYweekday_trans.columns = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n # adds index as the first date of each week, starting with Monday\n PreWeeks = df[['date/time','weekday']]\n Weeks = PreWeeks[PreWeeks['weekday'] == 0]\n UniqueWeeks = Weeks.drop_duplicates()\n UniqueWeeks = UniqueWeeks.rename(columns={'date/time': 'Week Of'})\n df_stepsBYweekday_trans_weeks = df_stepsBYweekday_trans.set_index(UniqueWeeks['Week Of'])\n\n return(df_stepsBYweekday_trans_weeks)\n\ndef corr_steps_weekday(df):\n import pandas as pd\n # select only date from date/time column and add day of the week column\n df['date/time'] = pd.to_datetime(df['date/time'].dt.date)\n df['weekday'] = df['date/time'].dt.dayofweek\n\n #create dataframe with date, weekday, and daily total steps\n df = df.groupby(['date/time','weekday'], as_index=False).sum()\n #finds spearman correlation of daily steps and weekday\n corr_def = df.corr(method='spearman')\n return (corr_def)\n\n","repo_name":"bigdata-i523/hid312","sub_path":"project/code/Tables.py","file_name":"Tables.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2318487882","text":"import numpy as np\n\ndef play_game(boards, masks, draws):\n for i, draw in enumerate(draws):\n win_axs = 0\n masks[np.where(boards == draw)] = True\n if np.all(masks, axis = 1).any() == True:\n win_axs = 1\n elif np.all(masks, axis = 2).any() == True:\n win_axs = 2\n if win_axs !=0:\n winning_board = np.where(np.all(masks, axis=win_axs) ==True )[0][0]\n winning_mask = np.squeeze(np.invert(masks[winning_board]))\n winning_sum = boards[winning_board,winning_mask].sum(dtype = np.int)\n # print(f'winner with draw {draw} and sum {winning_sum*winning_draw}')\n boards = np.delete(boards, winning_board, axis = 0)\n masks = np.delete(masks, winning_board, axis = 0)\n return(winning_sum * draw, boards, masks, draws[i:])\n\n\nboards = np.zeros((500,5))\n\nwith open('input.txt', 'r') as file:\n draws = file.readline().rstrip()\n draws = draws.split(',')\n draws = np.array(draws, dtype = np.int)\n lines = filter(None, (line.rstrip() for line in file))\n for i, line in enumerate(lines):\n line = line.rstrip()\n a = line.split(' ')\n while '' in a:\n a.remove('')\n boards[i, :] = np.array(a, dtype = np.int)\n\nboards = boards.reshape(100,5,5)\nmasks = np.zeros((100,5,5), dtype = np.bool)\n\nsum, boards, masks, draws = play_game(boards, masks, draws)\nprint(f'first winning sum {sum}')\nwhile len(boards) != 0:\n sum, boards, masks, draws= play_game(boards, masks, draws)\nprint(f'last winning sum: {sum}')\n\n","repo_name":"linaku94/aoc_2021","sub_path":"day_4/day_4.py","file_name":"day_4.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"3915766908","text":"'''\nArachnid depends on several packages that cannot or should not be installed\nusing easy_install. \n\nNumPY and SciPy fall into the should-not category because\nthey can be sped up substantially if you install a vendor tuned linear \nalgebra library first.\n\nPySide has a more extensive set of installation requirements\nand thus must be downloaded from their respective sites.\n\nMatplotlib can use the PySide library if installed after PySide.\n\nCompilers\n---------\n\nThe following compilers are required if you install from the source:\n\n - C compiler\n - C++ compiler\n - Fortran compiler\n\nSee `NumPY Installation `_ \nfor details on how to compile this source.\n\nChecking\n--------\n\nYou can check what dependencies you have installed (and accessible) with the following command:\n\n.. sourcecode:: sh\n\n $ python setup.py checkdep\n running check\n Checking for mpi4py: not found\n ---\n Checking for numpy: found - 1.6.1\n Checking for scipy: found - 0.10.0rc1\n Checking for matplotlib: found - 1.1.0\n Checking for PySide: found\n Checking for matplotlib: found - 1.1.0\n\nIn the above example, only `mpi4py` was not installed.\n\nPackages to Download\n--------------------\n\n - Vendor tuned linear algebra library (Required to run fast!)\n \n - `ACML`_\n - `MKL`_\n - `Atlas`_\n - `Lapack`_ / `Blas`_\n - `Goto Blas`_\n \n - Graphical user interface libraries (Required for GUI)\n \n - `QT4`_\n - `PySide`_\n \n - Single particle reconstruction package\n \n - `SPIDER`_ (Not required for installation but to run pySPIDER scripts)\n \n - Scientific Python packages (Required)\n \n - `Numpy`_\n - `Scipy`_\n - `Matplotlib`_\n - `scikit-learn`_\n - `scikit-image`_\n - `Basemap`_\n - `Pillow`_\n - `mpi4py`_\n \n - Database Packages\n \n - `SQLAlchemy`_\n - `MySQL-Python`_\n \n - Other Packages\n \n - `psutil`_\n - `setuptools`_\n\n.. _`mpi4py`: http://mpi4py.scipy.org/\n.. _`SPIDER`: http://www.wadsworth.org/spider_doc/spider/docs/spi-register.html\n.. _`ACML`: http://developer.amd.com/cpu/Libraries/acml/Pages/default.aspx\n.. _`MKL`: http://software.intel.com/en-us/intel-mkl/\n.. _`Atlas`: http://math-atlas.sourceforge.net/\n.. _`Lapack`: http://www.netlib.org/lapack/\n.. _`Blas`: http://www.netlib.org/blas/\n.. _`Goto Blas`: http://www.tacc.utexas.edu/tacc-projects/gotoblas2/\n.. _`QT4`: http://qt.nokia.com/\n.. _`PySide`: http://qt-project.org/wiki/PySide\n.. _`Numpy`: http://sourceforge.net/projects/numpy/files/\n.. _`Scipy`: http://sourceforge.net/projects/scipy/files/\n.. _`Matplotlib`: http://matplotlib.sourceforge.net/\n.. _`Sphinx`: http://sphinx.pocoo.org/\n.. _`scikit-image`: http://scikit-image.org/\n.. _`scikit-learn`: http://scikit-learn.org/stable/\n.. _`SQLAlchemy`: http://www.sqlalchemy.org/\n.. _`MySQL-Python`: http://mysql-python.sourceforge.net/\n.. _`Basemap`: http://matplotlib.org/basemap/\n.. _`Pillow`: https://pillow.readthedocs.org/\n.. _`psutil`: https://code.google.com/p/psutil/\n.. _`setuptools`: http://pythonhosted.org/setuptools/setuptools.html\n\nInstallation of Prerequisites\n-----------------------------\n\n#. Install Vendor-tuned Linear Algebra Library\n \n - `ACML`_\n - `MKL`_\n - `Atlas`_\n - `Lapack`_ / `Blas`_\n - `Goto Blas`_\n\n .. note ::\n \n For ACML, you need to install CBLAS from the source: http://www.netlib.org/clapack/cblas.tgz\n \n Change the line with BLLIB to the following line in the appropriate Makefile (e.g. 
Makefile.LINUX)\n \n BLLIB = -L/opt/acml4.4.0/gfortran64_mp/lib -lacml_mp -lacml_mv\n CFLAGS = -O3 -DADD\\_ -fPIC\n \n Then invoke make:\n \n $ make\n \n And copy the resulting library to the ACML directory (if you want to follow the later steps closely)\n \n cp lib/LINUX/cblas_LINUX.a /path-to-acml/libcblas.a\n\n#. Install Python 2.6 or 2.7\n\n#. Install setuptools\n\n#. Install Numpy\n\n Create `site.cfg` in the Numpy source root and add the following values depending\n on where your vendor tuned library is install (this example is for ACML):\n \n .. sourcecode :: sh\n \n [blas]\n blas_libs = cblas, acml_mp, acml_mv\n library_dirs = /opt/acml4.4.0/gfortran64_mp/lib\n include_dirs = /opt/acml4.4.0/gfortran64_mp/include\n \n [lapack]\n lapack_libs = cblas, acml_mp, acml_mv\n library_dirs = /opt/acml4.4.0/gfortran64_mp/lib\n include_dirs = /opt/acml4.4.0/gfortran64_mp/include\n \n\n#. Install Scipy\n\n#. Install Matplotlib (Required for plotting functions)\n \n .. note::\n\n If you plan on using the graphical user interface, install Qt4 and PySide (steps 9 and 10) before installing matplotlib\n\n#. Install Qt4 (Required for graphical user interface)\n\n#. Install PySide (Required for graphical user interface)\n\n#. Setup Environment\n \n For Bash:\n \n .. sourcecode :: sh\n \n # Setup path for BLAS Libraries\n \n # With ACML\n export BLAS_LIBS=acml:cblas:acml_mv\n export BLAS_PATH=/opt/acml4.4.0/gfortran64_mp\n export LD_LIBRARY_PATH=$BLAS_PATH/lib:$LD_LIBRARY_PATH\n\n.. Created on Sep 28, 2010\n.. codeauthor:: Robert Langlois \n'''\n\ntry: \n import setuptools\n setuptools;\nexcept: \n import ez_setup #@UnresolvedImport\n ez_setup.use_setuptools()\n import setuptools # @Reimport\n setuptools;\nimport arachnid.distutils.sdist\nimport arachnid.distutils.check_dep\n\nimport os\nimport arachnid.setup\nimport sys\n\n# QT UI support: https://bitbucket.org/jbmohler/qtviews/src/ead44bd27b38/setup.py\n\n# Classifiers http://pypi.python.org/pypi?%3Aaction=list_classifiers\n\ndef build_description(package, extra=None):\n '''Build a description from a Python package.\n \n This function builds a description from the __init__ of a Python package.\n \n :Parameters:\n\n package : str\n Name of the package\n extra : dict\n Keyword arguments to setup the package description\n \n :Returns:\n \n extra : dict\n Keyword arguments to setup the package description\n '''\n from distutils import log # Workaround for conda build with jinga\n import setuptools # @Reimport - Workaround for conda build with jinga\n import sys # @Reimport\n \n if extra is None: extra = {}\n description = [('name', 'project'), 'version', 'author', 'license', 'author_email', 'description', 'url', 'download_url', 'keywords', 'classifiers', 'platforms']#, ('long_description', 'doc')\n for d in description:\n if isinstance(d, tuple): key, field = d\n else: key, field = d, d\n if hasattr(package, \"__\"+field+\"__\"): \n val = getattr(package, \"__\"+field+\"__\")\n if val is not None: extra[key] = val\n \n try:\n __import__(package.__name__+\".setup\").setup\n log.info(\"Root config package %s\"%(package.setup.__name__))\n extra.update(package.setup.configuration(top_path='').todict())\n except: \n if os.path.basename(sys.argv[0]) != 'conda-build':\n log.error(\"No setup file found in root package to build extensions\")\n #raise\n extra['packages'] = setuptools.find_packages(exclude='pyspider')\n return extra\n\ndef rglob(pattern, root=os.curdir):\n '''Collect all files matching supplied filename pattern in and below supplied root 
directory.\n \n :Parameters:\n \n pattern : str\n Wild card pattern for file search\n root : str\n Directory root to start the search\n \n :Returns:\n \n val : list \n List of files\n '''\n \n import fnmatch # Workaround for conda build with jinga\n filenames = []\n for path, _, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n filenames.append( os.path.join(path, filename) )\n return filenames\n\ndef get_readme():\n '''\n '''\n \n try: return open(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'README.rst')).read()\n except: return None\n \ndef setup_package(**extra):\n '''\n '''\n import sys # @Reimport\n \n if ('build_sphinx' in sys.argv or 'sphinx-build' in sys.argv) and __name__ != '__main__': return # Do not invoke if sphinx is called\n if len(sys.argv) > 1 and (sys.argv[1] in ('--help-commands', 'egg_info', '--version', '.', '') or '--help' in sys.argv or sys.argv[1].find('conda') != -1):\n from setuptools import setup\n setup(**extra)\n else:\n from numpy.distutils.core import setup # @Reimport\n setup(**extra)\n\nother_requires= [] if sys.platform == 'darwin' else ['mysql-python']\n\ncmdclass = {'checkdep': arachnid.distutils.check_dep.check_dep, 'sdist':arachnid.distutils.sdist.sdist, 'version':arachnid.distutils.sdist.version}\ntry:\n import sphinx.setup_command\n cmdclass.update(build_docs=sphinx.setup_command.BuildDoc)\nexcept:\n from distutils import log\n log.warn(\"Sphinx not installed!\")\n\nkwargs = build_description(arachnid)\nsetup_package(entry_points = {\n 'console_scripts': arachnid.setup.console_scripts,\n 'gui_scripts': arachnid.setup.gui_scripts\n },\n long_description = get_readme(),\n data_files=[('rst', rglob(\"*.rst\"))],\n install_requires = [\n #'numpy', # conda bug\n 'scipy',\n 'psutil',\n 'scikit-learn',\n 'scikit-image',\n 'mpi4py',\n 'matplotlib',\n 'sqlalchemy', \n #'mysql-python', # changed from MySQL-Python to support Anaconda on Mac OSX\n \"Pillow\",\n 'basemap',\n 'setuptools', #distribute\n #'PySide',\n ] + other_requires,\n #extras_require = dict(pyside=\"PySide\"), #[pyside]\n #setup_requires = [ - causes conda build to fail\n #'Sphinx>=1.0.4',\n #'nose>=1.0',\n #],\n cmdclass = cmdclass,\n test_suite = 'nose.collector',\n **kwargs\n)\n\n\n","repo_name":"ezralanglois/arachnid","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":10194,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"}
+{"seq_id":"33833511056","text":"#!/usr/bin/env python3\n\nimport sys\nimport tomlkit\nimport re\nimport logging\nfrom intervaltree import *\n\nlogging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\nfrom scapy.all import *\n\nTRIM_WINDOW_SIZE=10000\n\nclass AttackDB:\n\n def __init__(self, signatures):\n self.signatures = signatures\n self.sequences = dict()\n \n def verifyChecksum(self, p:Packet):\n prevSum = p.chksum\n del p.chksum\n newCheckSum=p.__class__(raw(p)).chksum\n if prevSum==newCheckSum:\n return True\n return False\n \n def timeoutFlows(self, currTime):\n temp=list()\n for k,v in self.sequences.items():\n if (currTime - v.lastPacketTime) > v.ttl:\n temp.append(k)\n for x in temp:\n self.sequences.pop(x)\n\nclass Session:\n def __init__(self, init_seqNum, wscale, pkt_time=0, pkt_ttl=60):\n self.init_seq = init_seqNum\n self.stream = IntervalTree()\n self.status = False #If True session is active else session is inactive\n self.A = False #ACK Flag check\n self.wScaleFactor = 2**wscale\n self.lastPacketTime = pkt_time\n self.ttl = pkt_ttl\n\n def insertStream(self, start, end, payload):\n overlappedPacks=self.stream.overlap(start,end)\n insertFlag=False\n if len(overlappedPacks)==0:\n self.stream.addi(start,end,payload)\n insertFlag=True\n else:\n overlappedPacks=sorted(overlappedPacks)\n for currInt in overlappedPacks:\n if start < currInt.begin:\n self.stream.addi(start,currInt.begin,payload[start:currInt.begin])\n start=currInt.end\n insertFlag=True\n elif start >= currInt.begin and start < currInt.end:\n if end >= currInt.end:\n start=currInt.end\n if end < currInt.end and end >= currInt.begin:\n if start < currInt.begin:\n end=currInt.begin\n if start < end:\n self.stream.addi(start,end, payload[start:end])\n insertFlag=True\n if insertFlag:\n self.mergeStream()\n \n return insertFlag\n\n def mergeStream(self):\n s=sorted(self.stream)\n startInt=s[0]\n for i in range(1,len(s)):\n currInt=s[i]\n if(startInt.end == currInt.begin):\n self.stream.discard(startInt)\n self.stream.discard(currInt)\n currInt=Interval(startInt.begin, currInt.end, startInt.data+currInt.data)\n self.stream.add(currInt)\n startInt=currInt\n \n def detectAttack(self, attckDB: AttackDB):\n initInter=list(self.stream[self.stream.begin()])[0]\n for i, sig in enumerate(attckDB.signatures):\n if(sig.search(initInter.data)):\n self.status=False\n return i\n return -1 \n\n def trimming(self):\n initInter=list(self.stream[self.stream.begin()])[0]\n if len(initInter.data) > TRIM_WINDOW_SIZE:\n newInter = Interval(initInter.begin, initInter.end, initInter.data[0:(TRIM_WINDOW_SIZE*int(len(initInter.data)/TRIM_WINDOW_SIZE))]) \n self.stream.discard(initInter)\n self.stream.add(newInter)\n\n def clean(self):\n self.status = False\n self.SA = False\n del self.stream\n self.stream = IntervalTree()\n\n def setRecentPacket(self, currTime, ttl):\n self.ttl = ttl\n self.lastPacketTime = currTime\n\n\ndef logDetection(tv_sec, tv_usec, src_ip, src_port, dst_ip, dst_port, attackId):\n d = {\n 'tv_sec': tv_sec,\n 'tv_usec': tv_usec,\n 'source': {\n 'ipv4_address': src_ip,\n 'tcp_port': src_port\n },\n 'target': {\n 'ipv4_address': dst_ip,\n 'tcp_port': dst_port\n },\n 'attack': attackId\n }\n print(d)\n\ndef processPackets(attckDB:AttackDB, p:Packet):\n ip = p[IP]\n if not attckDB.verifyChecksum(ip):\n return\n\n if TCP in ip:\n tcp = p[TCP]\n\n if not attckDB.verifyChecksum(tcp):\n return\n\n flow = (ip.src, ip.sport, ip.dst, ip.dport)\n invflow = (ip.dst, ip.dport, ip.src, ip.sport)\n \n if tcp.flags.S:\n 
Wscale=0\n for i in tcp.options:\n if i[0]==\"WScale\":\n Wscale=i[1]\n break\n session = Session(tcp.seq, Wscale, p.time, ip.ttl)\n attckDB.sequences[flow]=session\n\n\n if tcp.flags.A:\n session = attckDB.sequences.get(invflow)\n if session:\n session.status=True\n \n elif tcp.flags.P and tcp.flags.A:\n payload = raw(tcp.payload)\n if(len(payload) > 0):\n session = attckDB.sequences.get(flow)\n if session and session.status and (len(payload) < (tcp.window * session.wScaleFactor)):\n if session.insertStream((tcp.seq - session.init_seq), ((tcp.seq - session.init_seq)+len(payload)), payload):\n session.setRecentPacket(p.time,ip.ttl)\n attackId=session.detectAttack(attckDB) \n if attackId != -1:\n logDetection(int(p.time), int((p.time - int(p.time))* (10**6)), ip.src, ip.sport, ip.dst, ip.dport, attackId)\n session.clean()\n attckDB.sequences.pop(flow)\n else:\n session.trimming()\n\n elif (tcp.flags.F and tcp.flags.A) or tcp.flags.R:\n session = attckDB.sequences.get(flow)\n if session:\n attckDB.sequences.pop(flow)\n\n elif tcp.flags.A:\n session = attckDB.sequences.get((invflow))\n if session:\n if session.status == False:\n session.status = True\n attckDB.timeoutFlows(p.time)\n\n else:\n payload=raw(ip.payload)\n if(len(payload) > 0):\n for i, sig in enumerate(attckDB.signatures):\n if(sig.search(payload)):\n logDetection(int(p.time), int((p.time - int(p.time))* (10**6)), ip.src, ip.sport, ip.dst, ip.dport, i)\n break\n \n \ndef main():\n attackRuleFile = sys.argv[1]\n packetSniffFile = sys.argv[2]\n\n attackSignatures = [re.compile(x.encode()) for x in tomlkit.load(open(attackRuleFile,'r'))['signatures']]\n attackDB = AttackDB(attackSignatures)\n \n sniff(offline=packetSniffFile, store=False, quiet=True, prn=lambda x: processPackets(attackDB,x))\n\nif __name__ == \"__main__\":\n main()","repo_name":"rjrakshit24/nids","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"43803218793","text":"# counting 4\r\n# from random import randint,seed\r\n# from datetime import datetime\r\n#\r\n# seed(datetime.now())\r\n#\r\n# s=0\r\n# for i in range(10**5):\r\n# s+=randint(1,6)\r\n# print(s/(10**5))\r\n\r\n# counting 5\r\nfrom random import randint,seed\r\nfrom datetime import datetime\r\n\r\nseed(datetime.now())\r\na=[2,2,2,2,3,3]\r\nb=[1,1,1,1,6,6]\r\ndef compute(a,b):\r\n assert len(a) == 6 and len(b) == 6\r\n num1 = 0\r\n num2 = 0\r\n rounds = 10 ** 5\r\n for i in range(rounds):\r\n result1 = a[randint(0, 5)]\r\n result2 = b[randint(0, 5)]\r\n if result1 > result2:\r\n num1 += 1\r\n else:\r\n num2 += 1\r\n return (num1,num2)\r\n\r\n# if num1>num2:\r\n# print(\"{} wins more than {} in {} rounds {} times\".format(a,b,rounds,num1))\r\n# else:\r\n# print(\"{} wins more than {} in {} rounds {} times\".format(b,a,rounds,num2))\r\nprint(compute(a,b))\r\n\r\n\r\n# def count_wins(dice1, dice2):\r\n# assert len(dice1) == 6 and len(dice2) == 6\r\n# dice1_wins, dice2_wins = 0, 0\r\n#\r\n# for i in range(6):\r\n# for j in range(6):\r\n# if dice1[i] > dice2[j]:\r\n# dice1_wins += 1\r\n# elif dice1[i] < dice2[j]:\r\n# dice2_wins += 1\r\n#\r\n# return (dice1_wins, dice2_wins)\r\n#\r\n#\r\n# def compute_strategy(dices):\r\n# assert all(len(dice) == 6 for dice in dices)\r\n#\r\n# strategy = dict()\r\n# strategy[\"choose_first\"] = True\r\n# strategy[\"first_dice\"] = 0\r\n# for i in range(len(dices)):\r\n# c = 0\r\n# for j in range(len(dices)):\r\n# p = tuple()\r\n# p = count_wins(dices[i], dices[j])\r\n# if p[1] > p[0]:\r\n# c = -1\r\n# break\r\n# if c == 0:\r\n# strategy[\"first_dice\"] = i\r\n# return strategy\r\n#\r\n# strategy = dict()\r\n# strategy[\"choose_first\"] = False\r\n# for i in range(len(dices)):\r\n# for j in range(len(dices)):\r\n# p = count_wins(dices[i], dices[j])\r\n# if p[0] > p[1]:\r\n# if i != (j + 1) % len(dices):\r\n# strategy[i] = (j + 1) % len(dices)\r\n# break\r\n# else:\r\n# strategy[i] = (i + 1) % len(dices)\r\n# break\r\n# else:\r\n# continue\r\n#\r\n# return strategy\r\n\r\n","repo_name":"hishamcse/Discrete-Math-Specialization-Coursera-","sub_path":"2.Combinatorics and Probability/Python/Practice.py","file_name":"Practice.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"39825217127","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n def minDistance(self, houses: List[int], k: int) -> int:\n houses.sort()\n memo = collections.defaultdict(int)\n\n def dp(i, j, k):\n if k == 1:\n # if one mailbox left\n # we put it to the middle (i+j)//2\n # and then we need to sum distance between the middle and every house\n # in short, instead of doing houses[mid] -houses[i] and houses[j] - houses[mid]\n # we do houses[j] - houses[i] because it is the same\n total_distance = 0\n while i < j:\n total_distance += houses[j] - houses[i]\n i += 1\n j -= 1\n return total_distance\n if (i, j, k) not in memo:\n memo[(i, j, k)] = float('inf')\n for m in range(i + k - 2, j): # i + k - 2 somehow reduces runtime\n #for m in range(i, j):\n memo[(i, j, k)] = min(dp(i, m, k - 1) + dp(m + 1, j, 1), memo[(i, j, k)])\n return memo[(i, j, k)]\n\n return dp(0, len(houses) - 1, k)\n\nif __name__ == '__main__':\n s = Solution()\n s.minDistance([1,4,8,10,20], 3)","repo_name":"arsamigullin/problem_solving_python","sub_path":"leet/dp/1478_Allocate_Mailboxes.py","file_name":"1478_Allocate_Mailboxes.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13022019459","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom contants import DEFAULT_DRIVER_WAIT, LINKEDIN_INVITATION_MANAGER_URL, SCROLL_PAUSE_TIME, \\\n XPATH_PATTERN_OF_LINKEDIN_CONNECTION_ACCEPT_BUTTON, EXPLICIT_WAIT_FOR_SIGN_IN, REQUEST_ACCEPT_WAIT\n\n\ndef _prepare_driver(chrome_driver_path: str):\n # create new chrom driver\n chrome_driver = webdriver.Chrome(chrome_driver_path)\n\n # selenium will wait for 30 secs before throwing an exception\n chrome_driver.implicitly_wait(DEFAULT_DRIVER_WAIT)\n\n chrome_driver.maximize_window()\n\n return chrome_driver\n\n\ndef main(chrome_driver_path: str):\n driver = _prepare_driver(chrome_driver_path)\n\n try:\n driver.get(LINKEDIN_INVITATION_MANAGER_URL)\n\n # wait for sign in to happen -> time in seconds\n wait = WebDriverWait(driver, EXPLICIT_WAIT_FOR_SIGN_IN)\n\n wait.until(\n EC.presence_of_element_located((By.XPATH, XPATH_PATTERN_OF_LINKEDIN_CONNECTION_ACCEPT_BUTTON)))\n\n # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # wait for the page to load\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Wait till element becomes clickable\n WebDriverWait(driver, SCROLL_PAUSE_TIME).until(\n EC.element_to_be_clickable((By.XPATH,\n XPATH_PATTERN_OF_LINKEDIN_CONNECTION_ACCEPT_BUTTON)))\n\n # find all accept buttons\n all_accept_buttons = driver.find_elements(By.XPATH, XPATH_PATTERN_OF_LINKEDIN_CONNECTION_ACCEPT_BUTTON)\n\n for button in all_accept_buttons:\n # click on button\n button.send_keys(Keys.RETURN)\n # Wait for some time before accepting another request\n time.sleep(REQUEST_ACCEPT_WAIT)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n # check if no other request is pending\n if new_height == last_height:\n break\n\n last_height = new_height\n\n finally:\n driver.quit()\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n\n driver_path = \"\"\n\n if driver_path == \"\":\n raise ValueError(\"Please enter the correct path of your webdriver\")\n\n main(driver_path)\n\n","repo_name":"aniketsingh0104/LinkedInAutoConnect","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
+{"seq_id":"37725322420","text":"\"\"\"\n-*- coding:utf-8 -*-\n@Time :2019/11/26 下午1:25\n@Author :wts\n@File :dataset.py\n@Version:1.0\n\"\"\"\nimport torch.utils.data as data\nimport torch\nimport numpy as np\nimport os\nfrom os import listdir\nfrom os.path import join\nfrom PIL import Image, ImageOps\nimport random\nfrom skimage import img_as_float\nfrom random import randrange\nimport matplotlib.pyplot as plt\nimport os.path\nimport cv2\nfrom torch.utils.data import Dataset\nimport random\n\nclass MyDataset(Dataset):\n '''an abstract class representing'''\n def __init__(self, dataset_type, transform=None, update_dataset=False, frames_n = 3):\n '''\n :param dataset_type: ['train','test']\n :param transform:\n :param update_dataset:\n '''\n dataset_path = '/home/wts/practice/dlpyprac/testmodel/dataset'\n self.frames_n = frames_n\n self.frames = frames_n * 2 + 1\n\n if update_dataset:\n print(\"update dataset\")\n dbtype_list = os.listdir(dataset_path)\n #dbtype_list.remove('datalist.txt')\n for dbtype in dbtype_list:\n each_path = os.path.join(dataset_path,dbtype)\n each_list = os.listdir(each_path)\n f = open(each_path + \"/datalist.txt\",\"w\")\n each_list.remove('datalist.txt')\n for each_db in each_list:\n each_sum_name = os.path.join(each_path, each_db)\n IG_name = os.listdir(each_sum_name)\n img_m_path0 = os.path.join(each_sum_name,IG_name[0])\n img_m_path1 = os.path.join(each_sum_name, IG_name[1])\n img_name = os.listdir(img_m_path0)\n img_name = sorted(img_name)\n for img in img_name:\n temp = os.path.join(img_m_path0,img)\n f.write(temp)\n f.write('*')\n temp = os.path.join(img_m_path1, img)\n f.write(temp)\n f.write('\\n')\n f.close()\n\n self.transform = transform\n self.sample_list = list()\n self.dataset_type = dataset_type\n f = open(dataset_path + self.dataset_type + '/datalist.txt')\n lines = f.readlines()\n for line in lines:\n self.sample_list.append(line.strip())\n f.close()\n\n\n def __getitem__(self, index):\n c_ind = index % 32\n if(c_ind < self.frames_n):\n index += self.frames_n\n elif(c_ind > 31 - self.frames_n):\n index -= self.frames_n\n\n sht = self.sample_list[index]\n imgs = Image.open(sht.split('*')[-1]).convert('RGB')\n imgs = np.array(imgs)\n img_frames = torch.empty((self.frames,3,64,64))\n label_frames = torch.empty((self.frames,3,256,256))\n img_bi_frames = torch.empty((self.frames, 3, 256, 256))\n hi = random.randint(0,(imgs.shape[1]-64))\n wi = random.randint(0,(imgs.shape[0]-64))\n hl = 4 * hi\n wl = 4 * wi\n t = 0\n for i in range(index-self.frames_n,index+self.frames_n+1):\n item = self.sample_list[i]\n img = Image.open(item.split('*')[-1]).convert('RGB')\n label = Image.open(item.split('*')[0]).convert('RGB')\n img = img.crop((hi,wi,hi+64,wi+64))\n size = img.size\n new_size = tuple([int (x * 4) for x in size])\n img_bi = img.resize(new_size, resample=Image.BICUBIC)\n label = label.crop((hl,wl,hl+256,wl+256))\n img = np.array(img)\n label = np.array(label)\n img_bi = np.array(img_bi)\n img = np.atleast_3d(img).transpose(2,0,1).astype(np.float32)\n label = np.atleast_3d(label).transpose(2,0,1).astype(np.float32)\n img_bi = np.atleast_3d(img_bi).transpose(2,0,1).astype(np.float32)\n img = torch.from_numpy(img).float()\n label = torch.from_numpy(label).float()\n img_bi = torch.from_numpy(img_bi).float()\n img_frames[t] = img\n label_frames[t]= label\n img_bi_frames[t] = img_bi\n t += 1\n if self.transform is not None:\n img = self.transform(img)\n label = self.transpose(label)\n\n img_frames = img_frames.transpose(0,1)\n label_frames = 
label_frames.transpose(0,1)\n img_bi_frames = img_bi_frames.transpose(0,1)\n\n return img_frames, label_frames, img_bi_frames\n #return img, label\n\n\n def __len__(self):\n return len(self.sample_list)\n\n def make_txt_file(self, path):\n return path\n\nif __name__ == '__main__':\n import torch.nn as nn\n ds = MyDataset(dataset_type = '/train')\n img, gt, bic = ds.__getitem__(3)\n print(img.shape)\n img1 = img.permute(1,2,3,0)\n gt1 = gt.permute(1,2,3,0)\n bic1 = bic.permute(1,2,3,0)\n img2 = img1.numpy()\n gt2 = gt1.numpy()\n bic2 = bic1.numpy()\n plt.subplot(271)\n plt.imshow(img2[0].astype(int))\n plt.subplot(272)\n plt.imshow(gt2[0].astype(int))\n plt.subplot(273)\n plt.imshow(img2[1].astype(int))\n plt.subplot(274)\n plt.imshow(gt2[1].astype(int))\n plt.subplot(275)\n plt.imshow(img2[2].astype(int))\n plt.subplot(276)\n plt.imshow(gt2[2].astype(int))\n plt.subplot(277)\n plt.imshow(img2[3].astype(int))\n plt.subplot(2,7,8)\n plt.imshow(gt2[3].astype(int))\n plt.subplot(2,7,9)\n plt.imshow(img2[4].astype(int))\n plt.subplot(2,7,10)\n plt.imshow(gt2[4].astype(int))\n plt.subplot(2,7,11)\n plt.imshow(img2[5].astype(int))\n plt.subplot(2,7,12)\n plt.imshow(gt2[5].astype(int))\n plt.subplot(2, 7, 13)\n plt.imshow(img2[6].astype(int))\n plt.subplot(2, 7, 14)\n plt.imshow(gt2[6].astype(int))\n\n plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"hyperionfalling/final_assignment","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"14382777562","text":"import torch\nimport torch.nn as nn\nfrom torchnet.meter import AverageValueMeter\n\nimport time\nimport os\n\nfrom utils import array_tool as at\n\nfrom collections import namedtuple\n\nLossTuple = namedtuple('LossTuple',\n ['rpn_loc_loss',\n 'rpn_cls_loss',\n 'roi_loc_loss',\n 'roi_cls_loss',\n 'total_loss'\n ])\n\nclass AutoEncoder(nn.Module):\n def __init__(self):\n super(AutoEncoder, self).__init__()\n \n self.encoder = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),\n nn.ReLU()\n )\n \n self.decoder = nn.Sequential(\n nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(),\n nn.ConvTranspose2d(16, 3, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.Tanh()\n )\n \n self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n \n def load(self, path):\n state_dict = torch.load(path)\n self.load_state_dict(state_dict)\n return self\n\n def get_optimizer(self, autoencoder_params, opt):\n return torch.optim.SGD(autoencoder_params, lr=opt.lr_atk, momentum=0.9)\n \n def update_meters(self, losses):\n loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}\n for key, meter in self.meters.items():\n meter.add(loss_d[key])\n\n def reset_meters(self):\n for key, meter in self.meters.items():\n meter.reset()\n\n def get_meter_data(self):\n return {k: v.value()[0] for k, v in self.meters.items()}\n\n def save(self, **kwargs):\n timestr = time.strftime('%m%d%H%M')\n save_path = 'checkpoints/autoencoder_%s' % timestr\n for k_, v_ in kwargs.items():\n save_path += '_%s' % v_\n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n torch.save(self.state_dict(), save_path)","repo_name":"jeongjin-shin/class-specific-backdoor","sub_path":"model/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"74264123981","text":"# Write a function calculation() such that it can accept two variables\r\n# and calculate their addition and subtraction. And also it must return\r\n# both addition and subtraction in a single return call\r\n\r\ndef calculation(a, b):\r\n sum = a + b\r\n diff = a - b\r\n return sum, diff\r\n\r\n\r\nn1 = int(input(\"Enter first number \"))\r\nn2 = int(input(\"Enter second number \"))\r\n\r\nadd, sub = calculation(n1, n2)\r\nprint(\"Addition of two numbers are : \", add)\r\nprint(\"Subtraction of two numbers are : \", sub)\r\n\r\n\"\"\"\r\nOUTPUT:\r\nEnter first number 50\r\nEnter second number 20\r\nAddition of two numbers are : 70\r\nSubtraction of two numbers are : 30\r\n\r\n\"\"\"","repo_name":"Kartikey0205/Python_Labs_Program","sub_path":"p036.py","file_name":"p036.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"35137792594","text":"# utils.py\nimport torch\nimport torchvision\nimport numpy as np\nimport torch.nn as nn\n\ndef imrotate(img, angle):\n \"\"\"\n :param img: N * C * H * W tensor\n :param angle: in degree\n :return: rotated img\n \"\"\"\n return torchvision.transforms.functional.rotate(img,\n angle, interpolation=torchvision.transforms.InterpolationMode.BILINEAR, fill=0)\n\n\ndef fft2(img):\n \"\"\"\n :param img: H * W tensor\n :return: 2D FFT of the img\n \"\"\"\n return torch.fft.fftshift(torch.fft.fft2(torch.fft.ifftshift(img)))\n\n\ndef ifft2(img):\n \"\"\"\n :param img: H * W tensor\n :return: 2D iFFT of the img\n \"\"\"\n return torch.fft.fftshift(torch.fft.ifft2(torch.fft.ifftshift(img)))\n\n\ndef power2(x):\n \"\"\"\n :param x: floating point\n :return:\n \"\"\"\n return (2 ** (np.ceil(np.log2(x)))).astype(int)\n\n\ndef _padup(nx, px):\n \"\"\"\n :param nx: floating point\n :param px: floating point\n :return:\n \"\"\"\n return np.ceil((power2(nx + px - 1) - nx) / 2).astype(int)\n\n\ndef _paddown(nx, px):\n \"\"\"\n :param nx: floating point\n :param px: floating point\n :return:\n \"\"\"\n return np.floor((power2(nx + px - 1) - nx) / 2).astype(int)\n\n\ndef _padleft(nz, pz):\n \"\"\"\n :param nz: floating point\n :param pz: floating point\n :return:\n \"\"\"\n return np.ceil((power2(nz + pz - 1) - nz) / 2).astype(int)\n\n\ndef _padright(nz, pz):\n \"\"\"\n :param nz: floating point\n :param pz: floating point\n :return:\n \"\"\"\n return np.floor((power2(nz + pz - 1) - nz) / 2).astype(int)\n\n\ndef pad2sizezero(img, padx, padz):\n \"\"\"\n :param img: H * W tensor\n :param padx: floating point\n :param padz: floating point\n :return:\n \"\"\"\n px, pz = img.shape\n pad_img = torch.zeros(padx, padz).to(img.device).to(img.dtype)\n padx_dims = np.ceil((padx - px) / 2).astype(int)\n padz_dims = np.ceil((padz - pz) / 2).astype(int)\n pad_img[padx_dims:padx_dims + px, padz_dims:padz_dims + pz] = img\n return pad_img\n\n\ndef fft_conv(img, ker):\n \"\"\"\n :param img: nx * nz\n :param ker: px * pz\n :return: nx * nz\n \"\"\"\n nx, nz = img.shape[0], img.shape[1]\n px, pz = ker.shape[0], ker.shape[1]\n padup = _padup(nx, px)\n paddown = _paddown(nx, px)\n padleft = _padleft(nz, pz)\n padright = _padright(nz, pz)\n m = nn.ReplicationPad2d((padleft, padright, padup, paddown))\n pad_img = m(img.unsqueeze(0).unsqueeze(0)).squeeze()\n\n padx, padz = pad_img.shape[0], pad_img.shape[1]\n\n pad_ker = pad2sizezero(ker, padx, padz)\n pad_img_fft = fft2(pad_img)\n pad_ker_fft = fft2(pad_ker)\n freq = torch.mul(pad_img_fft, pad_ker_fft)\n xout = torch.real(ifft2(freq))\n return xout[padup:padup + nx, padleft:padleft + nz]\n\n\ndef fft_conv_adj(img, ker):\n \"\"\"\n :param img: nx * nz\n :param ker: px * pz\n :return: nx * nz\n \"\"\"\n nx, nz = img.shape[0], img.shape[1]\n px, pz = ker.shape[0], ker.shape[1]\n padup = _padup(nx, px)\n paddown = _paddown(nx, px)\n padleft = _padleft(nz, pz)\n padright = _padright(nz, pz)\n m = nn.ZeroPad2d((padleft, padright, padup, paddown))\n pad_img = m(img.unsqueeze(0).unsqueeze(0)).squeeze()\n\n padx, padz = pad_img.shape[0], pad_img.shape[1]\n\n pad_ker = pad2sizezero(ker, padx, padz)\n pad_img_fft = fft2(pad_img)\n pad_ker_fft = fft2(pad_ker)\n freq = torch.mul(pad_img_fft, pad_ker_fft)\n xout = torch.real(ifft2(freq))\n xout[padup, :] += torch.sum(xout[0:padup, :], dim=0)\n xout[nx + padup - 1, :] += torch.sum(xout[nx + padup:, :], dim=0)\n xout[:, padleft] += torch.sum(xout[:, 0:padleft], dim=1)\n xout[:, nz + padleft - 1] += torch.sum(xout[:, 
nz + padleft:], dim=1)\n return xout[padup:padup + nx, padleft:padleft + nz]\n","repo_name":"ZongyuLi-umich/SPECTrecon-pytorch","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
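A shape-level smoke test for fft_conv() in the utils record above (sizes chosen arbitrarily); pasted into the same module it should run as-is:

import torch

img = torch.rand(32, 48)   # nx * nz image
ker = torch.rand(5, 7)     # px * pz kernel
out = fft_conv(img, ker)
print(out.shape)           # torch.Size([32, 48]): 'same'-size output after the final crop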
+{"seq_id":"36905214057","text":"from src.core.business_model import BusinessModel, ModelType\nfrom src.data_model.cart import CartItem\n\n\nclass HasCartItems(BusinessModel):\n def __init__(self, customer_id: int):\n super().__init__(\n model=CartItem(),\n model_type=ModelType.read,\n )\n\n self.customer_id = customer_id\n\n def run(self, data: dict = None, conditions: dict = None) -> bool:\n items = self.model.get(\n fields=[\n \"count(cai_id)\"\n ],\n condition={\n \"cu_id\": {\n \"$value\": str(self.customer_id)\n }\n }\n ).show(True)\n\n return items.result['count'] > 0\n","repo_name":"Book-Store-Grad/bookstore-api","sub_path":"src/business_model/cart/has_cart_items.py","file_name":"has_cart_items.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"8893560729","text":"import time, rfc822\nfrom zope import interface, component\nfrom zope.component import getUtility\nfrom zope.traversing.browser import absoluteURL\n\nfrom zojax.content.feeds.rss2 import RSS2Feed\nfrom zojax.catalog.interfaces import ICatalog\nfrom zojax.ownership.interfaces import IOwnership\nfrom zojax.content.space.interfaces import ISpace\nfrom zojax.principal.profile.interfaces import IPersonalProfile\n\nfrom interfaces import _, INewsRssFeed, INewsWorkspace\n\n\nclass NewsRssFeed(RSS2Feed):\n component.adapts(ISpace)\n interface.implementsOnly(INewsRssFeed)\n\n name = u'news'\n title = _(u'News')\n description = _(u'Information about latest site news.')\n\n def items(self):\n request = self.request\n catalog = getUtility(ICatalog)\n\n results = catalog.searchResults(\n searchContext=(self.context,),\n sort_on='effective', sort_order='reverse',\n draftContent = {'any_of': (False,)},\n typeType = {'any_of': ('News Item',)})[:15]\n\n for item in results:\n url = absoluteURL(item, request)\n\n info = {\n 'title': item.title,\n 'description': item.description,\n 'guid': '%s/'%url,\n 'pubDate': rfc822.formatdate(time.mktime(item.date.timetuple())),\n 'isPermaLink': True}\n\n principal = IOwnership(item).owner\n if principal is not None:\n profile = IPersonalProfile(principal)\n info['author'] = u'%s (%s)'%(profile.email, profile.title)\n\n yield info\n\n\nclass NewsWorkspaceRssFeed(RSS2Feed):\n component.adapts(INewsWorkspace)\n interface.implementsOnly(INewsRssFeed)\n\n name = u'news'\n title = _(u'News')\n description = _(u'Information about latest site news.')\n\n def items(self):\n request = self.request\n\n for item in self.context.news()[:15]:\n url = absoluteURL(item, request)\n\n info = {\n 'title': item.title,\n 'description': item.description,\n 'guid': '%s/'%url,\n 'pubDate': rfc822.formatdate(time.mktime(item.date.timetuple())),\n 'isPermaLink': True}\n\n principal = IOwnership(item).owner\n if principal is not None:\n profile = IPersonalProfile(principal)\n info['author'] = u'%s (%s)'%(profile.email, profile.title)\n\n yield info\n","repo_name":"Zojax/zojax.contenttype.newsitem","sub_path":"src/zojax/contenttype/newsitem/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"70299109583","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n x=[]\n while l1 and l2:\n if l1.val==l2.val:\n x.append(l1.val)\n x.append(l2.val)\n l1=l1.next\n l2=l2.next\n elif l1.valThen as a reply to it send /broadcast\",\n parse_mode = 'html'\n )\n m = await msg.reply_text(\n \"Broadcasting..\",\n parse_mode = 'html'\n )\n SUCE = 0\n FAIL = 0\n for userid in [document['userid'] for document in collection_login.find()]:\n try:\n await MSG.copy(userid)\n SUCE += 1\n except Exception as e:\n FAIL += 1\n await msg.reply_text(\n f\"Successfully Broadcasted to {SUCE} Chats\\nFailed - {FAIL} Chats!\"\n )\n await m.delete()\n return\n\n \n\n","repo_name":"AJBotVerse/MegaUploaderbot","sub_path":"plugins/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"47"}
+{"seq_id":"73333083342","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def closestValue(self, root: Optional[TreeNode], target: float) -> int:\n closest = diff = inf\n curr = root\n while curr:\n curr_diff = abs(curr.val - target)\n if curr_diff == diff:\n closest = min(closest, curr.val)\n elif curr_diff < diff:\n closest = curr.val\n \n diff = min(diff, curr_diff)\n \n if target < curr.val:\n curr = curr.left\n else:\n curr = curr.right\n return closest","repo_name":"adnanyaqoobvirk/leetcode","sub_path":"0270-closest-binary-search-tree-value/0270-closest-binary-search-tree-value.py","file_name":"0270-closest-binary-search-tree-value.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26746663017","text":"import jinja2\nimport os\nfrom jinja2 import Template\nlatex_jinja_env = jinja2.Environment(\n\tblock_start_string = '\\BLOCK{',\n\tblock_end_string = '}',\n\tvariable_start_string = '\\VAR{',\n\tvariable_end_string = '}',\n\tcomment_start_string = '\\#{',\n\tcomment_end_string = '}',\n\tline_statement_prefix = '%%',\n\tline_comment_prefix = '%#',\n\ttrim_blocks = True,\n\tautoescape = False,\n\tloader = jinja2.FileSystemLoader(os.path.abspath('.'))\n)\ntemplate = latex_jinja_env.get_template('better_test.tex')\nargs = [\n {\n \"points\" : 1,\n \"problemStatement\": \"What is the meaning of life\",\n \"answers\" : [\n \"health\",\n \"money\",\n \"love\"\n ]\n },\n \n {\n \"points\": 12,\n \"problemStatement\" : \"What is the best name for a dog?\",\n \"answers\" : [ \n \"rocko\", \n \"vinny\", \n \"fido\",\n \"bernadette\",\n \"guiseppe\"\n ]\n }\n\n]\n\nprint(template.render(arguments=args))\n#print(template.render(section1=\"Long Form\", section2=\"Short Form\"))\n","repo_name":"melvyniandrag/MathTestMakerLibrary","sub_path":"pdfmaker/jinja/better_test.py","file_name":"better_test.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
+{"seq_id":"27894327369","text":"divisible_by_7=[]\nfor numb in range(100,200):\n if(numb%7==0):\n divisible_by_7.append(numb)\nprint(divisible_by_7)\n\n\nx = ['a','b','a','e','d','b','c','e','f','g','h']\nx2=list(set(x))\nprint(x2)\n\nsmallest=[3,6,8,2,4,1,5,7]\nsmall=min([3,6,8,2,4,1,5,7])\nprint(small)\n\nx = [[1,2],[3,4],[5,6]]\nmo=x[0]+x[1]+x[2]\nprint(mo)\n\n\nnum = int (input (\"Enter the number:\"))\nif num%3 == 0:\n print (\"The number is divisible.\")\nelse:\n print (\"The number is not divisible.\")\n \n x = [100,110,120,130,140,150]\nm=[m*5 for m in x ]\nprint(m) ","repo_name":"ampurira-elizabeth/python-revision","sub_path":"python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10721983385","text":"\"\"\"tommy_cmdb URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom .views import *\n\nfrom django.views.static import *\nurlpatterns = [\n url(r'jifang_add', jifang_add,name=\"jifang_add\"),#机房\n url(r'jifang_del', jifang_del,name=\"jifang_del\"),#机房删除\n url(r'host_add', host_add,name=\"host_add\"),#主机添加\n url(r'host_list', host_list,name=\"host_list\"),#主机列表\n url(r'host_edit/(\\d+)', host_edit,name=\"host_edit\"),#主机添加\n url(r'host_del/', host_del,name=\"host_del\"),#主机删除\n url(r'command_ip', command_ip, name=\"command_ip\"), # 主机列表\n url(r'host_shell/',host_shell,name=\"host_shell\"), #执行shell命令\n url(r'history/',history,name=\"history\"), #执行shell命令\n url(r'host_info/(\\d+)',host_info,name=\"host_info\"), #主机当前信息\n url(r'text',text,name=\"text\")\n\n\n\n\n]\n","repo_name":"kspine/tommy-cmdb","sub_path":"assets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26934621386","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nwith open(r'C:\\Users\\USER\\Downloads\\myfile.txt') as word_file:\n valid_words = set(word_file.read().split())\n\n#wordle_word_assists\n\ndef wordle_word_assists(startswith='',endswith='',contain_in_position='',doesnotinclude='',contain_not_in_pos='',repetitive_letter='',length_of_word=5):\n \n final_word_list = [x for x in valid_words if len(x) == length_of_word]\n \n #doesnotinclude\n \n #check if any letter present in other filter.\n letters_with_number = contain_not_in_pos + contain_in_position + repetitive_letter\n letters_without_number = ''.join(filter(lambda x : not x.isdigit(),letters_with_number))\n total_input_letters = set(startswith+endswith+letters_without_number)\n \n for letter in total_input_letters:\n if letter in doesnotinclude:\n doesnotinclude = doesnotinclude.replace(letter,'')\n doesnotinclude_list = [x for x in doesnotinclude]\n \n if doesnotinclude_list:\n for letter in doesnotinclude_list:\n for word in final_word_list[:]:\n if letter in word:\n final_word_list.remove(word)\n \n #contains_but_not_in_nth_position\n contain_not_in_pos_list = [(contain_not_in_pos[x],contain_not_in_pos[x+1]) for x in range(0,len(contain_not_in_pos),2)]\n if contain_not_in_pos_list:\n include_list = []\n for tuples in contain_not_in_pos_list:\n include_list.append(tuples[1])\n for word in final_word_list[:]:\n if word[int(tuples[0])-1]==tuples[1]:\n final_word_list.remove(word)\n \n \n for letter in set(include_list):\n for word in final_word_list[:]:\n if letter not in word:\n final_word_list.remove(word)\n \n #startswith\n if startswith:\n for word in final_word_list[:]:\n if word[0]!=startswith:\n final_word_list.remove(word)\n \n #anyposition\n contain_in_position_list = [(contain_in_position[x],contain_in_position[x+1]) for x in range(0,len(contain_in_position),2)]\n if contain_in_position_list:\n for tuples in contain_in_position_list:\n for word in final_word_list[:]:\n if word[int(tuples[0])-1]!=tuples[1]:\n final_word_list.remove(word)\n \n #endswith\n if endswith:\n for word in final_word_list[:]:\n if word[-1]!=endswith:\n final_word_list.remove(word)\n \n repetitive_list = [(repetitive_letter[x],repetitive_letter[x+1]) for x in range(0,len(repetitive_letter),2)]\n if repetitive_list:\n for tuples in repetitive_list:\n for word in final_word_list[:]:\n if word.count(tuples[1])0:\n self.cap=cv2.VideoCapture(self.link)\n print('asdf')\n else:\n self.cap=None\n def run(self):\n while self.runFlag:\n self.videoPic()\n #self.testPic()\n def videoPic(self):\n det_results=[]\n ret,pic=self.cap.read()\n print('ret',ret)\n if ret==True:\n #init param\n det_result = {\n 'image':pic,\n 'image_name': self.link,\n 'bbox': [int(pic.shape[1]/10),int(pic.shape[0]/10),int(pic.shape[1]/10*8),int(pic.shape[0]/10*8)], # bbox format is 'xywh'\n 'camera_param': None,\n 'keypoints_3d_gt': None\n }\n #\n det_results.append([det_result])\n return det_results\n\n def testPic(self):\n time.sleep(0.05)\n\n det_results_list = self.addFrame()\n self.dataDeque.append(det_results_list)\n def addFrame(self):\n\n\n det_results=[]\n\n for pic in self.picList:\n\n #init param\n det_result = {\n 'image':pic,\n 'image_name': 'imagePath',\n 'bbox': [int(pic.shape[1]/10),int(pic.shape[0]/10),int(pic.shape[1]/10*8),int(pic.shape[0]/10*8)], # bbox format is 'xywh'\n 'camera_param': None,\n 'keypoints_3d_gt': None\n }\n #\n det_results.append([det_result])\n return det_results\nif __name__=='__main__':\n from collections import deque\n 
dataDeque=deque()\n mf = makeFrame(dataDeque, link='0')\n mf.start()\n\n","repo_name":"kevinfu1717/PlayPianoOnTableByHandPose","sub_path":"produceModule.py","file_name":"produceModule.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"}
+{"seq_id":"74948828302","text":"import csv\nimport json\nimport os\n#path='/home/zy3/Documents/testcsv/newcsv'\npath='/home/zy3/Project_SC/newcsv'\nos.chdir(path)\n#os.mkdir('testjson')\n\ndef csv_to_json(path,finalpath):\n # path='/home/zy3/Documents/testcsv/newcsv'\n\n folder = os.walk(path)\n files = list(folder)[0][2]\n os.chdir(path)\n for file in files:\n filepath = path + '/' + file\n csvfile = open(filepath, 'r')\n\n\n fieldnames = ('image_id', 'label_id')\n\n reader = csv.DictReader(csvfile, fieldnames)\n\n out = json.dumps([row for row in reader])\n os.chdir(finalpath) #should be changed\n jsonfile = open('%s.json' % file.split('.')[0], 'w')\n out='['+out[51:]\n jsonfile.write(out)\n\nfinalpath='/home/zy3/Project_SC/testjson'\ncsv_to_json(path,finalpath)","repo_name":"yzy1421/ppp","sub_path":"code/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5062040468","text":"import typing as t\nfrom pathlib import Path\n\nfrom textual import on\nfrom textual.app import ComposeResult\nfrom textual.containers import Container, Horizontal\nfrom textual.css.query import NoMatches\nfrom textual.reactive import var\nfrom textual.screen import ModalScreen\nfrom textual.widgets import Label, Tree\n\nfrom ...types import InstruktDomNodeMixin\nfrom ..widgets.actionbar import ActionBar, ActionBinding\nfrom ..widgets.dirtree import DirectoryTree, not_hidden\n\n\nclass PathBrowserModal(ModalScreen[Path | None], InstruktDomNodeMixin):\n \"\"\"A modal screen to browse the filesystem.\"\"\"\n\n\n #NOTE: custom tree bindings \n TREE_BINDINGS = [\n ActionBinding(\"space\", \"select\", \"select\"),\n ActionBinding(\"enter\", \"toggle_node\", \"toggle node\"),\n ActionBinding(\"-\", \"dir_up\", \"dir_up\"),\n ]\n\n BINDINGS = [\n *TREE_BINDINGS,\n ActionBinding(\"escape\", \"cancel\", \"dismiss\"),\n ]\n\n\n AUTO_FOCUS = \"DirectoryTree\"\n\n path: var[Path] = var(Path.cwd())\n\n def __init__(self, path: Path | None = None, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.dirtree = DirectoryTree(path or Path.home(), not_hidden)\n self.dirtree.border_title = \"Select any file or directory to index:\"\n\n def on_mount(self) -> None:\n for binding in self.TREE_BINDINGS:\n self.dirtree._bindings.bind(binding.key,\n binding.action,\n binding.description)\n\n\n def compose(self) -> ComposeResult:\n with Container():\n yield self.dirtree\n with Horizontal(id=\"selected-path\", classes=\"container\"):\n yield Label(\"selected path:\", classes=\"path--label\")\n yield Label(f\"\\[ {self.path.resolve()} ]\",\n classes=\"path--selected\",\n id=\"selected-path\")\n yield ActionBar()\n\n def action_dir_up(self) -> None:\n if self.dirtree.path != Path.home():\n self.dirtree.path = self.dirtree.path.parent\n\n def action_toggle_node(self) -> None:\n self.dirtree.action_toggle_node()\n\n def action_select(self) -> None:\n self.dismiss(self.path)\n\n def action_cancel(self) -> None:\n self.dismiss(None)\n\n def watch_path(self, path: Path) -> None:\n try:\n lbl = t.cast(Label, self.query_one(\"Label#selected-path\"))\n lbl.update(f\"\\[ {path} ]\")\n except NoMatches:\n pass\n\n\n @on(Tree.NodeHighlighted)\n def _on_tree_node_highlighted(self, event: Tree.NodeHighlighted):\n dir_entry = event.node.data\n\n if dir_entry is None:\n return\n\n _path = dir_entry.path\n\n if not _path.is_absolute():\n _path = _path.resolve()\n # if path under home shorten it with ~\n if _path.home() in _path.parents:\n _path = \"~\" / _path.relative_to(_path.home())\n\n self.path = _path\n\n\n\n","repo_name":"blob42/Instrukt","sub_path":"instrukt/tuilib/modals/path_browser.py","file_name":"path_browser.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"47"}
+{"seq_id":"21010192290","text":"import sys\nfrom itertools import combinations\ninput = sys.stdin.readline\nn = int(input())\ngraph = []\nfor i in range(n):\n\tgraph.append(list(sys.stdin.readline().rstrip()))\narrList = [0] * 27\nfor i in graph:\n\tfor j in range(len(i)):\n\t\t# print(ord(i[j])-65, len(i)-j-1)\n\t\tarrList[ord(i[j])-65] += 10 ** (len(i)-j-1)\narrList.sort(reverse=True)\nanswer = 0\nfor i in range(9, -1, -1):\n\tanswer += arrList[9-i] * i\nprint(answer)","repo_name":"LeeJin0527/algorithm","sub_path":"BaekJoon/부르트포스/1339 단어수학.py","file_name":"1339 단어수학.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"4486380401","text":"from enum import Enum\nfrom pddl.atomic_formula import AtomicFormula, TypedParameter\n\nclass ExprBase:\n \"\"\"\n A class used to represent a numerical expression (base types)\n \"\"\"\n\n class ExprType(Enum):\n CONSTANT = \"constant\"\n FUNCTION = \"function\"\n BINARY_OPERATOR = \"operator\"\n UMINUS = \"uminus\"\n SPECIAL = \"special\"\n\n class BinaryOperator(Enum):\n ADD = \"+\"\n SUB = \"-\"\n MUL = \"*\"\n DIV = \"/\"\n\n class SpecialType(Enum):\n HASHT = \"#t\"\n TOTAL_TIME = \"total-time\"\n DURATION = \"?duration\"\n\n def __init__(self,\n expr_type : ExprType,\n constant : float = 0.0,\n function : AtomicFormula = None,\n op : BinaryOperator = None,\n special_type : SpecialType = None) -> None:\n self.expr_type = expr_type\n self.constant = constant\n self.function = function\n self.op = op\n self.special_type = special_type\n \n def __repr__(self) -> str:\n if self.expr_type == ExprBase.ExprType.CONSTANT:\n return str(self.constant)\n elif self.expr_type == ExprBase.ExprType.FUNCTION:\n return self.function.print_pddl()\n elif self.expr_type == ExprBase.ExprType.BINARY_OPERATOR:\n return self.op.value\n elif self.expr_type == ExprBase.ExprType.UMINUS:\n return \"-\"\n elif self.expr_type == ExprBase.ExprType.SPECIAL:\n return self.special_type.value\n\n def copy(self) -> 'ExprBase':\n \"\"\"\n Returns a deep copy of the expression.\n \"\"\"\n if self.expr_type == ExprBase.ExprType.CONSTANT:\n return ExprBase(ExprBase.ExprType.CONSTANT, constant=self.constant)\n elif self.expr_type == ExprBase.ExprType.FUNCTION:\n return ExprBase(ExprBase.ExprType.FUNCTION, function=self.function.copy())\n elif self.expr_type == ExprBase.ExprType.BINARY_OPERATOR:\n return ExprBase(ExprBase.ExprType.BINARY_OPERATOR, op=self.op)\n elif self.expr_type == ExprBase.ExprType.UMINUS:\n return ExprBase(ExprBase.ExprType.UMINUS)\n elif self.expr_type == ExprBase.ExprType.SPECIAL:\n return ExprBase(ExprBase.ExprType.SPECIAL, special_type=self.special_type)\n\n def visit(self, visit_function : callable, valid_types : tuple[type] = None, args=(), kwargs={}) -> None:\n if valid_types is None or isinstance(self, valid_types):\n visit_function(self, *args, **kwargs)\n if self.expr_type == ExprBase.ExprType.FUNCTION:\n self.function.visit(visit_function, valid_types, args, kwargs)\n \n def bind_parameters(self, parameters : list[TypedParameter]) -> 'ExprBase':\n \"\"\"\n Binds the parameters of a copy of the expression to the given list of parameters.\n \"\"\"\n if self.expr_type == ExprBase.ExprType.CONSTANT:\n return ExprBase(ExprBase.ExprType.CONSTANT, constant=self.constant)\n elif self.expr_type == ExprBase.ExprType.FUNCTION:\n return ExprBase(ExprBase.ExprType.FUNCTION, function=self.function.bind_parameters(parameters))\n elif self.expr_type == ExprBase.ExprType.BINARY_OPERATOR:\n return ExprBase(ExprBase.ExprType.BINARY_OPERATOR, op=self.op)\n elif self.expr_type == ExprBase.ExprType.UMINUS:\n return ExprBase(ExprBase.ExprType.UMINUS)\n elif self.expr_type == ExprBase.ExprType.SPECIAL:\n return ExprBase(ExprBase.ExprType.SPECIAL, special_type=self.special_type)\n\nclass ExprComposite: \n \"\"\"\n A class used to represent a numerical expression (composite)\n Stores a list of ExprBase in prefix notation.\n \"\"\"\n\n def __init__(self, tokens : list[ExprBase]) -> None:\n self.tokens = tokens\n\n def __repr__(self) -> str:\n token_ids = list(range(len(self.tokens)))\n return self._rec_repr_(token_ids)\n\n def _rec_repr_(self, token_ids) -> str:\n token = 
self.tokens[token_ids.pop(0)]\n        if token.expr_type == ExprBase.ExprType.BINARY_OPERATOR:\n            ret = '(' + repr(token) + ' '\n            ret += self._rec_repr_(token_ids)\n            ret += ' '\n            ret += self._rec_repr_(token_ids)\n            ret += ')'\n            return ret\n        if token.expr_type == ExprBase.ExprType.UMINUS:\n            # unary minus consumes exactly one operand from the prefix token stream\n            return '(- ' + self._rec_repr_(token_ids) + ')'\n        return repr(token)\n\n    def copy(self) -> 'ExprComposite':\n        \"\"\"\n        Returns a copy of the expression.\n        \"\"\"\n        return ExprComposite([token.copy() for token in self.tokens])\n\n    def visit(self, visit_function : callable, valid_types : tuple[type] = None, args=(), kwargs={}) -> None:\n        if valid_types is None or isinstance(self, valid_types):\n            visit_function(self, *args, **kwargs)\n        for token in self.tokens:\n            token.visit(visit_function, valid_types, args, kwargs)\n\n    def bind_parameters(self, parameters : list[TypedParameter]) -> 'ExprComposite':\n        \"\"\"\n        Binds the parameters of a copy of the expression to the given list of parameters.\n        \"\"\"\n        return ExprComposite([token.bind_parameters(parameters) for token in self.tokens])\n","repo_name":"taskplanning/otpl","sub_path":"pddl/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"}
+{"seq_id":"24436770780","text":"import re\nfrom ..command import *\n\nsed_re = re.compile(r's/([^\\/]+)/(.+)', re.IGNORECASE)\n\n@on('message')\nasync def sed_sub(bot, message):\n\tm = sed_re.match(message.content)\n\tif not m: return\n\tasync for msg in message.channel.history(before=message):\n\t\tif msg.author == message.author: break\n\telse:\n\t\tawait message.edit(content='No substitutable message found.')\n\t\tawait asyncio.sleep(2)\n\t\tawait message.delete()\n\t\treturn\n\t\n\tnew_content = msg.content.replace(m.group(1), m.group(2))\n\tif message.content != new_content:\n\t\tawait msg.edit(content=new_content)\n\tawait message.delete()\n\n@command\ndef box(ctx, text):\n\ttext = text.upper().replace(' ', '')\n\ttop = ' '.join(text)\n\tcenter_spacing = ' ' * (2 * len(text) - 3)\n\tsides = (left + center_spacing + right for left, right in zip(text[1:-1], reversed(text[1:-1])))\n\treturn '\\n'.join(('```', top, *sides, ''.join(reversed(top)), '```'))\n\n@command\ndef arrow(ctx, text, dir=0):\n\tleft, up = not (dir % 2), not (dir >> 1)\n\treverse_diag = left ^ up\n\ttext = text.upper().replace(' ', '')\n\trows = [[' ' for _ in text] for _ in text] # Grid of spaces to fill in\n\trows[0 if up else -1][:] = list(text)\n\tfor i, (row, letter) in enumerate(zip(rows, reversed(text) if reverse_diag else text)):\n\t\trow[-i-1 if reverse_diag else i] = letter\n\t\trows[i][0 if left else -1] = letter\n\treturn '\\n'.join(('```', *map(' '.join, rows), '```'))\n\n@command\ndef xbox(ctx, text):\n\ttext = text.upper().replace(' ', '')\n\trows = [[' ' for _ in text + ' '] for _ in text + ' '] # Add two extra rows and columns\n\trows[0][1:-1] = text\n\tfor i, (row, letter, rev_letter) in enumerate(zip(rows[1:-1], text, reversed(text))):\n\t\trow[0], row[-1], row[i+1] = (letter,) * 3\n\t\trow[-i-2] = rev_letter\n\trows[-1][1:-1] = text\n\treturn '\\n'.join(('```', *map(' '.join, rows), '```'))\n","repo_name":"hook321/endrebot0","sub_path":"endrebot0/commands/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"27897028212","text":"from __future__ import division\nimport sklearn\nimport numpy\nfrom itertools import chain, combinations\n\nfrom Node import Node\n\nclass CART(sklearn.base.BaseEstimator, sklearn.base.ClassifierMixin):\n def __init__(self, leaf_min_size, tree_depth, my_ord, my_cont, my_cat, my_cat_slow, criterion=\"gini\", barrier_improvement = 0.0):\n self.leaf_min_size = leaf_min_size\n self.tree_depth = tree_depth\n self.criterion = criterion\n self.barrier_improvement = barrier_improvement\n \n self.ORDINAL_COLUMNS = my_ord\n self.CONTINUOUS_COLUMNS = my_cont\n self.CATEGORICAL_COLUMNS = my_cat\n self.CATEGORICAL_COLUMNS_SLOW = my_cat_slow\n\n self.ALL_COLUMNS = self.ORDINAL_COLUMNS.copy()\n self.ALL_COLUMNS.update(self.CONTINUOUS_COLUMNS)\n self.ALL_COLUMNS.update(self.CATEGORICAL_COLUMNS_SLOW)\n self.ALL_COLUMNS.update(self.CATEGORICAL_COLUMNS)\n\n self.tree = None\n self.tree_root = None\n\n def gini(self, left_subset, right_subset):\n\n pr_left = left_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n pr_right = right_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n\n return self.gini_impurity(left_subset + right_subset) - pr_left*self.gini_impurity(left_subset) - pr_right*self.gini_impurity(right_subset)\n\n def gini_impurity(self, node_subset):\n impurity = 0\n\n subset_size = node_subset.__len__()\n\n for node_class_k in self.classes:\n for node_class_l in self.classes:\n if(node_class_k != node_class_l):\n class_k_count = [node_entry for node_entry in node_subset if node_entry == node_class_k].__len__()\n class_l_count = [node_entry for node_entry in node_subset if node_entry == node_class_l].__len__()\n impurity += class_k_count*class_l_count / subset_size**2\n\n return impurity\n\n\n def twoing(self, left_subset, right_subset):\n\n pr_left = left_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n pr_right = right_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n\n return pr_left * pr_right / 4 * (self.twoing_impurity(left_subset, right_subset)) ** 2\n\n def twoing_impurity(self, left_subset, right_subset):\n impurity = 0\n\n total_size = left_subset.__len__() + right_subset.__len__()\n\n for class_k in self.classes:\n impurity += abs([node_entry for node_entry in left_subset if node_entry == class_k].__len__()/total_size - \\\n [node_entry for node_entry in right_subset if node_entry == class_k].__len__()/total_size)\n\n return impurity\n\n def ordered_twoing_impurity(self, left_subset, right_subset):\n impurity = 0\n\n total_size = left_subset.__len__() + right_subset.__len__()\n\n for class_k in self.classes:\n if class_k == 1:\n continue\n impurity += abs([node_entry for node_entry in left_subset if node_entry == class_k].__len__()/total_size - \\\n [node_entry for node_entry in right_subset if node_entry == class_k].__len__()/total_size)\n\n return impurity\n\n def ordered_twoing(self, left_subset, right_subset):\n\n pr_left = left_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n pr_right = right_subset.__len__()/(left_subset.__len__() + right_subset.__len__())\n\n return pr_left * pr_right / 4 * (self.ordered_twoing_impurity(left_subset, right_subset)) ** 2\n\n\n def impurity_change(self, left_subset, right_subset):\n if self.criterion == \"gini\":\n return self.gini(left_subset, right_subset)\n elif self.criterion == \"twoing\":\n return self.twoing(left_subset, right_subset)\n elif self.criterion == \"ordered_twoing\":\n return self.ordered_twoing(left_subset, 
right_subset)\n else:\n #You could insert here call for ordered Twoing\n raise ValueError(\"Unknown criterion type!\")\n\n def print_tree(self):\n for node in self.tree:\n node.print_node()\n\n def dot_print_tree(self, filename):\n out_file = open(filename, \"w\")\n\n out_file.write(\"strict digraph G {\")\n\n for node in self.tree:\n if node.left != None and node.right != None:\n out_file.write(\"%d [label=\\\"ID = %d \\n X[%d (%s)] : %s \\n gini = %0.3f \\n samples = %d\\\"];\" \\\n % (node.index, node.index, node.splitting_feature, self.ALL_COLUMNS[node.splitting_feature], str(node.splitting_value), node.impurity, node.class_subset.__len__()))\n else:\n out_file.write(\"%d [label=\\\"ID = %d \\n samples = %d\\\"];\" % (node.index, node.index, node.class_subset.__len__()))\n\n if node.left != None:\n out_file.write(\"%d -> %d;\" % (node.index, node.left.index))\n\n if node.right != None:\n out_file.write(\"%d -> %d;\" % (node.index, node.right.index))\n\n out_file.write(\"}\")\n\n out_file.close()\n\n def get_all_subsets(self, set):\n return chain.from_iterable(combinations(set, r) for r in range(len(set)+1))\n #\n # def split_ordered_twoing(self, node):\n # splits = []\n #\n # for featureID in self.ORDINAL_COLUMNS.keys():\n # current_feature = [row[featureID] for row in node.data_subset]\n # current_feature.sort()\n #\n # max_impurity_change_point = None\n # max_impurity_change = float(\"-inf\")\n #\n # for split_point in current_feature:\n # cur_left_subset = []\n # cur_right_subset = []\n #\n # for row_index in range(node.data_subset.__len__()):\n # if node.data_subset[row_index][featureID] <= split_point:\n # cur_left_subset.append(node.class_subset[row_index])\n # else:\n # cur_right_subset.append(node.class_subset[row_index])\n #\n # if(cur_left_subset.__len__() == 0 or cur_right_subset.__len__() == 0):\n # continue\n #\n # cur_impurity_change = self.ordered_twoing(cur_left_subset, cur_right_subset, self.ORDINAL_COLUMNS)\n #\n # print(\"AAA\")\n # print(cur_impurity_change)\n #\n # if cur_impurity_change > max_impurity_change:\n # max_impurity_change = cur_impurity_change\n # max_impurity_change_point = split_point\n #\n # splits.append({\"ID\" : featureID, \"max\" : max_impurity_change, \"point\" : max_impurity_change_point})\n #\n # final_max_impurity_feature = None\n # final_max_impurity_change = float(\"-inf\")\n # final_max_impurity_change_point = None\n #\n # for split in splits:\n # print(\"MAXSPLIT \" + str(split[\"max\"]))\n # if split[\"max\"] > final_max_impurity_change:\n # final_max_impurity_feature = split[\"ID\"]\n # final_max_impurity_change = split[\"max\"]\n # final_max_impurity_change_point = split[\"point\"]\n #\n # left_subset = []\n # left_classes = []\n # right_subset = []\n # right_classes = []\n #\n # if final_max_impurity_feature in self.CATEGORICAL_COLUMNS:\n # for row_index in range(node.data_subset.__len__()):\n # if node.data_subset[row_index][final_max_impurity_feature] in final_max_impurity_change_point:\n # left_subset.append(node.data_subset[row_index])\n # left_classes.append(node.class_subset[row_index])\n # else:\n # right_subset.append(node.data_subset[row_index])\n # right_classes.append(node.class_subset[row_index])\n # else:\n # for row_index in range(node.data_subset.__len__()):\n # print(final_max_impurity_feature)\n # print(node.data_subset[row_index][final_max_impurity_feature])\n # if node.data_subset[row_index][final_max_impurity_feature] <= final_max_impurity_change_point:\n # left_subset.append(node.data_subset[row_index])\n # 
left_classes.append(node.class_subset[row_index])\n # else:\n # right_subset.append(node.data_subset[row_index])\n # right_classes.append(node.class_subset[row_index])\n #\n # left_node = Node(node.index * 2 + 1, node.node_depth + 1, left_subset, left_classes)\n # right_node = Node(node.index * 2 + 2, node.node_depth + 1, right_subset, right_classes)\n #\n # node.left = left_node\n # node.right = right_node\n # node.splitting_feature = final_max_impurity_feature\n # node.splitting_value = final_max_impurity_change_point\n # node.impurity_gain = final_max_impurity_change\n #\n # node.impurity = 0.0\n #\n # node.is_leaf = False\n #\n # return left_node, right_node\n\n def split_node(self, node):\n\n # if self.criterion == \"ordered_twoing\":\n # return self.split_ordered_twoing(node)\n\n # node.print_node()\n # print(node.data_subset)\n # raw_input()\n\n print(\"Current node: \" + str(node.index))\n\n splits = []\n\n for featureID in self.CONTINUOUS_COLUMNS.keys() + self.ORDINAL_COLUMNS.keys():\n # print(featureID)\n # print(node.data_subset)\n current_feature = [row[featureID] for row in node.data_subset]\n current_feature.sort()\n\n max_impurity_change_point = None\n max_impurity_change = float(\"-inf\")\n\n for split_point in current_feature:\n cur_left_subset = []\n cur_right_subset = []\n\n for row_index in range(node.data_subset.__len__()):\n if node.data_subset[row_index][featureID] <= split_point:\n cur_left_subset.append(node.class_subset[row_index])\n else:\n cur_right_subset.append(node.class_subset[row_index])\n\n # print(split_point)\n # print(cur_left_subset.__len__())\n\n if(cur_left_subset.__len__() == 0 or cur_right_subset.__len__() == 0):\n continue\n # raw_input()\n\n cur_impurity_change = self.impurity_change(cur_left_subset, cur_right_subset)\n\n # print(\"CURI \" + str(cur_impurity_change))\n\n # print(cur_impurity)\n #\n # raw_input()\n\n if cur_impurity_change > max_impurity_change:\n max_impurity_change = cur_impurity_change\n max_impurity_change_point = split_point\n\n # print(max_criterion_point)\n\n splits.append({\"ID\" : featureID, \"max\" : max_impurity_change, \"point\" : max_impurity_change_point})\n\n for featureID in self.CATEGORICAL_COLUMNS.keys():\n # print(featureID)\n current_feature = [row[featureID] for row in node.data_subset]\n current_feature_set = set(current_feature)\n current_feature_subsets = self.get_all_subsets(current_feature_set)\n\n max_impurity_change_point = None\n max_impurity_change = float(\"-inf\")\n\n for subset in current_feature_subsets:\n if(len(subset) != 0 and len(subset) != len(current_feature_set)):\n # print(subset)\n cur_left_subset = []\n cur_right_subset = []\n\n for row_index in range(node.data_subset.__len__()):\n if node.data_subset[row_index][featureID] in subset:\n cur_left_subset.append(node.class_subset[row_index])\n else:\n cur_right_subset.append(node.class_subset[row_index])\n\n # print(split_point)\n # print(cur_left_subset.__len__())\n\n if(cur_left_subset.__len__() == 0 or cur_right_subset.__len__() == 0):\n continue\n # raw_input()\n\n cur_impurity_change = self.impurity_change(cur_left_subset, cur_right_subset)\n\n # print(\"CURI \" + str(cur_impurity_change) + str(max_impurity_change))\n\n if cur_impurity_change > max_impurity_change:\n max_impurity_change = cur_impurity_change\n max_impurity_change_point = subset\n\n splits.append({\"ID\" : featureID, \"max\" : max_impurity_change, \"point\" : max_impurity_change_point})\n\n # print(splits)\n\n final_max_impurity_feature = None\n final_max_impurity_change 
= float(\"-inf\")\n final_max_impurity_change_point = None\n\n for split in splits:\n # print(\"MAXSPLIT \" + str(split[\"max\"]))\n if split[\"max\"] > final_max_impurity_change:\n final_max_impurity_feature = split[\"ID\"]\n final_max_impurity_change = split[\"max\"]\n final_max_impurity_change_point = split[\"point\"]\n\n # raw_input()\n\n # print(final_max_impurity_feature)\n # print(final_max_impurity_change)\n # print(final_max_impurity_change_point)\n\n left_subset = []\n left_classes = []\n right_subset = []\n right_classes = []\n\n if final_max_impurity_feature in self.CATEGORICAL_COLUMNS:\n for row_index in range(node.data_subset.__len__()):\n if node.data_subset[row_index][final_max_impurity_feature] in final_max_impurity_change_point:\n left_subset.append(node.data_subset[row_index])\n left_classes.append(node.class_subset[row_index])\n else:\n right_subset.append(node.data_subset[row_index])\n right_classes.append(node.class_subset[row_index])\n else:\n for row_index in range(node.data_subset.__len__()):\n # print(final_max_impurity_feature)\n # print(node.data_subset[row_index][final_max_impurity_feature])\n if node.data_subset[row_index][final_max_impurity_feature] <= final_max_impurity_change_point:\n left_subset.append(node.data_subset[row_index])\n left_classes.append(node.class_subset[row_index])\n else:\n right_subset.append(node.data_subset[row_index])\n right_classes.append(node.class_subset[row_index])\n\n left_node = Node(node.index * 2 + 1, node.node_depth + 1, left_subset, left_classes)\n right_node = Node(node.index * 2 + 2, node.node_depth + 1, right_subset, right_classes)\n\n node.left = left_node\n node.right = right_node\n node.splitting_feature = final_max_impurity_feature\n node.splitting_value = final_max_impurity_change_point\n node.impurity_gain = final_max_impurity_change\n if self.criterion == \"gini\":\n node.impurity = self.gini_impurity(node.class_subset)\n elif self.criterion == \"twoing\" or self.criterion == \"ordered_twoing\":\n node.impurity = 0.0\n node.is_leaf = False\n\n return left_node, right_node\n\n\n def pre_stopping_criterion(self, node):\n \"\"\"\n There are a few stopping criteria. You should implement them here.\n\n + If a node becomes pure; that is, all cases in a node have identical values of the dependent\n variable, the node will not be split.\n + If all cases in a node have identical values for each predictor, the node will not be split.\n + If the current tree depth reaches the user-specified maximum tree depth limit value, the\n tree growing process will stop.\n + If the size of a node is less than the user-specified minimum node size value, the node\n will not be split.\n\n Some sources call some of the criteria (e.g. 
tree depth) a pre-pruning, so you could check sources\n about it before implementation.\n \"\"\"\n\n if node.node_depth >= self.tree_depth:\n print(str(node.index) + \" not split because of max depth\")\n return True\n\n if len(set(node.class_subset)) == 1:\n print(str(node.index) + \" not split because of 1 class\")\n return True\n\n if len(node.data_subset) <= self.leaf_min_size:\n print(str(node.index) + \" not split because of small size\\n Size: \")\n print(len(node.data_subset))\n return True\n\n identical = True\n\n for index in range(node.data_subset.__len__() - 1):\n for column in range(node.data_subset[index].__len__()):\n if node.data_subset[index][column] != node.data_subset[index + 1][column]:\n identical = False\n\n if identical == True:\n print(str(node.index) + \" not split because of identical cases\")\n return True\n\n return False\n\n\n def post_stopping_criterion(self, node, lchild, rchild):\n \"\"\"\n There are a few stopping criteria. You should implement them here.\n\n + If the split of a node results in a child node whose node size is less than the user-specified\n minimum child node size value, the node will not be split.\n + If for the best split, the barrier_improvement is smaller than the user-specified minimum barrier_improvement,\n the node will not be split.\n\n Some sources call some of the criteria (e.g. tree depth) a pre-pruning, so you could check sources\n about it before implementation.\n \"\"\"\n\n\n if node.impurity_gain < self.barrier_improvement:\n print(str(node.index) + \" not split because of low gain\")\n return True\n\n if len(lchild.data_subset) < self.leaf_min_size or \\\n len(rchild.data_subset) < self.leaf_min_size:\n print(str(node.index) + \" not split because of small child\\n Sizes: \")\n print(len(lchild.data_subset))\n print(len(rchild.data_subset))\n return True\n\n return False\n\n def fit(self, X_train, y_train):\n \"\"\"\n 1. Find each predictor's best split.\n For each continuous and ordinal predictor, sort its values from the smallest to the largest.\n For the sorted predictor, go through each value from top to examine each candidate split\n point (call it v, if x \\leq v, the case goes to the left child node, otherwise, goes to the right.)\n to determine the best. The best split point is the one that maximize the splitting criterion\n the most when the node is split according to it. The definition of splitting criterion is in\n later section.\n For each nominal predictor, examine each possible subset of categories (call it A, if\n x \\in A, the case goes to the left child node, otherwise, goes to the right.) to find the best\n split.\n 2. Find the node's best split.\n Among the best splits found in step 1, choose the one that maximizes the splitting\n criterion.\n 3. Split the node using its best split found in step 2 if the stopping rules are not satisfied.\n\n Write your growing tree code here. 
You should use stopping_criterion while growing it.\n \"\"\"\n self.X_train = X_train\n self.y_train = y_train\n self.classes = set(y_train)\n\n self.tree = self.build_tree()\n\n # self.print_tree()\n\n # root.print_node()\n #\n # lchild.print_node()\n #\n # rchild.print_node()\n\n return self\n\n def build_tree(self):\n\n tree = []\n\n root = Node(0, 0, self.X_train, self.y_train)\n\n tree.append(root)\n\n self.tree_root = root\n\n for node in tree:\n\n # node.print_node()\n\n if self.pre_stopping_criterion(node) == True:\n continue\n\n lchild, rchild = self.split_node(node)\n\n if self.post_stopping_criterion(node, lchild, rchild) == True:\n node.left = None\n node.right = None\n continue\n\n tree.append(lchild)\n tree.append(rchild)\n\n return tree\n\n def get_node_by_id(self, ID):\n for node in self.tree:\n if node.index == ID:\n return node\n\n def find_best_leaf(self, case):\n # print(case)\n cur_node = self.tree_root\n\n # cur_node.print_node()\n # print(cur_node.is_leaf)\n\n while not cur_node.is_leaf:\n # print(case[cur_node.splitting_feature])\n if cur_node.splitting_feature in self.CATEGORICAL_COLUMNS:\n # print(cur_node.splitting_value)\n # print(cur_node.splitting_feature)\n if case[cur_node.splitting_feature] in cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n else:\n if case[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n\n # cur_node.print_node()\n\n return cur_node\n\n \n def predict(self, X_test):\n \"\"\"\n Predict labels for test data using this classifier. Take the tree growned in the fit stage.\n Don't forget to handle missing values: the label you choose should be from the sub-tree\n which has less enthropy.\n\n Inputs:\n - X_test: A numpy array of shape (num_test, D) containing test data consisting\n of num_test samples each of dimension D.\n\n Returns:\n - y: A numpy array of shape (num_test,) containing predicted labels for the\n test data, where y[i] is the predicted label for the test point X[i].\n \"\"\"\n from collections import Counter\n\n size = X_test.shape[0]\n\n y_pred = numpy.zeros(size)\n\n for case in range(size):\n best_leaf = self.find_best_leaf(X_test[case])\n count = Counter(best_leaf.class_subset)\n if case % 1000 == 0:\n print(case)\n # print(count)\n y_pred[case] = int(count.most_common(1)[0][0])\n\n return y_pred, best_leaf\n\n def find_internal_nodes(self, node, internal_nodes):\n if node.left != None or node.right != None:\n internal_nodes.append(node)\n\n if not node.left.is_leaf:\n self.find_internal_nodes(node.left, internal_nodes)\n\n if not node.right.is_leaf:\n self.find_internal_nodes(node.right, internal_nodes)\n\n def remove_subtree(self, node):\n if node.left != None or node.right != None:\n self.remove_subtree(node.left)\n self.remove_subtree(node.right)\n\n self.tree.remove(node)\n\n\n def prune(self, target_id, alpha = 0.1):\n internal_nodes = []\n\n self.find_internal_nodes(self.tree_root, internal_nodes)\n while len(internal_nodes) > 0:\n min_inc = float(\"inf\")\n pnode = None\n for node in internal_nodes:\n inc_loss = self.loss(node.data_subset, target_id) - self.loss(node.left.data_subset, target_id) - self.loss(node.right.data_subset, target_id)\n if inc_loss < min_inc:\n min_inc = inc_loss\n pnode = node\n if min_inc < alpha:\n self.remove_subtree(pnode.left)\n self.remove_subtree(pnode.right)\n pnode.is_leaf = True\n pnode.left = None\n pnode.right = None\n else:\n return\n internal_nodes = []\n 
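# rebuild the internal-node list after removing the pruned subtree\n            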
self.find_internal_nodes(self.tree_root, internal_nodes)\n\n def loss(self, train, target_id):\n avg = sum([row[target_id] for row in train]) / float(len(train))\n ret = 0.0\n for row in train:\n ret += (row[target_id] - avg)**2\n return ret","repo_name":"alexeyqu/mipt-alexeyqu","sub_path":"3 term/ML/salary/CART/CART.py","file_name":"CART.py","file_ext":"py","file_size_in_byte":24304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"9501578807","text":"import os\nimport pickle\nimport sys\nfrom decimal import Decimal\n\nfrom google.auth.transport.requests import Request\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom hu import ObjectDict as OD\n\nTOKEN_FILE = \"token.pickle\"\nCREDENTIALS_FILE = os.environ.get(\n \"CREDENTIALS_FILE\", os.path.expanduser(\"~/.credentials.json\")\n)\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = [\n \"https://www.googleapis.com/auth/documents\",\n \"https://www.googleapis.com/auth/spreadsheets.readonly\",\n]\nMONTHS = \"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec\".split()\n\nDEBUG_FEATURE_FLAGS = {\"parse\"}\n\n#\n# Should we connect to the MongoDB on import? Ъци\n#\n\n\ndef debug(debug_class, *arg, **kw):\n if debug_class in DEBUG_FEATURE_FLAGS or \"all\" in DEBUG_FEATURE_FLAGS:\n print(*arg, **kw)\n\n\ndef authenticate():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(TOKEN_FILE):\n with open(TOKEN_FILE, \"rb\") as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS_FILE, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(TOKEN_FILE, \"wb\") as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef build_service():\n creds = authenticate()\n service = build(\"sheets\", \"v4\", credentials=creds)\n return service\n\n\ndef pull_data(sheet_id, range):\n ss_service = build_service()\n\n # Retrieve the spreadsheet's contents from the Sheets service.\n document = (\n ss_service.spreadsheets()\n .values()\n .get(spreadsheetId=sheet_id, range=range)\n .execute()\n )\n return document\n\n\ndef pull_props(sheet_id):\n ss_service = build_service()\n return ss_service.spreadsheets().get(spreadsheetId=sheet_id).execute()\n\n\ndef clean_percentage(tm, slot):\n \"\"\"\n Simple percentage conversion: stored as a Decimal to 2DP.\n \"\"\"\n if not tm[slot]:\n tm[slot] = None\n else:\n if tm[slot][-1] != \"%\":\n raise ValueError(f\"Row for '{tm.name}' has invalid percentage for {slot}\")\n tm[slot] = Decimal(tm[slot][:-1]) / Decimal(100).quantize(Decimal(\"0.01\"))\n\n\ndef clean_column_name(name):\n if name:\n name = name.lower().replace(\" \", \"_\")\n for char in \"/?+=\":\n name = name.replace(char, \"\")\n while \"__\" in name:\n name = name.replace(\"__\", \"_\")\n return \"item_id\" if name == \"id\" else name\n else:\n return \"unknown\" # The column with an unknown purpose\n\n\ndef clean_row(r, n_cols):\n \"\"\"\n Add necessary null string values to pad row to required length.\n \"\"\"\n return r + [\"\"] * (n_cols - len(r))\n\n\ndef load_data_rows(sheet_id, range_spec, item_type):\n \"\"\"\n Transform a range in a spreadsheet into a list of row dictionaries.\n\n This is an implementation-specific function, so maybe it shoould be a\n document method. 
Ideally most records will have enough semantic\n content (eventually) that a standard load method will suffice.\n \"\"\"\n raw_data_rows = pull_data(sheet_id, range_spec)[\"values\"]\n col_names = [clean_column_name(name) for name in raw_data_rows[0]]\n n_cols = len(col_names)\n del raw_data_rows[0]\n # The line below ignores blank rows and those with no column 1 This is\n # pretty arbitrary, and should ideally be somehow configurable per data\n # source\n data_rows = [clean_row(r, n_cols) for r in raw_data_rows if len(r) > 1 and r[1]]\n data_rows = [r[: len(col_names)] for r in data_rows]\n data_rows = [OD(dict(zip(col_names, slot))) for slot in data_rows]\n for row in data_rows:\n yield item_type.from_dict(row)\n\n\nif __name__ == \"__main__\":\n sys.exit(\"Test code required!\")\n","repo_name":"holdenweb/python-training","sub_path":"src/transformers/sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"39638800204","text":"#Exercise!\n#Display the image below to the right hand side where the 0 is going to be ' ', and the 1 is going to be '*'. This will reveal an image!\npicture = [\n [0,0,0,1,0,0,0],\n [0,0,1,1,1,0,0],\n [0,1,1,1,1,1,0],\n [1,1,1,1,1,1,1],\n [0,0,0,1,0,0,0],\n [0,0,0,1,0,0,0]\n]\n\n# iterate over picture\n# if 0 -> print ''\n# if 1 -> print *\n\n\nfor row in picture:\n for pixel in row:\n if (pixel == 1):\n print('*', end='') # We use end to don't go to new line \\n\n else:\n print(' ', end='') # We use end to don't go to new line \\n\n print('\\n')\n\n# Better Version below\n\n\nfill = '*'\nempty = ' '\nfor row in picture:\n for pixel in row:\n if (pixel): # We don't need pixel == 1 as pixel itself is a TRUE value\n print(fill, end='') # We use end to don't go to new line \\n\n else:\n print(empty, end='') # We use end to don't go to new line \\n\n print('\\n')","repo_name":"PatrykPetryszen/complete_python_developer_2020_zero_to_mastery","sub_path":"75.first_gui.py","file_name":"75.first_gui.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"39476977872","text":"import pygame\nimport sys\nimport os\n\n# Define constants for the board size and cell size\nBOARD_SIZE = 8\nCELL_SIZE = 60\nWINDOW_SIZE = (CELL_SIZE * BOARD_SIZE, CELL_SIZE * BOARD_SIZE)\n\n# Define colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 128, 0)\n\nclass OthelloGame:\n def __init__(self):\n self.board = [[2] * BOARD_SIZE for _ in range(BOARD_SIZE)]\n self.board[3][3] = 0 # White circle\n self.board[3][4] = 1 # Black circle\n self.board[4][3] = 1 # Black circle\n self.board[4][4] = 0 # White circle\n \n def switch_player(self, player):\n return 0 if player == 1 else 1\n\n def is_valid_move(self, row, col,player):\n valid_moves = self.get_valid_moves(player)\n return [row, col] in valid_moves\n\n # Check if the cell is empty and the move flips at least one opponent's piece\n # Implement the logic here\n\n def make_move(self, row, col,player):\n self.board[row][col]=player\n #right\n l=[]\n for r in range(row+1,8):\n \n if self.board[r][col]==1-player:\n l.append(r)\n if self.board[r][col]==2:\n l=[]\n break\n if self.board[r][col]==player:\n for x in l:\n self.board[x][col]=player\n l=[]\n break\n #left \n l=[]\n for r in range(row-1,-1,-1):\n \n if self.board[r][col]==1-player:\n l.append(r)\n if self.board[r][col]==2:\n l=[]\n break\n if self.board[r][col]==player:\n for x in l:\n self.board[x][col]=player\n l=[]\n break\n #up \n l=[]\n for c in range(col-1,-1,-1):\n \n if self.board[row][c]==1-player:\n l.append(c)\n if self.board[row][c]==2:\n l=[]\n break\n if self.board[row][c]==player:\n for x in l:\n self.board[row][x]=player\n l=[]\n break\n #down \n l=[]\n for c in range(col+1,8):\n \n if self.board[row][c]==1-player:\n l.append(c)\n if self.board[row][c]==2:\n l=[]\n break\n if self.board[row][c]==player:\n for x in l:\n self.board[row][x]=player\n l=[]\n break\n #right and up\n r=row-1\n c=col+1\n l=[]\n while r>=0 and c<8:\n if self.board[r][c]==1-player:\n l.append((r,c))\n r-=1\n c+=1\n if r>=0 and c<8 and self.board[r][c]==2:\n l=[]\n break\n if r>=0 and c<8 and self.board[r][c]==player:\n for a,b in l:\n self.board[a][b]=player\n l=[]\n break\n #right and down\n r=row+1\n c=col+1\n l=[]\n while r<8 and c<8:\n if self.board[r][c]==1-player:\n l.append((r,c))\n r+=1\n c+=1\n if r<8 and c<8 and self.board[r][c]==2:\n l=[]\n break\n if r<8 and c<8 and self.board[r][c]==player:\n for a,b in l:\n self.board[a][b]=player\n l=[]\n break\n #left and up\n r=row-1\n c=col-1\n l=[]\n while r>=0 and c>=0:\n if self.board[r][c]==1-player:\n l.append((r,c))\n r-=1\n c-=1\n if r>=0 and c>=0 and self.board[r][c]==2:\n l=[]\n break\n if r>=0 and c>=0 and self.board[r][c]==player:\n for a,b in l:\n self.board[a][b]=player\n l=[]\n break\n #left and down\n r=row+1\n c=col-1\n l=[]\n while r<8 and c>=0:\n if self.board[r][c]==1-player:\n l.append((r,c))\n r+=1\n c-=1\n if r<8 and c>=0 and self.board[r][c]==2:\n l=[]\n break\n if r<8 and c>=0 and self.board[r][c]==player:\n for a,b in l:\n self.board[a][b]=player\n l=[]\n break\n player=self.switch_player(player)\n # Place the player's piece on the board and flip opponent's pieces\n # Implement the logic here\n\n def get_valid_moves(self, player):\n valid_moves = []\n for row in range(8):\n for col in range(8):\n if self.board[row][col] == player: \n #right\n if row+1<8 and self.board[row+1][col]==1-player and not self.board[row+1][col]==2:\n for r in range(row+1,8):\n if self.board[r][col]==player:\n break\n if self.board[r][col]==2:\n valid_moves.append([r,col])\n break\n #left\n if 
row-1>=0 and self.board[row-1][col]==1-player and not self.board[row-1][col]==2:\n for r in range(row-1,-1,-1):\n if self.board[r][col]==player:\n break\n if self.board[r][col]==2:\n valid_moves.append([r,col])\n break\n #up\n if col-1>=0 and self.board[row][col-1]==1-player and not self.board[row][col-1]==2:\n for c in range(col-1,-1,-1):\n if self.board[row][c]==player:\n break\n if self.board[row][c]==2:\n valid_moves.append([row,c])\n break\n #down\n if col+1<8 and self.board[row][col+1]==1-player and not self.board[row][col+1]==2:\n for c in range(col+1,8):\n if self.board[row][c]==player:\n break\n if self.board[row][c]==2:\n valid_moves.append([row,c])\n break\n #left and down\n if row-1>=0 and col+1<8 and self.board[row-1][col+1]==1-player and not self.board[row-1][col+1]==2:\n # if row-1>=0 and col+1<8 and self.board[row-1][col+1]==1-player:\n r=row-1\n c=col+1\n while r-1>=0 and c+1<8:\n if self.board[r][c]==player:\n break\n if self.board[r][c]==2:\n valid_moves.append([r,c])\n break\n if self.board[r][c]==1-player:\n r=r-1\n c=c+1\n #left and up\n if row-1>=0 and col-1>=0 and self.board[row-1][col-1]==1-player and not self.board[row-1][col-1]==2:\n # if row-1>=0 and col-1>=0 and self.board[row-1][col-1]==1-player:\n r=row-1\n c=col-1\n while r-1>=0 and c-1>=0:\n if self.board[r][c]==player:\n break\n if self.board[r][c]==2:\n valid_moves.append([r,c])\n break\n if self.board[r][c]==1-player:\n r-=1\n c-=1\n #right and down\n if row+1<8 and col+1<8 and self.board[row+1][col+1]==1-player and not self.board[row+1][col+1]==2:\n # if row+1<8 and col+1<8 and self.board[row+1][col+1]==1-player:\n r=row+1\n c=col+1\n while r+1<8 and c+1<8:\n if self.board[r][c]==player:\n break\n if self.board[r][c]==2:\n valid_moves.append([r,c])\n break \n if self.board[r][c]==1-player:\n r+=1\n c+=1 \n #right and up\n if row+1<8 and col-1>=0 and self.board[row+1][col-1]==1-player and not self.board[row+1][col-1]==2:\n # if row+1<8 and col-1>=0 and self.board[row+1][col-1]==1-player:\n r=row+1\n c=col-1\n while r+1<8 and c-1>=0:\n if self.board[r][c]==player:\n break\n if self.board[r][c]==2:\n valid_moves.append([r,c])\n break\n if self.board[r][c]==1-player:\n r+=1\n c-=1\n return valid_moves\n\n def is_game_over(self,player):\n return not self.get_valid_moves(player) and not self.get_valid_moves(1 - player)\n\n # Check if the game is over (no valid moves for both players)\n # Implement the logic here\n\n def get_winner(self):\n black_count = sum(row.count(1) for row in self.board)\n white_count = sum(row.count(0) for row in self.board)\n\n if black_count > white_count:\n return 1 # Black wins\n elif white_count > black_count:\n return 0 # White wins\n else:\n return -1 # Tie\n \n # Return the winner of the game or None if it's a tie\n # Implement the logic here\n\nclass OthelloGUI:\n def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode(WINDOW_SIZE)\n pygame.display.set_caption(\"Othello\")\n self.clock = pygame.time.Clock()\n self.game = OthelloGame()\n\n def draw_board(self,player):\n self.screen.fill(GREEN)\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n x = col * CELL_SIZE\n y = row * CELL_SIZE\n pygame.draw.rect(self.screen, BLACK, (x, y, CELL_SIZE, CELL_SIZE), 1) \n \n if self.game.board[row][col] == 1:\n pygame.draw.circle(self.screen, BLACK, (x + CELL_SIZE // 2, y + CELL_SIZE // 2), CELL_SIZE // 2 - 5) \n elif self.game.board[row][col] == 0:\n pygame.draw.circle(self.screen, WHITE, (x + CELL_SIZE // 2, y + CELL_SIZE // 2), CELL_SIZE // 2 - 5) \n if 
self.game.is_valid_move(row, col, player):\n                    pygame.draw.circle(self.screen, (0, 0, 255, 100), (x + CELL_SIZE // 2, y + CELL_SIZE // 2), CELL_SIZE // 2 - 10) \n        \n        # Draw the game board, including circles and valid move indicators\n        # Implement the drawing logic here\n\n    def clear_terminal(self):\n        os.system('cls' if os.name == 'nt' else 'clear')\n\n    def run(self):\n        running = True\n        player=1\n        while running:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    running = False\n                if event.type == pygame.MOUSEBUTTONDOWN:\n                    row = event.pos[1] // CELL_SIZE\n                    col = event.pos[0] // CELL_SIZE\n                    \n                    for inner in self.game.board:\n                        print(inner)\n                    print(row)\n                    print(col)\n                    \n                    if self.game.is_valid_move(row,col,player):\n                        self.game.make_move(row,col,player)\n                        player=self.game.switch_player(player)\n                        print('Yes, a valid move')\n                        if player==0:\n                            print(\"It is White's turn\")\n                        else:\n                            print(\"It is Black's turn\")\n                    if not self.game.get_valid_moves(player):\n                        player=self.game.switch_player(player)\n                    \n            if self.game.is_game_over(player):\n                winner=self.game.get_winner()\n                if winner == 0:\n                    winner_text = \"White wins!\"\n                elif winner == 1:\n                    winner_text = \"Black wins!\"\n                else:\n                    winner_text = \"It's a tie!\"\n                \n                self.screen.fill(GREEN)\n                pygame.display.update()\n                \n                # Display the winner message for 3 seconds\n                \n                font = pygame.font.Font(None, 36)\n                text_surface = font.render(winner_text, True, BLACK)\n                text_rect = text_surface.get_rect(center=(WINDOW_SIZE[0] // 2, WINDOW_SIZE[1] // 2))\n                self.screen.blit(text_surface, text_rect)\n                pygame.display.update()\n                self.clear_terminal()\n                pygame.time.wait(3000)\n                \n                # Reset the game and continue\n                \n                self.game = OthelloGame()\n                player = 1 \n            \n            # print(self.game.get_valid_moves(player))\n            self.draw_board(player)\n            \n            pygame.display.update()\n            self.clock.tick(60)\n\n        pygame.quit()\n        sys.exit()\n\nif __name__ == \"__main__\":\n    gui = OthelloGUI()\n    gui.run()\n","repo_name":"Tanish-pat/My-Own-Python-Othello-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"75141014541","text":"# pylint: disable=wrong-import-position\n\nAPP_NAME = \"resource_management\"\nOPERATION_NAME = \"update_user_details\"\nREQUEST_METHOD = \"put\"\nURL_SUFFIX = \"v2/user/profile/\"\n\nfrom .test_case_01 import TestCase01UpdateUserDetailsAPITestCase\n\n__all__ = [\n \"TestCase01UpdateUserDetailsAPITestCase\"\n]\n","repo_name":"bammidichandini/resource_management-chandini","sub_path":"resource_management/views/update_user_details/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"25004471130","text":"import cv2\nimport os\n\ndef dir_to_frames(inpath, writepath):\n writepath = writepath\n files = [i for i in os.listdir(inpath) if i != '.DS_Store' and i != '.ipynb_checkpoints']\n count = 1\n for f in files:\n vidpath = os.path.join(inpath, f)\n vidcap = cv2.VideoCapture(vidpath)\n success,image = vidcap.read()\n framecount = 0\n while success:\n try:\n success,image = vidcap.read()\n cv2.imwrite(writepath + str(count)+\"frame%d.jpg\" % framecount, image) # save frame as JPEG file \n print('Read a new frame: ', success)\n framecount += 1\n except:\n pass\n count+=1","repo_name":"lucashsg77/Eye-commands","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2434849354","text":"import sys\n\n\nfrom nose.tools import with_setup, raises\n\n\nfrom azure.Kqlmagic.kql_magic import Kqlmagic as Magic\n\n\nip = get_ipython() # pylint:disable=undefined-variable\n\nclass KqlEnv(object):\n # Object constructor\n def __init__(self, connectstr):\n self.connectstr = connectstr\n def query(self, txt):\n return ip.run_line_magic('kql', \"%s %s\" % (self.connectstr, txt))\n\nkql_env = KqlEnv('$TEST_CONNECTION_STR')\n\nbasequery = \"let manycoltbl = view () { datatable(name:string, y1:real, y2:real, name2:string, y3:real) ['r1-txt1', 1.01, 1.02, 'r1-txt2', 1.04, 'r2-txt1', 2.01, 2.02, 'r2-txt2', 2.04, 'r3-txt1', 3.01, 3.02, 'r3-txt2', 3.04] }; \"\n\ndef setup():\n magic = Magic(shell=ip)\n ip.register_magics(magic)\n\ndef teardown():\n pass\n\nclass Harness(object):\n def run_query(self):\n return kql_env.query(self.query)\n\nclass TestOneNum(Harness):\n query = basequery + \"manycoltbl | project y1\"\n \n @with_setup(setup, teardown)\n def test_pie(self):\n results = self.run_query()\n results.guess_pie_columns(xlabel_sep=\"//\")\n assert results.ys[0].is_quantity\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.x == []\n assert results.xlabels == []\n assert results.xlabel == ''\n\n @with_setup(setup, teardown)\n def test_plot(self):\n results = self.run_query()\n results.guess_plot_columns()\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.x == []\n assert results.x.name == ''\n\nclass TestOneStrOneNum(Harness):\n query = basequery + \"manycoltbl | project name, y1\"\n \n @with_setup(setup, teardown)\n def test_pie(self):\n results = self.run_query()\n results.guess_pie_columns(xlabel_sep=\"//\")\n assert results.ys[0].is_quantity\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.xlabels == ['r1-txt1', 'r2-txt1', 'r3-txt1']\n assert results.xlabel == 'name'\n\n @with_setup(setup, teardown)\n def test_plot(self):\n results = self.run_query()\n results.guess_plot_columns()\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.x == []\n\n\nclass TestTwoStrTwoNum(Harness):\n query = basequery + \"manycoltbl | project name2, y3, name, y1\"\n \n @with_setup(setup, teardown)\n def test_pie(self):\n results = self.run_query()\n results.guess_pie_columns(xlabel_sep=\"//\")\n assert results.ys[0].is_quantity\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.xlabels == ['r1-txt2//1.04//r1-txt1',\n 'r2-txt2//2.04//r2-txt1',\n 'r3-txt2//3.04//r3-txt1']\n assert results.xlabel == 'name2, y3, name'\n\n @with_setup(setup, teardown)\n def test_plot(self):\n results = self.run_query()\n results.guess_plot_columns()\n assert results.ys == [[1.01, 2.01, 3.01]]\n assert results.x == [1.04, 2.04, 3.04]\n\n\nclass TestTwoStrThreeNum(Harness):\n query = basequery + \"manycoltbl | project name, y1, name2, y2, y3\"\n \n @with_setup(setup, teardown)\n def test_pie(self):\n results = self.run_query()\n results.guess_pie_columns(xlabel_sep=\"//\")\n assert results.ys[0].is_quantity\n print(f\"1--- {results.ys}\")\n print(f\"2--- {[[1.04, 2.04, 3.04]]}\")\n assert results.ys == [[1.04, 2.04, 3.04]]\n assert results.xlabels == ['r1-txt1//1.01//r1-txt2//1.02',\n 'r2-txt1//2.01//r2-txt2//2.02',\n 'r3-txt1//3.01//r3-txt2//3.02']\n\n @with_setup(setup, teardown)\n def test_plot(self):\n results = self.run_query()\n \n results.guess_plot_columns()\n assert results.ys == [[1.02, 2.02, 3.02], [1.04, 2.04, 3.04]]\n assert results.x == [1.01, 2.01, 3.01]\n 
\n","repo_name":"microsoft/jupyter-Kqlmagic","sub_path":"azure/tests/t_est_column_guesser.py","file_name":"t_est_column_guesser.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"47"}
+{"seq_id":"24302066399","text":"import cgi\nimport os, sys\nlib_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib')\nsys.path.insert(0, lib_path)\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import users\nfrom application import adminmodel as model\nimport webapp2\nfrom application import common\nimport re\nimport logging\n\ntpath = os.path.join(os.path.dirname(__file__), 'static/templates/')\n\nclass Home(webapp2.RequestHandler):\n\tdef get(self):\n\t\tfrom application import adminmodel as model\n\t\timport urllib\n\t\t\n\t\t#model.sourceRecount()\n\t\t\n\t\tpageSize = common.getValidatedParam(self.request, self.response, 'pageSize', 20, 'number')\n\t\toffset = common.getValidatedParam(self.request, self.response, 'offset', 0, 'number')\n\t\tfilters = common.getValidatedParam(self.request, self.response, 'filters', None, 'dict')\n\t\torder = common.getValidatedParam(self.request, self.response, 'order', None, 'list')\n\t\t\n\t\tlogging.info(order)\t\t\n\t\t\n\t\tstoryResults = model.getStories(filters, order, pageSize, offset)\n\t\tsourceResults = model.getSources()\n\t\t\n\t\tsources = []\n\t\tfor s in sourceResults:\n\t\t\tif s.storyCount:\n\t\t\t\twps = s.wordCount / s.storyCount\n\t\t\telse:\n\t\t\t\twps = 'N/A'\n\t\t\tsources.append({\n\t\t\t\t'title': s.title\n\t\t\t\t, 'storyCount' : s.storyCount\n\t\t\t\t, 'wordCount' : s.wordCount\n\t\t\t\t, 'wps': wps\n\t\t\t\t, 'urlSafeKey' : s.key.urlsafe()\n\t\t\t\t, 'urlSafeTitle':urllib.quote_plus(s.title)\n\t\t\t})\n\n\t\tstories = []\n\t\tfor s in storyResults:\n\t\t\tstories.append({\n\t\t\t\t'title' : s.title\n\t\t\t\t, 'creator' : s.creator\n\t\t\t\t, 'score':s.score\n\t\t\t\t, 'publication' : s.firstPub.publication\n\t\t\t\t, 'wordCount' : s.wordCount\n\t\t\t\t, 'urlSafeKey' : s.key.urlsafe()\n\t\t\t})\t\t \n\n\t\t\t \n\t\ttemplate_values = {'stories':stories, \n\t\t\t'sources':sources, \n\t\t\t'filterString':common.getParamString(self.request, self.response, 'filters'),\n\t\t\t'order':order,\n\t\t\t'orderString':common.getParamString(self.request, self.response, 'order')\n\t\t}\n\t\tlogging.info('orderString: {0}'.format(template_values['orderString']))\n\t\tself.response.out.write(template.render(tpath+'admin.html', template_values))\n\nclass BuildRecommendations(webapp2.RequestHandler):\n\tdef get(self):\n\t\tgUser = users.get_current_user()\n\t\tif gUser:\t\t\t\n\t\t\tuser = model.getUser(gUser.user_id(), 'google')\n\t\t\tif not user:\n\t\t\t\thandleError(request, response, error)\n\t\t\tmodel.setStream(user, 'fiction','sci-fi','english')\n\t\t\tmodel.buildRecommendations(0, 100, 0, True, self.response.out)\n\t\t\tself.response.out.write('build recommendations complete')\n\t\telse:\n\t\t\tself.response.out.write('build recommendations failed, no user logged in')\n\nclass ResetDataStore(webapp2.RequestHandler):\n\tdef get(self):\n\t\tmodel.clearDataStore('Rec')\n\t\tmodel.clearDataStore('User')\n\t\tmodel.clearDataStore('StreamNode')\n\t\tmodel.clearDataStore('Story')\n\nclass Preview(webapp2.RequestHandler):\n\tdef get(self):\n\t\turlSafeStoryKey = common.getValidatedParam(self.request, self.response, 'storyKey', None, None)\n\t\t\n\t\tif urlSafeStoryKey:\n\t\t\tstoryKey = model.ndb.Key(urlsafe=urlSafeStoryKey)\n\t\t\tstory = storyKey.get()\n\t\t\tif story:\n\t\t\t\tself.response.out.write(\"Return to Admin Home \")\n\t\t\t\tself.response.out.write(\"Reimport Story \".format(urlSafeStoryKey))\n\t\t\t\tself.response.out.write(\" \")\n\t\t\t\tself.response.out.write(\" 
\")\t\t\t\n\t\t\t\tself.response.out.write(\"Title: {0} \".format(model.encodeString(story.title)))\n\t\t\t\tif story.creator and story.creator[0]:\n\t\t\t\t\tself.response.out.write(\"Author: {0} \".format(story.creator[0]))\t\t\n\t\t\t\tif story.wordCount:\t\t\n\t\t\t\t\tself.response.out.write(\"Word Count: {0} \".format(story.wordCount))\n\t\t\t\tself.response.out.write(\"Publication: {0} \".format(story.firstPub.publication))\n\t\t\t\tif story.firstPub.date:\n\t\t\t\t\tself.response.out.write(\"Publication Date: {0}\".format(story.firstPub.date))\n\t\t\t\tself.response.out.write(\"URL: {0} \".format(story.firstPub.url))\n\t\t\t\tsocial = ['pageViews','facebook','twitter','comments','altScore']\n\t\t\t\tfor s in social:\n\t\t\t\t\tif hasattr(story.firstPub, s) and getattr(story.firstPub, s) != None:\n\t\t\t\t\t\tself.response.out.write('{0}: {1} '.format(s, getattr(story.firstPub,s)))\n\t\t\t\t\n\t\t\t\tif story.text:\n\t\t\t\t\tself.response.out.write(model.encodeString(story.text))\n\t\t\t\tif story.creatorInfo:\n\t\t\t\t\tself.response.out.write(\" {0}\".format(model.encodeString(story.creatorInfo)))\n\t\t\t\tself.response.out.write(\"\")\n\t\t\telse:\n\t\t\t\tself.response.out.write('Invalid storyKey provided')\n\t\telse:\n\t\t\tself.response.out.write('No storyKey provided')\t\n\t\tself.response.out.write(\"Return to Admin Home \")\n\t\t\t\nclass ImportStories(webapp2.RequestHandler):\n\tdef get(self):\n\t\tfrom application import adminmodel as model\n\t\tmodel.setup(self.request, self.response)\n\t\tsourceKey = common.getValidatedParam(self.request, self.response, 'sourceKey', None, None)\n\t\tstoryKey = common.getValidatedParam(self.request, self.response, 'storyKey', None, None)\n\n\t\tif storyKey:\n\t\t\tlogging.info('attempting to reimport story')\n\t\t\tfrom application import adminmodel as model\n\t\t\tmodel.setup(self.request, self.response)\n\t\t\tmodel.reimportStory(storyKey)\n\t\t\tself.response.out.write('Return to Admin Home ')\n\t\telif sourceKey:\n\t\t\tfirst = common.getValidatedParam(self.request, self.response, 'first', None, 'number')\n\t\t\treimport = common.getValidatedParam(self.request, self.response, 'reimport', False, 'bool')\n\t\t\tlast = common.getValidatedParam(self.request, self.response, 'last', 0, 'number')\n\t\t\tmodel.importStories(sourceKey, first, last, reimport)\n\t\t\tself.response.out.write('Return to Admin Home ')\n\t\telse:\n\t\t\tself.response.out.write('No storyKey provided. Could not reimport story requestUri: {0}'.format(self.request.url))\n\t\nclass Recount(webapp2.RequestHandler):\n\tdef get(self):\n\t\tmodel.setup(self.request, self.response)\n\t\tmodel.sourceRecount()\n\nclass ScoreStories(webapp2.RequestHandler):\n\tdef get(self):\n\t\tmodel.setup(self.request, self.response)\n\t\tmodel.scoreStories()\n\nclass UpdateSources(webapp2.RequestHandler):\n\tdef get(self):\n\t\tmodel.updateSources()\n\nclass Delete(webapp2.RequestHandler):\n\tdef get(self):\n\t\turlSafeSourceKey = common.getValidatedParam(self.request, self.response, 'sourceKey',None, None)\n\t\tif urlSafeSourceKey:\n\t\t\tsource = model.ndb.Key(urlsafe=urlSafeSourceKey).get()\n\t\t\tif source:\n\t\t\t\ts = \"\"\n\t\t\t\tself.response.out.write(s)\n\t\t\telse:\n\t\t\t\tresponse.out.write('Invalid source key passed in. ')\n\t\telse:\n\t\t\tself.response.out.write('No source key passed in. 
')\n\t\tself.response.out.write('Return to Admin Home ')\n\t\t\n\tdef post(self):\n\t\turlSafeSourceKey = common.getValidatedParam(self.request, self.response, 'sourceKey',None, None)\n\t\tconfirm = common.getValidatedParam(self.request, self.response, 'confirm', None, None)\n\t\tif confirm and confirm == 'DELETE':\n\t\t\tif urlSafeSourceKey:\n\t\t\t\tsourceKey = model.ndb.Key(urlsafe=urlSafeSourceKey)\n\t\t\t\tsource = sourceKey.get()\n\t\t\t\tif source:\n\t\t\t\t\tmodel.clearDataStore('Story', source.title)\n\t\t\t\t\tsource.storyCount = 0\n\t\t\t\t\tsource.wordCount = 0\n\t\t\t\t\tsource.put()\n\t\t\t\t\tself.response.out.write('Deleted all stories from: {0}'.format(source.title))\n\t\t\t\telse:\n\t\t\t\t\tself.response.out.write('Invalid source key recieved.')\n\t\t\telse:\n\t\t\t\tself.response.out.write('No source key passed in. ')\n\t\telse:\n\t\t\tself.response.out.write('Invalid confirm string. Delete aborted. ')\t\n\t\tself.response.out.write('Return to Admin Home ')\n\napp = webapp2.WSGIApplication([\n\t('/admin',Home),\n\t('/admin/preview', Preview),\n\t('/admin/import',ImportStories),\n#\t('/admin/reset',ResetDataStore),\n\t('/admin/scorestories', ScoreStories),\n\t('/admin/delete', Delete),\t\n\t('/admin/recount',Recount),\n\t('/admin/buildrecs',BuildRecommendations),\n\t('/admin/updatesources', UpdateSources)\n], debug=True)\n\t\t\t\ndef main():\n\tlogging.info('strong again, like me')\n\tapp.run()\n\nif __name__ == \"__main__\":\n\tlogging.info('all monkeys are french')\n\tmain()\n\n\n \t","repo_name":"wordrift/bluewell-proto","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"17398471927","text":"\"\"\"\nThis module contains the Check class check results from previous models against current model results.\n\"\"\"\n\nimport os\nimport pickle\nimport time\nfrom PyRICE.model.pyrice import PyRICE\nfrom PyRICE.model.enumerations import *\n\n\nclass Check:\n \"\"\"\n This class is used to check the current model results against the original model results.\n \"\"\"\n\n def __init__(self, quick=False, save=False):\n \"\"\"\n @param quick: Boolean\n @param save: Boolean\n \"\"\"\n self.quick = quick\n self.start_time = time.time()\n\n if quick:\n self.srs = [0.270]\n self.mius = [2135]\n self.irstps = [0.015]\n else:\n self.srs = [0.270, 0.35]\n self.mius = [2135, 2070]\n self.irstps = [0.015, 0.07]\n\n self.spec = [ModelSpec.STANDARD,\n ModelSpec.Validation_1,\n ModelSpec.Validation_2]\n\n self.wf = [WelfareFunction.UTILITARIAN,\n WelfareFunction.EGALITARIAN,\n WelfareFunction.SUFFICIENTARIAN,\n WelfareFunction.PRIORITARIAN]\n\n self.f_damage = [DamageFunction.NORDHAUS,\n DamageFunction.NEWBOLD,\n DamageFunction.WEITZMAN]\n\n self.dicts = []\n\n if save:\n self.save_my_pickle(file='new_data')\n\n def run_models(self):\n\n \"\"\"\n Create and run several models.\n Return a list of results dictionaries.\n \"\"\"\n\n self.dicts = []\n\n counter = 0\n max_runs = len(self.srs) * len(self.mius) * len(self.irstps) * len(self.spec) * len(self.wf) * \\\n len(self.f_damage)\n\n print_step = int(max_runs / 10)\n\n for spec in self.spec:\n for welfare in self.wf:\n for damage in self.f_damage:\n for sr in self.srs:\n for miu in self.mius:\n for irstp in self.irstps:\n\n counter += 1\n if counter % print_step == 0 and counter != max_runs:\n print(f'Run #{counter}/{max_runs}')\n\n m = PyRICE(model_specification=spec, damage_function=damage, welfare_function=welfare)\n results = m(sr=sr, miu=miu, irstp=irstp)\n\n self.dicts.append(results)\n\n print(f'Run #{counter}/{max_runs}\\n')\n return self.dicts\n\n @staticmethod\n def load_my_pickle(folder='/testdata/', file='original_data'):\n \"\"\"\n @param folder: string\n @param file: string\n @return:\n original_data: pickle\n \"\"\"\n directory = os.getcwd()\n highest_directory = os.path.dirname(directory)\n original_pyrice_directory = highest_directory + \"/verification/pyrice\"\n\n with open(f'{original_pyrice_directory + folder}{file}.pickle', 'rb') as handle:\n original_data = pickle.load(handle)\n\n return original_data\n\n def save_my_pickle(self, folder='/testdata/', file='original_data'):\n \"\"\"\n\n @param folder: string\n @param file: string\n \"\"\"\n directory = os.getcwd()\n highest_directory = os.path.dirname(directory)\n original_pyrice_directory = highest_directory + \"/verification/pyrice\"\n\n results = self.run_models()\n modifier = '_quick' if self.quick else '_slow'\n\n with open(f'{original_pyrice_directory + folder}{file}{modifier}.pickle', 'wb') as handle:\n pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __call__(self):\n \"\"\"\n Checks whether the current model returns the same results as the original model by comparing current results\n with the saved results from the folder testdata.\n \"\"\"\n\n modifier = '_quick' if self.quick else '_slow'\n original_data = self.load_my_pickle(file=f'original_data{modifier}')\n new_data = self.run_models()\n\n run_time = round(time.time() - self.start_time, 2)\n print(f'Run time: {run_time} seconds')\n\n is_identical = original_data == new_data\n\n print(f'\\nOriginal and new results are identical: {is_identical}')\n\n modifier_ = 
\"GOOD JOB! :D\" if is_identical else \"OH, NOOO! :(\"\n\n message = f\"\\n####################################################################################\\n\" \\\n \"####################################################################################\\n\" \\\n \"####################################################################################\\n\" \\\n f\"################################# {modifier_} #####################################\\n\" \\\n \"####################################################################################\\n\" \\\n \"####################################################################################\\n\" \\\n \"####################################################################################\"\n\n print(message)\n","repo_name":"JazminZatarain/Hippo-DAI-Lab","sub_path":"PyRICE/verification/pyrice/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"3557757430","text":"from selenium import webdriver\n\nfrom selenium.webdriver.chrome.options import Options\n\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom lxml import etree\n\nimport pandas as pd\n\nimport numpy as np\n\nimport os \n\nimport time\n\nclass AlibabaBought:\n\n '''\n\n 抓取个人淘宝订单的已购买的订单\n\n '''\n\n url= 'https://www.taobao.com'\n\n mytaobao_xpath= '//*[@id=\"J_SiteNavMytaobao\"]/div[@class=\"site-nav-menu-hd\"]/a'\n\n bought_xpath= '//*[@id=\"bought\"]'\n\n # page_xpath= '//*[@id=\"tp-bought-root\"]/div[19]/div[2]/ul/li[@class=\"pagination-next\"]'\n page_xpath = '//*[@id=\"tp-bought-root\"]/div[@class=\"row-mod__row___1aPep js-actions-row-bottom\"]/div[2]/ul/li[@class=\"pagination-next\"]'\n\n date_list= []\n\n order_list= []\n\n title_list= []\n\n shop_list= []\n\n num_list= []\n\n price_list= []\n\n statecode_list= []\n\n unit_price_list= []\n\n page = 1\n\n def __init__(self):\n\n '''\n\n 实例化的时候自动设置浏览器的参数\n\n '''\n\n chrome_options= Options()\n\n chrome_options.add_argument('--window-size=1500,1366')\n\n self.browser= webdriver.Chrome(chrome_options=chrome_options)\n\n if not os.path.exists(\"./data\"):\n os.mkdir(\"./data\")\n\n def click_case(self, my_xpath):\n\n '''\n\n 点击事件\n\n '''\n\n button= WebDriverWait(self.browser, 20).until(\n\n EC.element_to_be_clickable(\n\n (By.XPATH, my_xpath))\n\n )\n\n button.click()\n\n def click_mytaobao(self):\n\n '''\n\n 打开需要访问的网站,点击我的淘宝\n\n '''\n\n self.browser.get(self.url)\n\n self.click_case(my_xpath=self.mytaobao_xpath)\n\n # 扫码登录\n\n def click_bought(self):\n\n '''\n\n 点击已买到的宝贝\n\n '''\n\n self.click_case(self.bought_xpath)\n\n def click_next_page(self):\n\n '''\n\n 点击下一页\n\n '''\n\n self.click_case(self.page_xpath)\n\n def analysis_web(self,order_list_xpath,order_list):\n\n '''解析网页源代码'''\n\n print(order_list_xpath)\n\n page_taobao_html= self.browser.page_source\n\n my_data = etree.HTML(page_taobao_html).xpath(order_list_xpath)\n\n print(my_data)\n # try:\n order_list.append(my_data[0])\n\n print(my_data[0])\n \n # except:\n # order_list.append(\"\")\n print(\"\")\n\n print('-' * 100)\n\n def make_data_xpath(self):\n\n '''构造订单信息的xpath路径并解析源码保存到列表中'''\n\n i = 4\n\n while True:\n try:\n date_xpath= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[1]/tr/td[1]/label/span[2]/text()' % i\n self.analysis_web(date_xpath,self.date_list)\n except:\n return \n\n try:\n order_xpath= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[1]/tr/td[1]/span/span[3]/text()' % i\n self.analysis_web(order_xpath,self.order_list)\n except:\n self.order_list.append(\"\")\n \n try:\n title_path = '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[2]/tr/td[1]/div/div[2]/p[1]/a/span[2]/text()' % i\n self.analysis_web(title_path,self.title_list)\n except:\n self.title_list.append(\"\")\n\n try:\n shop_path= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[1]/tr/td[2]/span/a/text()' % i\n self.analysis_web(shop_path,self.shop_list)\n except:\n self.shop_list.append(\"\")\n\n try:\n num_path= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[2]/tr/td[3]/div/p/text()' % i\n self.analysis_web(num_path,self.num_list)\n except:\n self.num_list.append(None)\n \n try:\n price_path= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[2]/tr/td[5]/div/div[1]/p/strong/span[2]/text()' % i\n self.analysis_web(price_path,self.price_list)\n except:\n self.price_list.append(None)\n \n try:\n statecode_path= 
'//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[2]/tr/td[6]/div/p/span/text()' % i\n self.analysis_web(statecode_path,self.statecode_list)\n except:\n self.statecode_list.append(\"\")\n\n try:\n unit_price_path= '//*[@id=\"tp-bought-root\"]/div[%s]/div/table/tbody[2]/tr/td[2]/div/p/span[2]/text()' % i\n self.analysis_web(unit_price_path,self.unit_price_list)\n except:\n self.unit_price_list.append(None)\n \n i += 1\n\n def save_date(self):\n\n '''保��到指定路径下面为excel'''\n\n taobao_dic = {\n 'date': self.date_list,\n 'order': self.order_list,\n 'title': self.title_list,\n 'shop': self.shop_list,\n 'num': self.num_list,\n 'unit_price': self.unit_price_list,\n 'price': self.price_list,\n 'statecode': self.statecode_list\n }\n\n print(taobao_dic)\n\n df= pd.DataFrame(taobao_dic)\n\n df.to_excel(f\"./data/page_{self.page}.xlsx\")\n\n \n def clear_lists(self):\n\n self.date_list= []\n\n self.order_list= []\n\n self.title_list= []\n\n self.shop_list= []\n\n self.num_list= []\n\n self.price_list= []\n\n self.statecode_list= []\n\n self.unit_price_list= []\n\n\n def run(self):\n\n self.click_mytaobao()\n\n self.click_bought()\n\n while True:\n\n if self.page <= 6:\n self.click_next_page()\n time.sleep(max(np.random.normal(5, 1), 0))\n self.page += 1\n continue \n\n self.make_data_xpath()\n\n self.save_date()\n\n self.clear_lists()\n\n # is_go_on= int(input(\"继续抓取请输入 1 ;退出请输入 2:\"))\n\n # if is_go_on== 1:\n\n try:\n\n self.click_next_page()\n\n self.page += 1\n\n time.sleep(max(np.random.normal(5, 1), 0))\n\n # elif is_go_on== 2:\n except:\n\n print(\"Close at\", self.page)\n\n self.browser.close()\n\n break\n\n # self.save_date()\n\ndef main():\n\n alibaba_bought = AlibabaBought()\n\n alibaba_bought.run()\n\nif __name__== '__main__':\n\n main()\n","repo_name":"whdxnty/taobaoScrape","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26620929798","text":"import csv\nchocolates = {'Milk Bars': 0, 'Peanut Butter Cubes': 0, 'Manuka Honey Choco': 0, 'Orange Choco': 0, 'Choco Coated Almonds': 0, 'Almond Choco': 0, 'White Choc': 0, \"Baker's Choco Chips\": 0, '99% Dark & Pure': 0, 'Drinking Coco': 0, '85% Dark Bars': 0, 'Fruit & Nut Bars': 0, 'Eclairs': 0, 'After Nines': 0, '50% Dark Bites': 0, 'Caramel Stuffed Bars': 0, 'Raspberry Choco': 0, 'Mint Chip Choco': 0, 'Smooth Sliky Salty': 0, '70% Dark Bites': 0, 'Organic Choco Syrup': 0, 'Spicy Special Slims': 0}\ncountries = set()\n\ndef read_csv(path):\n with open(path, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n header = next(reader)\n data = []\n for row in reader:\n iterable = zip(header, row)\n country_dict = {key: value for key, value in iterable}\n chocolates[country_dict['Product']] += 1 \n countries.add(country_dict['Geography'])\n data.append(country_dict)\n return data,header\n\nif __name__ == '__main__':\n data, header = read_csv('./chocolate-data.csv')\n print(header)\n print(data[0])\n print()\n print(chocolates)\n","repo_name":"carlos-gv/-chocolate_anglo_countries","sub_path":"read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"12045121749","text":"import json \nimport copy \nimport os\nimport argparse\nfrom convert_to_bio import write_bio\n\n\ndef load_json(file):\n with open(file, 'r', encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n\ndef dump_json(file, data, indent=0):\n with open(file, 'w', encoding='utf-8') as f:\n json.dump(data, f, indent=indent, ensure_ascii=False)\n\n\ndef string_to_span(span):\n span = span.split(':')\n start, end = int(span[0]), int(span[1])\n return start, end \n\n\ndef span_to_string(start, end):\n return f\"{start}:{end}\"\n\ndef get_new_span(lens, start, end):\n # turn token index to string index, 'Chale was allegedly chased', 'Chale': '0:0' -> '0:5'\n new_start = sum(lens[:start]) + start\n new_end = new_start + sum(lens[start:end+1]) + (end-start)\n return new_start, new_end\n\ndef get_sent_sep(paragraph):\n \"\"\"\n Obtain the indices of sentence boundry marker '[SEP]'\n Paragraph: list of token-label tuples\n\n return list of indices\n \"\"\"\n seps = []\n\n for i,tok in enumerate(paragraph):\n if tok[0] == '[SEP]':\n seps.append(i)\n seps.append(len(paragraph))\n return seps \n\ndef get_sent_event(sent):\n \"\"\"\n sent: list of tuples -> (token, label) \n\n return a list of events in this sent\n \"\"\"\n toks = [t[0] for t in sent]\n lens = [len(w) for w in toks]\n triggers = [[],[]]\n arguments = []\n labeled = []\n \n left = 0\n right = 0 \n found = False\n \n for i, tup in enumerate(sent):\n if tup[1].startswith('B-'):\n if found:\n labeled.append([left, right])\n else:\n found = True\n left, right = i, i\n elif tup[1].startswith('I-'):\n right = i\n else:\n if found:\n labeled.append([left, right])\n found = False\n if found:\n labeled.append([left, right])\n \n for span in labeled:\n l, r = span\n new_l, new_r = get_new_span(lens, l, r)\n new_span = span_to_string(new_l, new_r)\n \n if 'trigger' in sent[l][1]:\n triggers[0].append(\" \".join(toks[l:r+1]))\n triggers[1].append(new_span)\n else:\n arg_text = \" \".join(toks[l:r+1])\n arg_role = sent[l][1].split('-')[1]\n arguments.append([[arg_text], [new_span], arg_role])\n if len(labeled) > 0:\n event = [{\n 'trigger': triggers,\n 'arguments': arguments\n }]\n else:\n event = []\n return event \n\n# split multiple triggers into multiple events\ndef expand_data(data):\n new_data = []\n \n for sent in data:\n new_sent = {}\n new_sent['sent_id'] = sent['sent_id']\n new_sent['text'] = sent['text']\n new_sent['events'] = []\n \n triggers = sent['events'][0]['trigger']\n \n for i in range(len(triggers[0])):\n event_ = {\n 'trigger': [[triggers[0][i]], [triggers[1][i]]],\n 'arguments': copy.deepcopy(sent['events'][0]['arguments'])\n }\n new_sent['events'].append(event_)\n new_data.append(new_sent)\n return new_data\n\n\nclass TrainExtractor:\n def __init__(self, train_path, lang):\n self.path = train_path \n self.lang = lang\n self.events = []\n \n def read_file(self):\n \"\"\"\n read the conll format training data into tuples of token-label pair\n example: hacked\tB-trigger -> ('hacked', 'B-trigger')\n\n each sample is marked with 'SAMPLE_START\tO'\n each sentence in sample is separated with '[SEP]'\n \"\"\"\n\n with open(self.path, 'r', encoding='utf-8') as f:\n data = f.read()\n \n token_tuples = [[tuple(word.split('\\t')) for word in instance.strip().split('\\n')] for idx,instance in enumerate(data.split('SAMPLE_START\\tO')) if len(instance)>1]\n\n return token_tuples\n \n\n def get_sents(self):\n\n data = self.read_file()\n \n \n for i,paragraph in enumerate(data):\n \n seps = 
get_sent_sep(paragraph)\n \n for j in range(len(seps)):\n \n sent = {}\n sent_id = f\"{self.lang}/{i+1:04d}-{j+1:03d}\"\n sent['sent_id'] = sent_id\n \n if j == 0:\n tok_tuples = paragraph[:seps[j]]\n elif j == (len(seps)-1):\n tok_tuples = paragraph[seps[j-1]+1:]\n else:\n tok_tuples = paragraph[seps[j-1]+1:seps[j]]\n \n sent['text'] = ' '.join([tup[0] for tup in tok_tuples])\n sent['events'] = get_sent_event(tok_tuples)\n \n self.events.append(sent)\n\nclass TestExtractor:\n def __init__(self, test_path, lang):\n self.path = test_path\n self.lang = lang \n self.events = []\n \n def read_file(self):\n \"\"\"\n read the conll format test data into list of tokens\n\n each sample is marked with 'SAMPLE_START'\n each sentence in sample is separated with '[SEP]'\n \"\"\"\n\n with open(self.path, 'r', encoding='utf-8') as f:\n data = f.read() \n\n tokens = [[(word,) for word in instance.strip().split('\\n')] for idx,instance \\\n in enumerate(data.split(\"SAMPLE_START\")) if len(instance)>1]\n return tokens\n\n def get_sents(self):\n\n data = self.read_file()\n \n \n for i,paragraph in enumerate(data):\n \n seps = get_sent_sep(paragraph)\n \n for j in range(len(seps)):\n \n sent = {}\n sent_id = f\"{self.lang}/{i+1:04d}-{j+1:03d}\"\n sent['sent_id'] = sent_id\n \n if j == 0:\n tok_tuples = paragraph[:seps[j]]\n elif j == (len(seps)-1):\n tok_tuples = paragraph[seps[j-1]+1:]\n else:\n tok_tuples = paragraph[seps[j-1]+1:seps[j]]\n \n sent['text'] = ' '.join([tup[0] for tup in tok_tuples])\n sent['events'] = []\n \n self.events.append(sent)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', help='the directory of official shared task datasets')\n parser.add_argument('--output_dir', help='output directory of processed datasets')\n\n args = parser.parse_args()\n\n save_dir_all = f\"{args.output_dir}/case_all\"\n os.makedirs(save_dir_all, exist_ok=True)\n\n save_dir_final = f\"{args.output_dir}/case_final\"\n os.makedirs(save_dir_final, exist_ok=True) \n\n save_dir_all_split = f\"{args.output_dir}/case_all_split\"\n os.makedirs(save_dir_all_split, exist_ok=True)\n\n save_dir_final_split = f\"{args.output_dir}/case_final_split\"\n os.makedirs(save_dir_final_split, exist_ok=True) \n\n split_ids = {\n 'en': \"en/0733-001\",\n 'es': \"es/0030-001\",\n 'pr': \"pr/0030-001\"\n }\n\n # all training data\n train_full = []\n\n # all training data with dev data excluded\n train_all_wo_dev = []\n\n # all dev data\n dev_all = []\n\n # all test data\n test_all = []\n\n\n for lang in ['en', 'es', 'pr']:\n train_extractor = TrainExtractor(f\"{args.data_dir}/train/{lang}-train.txt\", lang)\n train_extractor.get_sents()\n\n dev_split = [i for i,sent in enumerate(train_extractor.events) if sent['sent_id'] == split_ids[lang]][0]\n\n _train_full = train_extractor.events\n _train_wo_dev = train_extractor.events[:dev_split]\n _dev = train_extractor.events[dev_split:]\n\n test_extractor = TestExtractor(f\"{args.data_dir}/test/{lang}-test.txt\", lang)\n test_extractor.get_sents()\n _test = test_extractor.events\n\n # append processed language-specific data\n train_full += _train_full\n train_all_wo_dev += _train_wo_dev\n dev_all += _dev\n\n test_all += _test\n \n # data with split triggers\n train_full_split = expand_data(train_full)\n train_all_split_wo_dev = expand_data(train_all_wo_dev)\n dev_all_split = expand_data(dev_all)\n\n # save data\n # combined triggers\n\n dump_json(f\"{save_dir_all}/train.json\", train_all_wo_dev)\n dump_json(f\"{save_dir_all}/dev.json\", 
dev_all)\n dump_json(f\"{save_dir_all}/test.json\", test_all)\n\n\n dump_json(f\"{save_dir_final}/train.json\", train_full)\n dump_json(f\"{save_dir_final}/test.json\", test_all)\n\n # split triggers\n\n dump_json(f\"{save_dir_all_split}/train.json\", train_all_split_wo_dev)\n dump_json(f\"{save_dir_all_split}/dev.json\", dev_all_split)\n dump_json(f\"{save_dir_all_split}/test.json\", test_all)\n\n dump_json(f\"{save_dir_final_split}/train.json\", train_full_split)\n dump_json(f\"{save_dir_final_split}/test.json\", test_all)\n\n\n # Convert to original bio format for evaluation\n write_bio(f\"{save_dir_all}/train.json\")\n write_bio(f\"{save_dir_all}/dev.json\")\n write_bio(f\"{save_dir_final}/train.json\")\n\n write_bio(f\"{save_dir_all_split}/train.json\")\n write_bio(f\"{save_dir_all_split}/dev.json\")\n write_bio(f\"{save_dir_final_split}/train.json\")\n\n\n\n\n\n\n\n","repo_name":"huiling-y/eventgraph_at_case","sub_path":"preprocess/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
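The offset arithmetic in get_new_span above (character start = lengths of the preceding tokens plus one separating space per gap) is easiest to see on the sentence from its own comment; a runnable check:

```python
# Worked example of the get_new_span arithmetic from preprocess.py.
def get_new_span(lens, start, end):
    # same formula as above: token lengths plus one space per token gap
    new_start = sum(lens[:start]) + start
    new_end = new_start + sum(lens[start:end + 1]) + (end - start)
    return new_start, new_end

text = 'Chale was allegedly chased'
lens = [len(w) for w in text.split()]  # [5, 3, 9, 6]

assert get_new_span(lens, 0, 0) == (0, 5)    # 'Chale'     -> '0:5'
assert get_new_span(lens, 2, 2) == (10, 19)  # 'allegedly' -> '10:19'
assert text[10:19] == 'allegedly'
```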
+{"seq_id":"11977484477","text":"from picongpu import picmi\n\nimport unittest\nimport typeguard\n\nfrom picongpu.pypicongpu import species\nfrom copy import deepcopy\nimport re\nimport logging\n\n\nclass TestPicmiSpecies(unittest.TestCase):\n def setUp(self):\n self.profile_uniform = picmi.UniformDistribution(\n density=42,\n rms_velocity=[1, 1, 1])\n\n self.species_electron = picmi.Species(\n name=\"e\",\n density_scale=3,\n particle_type=\"electron\",\n initial_distribution=self.profile_uniform)\n self.species_nitrogen = picmi.Species(\n name=\"nitrogen\",\n charge_state=+3,\n particle_type=\"N\",\n initial_distribution=self.profile_uniform)\n\n def __helper_get_distributions_with_rms_velocity(self):\n \"\"\"\n helper to get a list of (all) profiles (PICMI distributions) that have\n an rms_velocity attribute.\n\n intended to run tests against this temperature\n \"\"\"\n return [\n # TODO add profiles after implementation\n # picmi.GaussianBunchDistribution(4e10, 4e-15),\n picmi.UniformDistribution(8e24),\n # picmi.AnalyticDistribution(\"x+y+z\"),\n ]\n\n def test_basic(self):\n \"\"\"check that all params are translated\"\"\"\n # check that translation works\n for s in [self.species_electron, self.species_nitrogen]:\n pypic = s.get_as_pypicongpu()\n self.assertEqual(pypic.name, s.name)\n\n def test_mandatory(self):\n \"\"\"mandatory params are enforced with a somewhat reasonable message\"\"\"\n # required: name, particle type\n species_no_name = picmi.Species(particle_type=\"N\")\n species_empty = picmi.Species()\n species_invalid_list = [species_no_name,\n species_empty]\n\n for invalid_species in species_invalid_list:\n with self.assertRaises(AssertionError):\n invalid_species.get_as_pypicongpu()\n\n # (everything else is optional)\n\n def test_mass_charge(self):\n \"\"\"mass & charge are passed through\"\"\"\n picmi_s = picmi.Species(name=\"any\",\n mass=17,\n charge=-4)\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n\n mass_const = pypicongpu_s.get_constant_by_type(species.constant.Mass)\n self.assertEqual(17, mass_const.mass_si)\n\n charge_const = \\\n pypicongpu_s.get_constant_by_type(species.constant.Charge)\n self.assertEqual(-4, charge_const.charge_si)\n\n def test_density_scale(self):\n \"\"\"density scale is correctly transformed\"\"\"\n # simple example\n picmi_s = picmi.Species(name=\"any\",\n density_scale=37.2)\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n\n ratio_const = \\\n pypicongpu_s.get_constant_by_type(species.constant.DensityRatio)\n self.assertAlmostEqual(37.2, ratio_const.ratio)\n\n # no density scale\n picmi_s = picmi.Species(name=\"any\")\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n self.assertTrue(not pypicongpu_s.has_constant_of_type(\n species.constant.DensityRatio))\n\n def test_get_independent_operations(self):\n \"\"\"operations which can be set without external dependencies work\"\"\"\n picmi_s = picmi.Species(name=\"any\", mass=1, charge=2)\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n\n # note: placement is not considered independent (it depends on also\n # having no layout)\n self.assertNotEqual(None,\n picmi_s.get_independent_operations(pypicongpu_s))\n\n def test_get_independent_operations_type(self):\n \"\"\"arg type is checked\"\"\"\n picmi_s = picmi.Species(name=\"any\", mass=1, charge=2)\n for invalid_species in [[], None, picmi_s, \"name\"]:\n with self.assertRaises(typeguard.TypeCheckError):\n picmi_s.get_independent_operations(invalid_species)\n\n def test_get_independent_operations_different_name(self):\n \"\"\"only generate operations 
for pypicongpu species of same name\"\"\"\n picmi_s = picmi.Species(name=\"any\", mass=1, charge=2)\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n\n pypicongpu_s.name = \"different\"\n with self.assertRaisesRegex(AssertionError, \".*name.*\"):\n picmi_s.get_independent_operations(pypicongpu_s)\n\n # same name is okay:\n pypicongpu_s.name = \"any\"\n self.assertNotEqual(None,\n picmi_s.get_independent_operations(pypicongpu_s))\n\n def test_get_independent_operations_ionization_set_bound_electrons(self):\n \"\"\"SetBoundElectrons is properly generated\"\"\"\n picmi_species = picmi.Species(name=\"nitrogen\",\n particle_type=\"N\",\n charge_state=2)\n pypic_species = picmi_species.get_as_pypicongpu()\n\n ops = picmi_species.get_independent_operations(pypic_species)\n ops_types = list(map(lambda op: type(op), ops))\n self.assertEqual(1, ops_types.count(\n species.operation.SetBoundElectrons))\n self.assertEqual(0, ops_types.count(\n species.operation.NoBoundElectrons))\n\n for op in ops:\n if not isinstance(op, species.operation.SetBoundElectrons):\n continue\n\n self.assertEqual(pypic_species, op.species)\n self.assertEqual(5, op.bound_electrons)\n\n def test_get_independent_operations_ionization_no_bound_electrons(self):\n \"\"\"fully ionized ions get NoBoundElectrons\"\"\"\n picmi_species = picmi.Species(name=\"hydrogen\",\n particle_type=\"H\",\n charge_state=1)\n pypic_species = picmi_species.get_as_pypicongpu()\n\n ops = picmi_species.get_independent_operations(pypic_species)\n ops_types = list(map(lambda op: type(op), ops))\n self.assertEqual(1, ops_types.count(\n species.operation.NoBoundElectrons))\n self.assertEqual(0, ops_types.count(\n species.operation.SetBoundElectrons))\n\n for op in ops:\n if not isinstance(op, species.operation.NoBoundElectrons):\n continue\n\n self.assertEqual(pypic_species, op.species)\n\n def test_get_independent_operations_ionization_not_ionizable(self):\n \"\"\"ionization operation is not returned if there is no ionization\"\"\"\n picmi_species = picmi.Species(name=\"hydrogen\",\n particle_type=\"H\",\n picongpu_fully_ionized=True)\n pypic_species = picmi_species.get_as_pypicongpu()\n\n ops = picmi_species.get_independent_operations(pypic_species)\n ops_types = list(map(lambda op: type(op), ops))\n self.assertEqual(0, ops_types.count(\n species.operation.NoBoundElectrons))\n self.assertEqual(0, ops_types.count(\n species.operation.SetBoundElectrons))\n\n def test_get_independent_operations_momentum(self):\n \"\"\"momentum is correctly translated\"\"\"\n for set_drift in [False, True]:\n for set_temperature in [False, True]:\n for dist in \\\n self.__helper_get_distributions_with_rms_velocity():\n if set_temperature:\n dist.rms_velocity = 3 * [42]\n\n if set_drift:\n # note: same velocity, different representations\n if isinstance(dist, picmi.UniformDistribution) or \\\n isinstance(dist, picmi.AnalyticDistribution):\n # v (as is)\n dist.directed_velocity = [41363723.0,\n 8212468.0,\n 68174325.0]\n elif isinstance(dist, picmi.GaussianBunchDistribution):\n # v * gamma\n dist.centroid_velocity = [42926825.65008125,\n 8522810.724577945,\n 70750579.27176853]\n else:\n # fail: unknown distribution type\n assert False, \"unknown distribution type in \" \\\n \"test: {}\".format(type(dist))\n\n picmi_s = picmi.Species(name=\"name\",\n mass=1,\n initial_distribution=dist)\n\n pypicongpu_s = picmi_s.get_as_pypicongpu()\n ops = picmi_s.get_independent_operations(pypicongpu_s)\n\n momentum_ops = list(filter(\n lambda op: isinstance(\n op, 
species.operation.SimpleMomentum),\n ops))\n\n self.assertEqual(1, len(momentum_ops))\n # must pass silently\n momentum_ops[0].check_preconditions()\n self.assertEqual(pypicongpu_s, momentum_ops[0].species)\n\n if set_drift:\n self.assertEqual(\n momentum_ops[0].drift.direction_normalized,\n (0.5159938229615939,\n 0.10244684114313779,\n 0.8504440130927325))\n self.assertAlmostEqual(momentum_ops[0].drift.gamma,\n 1.0377892156874091)\n else:\n self.assertEqual(None, momentum_ops[0].drift)\n\n if set_temperature:\n self.assertAlmostEqual(\n momentum_ops[0].temperature.temperature_kev,\n 1.10100221e+19,\n delta=1e13)\n else:\n self.assertEqual(None, momentum_ops[0].temperature)\n\n def test_temperature_invalid(self):\n \"\"\"check that invalid rms_velocities are not converted\"\"\"\n for dist in self.__helper_get_distributions_with_rms_velocity():\n def get_rms_species(rms_velocity):\n dist_copy = deepcopy(dist)\n dist_copy.rms_velocity = rms_velocity\n new_species = picmi.Species(name=\"name\",\n mass=1,\n initial_distribution=dist_copy)\n return new_species\n\n # all components must be equal\n invalid_rms_vectors = [[0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 2, 3]]\n for invalid_rms_vector in invalid_rms_vectors:\n rms_species = get_rms_species(invalid_rms_vector)\n with self.assertRaisesRegex(Exception, \".*(equal|same).*\"):\n pypicongpu_species = rms_species.get_as_pypicongpu()\n rms_species.get_independent_operations(pypicongpu_species)\n\n def test_from_speciestype(self):\n \"\"\"mass & charge will be derived from species type\"\"\"\n picmi_species = picmi.Species(name=\"nitrogen\", particle_type=\"N\")\n pypic_species = picmi_species.get_as_pypicongpu()\n\n # mass & charge derived\n self.assertTrue(\n pypic_species.has_constant_of_type(species.constant.Mass))\n self.assertTrue(\n pypic_species.has_constant_of_type(species.constant.Charge))\n\n mass_const = pypic_species.get_constant_by_type(species.constant.Mass)\n charge_const = pypic_species.get_constant_by_type(\n species.constant.Charge)\n\n nitrogen = species.util.Element.N\n self.assertAlmostEqual(mass_const.mass_si, nitrogen.get_mass_si())\n self.assertAlmostEqual(charge_const.charge_si,\n nitrogen.get_charge_si())\n\n # element properties are available\n self.assertTrue(\n pypic_species.has_constant_of_type(\n species.constant.ElementProperties))\n\n def test_charge_state_without_element_forbidden(self):\n \"\"\"charge state is not allowed without element name\"\"\"\n with self.assertRaisesRegex(Exception, \".*particle_type.*\"):\n picmi.Species(name=\"abc\",\n charge=1,\n mass=1,\n charge_state=-1).get_as_pypicongpu()\n\n # allowed with particle species\n # (actual charge state is inserted by )\n picmi.Species(name=\"abc\",\n particle_type=\"H\",\n charge_state=+1).get_as_pypicongpu()\n\n def test_has_ionizers(self):\n \"\"\"generated species gets ionizers when appropriate\"\"\"\n # only mass & charge: no ionizers\n no_ionizers_picmi = picmi.Species(name=\"simple\",\n mass=1,\n charge=2)\n self.assertTrue(not no_ionizers_picmi.has_ionizers())\n\n no_ionizers_pypic = no_ionizers_picmi.get_as_pypicongpu()\n self.assertTrue(\n not no_ionizers_pypic.has_constant_of_type(\n species.constant.Ionizers))\n\n # explicit charge state: has ionizers\n explicit_picmi = picmi.Species(name=\"nitrogen\",\n particle_type=\"N\",\n charge_state=0)\n self.assertTrue(explicit_picmi.has_ionizers())\n\n explicit_pypic = explicit_picmi.get_as_pypicongpu()\n self.assertTrue(\n explicit_pypic.has_constant_of_type(species.constant.Ionizers))\n\n # no 
charge state, but (theoretically) ionization levels known (as\n # particle type is given):\n with self.assertLogs(level=logging.WARNING) as implicit_logs:\n with_warn_picmi = picmi.Species(name=\"HELIUM\",\n particle_type=\"He\")\n self.assertTrue(not with_warn_picmi.has_ionizers())\n\n with_warn_pypic = with_warn_picmi.get_as_pypicongpu()\n self.assertTrue(\n not with_warn_pypic.has_constant_of_type(\n species.constant.Ionizers))\n\n self.assertEqual(1, len(implicit_logs.output))\n self.assertTrue(re.match(\n \".*HELIUM.*fully.*ionized.*picongpu_fully_ionized.*\",\n implicit_logs.output[0]))\n\n with self.assertLogs(level=logging.WARNING) as explicit_logs:\n # workaround b/c self.assertNoLogs() is not available yet\n logging.warning(\"TESTWARN\")\n no_warn_picmi = picmi.Species(name=\"HELIUM\",\n particle_type=\"He\",\n picongpu_fully_ionized=True)\n self.assertTrue(not no_warn_picmi.has_ionizers())\n no_warn_pypic = no_warn_picmi.get_as_pypicongpu()\n self.assertTrue(\n not no_warn_pypic.has_constant_of_type(\n species.constant.Ionizers))\n\n self.assertEqual(1, len(explicit_logs.output))\n self.assertTrue(\"TESTWARN\" in explicit_logs.output[0])\n\n def test_fully_ionized_warning_electrons(self):\n \"\"\"electrons will not have the fully ionized warning\"\"\"\n with self.assertLogs(level=logging.WARNING) as explicit_logs:\n # workaround b/c self.assertNoLogs() is not available yet\n logging.warning(\"TESTWARN\")\n no_warn_picmi = picmi.Species(name=\"ELECTRON\",\n particle_type=\"electron\")\n\n self.assertTrue(not no_warn_picmi.has_ionizers())\n no_warn_pypic = no_warn_picmi.get_as_pypicongpu()\n self.assertTrue(\n not no_warn_pypic.has_constant_of_type(\n species.constant.Ionizers))\n\n self.assertEqual(1, len(explicit_logs.output))\n self.assertTrue(\"TESTWARN\" in explicit_logs.output[0])\n\n def test_fully_ionized_charge_state_conflict(self):\n \"\"\"picongpu_fully_ionized may only be used if charge_state is None\"\"\"\n # charge state is not none\n with self.assertRaisesRegex(AssertionError, \".*charge_state.*\"):\n picmi.Species(name=\"x\",\n particle_type=\"H\",\n charge_state=1,\n picongpu_fully_ionized=True).get_as_pypicongpu()\n\n # particle_type is missing\n with self.assertRaisesRegex(AssertionError, \".*particle_type.*\"):\n picmi.Species(name=\"x\",\n mass=3,\n charge=2,\n picongpu_fully_ionized=True).get_as_pypicongpu()\n\n # non-elements may generally not be ionized\n with self.assertRaisesRegex(AssertionError, \".*[Ee]lement.*\"):\n picmi.Species(name=\"x\",\n particle_type=\"electron\",\n picongpu_fully_ionized=False).get_as_pypicongpu()\n\n def test_ionize_non_elements(self):\n \"\"\"non-elements may not have a charge_state\"\"\"\n with self.assertRaisesRegex(Exception, \".*[Ee]lement.*\"):\n picmi.Species(name=\"e\",\n particle_type=\"electron\",\n charge_state=-1).get_as_pypicongpu()\n\n def test_electron_from_particle_type(self):\n \"\"\"electron is correctly constructed from particle_type\"\"\"\n picmi_e = picmi.Species(name=\"e\", particle_type=\"electron\")\n pypic_e = picmi_e.get_as_pypicongpu()\n self.assertTrue(not pypic_e.has_constant_of_type(\n species.constant.Ionizers))\n self.assertTrue(not pypic_e.has_constant_of_type(\n species.constant.ElementProperties))\n\n mass_const = pypic_e.get_constant_by_type(\n species.constant.Mass)\n charge_const = pypic_e.get_constant_by_type(\n species.constant.Charge)\n\n self.assertAlmostEqual(mass_const.mass_si, picmi.constants.m_e)\n self.assertAlmostEqual(charge_const.charge_si, -picmi.constants.q_e)\n\n def 
test_fully_ionized_typesafety(self):\n \"\"\"picongpu_fully_ionized is type safe\"\"\"\n for invalid in [1, \"yes\", [], {}]:\n with self.assertRaises(typeguard.TypeCheckError):\n picmi.Species(name=\"x\",\n picongpu_fully_ionized=invalid)\n\n # works:\n picmi_species = picmi.Species(name=\"x\",\n particle_type=\"He\",\n picongpu_fully_ionized=True)\n\n for invalid in [0, \"no\", [], {}]:\n with self.assertRaises(typeguard.TypeCheckError):\n picmi_species.picongpu_fully_ionized = invalid\n\n # None is allowed as value in general (but not in constructor)\n picmi_species.picongpu_fully_ionized = None\n\n def test_ionization_electron_explicit_types(self):\n \"\"\"explicit electron specification requires a PICMI species\"\"\"\n for invalid in [[], {}, \"electron\"]:\n with self.assertRaises(typeguard.TypeCheckError):\n picmi.Species(name=\"ion\",\n picongpu_ionization_electrons=invalid)\n\n # with correct type works\n electrons = picmi.Species(name=\"electron\", mass=1, charge=2)\n picmi.Species(name=\"ion\", picongpu_ionization_electrons=electrons)\n\n def test_particle_type_invalid(self):\n \"\"\"unknown particle type rejects\"\"\"\n for invalid in [\"\", \"elektron\", \"e\", \"e-\", \"Uux\"]:\n with self.assertRaisesRegex(NameError,\n \".*unkown.*\"):\n picmi.Species(name=\"x\",\n particle_type=invalid).get_as_pypicongpu()\n\n def test_ionization_electrons_attribute_present(self):\n \"\"\"picongpu_ionization_electrons is always present\"\"\"\n self.assertEqual(None,\n picmi.Species(name=\"x\").picongpu_ionization_electrons)\n self.assertEqual(\n None,\n picmi.Species(name=\"x\",\n particle_type=\"H\").picongpu_ionization_electrons)\n\n self.assertEqual(\n None,\n picmi.Species(name=\"x\",\n particle_type=\"H\",\n charge_state=-1).picongpu_ionization_electrons)\n\n def test_ionization_charge_state_too_large(self):\n \"\"\"charge state must be <= number of protons\"\"\"\n with self.assertRaises(AssertionError):\n picmi.Species(name=\"x\",\n particle_type=\"N\",\n charge_state=8).get_as_pypicongpu()\n","repo_name":"ComputationalRadiationPhysics/picongpu","sub_path":"test/python/picongpu/quick/picmi/species.py","file_name":"species.py","file_ext":"py","file_size_in_byte":20849,"program_lang":"python","lang":"en","doc_type":"code","stars":652,"dataset":"github-code","pt":"47"}
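Several tests above log a sentinel 'TESTWARN' and then assert exactly one captured record, as a stand-in for the missing self.assertNoLogs(). On Python 3.10+ unittest provides that assertion directly; a sketch of the simpler form:

```python
# Sketch: on Python >= 3.10 the TESTWARN workaround collapses to assertNoLogs.
import logging
import unittest


class Example(unittest.TestCase):
    def test_no_warning(self):
        with self.assertNoLogs(level=logging.WARNING):
            pass  # code that must not emit warnings goes here
```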
+{"seq_id":"31757638119","text":"# -*- coding: utf-8 -*-\n\n'''memory metrics input module\n'''\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n with_statement,\n)\n\nimport logging\nimport re\n\nfrom metricol.inputs import MetricInput\n\n\nLOG = logging.getLogger(__name__)\n\nUNITS = r'kMGTP'\nUNIT_BASE = 1024\nMETRIC_RE = re.compile(r'([A-Za-z]+):\\s*([0-9]+)(?:\\s*([' + UNITS + r'])B)?')\nMETRICS_MAP = {\n 'MemTotal': 'total',\n 'MemFree': 'free',\n 'Cached': 'cached',\n 'Buffers': 'buffers',\n 'SwapTotal': 'swap_total',\n 'SwapFree': 'swap_free',\n 'Active': 'active',\n 'Inactive': 'inactive',\n 'Unevictable': 'unevictable',\n 'Mlocked': 'mlocked',\n 'Dirty': 'dirty',\n 'Writeback': 'writeback',\n 'AnonPages': 'anon_pages',\n 'Shmem': 'shared',\n 'SReclaimable': 'slab_reclaim',\n 'SUnreclaim': 'slab_unreclaim',\n}\n\n\ndef parse_meminfo(buf):\n '''Parses meminfo output\n '''\n metrics_dc = {}\n for match in METRIC_RE.finditer(buf):\n key, val, unit = match.groups()\n if key not in METRICS_MAP:\n continue\n unit_val = 1\n if unit:\n try:\n unit_val = UNIT_BASE ** (UNITS.index(unit) + 1)\n except IndexError:\n pass\n metrics_dc[key] = int(val) * unit_val\n\n return metrics_dc\n\n\nclass MemInfo(MetricInput):\n '''memory info fetcher / parser class\n '''\n options = ['meminfo', 'prefix']\n\n def __init__(self, section, queue):\n super(MemInfo, self).__init__(section, queue)\n self.data_parser = parse_meminfo\n\n\n def fetch_data(self):\n '''Fetches data from service\n '''\n fpath = self.cfg['meminfo']\n try:\n with open(fpath, 'rb') as fd_obj:\n return str(fd_obj.read(), encoding='utf-8')\n except (IOError, OSError) as exc:\n LOG.warning('%s @ %s', repr(exc), repr(fpath))\n\n return ''\n\n\n def iter_metrics(self, key, val, tstamp):\n yield (\n self.cfg['prefix'] + key, val, MetricInput.METRIC_TYPE_GAUGE, tstamp)\n","repo_name":"soutys/metricol","sub_path":"metricol/inputs/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10954816267","text":"\"\"\"\nThis class defines the relation mapping used to match relations to their constraints in order to complete the last step\nof the application layer matching. It is also responsible for tracking the status of relations, whether they are enabled\nor disabled.\n\"\"\"\n\nfrom client.constant import *\n\n\ndef match_constraint(field: str, value: str, constraints: list):\n for c in constraints:\n if field == c.field:\n if c.f_type == FieldType.DEC:\n for interval in c.value:\n try:\n if interval[0] <= float(value) <= interval[1]:\n return True\n except ValueError:\n return False\n elif c.f_type == FieldType.STR:\n return value in c.value\n return False\n\n\ndef match_packet(packet, mapping_entry):\n if packet.subject in mapping_entry.relations:\n relation = mapping_entry.relations[packet.subject]\n if not relation.enabled:\n return False\n\n # no constraint on this relation\n if not relation.constraints:\n return True\n\n for data in packet.content:\n if len(data) > 1:\n if not match_constraint(data[0], data[1], relation.constraints):\n return False\n else:\n return False\n\n return True\n return False\n\n\nclass Relation:\n def __init__(self, subject: str, constraints: list):\n self.subject = subject\n self.constraints = constraints\n self.enabled = True\n\n\nclass Entry:\n def __init__(self, relation: Relation):\n self.relations = {relation.subject: relation}\n\n def add_relation(self, relation: Relation):\n self.relations[relation.subject] = relation\n\n\nclass RelationMapping:\n def __init__(self):\n self.mapping = {}\n\n def add_relation(self, mark: int, relation: Relation):\n if mark not in self.mapping:\n self.mapping[mark] = Entry(relation)\n else:\n self.mapping[mark].add_relation(relation)\n\n def del_relation(self, mark: int):\n del self.mapping[mark]\n\n def get_relation(self, mark: int):\n return self.mapping[mark]\n\n def enable_relation(self, mark: int, subject: str):\n self.mapping[mark].relations[subject].enabled = True\n\n def disable_relation(self, mark: int, subject: str):\n self.mapping[mark].relations[subject].enabled = False\n\n def decision(self, packet):\n mapping_entry = self.mapping[packet.mark]\n if match_packet(packet, mapping_entry):\n return Policy.ACCEPT\n else:\n return Policy.DROP\n","repo_name":"Riushda/MasterThesisFirewall","sub_path":"src/nfqueue/relation_mapping.py","file_name":"relation_mapping.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14207260299","text":"TEST_INPUT_A = (0, 2, 7, 0)\n\nINPUT_DATA = (11, 11, 13, 7, 0, 15, 5, 5, 4, 4, 1, 1, 7, 1, 15, 11)\n\n\ndef required_redistribution_cycles(redistribution_history):\n return len(redistribution_history) - 1\n\n\ndef redistribution_loop_size(redistribution_history):\n loop_end_value = redistribution_history[-1]\n loop_start_cycle = redistribution_history.index(loop_end_value)\n total_cycles = required_redistribution_cycles(redistribution_history)\n return total_cycles - loop_start_cycle\n\n\ndef redistribute_until_loop(data):\n history = list()\n data = list(data)\n\n while tuple(data) not in history:\n history.append(tuple(data))\n\n largest_bucket = data.index(max(data))\n data = redistribute(data, largest_bucket)\n\n history.append(tuple(data))\n\n return history\n\n\ndef redistribute(data, idx):\n size = len(data)\n blocks = _empty_bucket(data, idx)\n\n for block in range(blocks):\n idx = _next_idx(idx, size)\n data[idx] += 1\n\n return data\n\n\ndef _empty_bucket(data, idx):\n blocks = data[idx]\n data[idx] = 0\n return blocks\n\n\ndef _next_idx(idx, size):\n idx += 1\n return 0 if idx == size else idx\n\n\ndef main():\n test()\n\n redistribution_history = redistribute_until_loop(INPUT_DATA)\n\n part_a = required_redistribution_cycles(redistribution_history)\n print('part a: {}'.format(part_a))\n\n part_b = redistribution_loop_size(redistribution_history)\n print('part b: {}'.format(part_b))\n\n\ndef test():\n test_history = redistribute_until_loop(TEST_INPUT_A)\n cycles = required_redistribution_cycles(test_history)\n loop_size = redistribution_loop_size(test_history)\n assert cycles == 5, cycles\n assert loop_size == 4, loop_size\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"alexanderson/advent-of-code","sub_path":"2017/aoc_06.py","file_name":"aoc_06.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"72903007503","text":"from builtins import range\nimport os, re\nfrom EMAN2 import *\nimport numpy as np\nfrom EMAN2_utils import natural_sort\n\ndef main():\n\tprogname = os.path.basename(sys.argv[0])\n\tusage = \"\"\"prog [options] --stackname myfile.hdf ...\n\tThis program will combine many image files into a single output file. \n\t\n\tIf the output name has a \".lst\" extension:\n\tthe output is a formatted text file, one line per image, describing the file containing the actual\n\timage data in a searchable form. .lst files can be used as if they contained actual images in any\n\tEMAN2 programs.\n\t\n\tIf the output is a normal image file (.hdf, .spi, etc.) then the images will be copied into the\n\toutput file sequentially in the order provided on the command-line. Some file formats will not\n\tsupport multiple images, or multiple volumes. Appropriate errors will be raised in these cases.\n\tHDF is the only format supporting full metadata retention for stacks of images or volumes.\n\t\n\tThe output file will be emptied and overwritten!\n\t\"\"\"\n\n\tparser = EMArgumentParser(usage=usage,version=EMANVERSION)\n\n\tparser.add_pos_argument(name=\"stack_files\",help=\"List of images to be stacked. Selecting a folder to use all images inside.\", default=\"micrographs\", guitype='filebox', browser=\"EMParticlesEditTable(withmodal=True,multiselect=True)\", row=0, col=0,rowspan=1, colspan=3, nosharedb=True,mode=\"default\")\n\tparser.add_pos_argument(name=\"tilt_images\",help=\"List of images to be stacked. Input order will determine the order of images in output tiltseries.\", default=\"\", guitype='filebox', browser=\"EMBrowserWidget(withmodal=True)\", row=0, col=0,rowspan=1, colspan=3, nosharedb=True,mode=\"tomo\")\n\tparser.add_argument(\"--output\",type=str,help=\"Name of the output stack to build (Extension will be .hdf unless specified). Note, all tiltseries will be stored in the 'tiltseries' directory.\", default=None, guitype='strbox',row=2, col=0, rowspan=1, colspan=1, mode=\"default,tomo\")\n\tparser.add_argument(\"--tilts\",action=\"store_true\",default=False,help=\"Write results to 'tiltseries' directory in current project.\", guitype='boolbox',row=4, col=0, rowspan=1, colspan=1,mode=\"tomo[True]\")\n\tparser.add_argument(\"--guess\",action=\"store_true\",default=False,help=\"Guess how to split micrographs into tilt series and the order of images in each tilt series from file names. Tilt angles must be incuded in file names. 
May or may not work depending on the file name format...\", guitype='boolbox',row=4, col=1, rowspan=1, colspan=1,mode=\"tomo[False]\")\t\n\tparser.add_argument(\"--guesscol\", type=int, help=\"column to separate tilt series if the guess mode fails\",default=-1)\n\tparser.add_argument(\"--mdoc\", type=str, help=\"Read info from mdoc files\",default=None)\n\n\t#parser.add_argument(\"--rawtlt\",type=str,help=\"Name of tilt angles text file.\\nNote, angles must correspond to stack file names in alphabetical/numerical order.\", default=\"\", guitype='filebox', browser=\"EMBrowserWidget(withmodal=True,multiselect=True)\",row=3, col=0, rowspan=1, colspan=1, mode=\"tomo\")\n\tparser.add_argument(\"--ppid\", type=int, help=\"Set the PID of the parent process, used for cross platform PPID\",default=-1)\n\tparser.add_argument(\"--verbose\", \"-v\", dest=\"verbose\", action=\"store\", metavar=\"n\", type=int, help=\"verbose level [0-9], higher number means higher level of verboseness\",default=1)\n\tparser.add_argument(\"--tltang\",type=str,help=\"a text file of tilt angles; will sort the images in each stack accordingly\", default=None)\n\n\t(options, args) = parser.parse_args()\n\n\tif options.output==None:\n\t\tif options.guess or options.mdoc:\n\t\t\tpass\n\t\telse:\n\t\t\tprint(\"--output is required (output file)\")\n\t\t\tsys.exit(1)\n\n\tif len(args)==1 and os.path.isdir(args[0]):\n\t\tprint(\"input is a directory. reading all mrc/mrcs/hdf files in it...\")\n\t\tpath=args[0]\n\t\targs=[]\n\t\text=[\"mrc\", \"mrcs\", \"hdf\"]\n\t\tfor f in os.listdir(path):\n\t\t\tfor e in ext:\n\t\t\t\tif f.endswith(e):\n\t\t\t\t\targs.append(os.path.join(path, f))\n\t\tprint(\"found {} files\".format(len(args)))\n\t\t\n\tif options.tilts:\n\t\t\t\n\t\ttry:\n\t\t\tos.mkdir(\"tiltseries\")\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\tif options.guess:\t\n\n\t\t\tlst=[]\n\t\t\tlstpos=[]\n\t\t\tfor ag in args:\n\t\t\t\tl=[]\n\t\t\t\ts0=\"\"\n\t\t\t\tp=[0]\n\t\t\t\ta=ag.replace(\".\", \"_\")\n\t\t\t\tfor i,c in enumerate(a[:-1]):\n\t\t\t\t\tif c.isdigit() or (s0==\"\" and c=='-' and a[i+1].isdigit()):\n\t\t\t\t\t\ts0+=c\n\t\t\t\t\telif len(s0)>0:\n\t\t\t\t\t\tl.append(int(s0))\n\t\t\t\t\t\tp.append(i)\n\t\t\t\t\t\ts0=\"\"\n\t\t\t\tlst.append(l)\n\t\t\t\tlstpos.append(p)\n\t\t\t\n\t\t\tl0=len(lst[0])\n\t\t\tfor i,l in enumerate(lst):\n\t\t\t\tif len(l)!=l0:\n\t\t\t\t\tprint(\"File name mismatch detected\")\n\t\t\t\t\tprint(\"{}\\n\\t-> {}\".format(args[0], lst[0]))\n\t\t\t\t\tprint(\"{}\\n\\t-> {}\".format(args[i], l))\n\t\t\t\t\tbreak\n\t\t\ttry:\n\t\t\t\tlst=np.array(lst, dtype=float)\n\t\t\texcept:\n\t\t\t\tprint(\"Something is wrong in filename formatting. 
exit.\")\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tprint(\"File name of the first input:\")\n\t\t\tprint(\"\\t{}\".format(args[0]))\n\t\t\tprint(\"{} Columns\".format(len(lst[0])))\n\t\t\tprint(\"\\t\"+', '.join(lst[0].astype(int).astype(str)))\n\t\t\tdt=[]\n\t\t\tfor i in range(len(lst[0])):\n\t\t\t\tmn,mx=np.min(lst[:,i]), np.max(lst[:,i])\n\t\t\t\t#print(\"{}: range from {} to {}\".format(i, mn, mx))\n\t\t\t\tdt.append(abs(mn+60)+abs(mx-60))\n\t\t\t\n\t\t\tic=np.argmin(dt)\n\t\t\tprint(\"Guess column {} is for tilt angles,\\n\\tranging from {:.1f} to {:.1f}.\".format(ic, np.min(lst[:,ic]), np.max(lst[:,ic])))\n\t\t\t\n\t\t\t\n\t\t\tc=lst[:, ic]\n\t\t\tif len(c)>len(np.unique(c)):\n\t\t\t\tprint(\"Multiple tilt series exist...\")\n\t\t\t\tif options.guesscol<0:\n\t\t\t\t\tit=np.where(np.std(lst,axis=0)>0)[0][0]\n\t\t\t\t\tprint(\"Guess column {} separates different tilt series\".format(it))\n\t\t\t\telse:\n\t\t\t\t\tit=options.guesscol\n\t\t\t\t\tprint(\"Separates different tilt series using column {} \".format(it))\n\t\t\t\n\t\t\t\tfid=sorted(np.unique(lst[:,it]))\n\t\t\t\tprint(\"\\t{} files, from {:.0f} to {:.0f}.\".format(len(fid), np.min(lst[:,it]), np.max(lst[:,it])))\n\t\t\t\t\n\t\t\t\ttlts=[np.where(lst[:,it]==t)[0] for t in fid]\n\t\t\telse:\n\t\t\t\ttlts=[np.arange(len(lst), dtype=int)]\n\t\t\t\tit=0\n\t\t\t\n\t\t\tfor tid in tlts:\n\t\t\t\tl=lst[tid]\n\t\t\t\t#print(l[:,ic])\n\t\t\t\tif len(l)>len(np.unique(l[:,ic])):\n\t\t\t\t\tprint(\" duplicated tilt images exist...\")\n\t\t\t\t\taid=np.argsort(l[:,ic-1])\n\t\t\t\t\ttid2=[]\n\t\t\t\t\taid2=[]\n\t\t\t\t\tfor ii in aid:\n\t\t\t\t\t\tif l[ii, ic] not in aid2:\n\t\t\t\t\t\t\taid2.append(l[ii, ic])\n\t\t\t\t\t\t\ttid2.append(tid[ii])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\taid2[-1]=l[ii,ic]\n\t\t\t\t\t\t\ttid2[-1]=tid[ii]\n\t\t\t\t\ttid=np.array(tid2, dtype=int)\n\t\t\t\t\tprint(\" keeping {} out of {} images\".format(len(tid), len(l)))\n\t\t\t\t\tl=lst[tid]\n\t\t\t\t\t\n\t\t\t\taid=np.argsort(l[:,ic])\n\t\t\t\tfnames=[args[i] for i in tid[aid]]\n\t\t\t\tp=lstpos[tid[0]][it+1]\n\t\t\t\tprefix=fnames[0][fnames[0].rfind('/')+1:p]\n\t\t\t\tprefix=prefix.replace(\"__\", \"_\")\n\t\t\t\tprefix=prefix.replace(\".\", \"_\")\n\t\t\t\t\n\t\t\t\tlstname=os.path.join(\"tiltseries\", prefix+'.lst')\n\t\t\t\t\n\t\t\t\tprint(\"{} : {} images -> {}\".format(prefix, len(fnames), lstname))\n\t\t\t\tif options.tltang:\n\t\t\t\t\tif os.path.isdir(options.tltang):\n\t\t\t\t\t\ttnames=os.listdir(options.tltang)\n\t\t\t\t\t\tts=[t[:t.rfind('.')] for t in tnames]\n\t\t\t\t\t\tts=[k for k,t in enumerate(ts) if t in prefix]\n\t\t\t\t\t\tif len(ts)==0:\n\t\t\t\t\t\t\tprint(\"cannot fine tilt file\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif len(ts)>1:\n\t\t\t\t\t\t\tprint(\"multiple matching files exist\")\n\t\t\t\t\t\t\tprint([tnames[k] for k in ts])\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\t\t\t\t\ttname=tnames[ts[0]]\n\t\t\t\t\t\ttname=\"{}/{}\".format(options.tltang, tname)\n\t\t\t\t\t\tprint(\"using tlt file\",tname)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttname=options.tltang\n\t\t\t\t\t\t\n\t\t\t\t\tang=np.loadtxt(tname)\n\t\t\t\t\tif len(ang)==len(fnames):\n\t\t\t\t\t\tprint(\"Sorting by tilt angle file\")\n\t\t\t\t\t\tsrt=np.argsort(ang)\n\t\t\t\t\t\tfnames=[fnames[i] for i in srt]\n\t\t\t\t\t\tang=np.sort(ang)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"tilt file length mismatch\",len(ang), len(fnames))\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tlout=[{\"src\":fm,\"idx\":0} for fm in fnames]\n\t\t\t\tif options.tltang:\n\t\t\t\t\tfor li in 
range(len(lout)):\n\t\t\t\t\t\tlout[li][\"tilt_angle\"]=ang[li]\n\t\t\t\t\n\t\t\t\tsave_lst_params(lout, lstname)\n\t\t\t\t\n\n\t\telif options.mdoc:\n\t\t\tprint(\"Parsing mdoc files\")\n\t\t\tif os.path.isdir(options.mdoc):\n\t\t\t\tprint(\"input is a directory. reading all mdoc files in it...\")\n\t\t\t\tmdoc=[os.path.join(options.mdoc, f) for f in os.listdir(options.mdoc) if f.endswith(\".mdoc\")]\n\t\t\telse:\n\t\t\t\tmdoc=[options.mdoc]\n\t\t\t\t\n\t\t\tfor md in mdoc:\n\t\t\t\tprint(md)\n\t\t\t\tf=open(md, 'r')\n\t\t\t\tlines=f.readlines()\n\t\t\t\tfnames=[l for l in lines if l.startswith(\"SubFramePath\")]\n\t\t\t\tang=[l for l in lines if l.startswith(\"TiltAngle\")]\n\t\t\t\tang=[float(l.split('=')[-1]) for l in ang]\n\t\t\t\tsrt=np.argsort(ang)\n\t\t\t\tang=np.sort(ang)\n\t\t\t\tlst=[]\n\t\t\t\tfor l in fnames:\n\t\t\t\t\tp0=max(l.rfind('/'), l.rfind('\\\\'))+1\n\t\t\t\t\tp1=l.rfind('.')\n\t\t\t\t\tl=l[p0:p1]\n\t\t\t\t\tl1=l.replace('.', '_')\n\t\t\t\t\tl2=l[:l.rfind('_')]\n\t\t\t\t\tmatch=[a for a in args if l in a]\n\t\t\t\t\tmatch+=[a for a in args if l1 in a]\n\t\t\t\t\tmatch+=[a for a in args if l2 in a]\n\t\t\t\t\tif len(match)==0:\n\t\t\t\t\t\tprint(\"error: image file for {} does not exist\".format(l))\n\t\t\t\t\t\tbreak\n\t\t\t\t\tif len(match)>1:\n\t\t\t\t\t\tprint(\"error: multiple images for {} exist\".format(l))\n\t\t\t\t\t\tfor a in match:\n\t\t\t\t\t\t\tprint('\\t',a)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tlst.append({\"src\":match[0],\"idx\":0})\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tlst=[lst[i] for i in srt]\n\t\t\t\t\tfor i,l in enumerate(lst):\n\t\t\t\t\t\tl[\"tilt_angle\"]=ang[i]\n\t\t\t\t\ttname=md[md.rfind('/')+1:]\n\t\t\t\t\ttname=tname[:tname.find('.')]+\".lst\"\n\t\t\t\t\ttname=os.path.join(\"tiltseries\", tname)\n\t\t\t\t\tprint(f\"{len(lst)} images -> {tname}\")\n\t\t\t\t\tsave_lst_params(lst, tname)\n\t\t\t\t\n\t\t\t\tf.close()\n\t\t\t\n\t\telse:\n\t\t\tstdir = os.path.join(\".\",\"tiltseries\")\n\t\t\toptions.output = \"{}/{}\".format(stdir,options.output)\n\n\t\t\tif options.output.split(\".\")[-1] not in [\"hdf\",\"mrc\",\"mrcs\"]:\n\t\t\t\toptions.output = options.output + \".hdf\"\n\n\t\t\t# remove existing output file\n\t\t\tif os.path.exists(options.output) :\n\t\t\t\tprint(\"The file {} already exists.\".format(options.output))\n\t\t\t\tprint(\"Please move, rename, or remove this file to generate an alternate version with this program.\")\n\t\t\t\tsys.exit(1)\n\n\t\t\t\n\t\t\tfor n,arg in enumerate(args):\n\t\t\t\timg = EMData(arg)\n\t\t\t\timg.write_image(options.output,n)\n\t\t\t\n\t\t\t\n\t\t# if options.rawtlt:\n\t\t# \ttry:\n\t\t# \t\tangles = np.loadtxt(options.rawtlt)\n\t\t# \texcept:\n\t\t# \t\tprint(\"Error: Could not read tilt angles from {}\".format(options.rawtlt))\n\t\t# \t\tsys.exit(1)\n\t\t# \tif len(angles) != len(args):\n\t\t# \t\tprint(\"Error: There are not enough tilt angles in this tilt angles file.\")\n\t\t# \t\tsys.exit(1)\n\n\t\t# tlt_assoc = {}\n\t\t# for i,arg in enumerate(args):\n\t\t# \tif options.rawtlt: tlt_assoc[angles[i]] = arg\n\t\t# \telse:\n\t\t# \t\tdb=js_open_dict(info_name(arg,nodir=True))\n\t\t# \t\tang = float(db[\"tilt_angle\"])\n\t\t# \t\ttlt_assoc[ang] = arg\n\t\t# \t\tdb.close()\n\n\t\t#ordered_angles = sorted([float(a) for a in tlt_assoc.keys()])\n\t\t#sorted_args = [tlt_assoc[a] for a in ordered_angles] # order args according to tilt angle parameter\n\n\t\t#series_db=js_open_dict(info_name(options.output,nodir=True))\n\n\t\t#series_db[\"tilt_angles\"] = ordered_angles\n\n\t\t#for 
n,(angle,arg) in enumerate(zip(ordered_angles,sorted_args)):\n\n\t\t\t#series_db[angle] = arg\n\n\t\t\t#nimg = EMUtil.get_image_count(arg) # number of images in each input file as it is processed\n\n\t\t\t# if options.verbose:\n\t\t\t# \tif nimg==1: print(arg)\n\t\t\t# \telse: print(arg,nimg)\n\n\t\t\t#for i in xrange(nimg):\n\n\t\t\t#img=EMData(arg,0)\n\t\t\t#img[\"tilt_angle\"] = angle\n\n\t\t\t# if os.path.isfile(info_name(arg,nodir=True)):\n\t\t\t# \tdb=js_open_dict(info_name(arg,nodir=True))\n\t\t\t# \ttry: # this data may already be present\n\t\t\t# \t\timg[\"SerialEM.tilt_angle\"] = db[\"tilt_angle\"]\n\t\t\t# \t\timg[\"SerialEM.intensity\"] = db[\"intensity\"]\n\t\t\t# \t\timg[\"SerialEM.exposure_time\"] = db[\"exposure_time\"]\n\t\t\t# \t\timg[\"SerialEM.exposure_dose\"] = db[\"exposure_dose\"]\n\t\t\t# \t\timg[\"SerialEM.sub_frame_count\"] = db[\"sub_frame_count\"]\n\t\t\t# \t\timg[\"SerialEM.prior_record_dose\"] = db[\"prior_record_dose\"]\n\t\t\t# \t\timg[\"SerialEM.frames_per_second\"] = db[\"frames_per_second\"]\n\t\t\t# \texcept: pass\n\t\t\t# \tdb.close()\n\n\t\t\t#img.write_image(options.output,n)\n\n\t\t#series_db.close()\n\n\telse:\n\n\t\t# remove existing output file\n\t\tif os.path.exists(options.output) :\n\t\t\ttry: os.unlink(options.output)\n\t\t\texcept:\n\t\t\t\tprint(\"ERROR: Unable to remove \",options.output,\". Cannot proceed\")\n\t\t\t\tsys.exit(1)\n\n\t\t# if output is LSX format, we handle it differently, with a specific object for these files\n\t\tif options.output[-4:].lower()==\".lst\" :\n\t\t\toutfile=LSXFile(options.output)\n\t\telse: outfile=None\n\n\t\tn=0\t\t# number of images in output file\n\t\tfor infile in args:\n\t\t\tnimg = EMUtil.get_image_count(infile)\t\t# number of images in each input file as it is processed\n\n\t\t\tif options.verbose :\n\t\t\t\tif nimg==1 : print(infile)\n\t\t\t\telse : print(infile,nimg)\n\n\t\t\tfor i in range(nimg):\n\t\t\t\tif outfile!=None:\n\t\t\t\t\toutfile.write(n,i,infile)\n\t\t\t\telse:\n\t\t\t\t\timg=EMData(infile,i)\n\t\t\t\t\timg.write_image(options.output,n)\n\t\t\t\tn+=1\n\n\t\tif options.verbose : print(n,\" total images written to \",options.output)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"cryoem/eman2","sub_path":"programs/e2buildstacks.py","file_name":"e2buildstacks.py","file_ext":"py","file_size_in_byte":12613,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"47"}
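The --guess branch above works by pulling every run of digits (with an optional leading minus sign) out of each file name, then treating the column whose range best fits roughly -60 to +60 degrees as the tilt angle. The extraction loop, isolated on a hypothetical SerialEM-style name:

```python
# Isolated sketch of the digit-run extraction behind --guess; the file name
# below is a hypothetical example, not taken from a real dataset.
def digit_groups(name):
    """Return the integer runs in a file name, mirroring the --guess loop."""
    a = name.replace(".", "_")
    groups, s0 = [], ""
    for i, c in enumerate(a[:-1]):
        if c.isdigit() or (s0 == "" and c == '-' and a[i + 1].isdigit()):
            s0 += c
        elif len(s0) > 0:
            groups.append(int(s0))
            s0 = ""
    return groups

print(digit_groups("TS_01_-60.0.mrc"))  # [1, -60, 0]: column 1 is the angle
```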
+{"seq_id":"3115755459","text":"import os\nimport math\n\nfrom rootpy.plotting import Hist, Hist2D\nfrom rootpy.io import root_open\nfrom rootpy.stats import histfactory\nfrom rootpy.utils.path import mkdir_p\n\nfrom statstools.histfactory import (\n to_uniform_binning, apply_remove_window, is_signal)\n\nfrom . import log; log = log[__name__]\nfrom . import CONST_PARAMS, CACHE_DIR, MMC_MASS, POI\nfrom .categories import CATEGORIES\nfrom .plotting import hist_scores\n\nimport pickle\nimport os\n\n\ndef write_workspaces(path, prefix, year_mass_category_channel,\n controls=None,\n silence=False):\n log.info(\"writing workspaces ...\")\n if controls is None:\n controls = []\n if not os.path.exists(path):\n mkdir_p(path)\n for year, mass_category_channel in year_mass_category_channel.items():\n # write workspaces for each year\n for mass, category_channel in mass_category_channel.items():\n if isinstance(controls, dict):\n if isinstance(controls[year], dict):\n mass_controls = controls[year][mass].values()\n else:\n mass_controls = controls[year]\n else:\n mass_controls = controls\n channels = []\n # make workspace for each category\n # include the control region in each\n for category, channel in category_channel.items():\n name = \"{0}_{1}_{2}_{3}\".format(\n prefix, year % 1000, category, mass)\n log.info(\"writing {0} ...\".format(name))\n # make workspace\n measurement = histfactory.make_measurement(\n name, [channel] + mass_controls,\n POI=POI,\n const_params=CONST_PARAMS)\n workspace = histfactory.make_workspace(measurement, name=name,\n silence=silence)\n with root_open(os.path.join(path, '{0}.root'.format(name)),\n 'recreate') as workspace_file:\n workspace.Write()\n # mu=1 for Asimov data\n #measurement.SetParamValue('SigXsecOverSM', 1)\n histfactory.write_measurement(measurement,\n root_file=workspace_file,\n xml_path=os.path.join(path, name),\n silence=silence)\n channels.append(channel)\n # make combined workspace\n name = \"{0}_{1}_combination_{2}\".format(prefix, year % 1000, mass)\n log.info(\"writing {0} ...\".format(name))\n measurement = histfactory.make_measurement(\n name, channels + mass_controls,\n POI=POI,\n const_params=CONST_PARAMS)\n workspace = histfactory.make_workspace(measurement, name=name,\n silence=silence)\n with root_open(os.path.join(path, '{0}.root'.format(name)),\n 'recreate') as workspace_file:\n workspace.Write()\n # mu=1 for Asimov data\n #measurement.SetParamValue('SigXsecOverSM', 1)\n histfactory.write_measurement(measurement,\n root_file=workspace_file,\n xml_path=os.path.join(path, name),\n silence=silence)\n # write combined workspaces over all years\n years = year_mass_category_channel.keys()\n if len(years) == 1:\n return\n masses = year_mass_category_channel[years[0]].keys()\n categories = year_mass_category_channel[years[0]][masses[0]].keys()\n for mass in masses:\n if isinstance(controls, dict):\n if isinstance(controls[year], dict):\n mass_controls = [control for year in years\n for control in controls[year][mass].values()]\n else:\n mass_controls = [control for year in years\n for control in controls[year]]\n else:\n mass_controls = controls\n channels = []\n # make workspace for each category\n # include the control region in each\n # TODO: categories might be different across years\n \"\"\"\n for category in categories:\n cat_channels = [year_mass_category_channel[year][mass][category]\n for year in years]\n name = \"{0}_full_{1}_{2}\".format(\n prefix, category, mass)\n log.info(\"writing {0} ...\".format(name))\n # make workspace\n 
measurement = histfactory.make_measurement(\n name, cat_channels + mass_controls,\n POI=POI,\n const_params=CONST_PARAMS)\n workspace = histfactory.make_workspace(measurement, name=name,\n silence=silence)\n with root_open(os.path.join(path, '{0}.root'.format(name)),\n 'recreate') as workspace_file:\n workspace.Write()\n # mu=1 for Asimov data\n #measurement.SetParamValue('SigXsecOverSM', 1)\n histfactory.write_measurement(measurement,\n root_file=workspace_file,\n xml_path=os.path.join(path, name),\n silence=silence)\n channels.extend(cat_channels)\n \"\"\"\n channels = [chan for year in years\n for chan in year_mass_category_channel[year][mass].values()]\n # make combined workspace\n name = \"{0}_full_combination_{1}\".format(prefix, mass)\n log.info(\"writing {0} ...\".format(name))\n measurement = histfactory.make_measurement(\n name, channels + mass_controls,\n POI=POI,\n const_params=CONST_PARAMS)\n workspace = histfactory.make_workspace(measurement, name=name,\n silence=silence)\n with root_open(os.path.join(path, '{0}.root'.format(name)),\n 'recreate') as workspace_file:\n workspace.Write()\n # mu=1 for Asimov data\n #measurement.SetParamValue('SigXsecOverSM', 1)\n histfactory.write_measurement(measurement,\n root_file=workspace_file,\n xml_path=os.path.join(path, name),\n silence=silence)\n\n\ndef mva_workspace(analysis, categories, masses,\n clf_mass=None,\n clf_bins='optimal',\n clf_swap=False,\n unblind=False,\n systematics=False,\n cuts=None):\n hist_template = Hist(5, 0, 1.5, type='D')\n controls = analysis.make_var_channels(\n hist_template, 'dEta_tau1_tau2',\n CATEGORIES['mva_workspace_controls'],\n analysis.target_region,\n include_signal=True, masses=masses,\n systematics=systematics)\n mass_category_channel = {}\n for category in analysis.iter_categories(categories):\n for mass in masses:\n clf = analysis.get_clf(category, load=True,\n mass=clf_mass or mass,\n transform=True,\n swap=clf_swap)\n if isinstance(clf_bins, basestring):\n if clf_bins == 'optimal':\n # get the binning (see the optimize-binning script)\n bins = clf.binning(analysis.year, overflow=1E5)\n log.info(\"binning: {0}\".format(str(bins)))\n else:\n bins = int(clf_bins)\n else:\n bins = clf_bins\n # construct a \"channel\" for each mass point\n scores, channel = analysis.clf_channels(\n clf, category,\n region=analysis.target_region,\n bins=bins,\n mass=mass,\n mode='workspace',\n systematics=systematics,\n cuts=cuts,\n unblind=unblind or 2,\n hybrid_data=not unblind,\n uniform=True, mva=True)\n if mass not in mass_category_channel:\n mass_category_channel[mass] = {}\n mass_category_channel[mass][category.name] = channel\n return mass_category_channel, controls\n\n\ndef cuts_workspace(analysis, categories, masses,\n unblind=False,\n systematics=False,\n cuts=None,\n sideband=False):\n hybrid_data = None if unblind else {MMC_MASS:(100., 150.)}\n channels = {}\n for category in analysis.iter_categories(categories):\n binning = category.limitbins\n if isinstance(binning, dict):\n binning = binning[analysis.year]\n hist_template = Hist(binning, type='D')\n for mass in masses:\n channel = analysis.get_channel_array(\n {MMC_MASS: hist_template},\n category=category,\n region=analysis.target_region,\n cuts=cuts,\n include_signal=True,\n mass=mass,\n mode='workspace',\n systematics=systematics,\n hybrid_data=hybrid_data,\n uniform=False)[MMC_MASS]\n if sideband:\n # remove the signal window\n remove_window = (100, 150)\n channel.data.hist = apply_remove_window(\n channel.data.hist, remove_window)\n for s in 
channel.samples:\n s.hist = apply_remove_window(\n s.hist, remove_window)\n for histosys in s.histo_sys:\n histosys.high = apply_remove_window(\n histosys.high, remove_window)\n histosys.low = apply_remove_window(\n histosys.low, remove_window)\n # convert to uniform binning\n channel.data.hist = to_uniform_binning(channel.data.hist)\n for s in channel.samples:\n s.hist = to_uniform_binning(s.hist)\n for histosys in s.histo_sys:\n histosys.high = to_uniform_binning(histosys.high)\n histosys.low = to_uniform_binning(histosys.low)\n if mass not in channels:\n channels[mass] = {}\n channels[mass][category.name] = channel\n return channels, []\n\n\ndef feature_workspace(field, template,\n analysis, categories, masses,\n systematics=False,\n cuts=None):\n channels = {}\n for category in analysis.iter_categories(categories):\n for mass in masses:\n channel = analysis.get_channel_array(\n {field: template},\n category=category,\n region=analysis.target_region,\n cuts=cuts,\n include_signal=True,\n mass=mass,\n mode='workspace',\n systematics=systematics,\n uniform=False)[field]\n if mass not in channels:\n channels[mass] = {}\n channels[mass][category.name] = channel\n return channels, []\n\n\ndef weighted_mass_workspace(analysis, categories, masses,\n systematics=False,\n cuts=None):\n hist_template = Hist(20, 50, 250, type='D')\n channels = {}\n for category in analysis.iter_categories(categories):\n clf = analysis.get_clf(category, load=True, mass=125)\n clf_bins = clf.binning(analysis.year, overflow=1E5)\n scores = analysis.get_scores(\n clf, category, analysis.target_region,\n masses=[125], mode='combined',\n systematics=False,\n unblind=True)\n bkg_scores = scores.bkg_scores\n sig_scores = scores.all_sig_scores[125]\n min_score = scores.min_score\n max_score = scores.max_score\n bkg_score_hist = Hist(clf_bins, type='D')\n sig_score_hist = bkg_score_hist.Clone()\n hist_scores(bkg_score_hist, bkg_scores)\n _bkg = bkg_score_hist.Clone()\n hist_scores(sig_score_hist, sig_scores)\n _sig = sig_score_hist.Clone()\n sob_hist = (1 + _sig / _bkg)\n _log = math.log\n for bin in sob_hist.bins(overflow=True):\n bin.value = _log(bin.value)\n log.info(str(list(sob_hist.y())))\n for mass in masses:\n channel = analysis.get_channel_array(\n {MMC_MASS: hist_template},\n category=category,\n region=analysis.target_region,\n include_signal=True,\n weight_hist=sob_hist,\n clf=clf,\n cuts=cuts,\n mass=mass,\n mode='workspace',\n systematics=systematics)[MMC_MASS]\n if mass not in channels:\n channels[mass] = {}\n channels[mass][category.name] = channel\n return channels, []\n\n\ndef weighted_mass_cba_workspace(analysis, categories, masses,\n systematics=False,\n cuts=None):\n hist_template = Hist(20, 50, 250, type='D')\n channels = {}\n\n def scaled(hist, factor):\n new_hist = hist * factor\n new_hist.name = hist.name + '_scaled'\n return new_hist\n\n for category in analysis.iter_categories(categories):\n for mass in masses:\n channel = analysis.get_channel_array(\n {MMC_MASS: hist_template},\n category=category,\n region=analysis.target_region,\n include_signal=True,\n cuts=cuts,\n mass=mass,\n mode='workspace',\n systematics=systematics)[MMC_MASS]\n # weight by ln(1 + s / b)\n total_s = hist_template.Clone()\n total_s.Reset()\n total_b = total_s.Clone()\n for sample in channel.samples:\n if is_signal(sample):\n total_s += sample.hist\n else:\n total_b += sample.hist\n sob = math.log(1 + total_s.integral() / total_b.integral())\n channel.data.hist = scaled(channel.data.hist, sob)\n for sample in 
channel.samples:\n sample.hist = scaled(sample.hist, sob)\n for hsys in sample.histo_sys:\n hsys.high = scaled(hsys.high, sob)\n hsys.low = scaled(hsys.low, sob)\n if mass not in channels:\n channels[mass] = {}\n channels[mass][category.name] = channel\n return channels, []\n\n\ndef mass2d_workspace(analysis, categories, masses,\n systematics=False):\n hist_template = Hist2D(250, 0, 250, 200, -1, 1, type='D')\n channels = {}\n for category in analysis.iter_categories(categories):\n clf = analysis.get_clf(category, load=True)\n for mass in masses:\n channel = analysis.get_channel_array(\n {MMC_MASS: hist_template},\n category=category,\n region=analysis.target_region,\n clf=clf,\n include_signal=True,\n mass=mass,\n mode='workspace',\n systematics=systematics,\n ravel=False)[MMC_MASS]\n if mass not in channels:\n channels[mass] = {}\n channels[mass][category.name] = channel\n return channels, []\n","repo_name":"htautau/hhana","sub_path":"mva/workspace.py","file_name":"workspace.py","file_ext":"py","file_size_in_byte":15626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
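A minimal sketch of the nested-mapping traversal that `write_workspaces` performs for the cross-year combination; the dictionary contents are placeholder strings rather than real rootpy histfactory channels, and the combination name is only illustrative:

```python
# Sketch of the {year: {mass: {category: channel}}} flattening used above.
# Placeholder strings stand in for histfactory channel objects (assumption).
year_mass_category_channel = {
    2012: {125: {'boosted': 'chan_b12', 'vbf': 'chan_v12'}},
    2011: {125: {'boosted': 'chan_b11', 'vbf': 'chan_v11'}},
}

years = list(year_mass_category_channel.keys())            # list() so it runs on py3
masses = list(year_mass_category_channel[years[0]].keys())

for mass in masses:
    # flatten every category from every year at this mass point
    channels = [chan for year in years
                for chan in year_mass_category_channel[year][mass].values()]
    print('full_combination_{0}'.format(mass), channels)
```

Note the original indexes `dict.keys()` directly and tests `basestring`, so it targets Python 2; the sketch wraps `keys()` in `list()` to stay version-neutral.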
+{"seq_id":"74440137103","text":"from collections import Counter\nN = int(input())\na = [int(input()) for _ in range(N)]\na.sort()\nprint(round(sum(a) / N)) # 산술평균\nprint(a[N//2]) # 중앙값\nc = Counter(a)\nif len(c) == 1: # 최빈값\n c = c.most_common(1)\n print(c[0][0])\nelse:\n c = c.most_common(2)\n print(c[0][0] if c[0][1] > c[1][1] else c[1][0])\nprint(a[N - 1] - a[0]) # 범위","repo_name":"korjun1993/algo","sub_path":"src/boj/2108_통계학.py","file_name":"2108_통계학.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
+{"seq_id":"4919023863","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('schools', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='school',\n name='level',\n ),\n migrations.RemoveField(\n model_name='school',\n name='magnet',\n ),\n migrations.AddField(\n model_name='school',\n name='grade_max',\n field=models.IntegerField(default=20),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='school',\n name='grade_min',\n field=models.IntegerField(default=20),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='school',\n name='type',\n field=models.CharField(default='unset', max_length=20, choices=[(b'neighborhood', b'Neighborhood'), (b'magnet', b'Magnet'), (b'charter', b'Charter'), (b'speciality', b'Specialty')]),\n preserve_default=False,\n ),\n ]\n","repo_name":"codefordurham/school-navigator","sub_path":"schools/migrations/0002_auto_20141010_1304.py","file_name":"0002_auto_20141010_1304.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"47"}
+{"seq_id":"13175225726","text":"# MODULO VERIFICAR LOGIN TXT\n# -*- coding: utf-8 -*-\n#----------------------------------------------------------------------------\n# Created By : Ricardo Antonio Cardoso \n# Created Date: Fev-2022\n# version ='1.0'\n# ---------------------------------------------------------------------------\n\ndef consulta(nome_aut):\n local = str(\"c:\\login\\login.txt\")\n encontrados = 0\n with open(local, \"r\", encoding=\"utf-8\") as verificado:\n for name in verificado.readlines():\n i = name.strip().split(\",\")\n name = (i[0])\n if name == nome_aut:\n encontrados = 1\n break\n else:\n continue\n return encontrados","repo_name":"riatoso/loginEmTxt","sub_path":"verifica/verificar.py","file_name":"verificar.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"25965726429","text":"#108. Convert Sorted Array to Binary Search Tree\n#문제 링크 : https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/\n#실패\n#책 풀이 : 이진 검색 결과로 트리 구성\n\nfrom typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n if not nums:\n return None\n\n mid = len(nums) // 2\n\n # 분할 정복으로 이진 검색 결과 트리 구성\n node = TreeNode(nums[mid])\n node.left = self.sortedArrayToBST(nums[:mid])\n node.right = self.sortedArrayToBST(nums[mid + 1:])\n\n return node\n\n'''\n반으로 쪼갠후에 좌, 우를 나눠서 재귀 호출로 처리를 한다.\n굉장히 간단하게 처리가 된다. 나도 시도할 때 root의 중간값을 사용하긴 했는데 탐색과정을\n너무 하나하나 생각한 것 같다.\n그냥 계속 절반씩 나눠서 인덱스의 중앙값을 찾아서 트리를 구성하면 된다.\n\n'''","repo_name":"wwyyww/algorithm","sub_path":"LeetCode/Tree/convert-sorted-array-to-binary-search-tree.py","file_name":"convert-sorted-array-to-binary-search-tree.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14823668568","text":"import numpy as np\nfrom scipy.spatial.distance import cdist\nfrom scipy.spatial.distance import mahalanobis\n\nclass Discrimination:\n\n def __init__(self, date1, date2):\n self.date1 = date1\n self.date2 = date2\n\n def Euclidean_Distance(self):\n dataset1_center = np.mean(self.date1, axis=0)\n dataset2_center = np.mean(self.date2, axis=0)\n\n # 每个数据集中的样本与中心的欧几里得距离\n dataset1_distances = cdist(self.date1, [dataset1_center], 'euclidean')\n dataset2_distances = cdist(self.date2, [dataset2_center], 'euclidean')\n\n # 区分度\n discriminability = np.abs(np.mean(dataset1_distances) - np.mean(dataset2_distances))\n\n print(\"discriminability:\", discriminability)\n return discriminability\n\n def mahalanobis(self):\n dataset1_center = np.mean(self.date1, axis=0)\n dataset2_center = np.mean(self.date2, axis=0)\n\n covariance1 = np.cov(self.date1, rowvar=False)\n covariance2 = np.cov(self.date2, rowvar=False)\n\n print(np.linalg.det(covariance1))\n print(covariance1)\n\n\n md1 = [mahalanobis(x, dataset1_center, np.linalg.inv(covariance1)) for x in self.date1]\n md2 = [mahalanobis(x, dataset2_center, np.linalg.inv(covariance2)) for x in self.date2]\n\n # 区分度\n separation = np.abs(np.mean(md1) - np.mean(md2)) / np.sqrt(np.var(md1) + np.var(md2))\n return(separation)\n\n","repo_name":"hjw112/Emotion-detection","sub_path":"Discrimination.py","file_name":"Discrimination.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8755114497","text":"import asyncio\nimport random\n\n\nasync def record_attendance(teacher):\n print(\"Recording attendance for\", teacher)\n await asyncio.sleep(random.randint(0, 5))\n print(\"Attendance recorded for\", teacher)\n\n\nasync def task_A(end_time, loop):\n print(\"task_A called\")\n await record_attendance(\"Teacher Level1\")\n if (loop.time() + 1.0) < end_time:\n loop.call_later(1, asyncio.ensure_future, task_B(end_time, loop))\n else:\n loop.stop()\n\n\nasync def task_B(end_time, loop):\n print(\"task_B called\")\n await record_attendance(\"Teacher Level2\")\n if (loop.time() + 1.0) < end_time:\n loop.call_later(1, asyncio.ensure_future, task_C(end_time, loop))\n else:\n loop.stop()\n\n\nasync def task_C(end_time, loop):\n print(\"task_C called\")\n await record_attendance(\"Teacher Level3\")\n if (loop.time() + 1.0) < end_time:\n loop.call_later(1, asyncio.ensure_future, task_A(end_time, loop))\n else:\n loop.stop()\n\n\nloop = asyncio.get_event_loop()\nend_loop = loop.time() + 10\nloop.call_soon(asyncio.ensure_future, task_A(end_loop, loop)) # Menjalankan task_A sebagai Future\nloop.run_forever()\nloop.close()\n","repo_name":"kerjabhakti/SISTER_3B","sub_path":"Chapter005/TugasKelompok/Kelompok3/1204053_ResaRianti/asyncio_event_loop.py","file_name":"asyncio_event_loop.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
+{"seq_id":"27193181560","text":"# Zane Bernard 2/28/2018 \n#User Defined Functions\n# Semester GPA Calculation\n# Purpose: This program will calculate a student’s semester grade\n# by using the students previous credits/GPA and then asking the student\n# for their letter grades and the matching credit hours for that grade.\n# The program will print out the students semester GPA followed by a\n# cumulative GPA\n#\n#-----function definition section------------------------------------------------\n#\n# Function 2 Name: getGrades\n#\n# Purpose: To get all of the semester grades the user would like to use in \n# the calculation. We will handle prompting the user for the letter grade\n# and the credit hours, for as many classes as they want. We will do error\n# handling to make sure it is a correct letter grade. We are going to store\n# the grades and credit hours in a list of sublists.\n#\n# Parameters or Arguments: none\n#\n# Return: The value returned is a list of sublists, in which each sublists\n# contains the letter grade for a given course and the associated number of\n# credits, for example [[‘A’,3], [‘B’,4],[‘A’,3],[‘C’,3]]\n#\ndef get_grades():\n #create a list to hold each of the (grade,points) pairings \n semester_info = []\n #create a Boolean flag to handle how many times we prompt the user \n more_grades = True\n #use the empty string variable to determine when the user is done \n empty_str = ''\n #while the user still wants to enter more_grades \n while more_grades:\n #prompt the user for the first grade \n course_grade = input(\"Enter grade (hit Enter if done): \")\n #while they didn’t enter a valid grade keep asking \n while course_grade not in ('A', 'B', 'C', 'D', 'F', empty_str):\n course_grade = input(\"Enter letter grade received: \")\n #if the user didn’t enter a grade they must be done \n if course_grade == empty_str:\n #since they are done we need to flip the flag \n more_grades = False\n else:\n #since they entered a grade we need to ask for the credits \n num_credits = int(input('Enter number of credits: '))\n #add the (grade, credits) pairing to the list \n semester_info.append([num_credits, course_grade])\n\n return semester_info\n#\n# Name: convert_grade\n#\n# Purpose: To convert a letter grade to its corresponding numerical value.\n# This value is used to calculate the quality points that are used to\n# calculate the GPA\n#\n# Parameters or Arguments: a string containing the letter grade, called grade\n#\n# Return: an integer value containing the corresponding quality points\n#\ndef convert_grade(grade):\n if grade == 'F':\n return 0\n else:\n #calculation for quality points \n return 4 - (ord(grade) - ord('A'))\n#\n# Function 2 Name: calculate_GPA\n#\n# Purpose: To get all of the semester grades the user would like to use in\n# the calculation. We will handle prompting the user for the letter grade\n# and the credit hours, for as many classes as they want. We will do error\n# handling to make sure it is a correct letter grade. 
We are going to store\n# the grades and credit hours in a list of sublists.\n#\n# Parameters or Arguments: the semester grades, and the cumulative gpa info\n# we got earlier.\n#\n# Return: The value returned is a list of sublists, in which each sublists\n# contains the letter grade for a given course and the associated number of\n#\ndef calculate_GPA(sem_grades_info, cumulative_gpa_info):\n #create two variables for the credit hours and quality points\n #used for summing up totals \n sem_quality_pts = 0\n sem_credits = 0\n #create two variables that hold a copy from the tuple we passed in\n #tuple can’t be changed, so we must make a copy that we can update \n current_cumulative_gpa, total_credits = cumulative_gpa_info\n #for each of the credit,grade pairings in the list \n for k in range(len(sem_grades_info)):\n #store the first credits,grade pairing into new variables \n num_credits, letter_grade = sem_grades_info[k]\n #totaling up the quality points using the numcredits and\n #our helper function to give us the quality points of that grade \n sem_quality_pts = sem_quality_pts + num_credits * convert_grade(letter_grade)\n #totaling up the credits for the semester \n sem_credits = sem_credits + num_credits\n #calculation for the semester gpa \n sem_GPA = sem_quality_pts / sem_credits\n #calculation for the cumulative gpa\n new_cumulative_GPA = (current_cumulative_gpa * total_credits +\\\n sem_GPA * sem_credits) / (total_credits +\\\n sem_credits)\n #return the semester gpa and the new cumulative gpa \n return (sem_GPA, new_cumulative_GPA)\n#This function determines if this is your first semester\n#Parameters: the input for 'y' or 'n' is passed through this function to convert it\n#into a boolean value\n#\n#Return: A Boolean value is returned of either True or False\ndef which_sem(answer):\n #initialize a flag that will stop the while loop\n flag = True\n while flag:\n if answer == 'y':\n first_semester = True\n flag = False\n elif answer == 'n':\n first_semester = False\n flag = False\n #handles invalid input\n while answer not in ('y', 'n'):\n answer = input(\"Please enter either 'y' or 'n'? \")\n return first_semester\n#----main section-----------------------------------------------------------------------\ndef main():\n #program greeting \n print(\"This program calculates semester and cumulative GPAs \\n\")\n # get first semester info\n answer = input(\"Is this your first semester (y/n)? \")\n #assigns boolean value to a variable\n first_semester = which_sem(answer)\n #if the this is not your first semester, it asks for information on current info\n if first_semester == False:\n # get current GPA info\n total_credits = int(input(\"Enter total number of earned credits: \"))\n cumulative_gpa = float(input(\"Enter your current cumulative GPA: \"))\n #else this is your first semester\n else:\n #set these values to be completely new\n total_credits = 0\n cumulative_gpa = 0\n #holds values in a tuple\n cumulative_gpa_info = (cumulative_gpa, total_credits)\n print()\n# call the function getGrades() and store the results into semester_grades \n semester_grades = get_grades()\n # calculate semester gpa and new cumulative gpa\n # we call the function calculateGPA and we pass it the list of semester\n # grades and the tuple holding the cumulative info\n # then we store the results from calculateGPA into two variables. 
\n semester_gpa, cumulative_gpa = calculate_GPA(semester_grades, cumulative_gpa_info)\n # display semester gpa and new cumulative gpa \n print('\\nYour semester GPA is', format(semester_gpa, '.2f'))\n print('Your new cumulative GPA is', format(cumulative_gpa, '.2f'))\n \nmain()\n\n","repo_name":"icodeforfunandprofit/CSC-131","sub_path":"calculategpamodifiedbernard.py","file_name":"calculategpamodifiedbernard.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
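The `convert_grade()` arithmetic, worked through once (the values below follow directly from the `ord()` formula; the sample semester is made up):

```python
# 4 - (ord(g) - ord('A')) maps A->4, B->3, C->2, D->1;
# 'F' must be special-cased, since the formula would yield -1.
for g in 'ABCDF':
    print(g, 0 if g == 'F' else 4 - (ord(g) - ord('A')))

# worked semester: [[3, 'A'], [4, 'B']] -> (3*4 + 4*3) / (3 + 4)
print(round((3 * 4 + 4 * 3) / (3 + 4), 2))  # 3.43
```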
+{"seq_id":"1051842266","text":"#!/usr/bin/python\nfrom __future__ import print_function\nimport sys\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom geometry_msgs.msg import Point\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np\nfrom openalpr import Alpr\nimport cv2\n\nWINDOW_NAME = 'openalpr'\nFRAME_SKIP = 2\nfont = cv2.FONT_HERSHEY_DUPLEX\n\n\nclass image_converter:\n def __init__(self):\n self.publisher = rospy.Publisher('object_pose', Point, queue_size=10)\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber(\n \"camera/image_raw\", Image, self.callback)\n self.alpr = Alpr(\"eu\", \"/etc/openalpr/openalpr.conf\",\n \"/usr/local/home/u180107/openalpr/runtime_data\")\n self.alpr.set_detect_region(True)\n self.currentConfidence = 0\n self.currentDistance = 0\n self.criteria = (cv2.TERM_CRITERIA_EPS +\n cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n self.objp = np.float32([\n [0, 0, 0],\n [7, 0, 0],\n [0, 5, 0],\n [7, 5, 0]\n ])\n self.axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)\n with np.load('calibration.npz') as X:\n self.mtx, self.dist = [X[i] for i in ('mtx', 'dist')]\n\n def draw(self, img, corners, imgpts):\n corner = tuple(corners[0].ravel())\n img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255, 0, 0), 5)\n img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0, 255, 0), 5)\n img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0, 0, 255), 5)\n return img\n\n def getPose(self, cv_image):\n\n gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (8, 6), None, cv2.CALIB_CB_FAST_CHECK)\n\n if ret == True:\n result = np.array(\n [corners[0], corners[7], corners[40], corners[-1]])\n for res in result:\n cv2.circle(cv_image, (res[0][0], res[0][1]), 5, (0, 0, 255), 3)\n\n # print(result)\n corners2 = cv2.cornerSubPix(\n gray, result, (11, 11), (-1, -1), self.criteria)\n\n ret, rvecs, tvecs, inliers = cv2.solvePnPRansac(\n self.objp, corners2, self.mtx, self.dist, cv2.SOLVEPNP_P3P)\n\n imgpts, jac = cv2.projectPoints(self.axis, rvecs, tvecs, self.mtx, self.dist)\n img = self.draw(cv_image, corners2, imgpts)\n platePoint = Point(float(tvecs[0]),float(tvecs[1]),float(tvecs[2]))\n self.publisher.publish(platePoint)\n # cv2.imshow('img', img)\n return(img)\n\n successful_frames += 1\n cv2.waitKey(1)\n else:\n # cv2.imshow('img', cv_image)\n return(cv_image)\n cv2.waitKey(1)\n\n def distanceByArea(self, cv_image):\n results = self.alpr.recognize_ndarray(cv_image)\n for i, plate in enumerate(results['results']):\n best_candidate = plate['coordinates']\n corner1 = (best_candidate[0]['x'] , best_candidate[0]['y'])\n corner2 = (best_candidate[2]['x'] , best_candidate[2]['y'])\n platePixelWidth = abs(int(best_candidate[2]['x']) - int(best_candidate[0]['x']))\n platePixelHeight = abs(int(best_candidate[2]['y']) - int(best_candidate[0]['y']))\n platePixelArea = platePixelHeight*platePixelWidth\n \n # The distance equation required that the camera be calibrated, currently they're 'close enough'\n distance = round(52000*pow(platePixelArea, -0.368), 2)\n detected = plate['candidates'][0]\n if self.currentConfidence < detected['confidence'] or abs(distance-self.currentDistance)>200:\n self.currentConfidence = detected['confidence']\n # print('Plate #{}: {:7s} ({:.2f}%) - Plate distance: {}mm'.format(i, detected['plate'].upper(), detected['confidence'],distance))\n self.currentDistance = distance\n \n cv2.rectangle(cv_image,corner1,corner2,(0,255,0),2)\n 
cv2.putText(cv_image, ('distance = %dmm' % distance), (i*330,460), font, 1, (0, 255, 0), 2, cv2.LINE_AA)\n\n return cv_image\n\n def distanceByHomography(self, cv_image):\n cam_mtx = np.array([\n [767.92632836, 0 , 325.7824046],\n [0, 770.98907555, 157.44636998],\n [0, 0, 1]\n ])\n dist_coeffs = np.zeros((5, 1))\n results = self.alpr.recognize_ndarray(cv_image)\n for i, plate in enumerate(results['results']):\n best_candidate = plate['coordinates']\n worldPoints = np.float32([[0, 0, 0], [510, 0, 0], [510,110, 0], [0, 110, 0]])\n imagePoints = np.float32([\n (\n best_candidate[a]['x'] , best_candidate[a]['y']\n ) for a in range(0,4)])\n # print(imgPoints)\n # print(worldPoints)\n imgPoints = np.ascontiguousarray(imagePoints[:,:2].reshape(4,1,2))\n _ret, rvec, tvec = cv2.solvePnP(worldPoints, imgPoints, cam_mtx, dist_coeffs, flags=cv2.SOLVEPNP_P3P)\n print(\"Rotation Vector:\\n {0}\".format(rvec))\n print(\"Translation Vector:\\n {0}\".format(tvec))\n \n end_point2D, jacobian = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rvec, tvec, cam_mtx, dist_coeffs)\n \n for p in imagePoints:\n cv2.circle(cv_image, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n \n \n p1 = ( int(imagePoints[0][0]), int(imagePoints[0][1]))\n p2 = ( int(end_point2D[0][0][0]), int(end_point2D[0][0][1]))\n \n cv2.line(cv_image, p1, p2, (255,0,0), 2)\n\n return cv_image\n\n def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n cv_image_labeled = self.getPose(cv_image)\n cv2.imshow(WINDOW_NAME, cv_image_labeled)\n cv2.waitKey(1)\n\n\ndef main(args):\n ic = image_converter()\n rospy.init_node('image_converter', anonymous=True)\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv)\n","repo_name":"DomhnallP/SLAM_MAP","sub_path":"Examples/ROS/ORB_SLAM2/scripts/pose.py","file_name":"pose.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"}
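`distanceByArea` ranges the plate with a fitted power law, `distance_mm = 52000 * area**-0.368`. A quick worked example of how the estimate scales (the plate sizes are made up, and the constants come from the script's own ad-hoc calibration, which its comment admits is only "close enough"):

```python
for w, h in [(160, 36), (80, 18), (40, 9)]:
    area = w * h
    print(area, round(52000 * area ** -0.368, 2))
# quartering the pixel area multiplies the estimated distance
# by 4 ** 0.368, roughly 1.67x per halving of apparent size
```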
+{"seq_id":"10492650202","text":"\"\"\"\nMain application window, displays set of dock widgets, manages inter-widget processes and data sharing.\n\nAuthor: Alexander Shiarella\n\"\"\"\n\nimport os\nimport sys\nimport shutil\nimport sqlite3\nfrom pathlib import Path\nimport threading\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDockWidget, QLabel, QMessageBox\nfrom PyQt5.QtCore import Qt, pyqtSlot, pyqtSignal, QUrl\nfrom PyQt5.QtMultimedia import QMediaContent\nfrom PyQt5.QtSql import QSqlQuery, QSqlDatabase\n\nfrom .AppEnums import AudioSource, ScreenMode\nfrom .AppResources import StampTableResources, GlobalResources, AudioResources, SQLResources\nfrom .AppUtils import ErrorUtils\nfrom .Records import StampRecord\n\nfrom .widgets.MediaPlayerWidget import MediaPlayerWidget\nfrom .widgets.SurveyTableWidget import SurveyTableWidget\nfrom .widgets.SurveyTableWidget import Column as SurveyTableColumn\nfrom .widgets.StampTableWidget import StampTableWidget\nfrom .widgets.ExtractorWidget import ExtractorWidget\nfrom .widgets.ProcessWidget import ProcessWidget\nfrom .widgets.ValidateWidget import ValidateWidget\nfrom .widgets.ExtractorWidget import Type as ExtractionType\nfrom .widgets.SurveyInfoDialog import AddSurveyDialog, EditSurveyDialog, CombineSurveyDialog\nfrom .widgets.ToolBars import ViewToolBar, ModeToolBar, GenerationToolBar, SettingsToolBar\nfrom .widgets.VisualizerWidget import VisualizerWidget\nfrom whoop.widgets.StatusBar import StatusBarWidget\n\nfrom .processing.AudioSplitter import AudioSplitter\nfrom .processing.ProcessWorker import ProcessWorker\nfrom .processing.ModelWorker import ModelWorker\nfrom .processing.ExtractionWorker import ExtractionWorker\n\nfrom .ScreenSettings import MainScreenSetting, SeparateScreenSetting\n\nfrom keras import Sequential\n\n\nclass MainWindow(QMainWindow):\n\n enableSurveyStart = pyqtSignal(bool)\n\n def __init__(self, databasePath, logWidget):\n super().__init__()\n\n # Log\n self.logWidget = logWidget\n\n # Style\n # self.setStyleSheet(open('Styles.css').read()) # Comment out if not using custom CSS.\n\n # Database\n self.qDatabaseConnection = None # To allow checking for existing connection to close.\n self.qDatabaseConnection = self.__openDatabase(databasePath)\n print(QSqlDatabase.database().connectionName())\n\n # General\n self.setWindowTitle(\"whoop\")\n self.errorLabel = QLabel() # TODO remove\n self.statusBarWidget = StatusBarWidget(self)\n self.statusBarWidget.showTextMessage(\"Status: Connected to \" + databasePath)\n\n # Toolbars\n self.viewToolBar = ViewToolBar(self)\n self.modeToolBar = ModeToolBar(self) # TODO abstract?\n self.generationToolBar = GenerationToolBar(self)\n self.addToolBar(Qt.TopToolBarArea, self.viewToolBar)\n self.viewToolBar.setVisible(False) # TODO depricated for now - add for training screen maybe\n self.settingsToolBar = SettingsToolBar()\n self.addToolBar(Qt.RightToolBarArea, self.settingsToolBar)\n\n # Widgets\n self.playerWidget = MediaPlayerWidget()\n self.surveyWidget = SurveyTableWidget()\n self.stampWidget = StampTableWidget()\n self.extractorWidget = ExtractorWidget()\n self.processWidget = ProcessWidget()\n self.validateWidget = ValidateWidget()\n self.visualizerWidget = VisualizerWidget()\n # self.logWidget = logWidget\n\n # Docks\n self.playerDock = QDockWidget(\"Audio\", self)\n self.surveyDock = QDockWidget(\"Surveys\", self)\n self.stampDock = QDockWidget(\"Annotation\", self)\n self.extractorDock = QDockWidget(\"Extraction\", self)\n self.processDock = 
QDockWidget(\"Processing\", self)\n self.validateDock = QDockWidget(\"Validation\", self)\n self.visualizerDock = QDockWidget(\"Visualization\", self)\n self.logDock = QDockWidget(\"Log\", self)\n self.playerDock.setWidget(self.playerWidget)\n self.surveyDock.setWidget(self.surveyWidget)\n self.stampDock.setWidget(self.stampWidget)\n self.extractorDock.setWidget(self.extractorWidget)\n self.processDock.setWidget(self.processWidget)\n self.validateDock.setWidget(self.validateWidget)\n self.visualizerDock.setWidget(self.visualizerWidget)\n self.logDock.setWidget(self.logWidget)\n\n # Screens\n self.generationScreen = MainScreenSetting(self)\n # TODO depricated - not using separate screens anymore (just showing docks) since side toolbar can be shared\n self.processScreen = SeparateScreenSetting(self)\n\n # Model/Processing\n self.kerasModel = None\n self.worker = ProcessWorker.null()\n self.modelWorker = ModelWorker.null()\n self.killProcessingEvent = threading.Event()\n\n # extraction\n self.extractionWorker = ExtractionWorker.null()\n\n self.__createInitialView()\n\n # signals\n self.__connectSignals()\n\n # TODO user confirmation? Save session?\n def closeEvent(self, event):\n self.__setKillFlag(True)\n\n try:\n # TODO something less hacky\n self.worker.join()\n self.extractionWorker.join()\n self.modelWorker.join()\n except RuntimeError:\n pass # The thread has not been started.\n\n self.__closeDatabase()\n super().closeEvent(event)\n\n # TODO Possible improvement: Reset table models so that DB can be switched from within app.\n def __openDatabase(self, path):\n self.logWidget.logItem(\"Opening database connection to \" + path)\n try:\n if self.qDatabaseConnection:\n self.__closeDatabase()\n qDatabaseConnection = QSqlDatabase.addDatabase(SQLResources.dbType)\n qDatabaseConnection.setDatabaseName(path)\n if not qDatabaseConnection.open():\n ErrorUtils.showErrorDialog(text=\"Could not create database connection.\",\n info=\"Application exiting.\")\n sys.exit(1) # TODO log error\n\n except sqlite3.Error as e:\n ErrorUtils.showErrorDialog(text=\"sqlite3 error when connecting to database. Exiting.\", info=str(e))\n sys.exit(1) # TODO log error\n\n except Exception as e:\n ErrorUtils.showErrorDialog(text=\"Uncaught exception when connecting to database. 
Exiting.\", info=str(e))\n sys.exit(1) # TODO log error\n\n self.logWidget.logItem(\"Opened database connection to \" + path)\n return qDatabaseConnection\n\n # TODO reset table models so that DB can be switched from within app\n def __closeDatabase(self):\n self.logWidget.logItem(\"Closing database connection\")\n QSqlDatabase.database().connectionName()\n self.qDatabaseConnection.close()\n del self.qDatabaseConnection\n QSqlDatabase.removeDatabase(QSqlDatabase.database().connectionName())\n\n def __connectSignals(self):\n # survey widget\n self.surveyWidget.addSurveySignal.connect(self.showAddSurveyDialog)\n self.surveyWidget.editSurveySignal.connect(self.showEditSurveyDialog)\n self.surveyWidget.loadSurveyAudioSignal.connect(self.loadSurveyAudio)\n self.surveyWidget.selectionChangeSignal.connect(self.onSurveySelectionChange)\n self.surveyWidget.combineSignal.connect(self.showCombineSurveyDialog)\n\n # player widget\n # self.playerWidget.mediaChangedSignal.connect(self.surveyWidget.onPlaylistMediaChange) # TODO removed for performance\n self.playerWidget.playlistSelectionChangedSignal.connect(self.onPlaylistSelectionChange)\n\n # stamp widget\n self.stampWidget.addStampSignal.connect(lambda key : self.supplyStamp(key))\n\n # extraction widget\n self.extractorWidget.runSignal.connect(self.__runExtraction)\n\n # process widget\n self.processWidget.runSignal.connect(self.__runProcessing)\n self.processWidget.loadSignal.connect(self.__loadModel)\n self.processWidget.cancelSignal.connect(lambda ignore, flagUp=True : self.__setKillFlag(flagUp=flagUp))\n\n # validate widget\n self.validateWidget.moveSignal.connect(self.__moveFile)\n\n # generation toolbar\n self.enableSurveyStart.connect(self.generationToolBar.enableSurvey)\n self.enableSurveyStart.connect(self.stampWidget.enableStamps)\n self.generationToolBar.switchSignal.connect(self.generationScreen.setMode)\n\n # view toolbar\n # TODO depricated - not using separate screens anymore (just showing docks) since side toolbar can be shared\n # self.viewToolBar.switchSignal.connect(self.onViewSwitch)\n\n @pyqtSlot(bool)\n def __setKillFlag(self, flagUp):\n if flagUp:\n self.killProcessingEvent.set()\n else:\n self.worker.join()\n self.killProcessingEvent.clear()\n\n @pyqtSlot(str, bool)\n def __moveFile(self, direc, toRemove):\n audioFile = self.playerWidget.getCurrentMediaUrl()\n if os.path.exists(audioFile):\n newPath = os.path.join(direc, os.path.basename(audioFile))\n try:\n if toRemove:\n shutil.move(audioFile, newPath)\n self.playerWidget.removeButtonAction()\n self.logWidget.logItem(\"Moved \" + audioFile + \" to \" + newPath)\n else:\n shutil.copy(audioFile, newPath)\n self.logWidget.logItem(\"Copied \" + audioFile + \" to \" + newPath)\n except shutil.SameFileError:\n # TODO show notification maybe option to overwrite\n self.logWidget.logItem(\" ERROR - destination \" + newPath + \" already exists\")\n ErrorUtils.showErrorDialog(text=(\"Did not move \" + audioFile), info=(newPath + \" already exists.\"))\n else:\n self.logWidget.logItem(\"ERROR - Could not find \" + audioFile)\n ErrorUtils.showErrorDialog(text=\"File not found\", info=audioFile)\n\n @pyqtSlot(str)\n def __loadModel(self, modelFile):\n self.modelWorker = ModelWorker(modelFile) # TODO move delclaration to constructor?\n self.modelWorker.modelSignal.connect(self.__modelLoaded)\n self.modelWorker.errorSignal.connect(self.__modelLoadError)\n self.modelWorker.start()\n\n @pyqtSlot(Sequential, str)\n def __modelLoaded(self, model, modelFile):\n self.kerasModel = model\n 
self.processWidget.setModelLabel(modelFile)\n self.processWidget.setDisabled(False)\n\n @pyqtSlot(str)\n def __modelLoadError(self, error):\n ErrorUtils.showErrorDialog(text=\"Could not load model.\", info=error)\n self.processWidget.setDisabled(False)\n\n def __runProcessing(self):\n self.logWidget.logItem(\"Running processing\")\n\n # temp disable changes\n self.processWidget.setDisabled(True)\n self.playerWidget.setDisabled(True)\n\n # get settings from ProcessWidget\n setting = self.processWidget.getSetting()\n\n # create tuple of audio file paths\n audioTup, errorMessage = self.__getAudioForProcessing(setting)\n if len(audioTup) < 1:\n self.logWidget.logItem(\"ERROR - no audio for processing.\")\n ErrorUtils.showErrorDialog(text=\"No audio selected for processing\", info=errorMessage)\n\n else:\n self.processWidget.runMode(True)\n self.worker = ProcessWorker(self, setting, audioTup, self.kerasModel, self.killProcessingEvent)\n self.worker.runModeSignal.connect(self.processWidget.runMode)\n self.worker.statusSignal.connect(self.logWidget.logItem)\n self.worker.runModeSignal.connect(self.__setKillFlag) # False if not running\n\n # p = multiprocessing.Process(target=self.__runProcess, args=(self.worker,))\n self.worker.start()\n\n self.processWidget.setDisabled(False)\n self.playerWidget.setDisabled(False)\n\n def __getAudioForProcessing(self, setting):\n audioList = []\n errorMessage = \"\"\n \n if setting.getAudioSource() == AudioSource.SELECTED:\n audioList.append(self.playerWidget.getCurrentMediaUrl())\n self.logWidget.logItem(\"Loading selected audio: \" + str(self.playerWidget.getCurrentMediaUrl()))\n \n elif setting.getAudioSource() == AudioSource.ALL:\n audioList = self.playerWidget.getAllMedia()\n self.logWidget.logItem(\"Loading all audio in Audio widget\")\n\n elif setting.getAudioSource() == AudioSource.DIREC:\n direc = setting.getInputDirec()\n if os.path.isdir(direc):\n self.logWidget.logItem(\"Loading audio from \" + direc)\n # get all allowed files in directory\n for filename in os.listdir(direc):\n if filename.endswith(AudioResources.allowedExt):\n audioList.append(os.path.join(direc, filename))\n else:\n self.logWidget.logItem(\"ERROR - not a valid directory \" + direc)\n errorMessage = \"Not a valid directory: \" + direc\n # TODO launch error dialog and return (not a valid directory)\n\n else:\n print(\"Error: no source selected\") # TODO error dialog and enablement\n self.logWidget.logItem(\"ERROR - no audio source selected.\")\n errorMessage = \"No audio source selected.\"\n\n return tuple(audioList), errorMessage\n\n # TODO depricated - not using separate screens anymore (just showing docks) since side toolbar can be shared\n # def __onViewSwitch(self, option):\n # if option == 0:\n # print(\"option 0\")\n # self.__loadGenerateView()\n # if option == 1:\n # print(\"option 1\")\n # self.loadProcessView()\n # if option == 2:\n # print(\"option 2\")\n\n def __loadGenerateView(self):\n self.generationScreen.showScreen()\n\n def loadProcessView(self):\n self.processScreen.showScreen()\n\n def __createInitialView(self):\n self.__loadGenerateView()\n self.generationScreen.setMode(ScreenMode.PREPARE)\n\n def __runExtraction(self):\n print(\"run extraction\")\n if self.checkSurveySelection():\n\n self.extractorWidget.runMode(True)\n # create a splitter object with values from forms\n args = self.extractorWidget.getSplitterArgs()\n splitter = AudioSplitter(*args)\n type = self.extractorWidget.getType()\n\n # get audio start and end\n audioStart, audioEnd = 
self.extractorWidget.getStartEnd()\n\n # for each survey selected, get all of timestamps\n # rows = self.surveyWidget.getSelectedRows()\n #\n # for row in rows:\n # audioFile = self.surveyWidget.getDataFromKey(row, SurveyTableColumn.FILE)\n #\n # if type == ExtractionType.CONT:\n # splitter.split(audioFile, self.extractorWidget.getOutputDirec()[0], start, end)\n #\n # else:\n # stampList = self.queryStamps(row)\n # if type == ExtractionType.POS or type == ExtractionType.BOTH:\n # splitter.extract(stampList, ExtractionType.POS, audioFile, self.extractorWidget.getOutputDirec()[1], start, end)\n # if type == ExtractionType.NEG or type == ExtractionType.BOTH:\n # splitter.extract(stampList, ExtractionType.NEG, audioFile, self.extractorWidget.getOutputDirec()[2], start, end)\n rows = self.surveyWidget.getSelectedRows()\n\n if type == ExtractionType.CONT:\n audioList = []\n for row in rows:\n audioFile = self.surveyWidget.getDataFromKey(row, SurveyTableColumn.FILE)\n audioList.append(audioFile)\n\n self.extractionWorker = ExtractionWorker(self,\n # splitter=splitter,\n splitterArgs = args,\n audioInfo=audioList,\n type=type,\n audioStart=audioStart,\n audioEnd=audioEnd,\n outputDirec=self.extractorWidget.getOutputDirec()[0])\n # todo disable button\n\n else:\n audioDict = {}\n for row in rows:\n audioFile = self.surveyWidget.getDataFromKey(row, SurveyTableColumn.FILE)\n audioDict[audioFile] = self.queryStamps(row)\n\n self.extractionWorker = ExtractionWorker(self,\n splitterArgs=args,\n # splitter=splitter,\n audioInfo=audioDict,\n type=type,\n audioStart=audioStart,\n audioEnd=audioEnd,\n outputDirec=[self.extractorWidget.getOutputDirec()[1], self.extractorWidget.getOutputDirec()[2]])\n\n self.worker.runModeSignal.connect(self.extractorWidget.runMode)\n self.extractionWorker.statusSignal.connect(self.logWidget.logItem)\n self.extractionWorker.start()\n # todo disable button\n\n # audioList = []\n # audioDict = {}\n # for row in rows:\n # audioFile = self.surveyWidget.getDataFromKey(row, SurveyTableColumn.FILE)\n #\n # if type == ExtractionType.CONT:\n # audio\n # splitter.split(audioFile, self.extractorWidget.getOutputDirec()[0], start, end)\n #\n # else:\n # stampList = self.queryStamps(row)\n # if type == ExtractionType.POS or type == ExtractionType.BOTH:\n # splitter.extract(stampList, ExtractionType.POS, audioFile,\n # self.extractorWidget.getOutputDirec()[1], start, end)\n # if type == ExtractionType.NEG or type == ExtractionType.BOTH:\n # splitter.extract(stampList, ExtractionType.NEG, audioFile,\n # self.extractorWidget.getOutputDirec()[2], start, end)\n\n\n def runQuery(self, queryStr, lastField, firstField = 0):\n query = QSqlQuery(queryStr)\n if query.exec_() == False:\n print(query.lastError().text()) # TODO\n result = []\n while query.next():\n rowResult = []\n for col in range(firstField, lastField):\n print(query.value(col)) # TODO remove\n rowResult.append(query.value(col))\n result.append(rowResult)\n return result\n\n def queryColumn(self, queryStr, col=0):\n query = QSqlQuery(queryStr)\n if query.exec_() == False:\n print(query.lastError().text()) # TODO\n result = []\n while query.next():\n result.append(query.value(col))\n return result\n\n def queryStamps(self, surveyRow):\n surveyID = self.surveyWidget.getDataFromKey(surveyRow, SurveyTableColumn.SURVEY_DATETIME)\n if self.extractorWidget.toFilter():\n queryStr = \"SELECT miliseconds FROM \" \\\n + StampTableResources.tableName \\\n + \" WHERE survey_datetime = \\\"\" \\\n + surveyID + \"\\\"\" \\\n + \" AND (\" + 
self.extractorWidget.getLabelFilter() + \")\" \\\n + \" ORDER BY miliseconds ASC\"\n else:\n queryStr = \"SELECT miliseconds FROM \" \\\n + StampTableResources.tableName \\\n + \" WHERE survey_datetime = \\\"\" \\\n + surveyID \\\n + \"\\\" ORDER BY miliseconds ASC\"\n stamps = self.queryColumn(queryStr)\n return stamps\n\n def checkSurveySelection(self):\n # TODO maybe remove (redundant)\n rows = self.surveyWidget.getSelectedRows()\n if len(rows) < 1:\n print(\"Error: No survey selected.\") # TODO\n self.errorMessage(\"No Survey Selected\")\n return False\n\n for row in rows:\n file = self.surveyWidget.getDataFromKey(row, SurveyTableColumn.FILE)\n if self.playerWidget.fileIndex(file) < 0:\n print(\"Error: Audio file is not loaded for all selected surveys.\") # TODO\n self.showNotLoadedError(file)\n return False\n return True\n\n def showNotLoadedError(self, errorFile):\n message = QMessageBox()\n message.setIcon(QMessageBox.Critical)\n message.setWindowTitle(\"Error\")\n message.setText(\"Load Audio Into Player\")\n message.setInformativeText(\"File: \" + errorFile)\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()\n\n def errorMessage(self, text):\n message = QMessageBox()\n message.setIcon(QMessageBox.Critical)\n message.setWindowTitle(\"Error\")\n message.setText(text)\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()\n\n # todo maybe local\n @pyqtSlot()\n def combineSurveys(self):\n rows = self.surveyWidget.getSelectedRows()\n print(len(rows))\n print(rows)\n\n\n\n @pyqtSlot()\n def supplyStamp(self, key):\n stamp = self.playerWidget.getMiliseconds()\n self.stampWidget.addStamp(stamp, key)\n\n def showCombineSurveyDialog(self):\n print(self.playerWidget.getDialogArgs())\n dialog = CombineSurveyDialog(self.surveyWidget, **self.playerWidget.getDialogArgs(),\n **self.surveyWidget.getDialogArgs())\n dialog.audioIndexSignal.connect(self.playerWidget.changeSelection)\n self.playerWidget.durationChangeSignal.connect(dialog.setDuration)\n if dialog.exec_():\n filter = self.stampWidget.createFilter(self.surveyWidget.getSelectedKeys())\n queryStr = \"SELECT * FROM ANNOTATION WHERE \" + filter\n result = self.runQuery(queryStr, 5)\n print(result)\n\n self.surveyWidget.addRecord(dialog.createRecord())\n for row in result:\n print(row[0])\n print(dialog.getDateTime())\n newRecord = StampRecord(miliseconds=row[0],\n surveyDatetime=dialog.getDateTime().toString(GlobalResources.datetimeFormat),\n label=row[3],\n note=row[4],\n creationDatetime=row[2])\n\n self.stampWidget.addRecord(newRecord)\n\n self.stampWidget.loadSurveyStamps(self.surveyWidget.getSelectedKeys())\n\n @pyqtSlot()\n def showAddSurveyDialog(self):\n print(self.playerWidget.getDialogArgs())\n dialog = AddSurveyDialog(self.surveyWidget, **self.playerWidget.getDialogArgs(),\n **self.surveyWidget.getDialogArgs())\n dialog.audioIndexSignal.connect(self.playerWidget.changeSelection)\n self.playerWidget.durationChangeSignal.connect(dialog.setDuration)\n if dialog.exec_():\n self.surveyWidget.addRecord(dialog.createRecord())\n\n @pyqtSlot()\n def showEditSurveyDialog(self):\n dialog = EditSurveyDialog(self.surveyWidget, **self.playerWidget.getDialogArgs(),\n **self.surveyWidget.getDialogArgs())\n dialog.audioIndexSignal.connect(self.playerWidget.changeSelection)\n self.playerWidget.durationChangeSignal.connect(dialog.setDuration)\n if dialog.exec_():\n self.surveyWidget.editRecord(dialog.createRecord())\n self.enableSurveyStart.emit(self.canStartSurvey())\n\n @pyqtSlot()\n def loadSurveyAudio(self):\n # load all not 
already in playlist\n rows = self.surveyWidget.getSelectedRows()\n for index in rows:\n path = self.surveyWidget.getDataFromKey(index, SurveyTableColumn.FILE)\n if not self.playerWidget.isLoaded(path):\n if self.isValidAudioFile(path):\n self.playerWidget.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))\n\n self.onSurveySelectionChange()\n QApplication.processEvents()\n self.repaint()\n\n @pyqtSlot()\n def onSurveySelectionChange(self):\n print(self.surveyWidget.singlePathSelected())\n self.playerWidget.select(self.surveyWidget.singlePathSelected())\n\n self.enableSurveyStart.emit(self.canStartSurvey())\n\n if len(self.surveyWidget.getSelectedRows()) > 0:\n self.stampWidget.loadSurveyStamps(self.surveyWidget.getSelectedKeys())\n else:\n self.stampWidget.clearSurveyStamps()\n\n QApplication.processEvents() # TODO maybe remove\n self.repaint() # need for button\n\n @pyqtSlot()\n def onPlaylistSelectionChange(self):\n self.enableSurveyStart.emit(self.canStartSurvey())\n QApplication.processEvents() # TODO maybe remove\n self.repaint() # need for button\n\n\n def canStartSurvey(self):\n if len(self.surveyWidget.getSelectedRows()) == 1:\n if self.playerWidget.hasSelection():\n if self.surveyWidget.singlePathSelected() == self.playerWidget.getCurrentMediaUrl():\n return True\n return False\n\n def isValidAudioFile(self, path):\n try:\n file = open(path, 'r')\n except FileNotFoundError:\n print(\"FILE NOT FOUND\")\n self.displayError(\"FILE NOT FOUND\")\n return False\n except IOError:\n self.displayError(\"FILE NOT READABLE\")\n return False\n\n extension = Path(path).suffix\n if extension not in [\".m4a\", \".mp3\", \".aac\", \".wav\"]:\n self.displayError(extension + \" FILE TYPE NOT ACCEPTED\")\n return False\n return True\n\n # TODO change\n def displayError(self, errorString):\n self.errorLabel.setText(\"ERROR: \" + errorString)\n","repo_name":"glam-imperial/Bornean-Gibbons-Call-Detection","sub_path":"gui/main/python/whoop/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":26415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
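The connect/disconnect pattern in `__openDatabase`/`__closeDatabase` reduces to the sketch below; 'QSQLITE' and 'example.db' are assumptions standing in for `SQLResources.dbType` and the real database path:

```python
import sys
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtSql import QSqlDatabase

app = QCoreApplication(sys.argv)

db = QSqlDatabase.addDatabase('QSQLITE')  # driver name assumed
db.setDatabaseName('example.db')          # path assumed
if not db.open():
    sys.exit(1)

# ... run queries, show widgets backed by QSql* models ...

db.close()
del db  # drop the last reference before removing the connection
QSqlDatabase.removeDatabase(QSqlDatabase.database().connectionName())
```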
+{"seq_id":"71157182863","text":"\"\"\"\nThis file is part of KIGM-Discord-Bot.\n\nKIGM-Discord-Bot is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nKIGM-Discord-Bot is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with KIGM-Discord-Bot. If not, see .\n\"\"\"\n\nfrom typing import Optional\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import cooldown\n\n\nclass Economy(commands.Cog, name=\":euro: Economy System\"):\n def __init__(self, bot):\n self.bot = bot\n self.ecofunc = self.bot.get_cog(\"Asyncfuncs\")\n\n # ------------------------------------Doing Stuff to the Balance--------------------------------------------\n\n @commands.command(description=\"Check your balance!\", aliases=[\"bal\"])\n async def balance(self, ctx, user: Optional[discord.Member] = None):\n \"\"\"\n if user == None:\n user = ctx.author\n\n await self.ecofunc.open_account(user)\n\n users = await self.ecofunc.get_user_data(user)\n purse_amt = users[\"Purse\"]\n bank_amt = users[\"Bank\"]\n\n embed = discord.Embed(\n title=\":european_post_office: The Official Bank of the KIGM Bot :european_post_office:\",\n description=f\"Balance of {user}:\",\n color=0xf8f8ff\n )\n\n embed.add_field(name=\":purse: Purse Bal: \", value=f'{str(purse_amt)} Nitro Shards ')\n embed.add_field(name=\":Bank: Bank Bal: \", value=f'{str(bank_amt)} Nitro Shards ')\n\n await ctx.send(embed=embed)\n \"\"\"\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.command(aliases=[\"wdrw\"], description=\"Withdraw your shards to the Bank!\")\n async def withdraw(self, ctx, amount=None):\n \"\"\"\n\n await self.ecofunc.get_user_data(ctx.author)\n\n if amount == None:\n await ctx.send(\"**ERROR!**\\npls give the specified amount next time pls (like this: `withdraw `)\")\n return\n\n if amount == 'all':\n users = await self.ecofunc.get_bank_data()\n amount = int(users[\"Bank\"])\n\n if amount == 'half':\n users = await self.ecofunc.get_bank_data()\n amount = int(users[\"Bank\"]) / 2\n\n amount=int(amount)\n bal = await self.ecofunc.update_eco(ctx.author)\n\n\n if amount > bal[1]:\n await ctx.send(\"**YOU DON'T HAVE THAT MUCH MONEY XD POOOOOOORRRR <:kekw:773125072637788160>**\")\n return\n\n if amount < 0:\n await ctx.send(\" Number must be positive kiddo \")\n return\n\n await self.ecofunc.update_eco(ctx.author, amount)\n await self.ecofunc.update_eco(ctx.author, -1*amount, \"Bank\")\n\n await ctx.send(f\"**You have successfully withdrew {amount} Nitro Shards from the Official Bank of the KIGM Bot to your purse! 
:Bank:**\")\n \"\"\"\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.command(aliases=[\"dep\"], description=\"Deposit your shards to the Bank!\")\n async def deposit(self, ctx, amount=None):\n # await self.ecofunc.open_account(ctx.author)\n\n # if amount == None:\n # await ctx.send(\"**ERROR!**\\npls give the specified amount next time pls (like this: `withdraw `)\")\n # return\n\n # bal = await self.ecofunc.update_eco(ctx.author)\n\n # if amount == 'all':\n # users = await self.ecofunc.get_bank_data()\n # amount = int(users[\"Purse\"])\n\n # if amount == 'half':\n # users = await self.ecofunc.get_bank_data()\n # amount = int(users[\"Purse\"]) / 2\n\n # amount=int(amount)\n\n # if amount > bal[0]:\n # await ctx.send(\"**YOU DON'T HAVE THAT MUCH MONEY XD POOOOOOORRRR <:kekw:773125072637788160>**\")\n # return\n\n # if amount < 0:\n # await ctx.send(\" Number must be positive kiddo \")\n # return\n\n # await self.ecofunc.update_eco(ctx.author, -1*amount)\n # await self.ecofunc.update_eco(ctx.author, amount, \"Bank\")\n\n # await ctx.send(f\"**You have successfully deposited {amount} Nitro Shards to the Official Bank of the KIGM Bot! :bank:**\")\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n # -----------------------------------Earning/Losing Shards--------------------------------------------------\n\n @commands.command(aliases=[\"give\"], description=\"Give someone your money!\")\n @commands.guild_only()\n async def send_money(self, ctx, member: discord.Member = None, amount=None):\n # await self.ecofunc.open_account(ctx.author)\n\n # if member == None:\n # await ctx.send(\"**ERROR** \\nNo member provided\")\n # return\n\n # await self.ecofunc.open_account(member)\n\n # if amount == None:\n # await ctx.send(\"**ERROR!**\\npls give the specified amount next time pls (like this: `withdraw `)\")\n # return\n\n # amount=int(amount)\n\n # if amount <= 24:\n # await ctx.send(\"**ERROR!**\\n**To avoid spam, I made the minimum Nitro Shard donation to __25__.**\\nI hope u understand :)\")\n # return\n\n # bal = await self.ecofunc.update_eco(ctx.author)\n\n # if amount > bal[0]:\n # await ctx.send(\"**YOU DON'T HAVE THAT MUCH MONEY XD POOOOOOORRRR <:kekw:773125072637788160>**\")\n # return\n\n # if amount < 0:\n # await ctx.send(\" Number must be positive kiddo \")\n # return\n\n # await self.ecofunc.update_eco(ctx.author, -1*amount)\n # await self.ecofunc.update_eco(member, amount)\n\n # await ctx.send(f\"**You have successfully gave {amount} Nitro Shards to {member.mention}! **\")\n\n # await member.send(f\"**{ctx.author}** | `ID: {ctx.author.id}` has given you **{amount} Nitro Shards! 
**\")\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.command(\n description=\":detective: *theif boi* :detective:\", aliases=[\"steal\", \"loot\"]\n )\n @commands.guild_only()\n @cooldown(1, 3600, commands.BucketType.user)\n async def rob(self, ctx, member: discord.Member = None):\n # await self.ecofunc.open_account(ctx.author)\n\n # if member == None:\n # await ctx.send(\"**ERROR** \\nNo member provided to rob lmao\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # await self.ecofunc.open_account(member)\n\n # bal = await self.ecofunc.update_eco(member)\n\n # if bal[0] < 40:\n # await ctx.send(\"**Member doesn't have that much money to get robbed!** Poor guy...\\n(person must have at least 40 Nitro Shards in order to be robbable.)\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # if bal[0] < 1500:\n # eorl = random.randint(10, bal[0]) * 0.8\n # else:\n # eorl = random.randint(10, bal[0]) / 6\n\n # eorl = int(eorl)\n # chance = random.randint(1, 9)\n\n # if chance == 8 or chance == 9:\n # await self.ecofunc.update_eco(ctx.author, -1*eorl)\n\n # earnings_of_member = eorl / 2\n # earnings_of_member= int(earnings_of_member)\n\n # await self.ecofunc.update_eco(member, earnings_of_member)\n\n # embed = discord.Embed(title=\"**ROB FAILED!**\", description=f\"You attempted to pickpocket {member.mention} but instead, stole {earnings_of_member} Nitro Shards from you!\",color = discord.Color.red())\n\n # embed.set_footer(text=f\"Dumb Thief: {ctx.author} | Plot Twister: {member}\", icon_url=ctx.author.avatar_url)\n\n # await ctx.send(embed=embed)\n\n # else:\n # await self.ecofunc.update_eco(ctx.author, eorl)\n # await self.ecofunc.update_eco(member, -1*eorl)\n\n # embed = discord.Embed(title=\"**ROB SUCCESSFUL!**\", description=f\"You pickpocketed {member.mention} and stole {eorl} Nitro Shards! 
\",color = discord.Color.green())\n\n # embed.set_footer(text=f\"Thief: {ctx.author} | Poor Victim: {member}\", icon_url=ctx.author.avatar_url)\n\n # await ctx.send(embed=embed)\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.command(description=\"Get your daily shards!\")\n @cooldown(1, 86400, commands.BucketType.user)\n async def daily(self, ctx):\n # await self.ecofunc.open_account(ctx.author)\n\n # await self.ecofunc.update_eco(ctx.author, 200)\n\n # embed = discord.Embed(title=\"Here's your daily shards!\", description='**200 Nitro Shards ** have been now placed into your purse!', color=0xfcb2c5)\n # embed.set_footer(text=\"You may get your daily again in 24 HOURS!\")\n\n # await ctx.send(embed=embed)\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.group(\n name=\"gamble\",\n description=\"Bet/Gamble your shards for more satisfaction!\",\n aliases=[\"bet\"],\n )\n @commands.guild_only()\n async def game_eco(self, ctx):\n # if ctx.invoked_subcommand is None:\n # embed = discord.Embed(\n # title='**Games where you can gamble your shards on!**',\n # color=discord.Color.red()\n # )\n\n # embed.add_field(name=\"**Flip the Coin!**\", value='`gamble coinflip `')\n # embed.add_field(name=\"**Roll the Dice!**\", value='`gamble dice `')\n\n # await ctx.send(embed=embed)\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @game_eco.command(\n description=\"Gamble yourself 50/50 with a coin!\",\n aliases=[\"flipcoin\", \"flipthecoin\"],\n )\n @commands.guild_only()\n @cooldown(1, 600, commands.BucketType.user)\n async def coinflip(self, ctx, bet: int = None):\n # await self.ecofunc.open_account(ctx.author)\n\n # if bet is None:\n # await ctx.send(\"Where's your bet? lol\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # bal = await self.ecofunc.update_eco(ctx.author)\n\n # if bet > bal[0]:\n # await ctx.send(\"You don't have that much Nitro Shards on your Bank like that lol\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # if bet < 30:\n # await ctx.send(\"Your bet must be higher than **30 Shards!**\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # await ctx.send(\"**The coin has flipped!**\\nPick a side! Heads or tails?\")\n\n # coin = ('head', 'tail')\n\n # flip = random.choice(coin)\n # try:\n\n # msg = await self.bot.wait_for(\"message\", timeout=8, check=lambda message: message.author == ctx.author and message.channel == ctx.channel)\n\n # msg = str(msg.content.lower())\n\n # if len(msg) < 3:\n # await ctx.send(\"Your guess must be heads/tails!\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # msgSplit = []\n\n # for letter in msg:\n # msgSplit.append(letter)\n\n # msgWord = f\"{msgSplit[0]}{msgSplit[1]}{msgSplit[2]}{msgSplit[3]}\"\n\n # if msgWord in coin:\n # if msgWord == flip:\n # await ctx.send(\"You won! You get 150% of your Nitro Shards back!\")\n\n # await self.ecofunc.update_eco(ctx.author, int(bet * 1.5))\n\n # elif msgWord != flip:\n # await ctx.send(f\"You lost! It was actually the **{flip}s side!**\\nRip ur {bet} Nitro Shards\")\n\n # await self.ecofunc.update_eco(ctx.author, -1*int(bet))\n\n # else:\n # await ctx.send(\"**ERROR!**\\n'Side' not found. 
Please try again.\")\n # ctx.command.reset_cooldown(ctx)\n\n # except asyncio.TimeoutError:\n # await ctx.send(\"*Command cancelled cuz u sloww*\")\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @game_eco.command(\n description=\"Guess the dice! (dev running out of ideas so just use the suggest command when u have a cool one)\",\n aliases=[\"guessthedice\"],\n )\n @commands.guild_only()\n @cooldown(1, 600, commands.BucketType.user)\n async def dice(self, ctx, bet: int = None):\n # await self.ecofunc.open_account(ctx.author)\n\n # die1 = random.randint(1, 6)\n # die2 = random.randint(1, 6)\n\n # if bet is None:\n # await ctx.send(\"**You don't have a bet!**\\nCome back when u do have ;)\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # bal = await self.ecofunc.update_eco(ctx.author)\n\n # if bet > bal[0]:\n # await ctx.send(\"You don't have that much Nitro Shards on your Bank like that lol\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # if bet < 30:\n # await ctx.send(\"Your bet must be higher than **30 Shards!**\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # await ctx.send(\"What do you want to guess?\\n\\n*1. Guess both of the numbers of each die (gives 200% of your shards back if guessed correctly)*\\n*2. Is it a double, or not? (only gives 120% of your money back if guessed correctly)*\")\n\n # try:\n\n # msg = await self.bot.wait_for(\"message\", timeout=10, check=lambda message: message.author == ctx.author and message.channel == ctx.channel)\n\n # msg = msg.content\n\n # if msg == \"1\" or msg == \"1.\":\n # await ctx.send(\"The dice is about to be rolled! What is your guess? (separate with spaces plss)\")\n # try:\n\n # guess_msg = await self.bot.wait_for(\"message\", timeout=20, check=lambda message: message.author == ctx.author and message.channel == ctx.channel)\n\n # guess_list = guess_msg.content.split(\" \")\n\n # if guess_list[0] not in \"123456\" or guess_list[1] not in \"123456\":\n # await ctx.send(\"Your guess must be a number lower than 7!\")\n # ctx.command.reset_cooldown(ctx)\n # return\n\n # if die1 == int(guess_list[0]) and die2 == int(guess_list[1]) or die1 == int(guess_list[1]) and die2 == int(guess_list[0]):\n # dice_guess_profit = bet * 2\n # await ctx.send(\"You are...\")\n # await asyncio.sleep(.5)\n # await ctx.send(f\"**Correct!** \\nThe dice numbers are `{die1}` and `{die2}`!\\n**Here's your prize money of :tada: {dice_guess_profit} Nitro Shards! :tada:**\")\n\n # await self.ecofunc.update_eco(ctx.author, int(dice_guess_profit))\n\n # elif die1 != int(guess_list[0]) and die2 != int(guess_list[1]) or die1 != int(guess_list[1]) and die2 != int(guess_list[0]):\n # await ctx.send(\"You are...\")\n # await asyncio.sleep(.5)\n # await ctx.send(f\"Incorrect! \\n**The numbers were {die1} and {die2}!**\\nWelp, all of your shards have to go somewhere, Am I right?\")\n\n # await self.ecofunc.update_eco(ctx.author, -1*int(bet))\n\n # except asyncio.TimeoutError:\n # await ctx.send(\"Slow. Just... cmon\")\n # ctx.command.reset_cooldown(ctx)\n\n # elif msg == \"2\" or msg == \"2.\":\n # await ctx.send(\"**The dice has rolled!** What do you think it is... double or no? 
[d/n]\")\n\n # try:\n\n # msg = await self.bot.wait_for(\"message\", timeout=15, check=lambda message: message.author == ctx.author and message.channel == ctx.channel)\n\n # if msg.content.lower() in ['double', 'd', 'yes', 'a double'] and die1 == die2 or msg.content.lower() in ['not double', 'not a double', 'no', 'n', 'false'] and die1 != die2:\n # await self.ecofunc.update_eco(ctx.author, -1*int(bet))\n\n # bet_double_profit = bet * 1.2\n # await ctx.send(\"Guess what...\")\n # await asyncio.sleep(.5)\n # await ctx.send(f\"You won! \\n{die1} and {die2}!\\nHere's the shards that I promised you to keep.\")\n\n # await self.ecofunc.update_eco(ctx.author, int(bet_double_profit))\n\n # elif msg.content.lower() in ['not double', 'not a double', 'no', 'n', 'false'] and die1 == die2 or msg.content.lower() in ['double', 'd', 'yes', 'a double'] and die1 != die2:\n # await ctx.send(\"Guess what...\")\n # await asyncio.sleep(.5)\n # await ctx.send(f\"You lost! \\n**:skull: RIP YOUR {bet} HARD-EARNED SHARDS :skull:**\\nDice was `{die1}` and `{die2}`!\")\n\n # await self.ecofunc.update_eco(ctx.author, -1*int(bet))\n\n # except asyncio.TimeoutError:\n # await ctx.send(\"slow. cancelled lol\")\n # ctx.command.reset_cooldown(ctx)\n\n # else:\n # await ctx.send(\"DON'T YOU KNOW THE DIFFERENCE BETWEEN A LETTER AND A NUMBER BRUHHH\")\n # ctx.command.reset_cooldown(ctx)\n\n # except asyncio.TimeoutError:\n # await ctx.send(\"Bruhh just pick a numberrrr cancelled.\")\n # ctx.command.reset_cooldown(ctx)\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n @commands.command(\n description=\"Work hard to get that juicy shards! \"\n )\n @commands.guild_only()\n @cooldown(1, 1800, commands.BucketType.user)\n async def work(self, ctx):\n # give_money = random.randint(15, 30)\n # work_list = [\n # f\"You made a discord bot and made **{str(give_money)} Nitro Shards!** \",\n # f\"You played :person_bouncing_ball: basketball :person_bouncing_ball: for a team and won! They gave you **{str(give_money)} Nitro Shards!** \",\n # f\"You worked for a nursing home :couch: for a day and made **{str(give_money)} Nitro Shards!** \",\n # f\"You worked as a car washer :blue_car: for a few hours and made **{str(give_money)} Nitro Shards!** \"\n # ]\n\n # work_msg = random.choice(work_list)\n\n # await self.ecofunc.open_account(ctx.author)\n\n # await self.ecofunc.update_eco(ctx.author, -int(give_money))\n\n # embed = discord.Embed(description=work_msg, color=0xffa500)\n\n # embed.set_author(name=f\"Hardwork Done by {ctx.author}\", icon_url=ctx.author.avatar_url)\n\n # await ctx.send(embed=embed)\n await ctx.send(\n \"**The Devs Are Currently Improving The Economy!**\\nSorry for the inconvinience!\"\n )\n\n\ndef setup(bot):\n bot.add_cog(Economy(bot))\n","repo_name":"Makiyu-py/KIGM-Discord-Bot","sub_path":"cogs/comms/cog_eco.py","file_name":"cog_eco.py","file_ext":"py","file_size_in_byte":19875,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"4612100389","text":"#!/usr/bin/python\n# Name: IlUtil.py\n# Desc: The constants and utility functions using in the Input Language Configure Panel\n# Date: 2010.03.24 - 1st release\n# 2010.06.17 - 2nd release, rewrite to separate functionality widgets to individual files\n# Depend: 1.va-config.st2.0\n# 2.bs-scim_simf\n\nimport os\nimport Util\nimport VAEnv\n\nimport simf\n\n#---constant variables---\n\n#paths\nLangKeyMapPath=\"/etc/oem/LangKeyMap\"\nInputLangListPath=\"/usr/share/maps/InputLangList\"\n\n#locale\nRcDomain=\"dvm-config-input-lang\"\nLocaleDir=\"/usr/share/locale\"\n\n#Ui sizes\nmainW=500 #main window width\nmainH=360 #main window height\n\ntopH=60 #top area height\nbotH=50 #bottom area height\nmidH=mainH-topH-botH #middle area height\n\nsepW=3 #separator width\npadW=3 #padding space width\n\nsideW=60 #side width(from window side to frame)\ninputLangW=mainW-2*sideW #input language set width\n\nbtnW=75 #button width\nbtnH=25 #button height\n\n#English IM uuid\nEnglishImUuid=\"c6bebc27-6324-4b77-8ad4-6d41dcaf2e08\"\n\n#im detail setup module dictionary{uuid:module}\nSetupModuleDic={\n \"065d7b20-dda2-47fb-8f94-3306d9a25e56\" : \"anthy-imengine-setup.so\",\n \"fcff66b6-4d3e-4cf2-833c-01ef66ac6025\" : \"chewing-imengine-setup.so\",\n \"05235cfc-43ce-490c-b1b1-c5a2185276ae\" : \"pinyin-imengine-setup.so\",\n \"d75857a5-4148-4745-89e2-1da7ddaf70a9\" : \"hangul-imengine-setup.so\",\n \"63752e02-c9cb-420c-bac6-f17f60a16822\" : \"thai-imengine-setup.so\"\n}\n\n#save mode\n(\n NS, #Normal Save: The configure would not be saved until the user presses OK button to confirm.\n IS, #Instant Save: The configure would be saved when it is modified.\n DIS #Disable Instant Save: To disable Instant Save temporarily.\n)=range(3)\n\n#input method combobox columns\n(\n LangCol,\n IconCol,\n NameCol,\n FunCol\n)=range(4)\n\n#page type\n(\n AddRmImPage,\n ToolbarPage\n)=range(2)\n\n#treeview column\n(\n IconCol,\n NameCol,\n EnableCol,\n IncCol,\n UuidCol\n)=range(5)\n\n#im info data\n(\n InfoUuid,\n InfoName,\n InfoLocale,\n InfoIcon,\n InfoPri\n)=range(5)\n\n#---functions---\n\n#convert language(English) to locale(en_US) \ndef convertLangToLocale(langStr):\n localeStr=None\n langListPath=InputLangListPath\n if os.path.exists(langListPath):\n fp=Util.open_file(langListPath,\"r\")\n for line in fp.readlines():\n if line[0]==\"\\n\" or line[0]==\"#\":\n continue\n line=line.strip()\n items=line.split(\";\")\n if items[0]==langStr:\n localeStr=items[1]\n break\n fp.close()\n if localeStr==None: #if there is no matched language, return \"~other\"\n return \"~other\"\n return localeStr\n\n#convert locale(en_US) to language(English)\ndef convertLocaleToLang(localeStr):\n langStr=None\n langListPath=InputLangListPath\n if os.path.exists(langListPath):\n fp=Util.open_file(langListPath,\"r\")\n for line in fp.readlines():\n if line[0]==\"\\n\" or line[0]==\"#\":\n continue\n line=line.strip()\n items=line.split(\";\")\n if items[1]==localeStr:\n langStr=items[0]\n break\n fp.close()\n if langStr==None: #if there is no matched locale, return \"Other\"\n return \"Other\"\n return langStr\n\n#get input language list \ndef getInputLangList():\n ilEnv=os.getenv(\"DI_DEF_INPUTLANG_LIST\")\n availableIl=None\n if ilEnv != None:\n availableIl=ilEnv.split(',')\n else:\n #use all of input language\n availableIl=None\n langList=None\n langListPath=InputLangListPath\n if os.path.exists(langListPath):\n langList=[]\n fp=Util.open_file(langListPath,\"r\")\n for line in fp.readlines():\n if line[0]==\"\\n\" or 
line[0]==\"#\":\n continue\n items=line.split(\";\")\n if availableIl:\n if items[0] in availableIl:\n langList.append(items[0])\n else:\n langList.append(items[0])\n return langList\n\n#get keyboard list\ndef getKeyboardList():\n kbList=VAEnv.di_have_key_list()\n if kbList:\n #modify keyboard name\n if \"Arabic(001)\" in kbList:\n kbList.remove(\"Arabic(001)\")\n kbList.append(\"Arabic (001)\")\n if \"Arabic(002)\" in kbList:\n kbList.remove(\"Arabic(002)\")\n kbList.append(\"Arabic (002)\")\n if \"Czech(QWERTY)\" in kbList:\n kbList.remove(\"Czech(QWERTY)\")\n kbList.append(\"Czech (QWERTY)\")\n if \"Polish(programmers)\" in kbList:\n kbList.remove(\"Polish(programmers)\")\n kbList.append(\"Polish (programmers)\")\n if \"Portuguese(Brazilian ABNT)\" in kbList:\n kbList.remove(\"Portuguese(Brazilian ABNT)\")\n kbList.append(\"Portuguese (Brazilian ABNT)\")\n if \"Slovak(QWERTY)\" in kbList:\n kbList.remove(\"Slovak(QWERTY)\")\n kbList.append(\"Slovak (QWERTY)\")\n kbList.sort()\n else:\n kbList=None\n return kbList\n\n#get default keyboard with the given language\ndef getDefaultKbWithLang(langStr):\n kbStr=None\n if os.path.exists(LangKeyMapPath):\n fp=Util.open_file(LangKeyMapPath,\"r\")\n for line in fp.readlines():\n if line[0]==\"\\n\" or line[0]==\"#\":\n continue\n line=line.strip()\n items=line.split(\":\")\n if items[0]==langStr:\n kbList=getKeyboardList()\n if items[1] in kbList:\n return items[1]\n else:\n return \"English U.S.\" \n return \"English U.S.\" \n \n#get the im info list with given lang\ndef getImInfoDicWithLang(langStr,filter=\"enabled\"):\n localeStr=convertLangToLocale(langStr)\n simf.init()\n simf.readcache(localeStr)\n imListStr=simf.listim(localeStr,filter)\n imList=imListStr.split(\",\")\n imInfoDic={}\n for uuid in imList:\n imInfoStr=simf.getinfo(uuid,localeStr)\n imInfo=imInfoStr.split(\",\")\n imInfoDic[uuid]=imInfo\n #covert pri from string to int\n imInfoDic[uuid][InfoPri]=int(imInfoDic[uuid][InfoPri],10)\n return imInfoDic\n\n#write im infos to simf\ndef writeImInfo(oriSortedUuidList,curSortedUuidList,langStr):\n #init simf\n localeStr=convertLangToLocale(langStr)\n simf.init()\n simf.readcache(localeStr)\n #deal with disabled im\n for uuid in oriSortedUuidList:\n if uuid not in curSortedUuidList:\n simf.disableim(uuid,\"flush\",localeStr)\n #deal with enabled im\n for uuid in curSortedUuidList:\n simf.setmaxpri(uuid,\"flush\",localeStr)\n\n#get the uuid which has the max priority\ndef getUuidWithMaxPri(inputImInfoDic):\n uuidList=inputImInfoDic.keys()\n priList=[inputImInfoDic[uuid][InfoPri] for uuid in uuidList]\n priToUuidDic=dict([[priList[i],uuidList[i]] for i in range(len(priList))])\n return priToUuidDic[max(priList)]\n\n#set given im with max priority\ndef setUuidWithMaxPri(inputUuid,inputImInfoDic,langStr):\n uuidList=[]\n priList=[]\n if inputImInfoDic=={} or inputImInfoDic==None:\n maxPri=0\n else:\n uuidList=inputImInfoDic.keys()\n priList=[inputImInfoDic[uuid][InfoPri] for uuid in uuidList]\n maxPri=max(priList)\n if inputUuid not in uuidList:\n #get info of this im from simf and add this im's info in ImInfoDic \n localeStr=convertLangToLocale(langStr)\n simf.init()\n simf.readcache(localeStr)\n imInfoStr=simf.getinfo(inputUuid,localeStr)\n imInfo=imInfoStr.split(\",\")\n inputImInfoDic[inputUuid]=imInfo\n inputImInfoDic[inputUuid][InfoPri]=maxPri+1\n return inputImInfoDic\n\n#disable given im by removing it from imInfoDic\ndef disableImWithUuid(inputUuid,inputImInfoDic):\n if inputUuid in inputImInfoDic:\n del inputImInfoDic[inputUuid]\n 
return inputImInfoDic\n\n","repo_name":"kaitowang/PythonDevTest","sub_path":"backup/dvm-config/pyconf/IlUtil.py","file_name":"IlUtil.py","file_ext":"py","file_size_in_byte":7842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"37903389596","text":"\"\"\"\n.. automodule:: model.credits\n.. automodule:: model.itemdata\n.. automodule:: model.modelobjectbase\n.. automodule:: model.openidaccount\n.. automodule:: model.pagedata\n.. automodule:: model.sitedata\n.. automodule:: model.testing\n.. automodule:: model.useraccount\n.. automodule:: model.userdata\n.. automodule:: model.usersettings\n\"\"\"\n\nfrom pprint import pprint, pformat\nimport logging\n\nimport cherrypy\nfrom psycopg2.extras import DictCursor\n\nfrom plugins.setuppgconnectionpool import SetupPgConnectionPool\n\ndef load_model(includes):\n o = {}\n \n for i in includes:\n if i.key in o:\n o[i.key].update(i.read())\n else:\n o[i.key] = i.read()\n \n return o\n \nclass ConnectionManager(object):\n\n def __init__(self, pool_name, auto_commit=True):\n pool_name = SetupPgConnectionPool.get_full_pool_name(pool_name)\n self.__pool = getattr(cherrypy.request.app, pool_name)\n self.__auto_commit = auto_commit\n\n def __enter__(self):\n self.__conn = self.__pool.getconn()\n return self.__conn\n \n def __exit__(self, exc_type, exc_value, traceback):\n if self.__auto_commit:\n self.__conn.commit()\n self.__pool.putconn(self.__conn)\n \ndef grab_connection(pool_name, auto_commit=True):\n return ConnectionManager(pool_name, auto_commit)\n \ndef get_scalar(connection, statement, variables, column):\n data = get_row(connection, statement, variables)\n if not data is None:\n return data[column]\n \ndef get_scalar_nc(pool_name, statement, variables, column):\n with grab_connection(pool_name) as conn:\n return get_scalar(conn, statement, variables, column)\n \ndef get_row(connection, statement, variables):\n cur = connection.cursor(cursor_factory=DictCursor)\n cur.execute(trim_statement(statement), variables)\n return cur.fetchone()\n \ndef get_row_nc(pool_name, statement, variables):\n \"\"\"\n Like get_row, but doesn't require a connection to be passed in as an\n argument.\n \n Useful when not wrapping multiple statements in a transaction.\n \"\"\"\n with grab_connection(pool_name) as conn:\n return get_row(conn, statement, variables)\n\ndef get_all_rows_nc(pool_name, statement, variables):\n with grab_connection(pool_name) as conn:\n return get_all_rows(conn, statement, variables)\n \ndef get_all_rows(connection, statement, variables):\n cur = connection.cursor(cursor_factory=DictCursor)\n cur.execute(trim_statement(statement), variables)\n return cur.fetchall()\n \ndef execute_action_nc(pool_name, statement, variables, return_row=False):\n with grab_connection(pool_name) as conn:\n return execute_action(conn, statement, variables, return_row)\n \ndef execute_action(connection, statement, variables, return_row=False):\n cur = connection.cursor(cursor_factory=DictCursor)\n cur.execute(trim_statement(statement), variables)\n if return_row:\n return cur.fetchone()\n \ndef trim_statement(input):\n \"\"\"\n Produces a neatened SQL statement\n \"\"\"\n cherrypy.log.error(\n 'Transforming: {0}'.format(input),\n 'trim_statement',\n logging.DEBUG)\n \n o = []\n for i in input.splitlines()[:]:\n a = i.strip()\n if '' != a:\n o.append(a)\n \n p = ' '.join(o)\n \n cherrypy.log.error(\n 'Returning: {0}'.format(p),\n 'trim_statement',\n logging.DEBUG)\n \n return p\n ","repo_name":"kestava/webapp","sub_path":"src/www/model/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"37154781431","text":"import math\n\nm = 0\nmaxP = 0\nfor p in range(2,1001,2):\n count = 0\n for a in range(2,int (p/3)):\n if (p*(p-2*a) %(2*p-a)) == 0:\n print (p,a)\n count = count + 1\n if count > m:\n m = count\n maxP = p\nprint (maxP, m)","repo_name":"chris-arsenault/Euler","sub_path":"000/Euler39.py","file_name":"Euler39.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37529616850","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef philosophyGame(url,seen=None,count=0):\n if seen == None:\n seen= []\n r = requests.get(url)\n soup = BeautifulSoup(r.text,\"html.parser\")\n print(soup.h1.string)\n body = soup.find(class_=\"mw-parser-output\")\n for paragraph in body.find_all(\"p\"):\n for link in paragraph.find_all(\"a\"):\n new = link[\"href\"]\n if new.startswith(\"/wiki/\") and not new.startswith((\"/wiki/Help:\",\"/wiki/File:\")) and \"(\" not in link.previous_element:\n if new == \"/wiki/Philosophy\":\n print(\"----------\")\n print(\"Reached Philosophy in %s steps\"%(count))\n print(\"----------\")\n return None\n elif new in seen:\n print(\"----------\")\n print(\"Reached a loop\")\n print(\"----------\")\n return None\n seen.append(new)\n return philosophyGame(\"https://en.wikipedia.org\"+new,seen=seen,count=count+1)\n\nprint(\"----- Philosophy Game -----\")\nprint(\"Legend has it that clicking the first link on a wikipedia page always leads to the page 'Philosophy'\")\nprint(\"Test it out here!\")\nwhile True:\n print()\n print(\"Where would you like to start?\")\n print(\"1. Wikepedia URL\")\n print(\"2. Google a term\")\n print(\"3. Random page\")\n print(\"4. Exit\")\n print()\n choice = int(input(\"Choose an option: \"))\n print(\"----------\")\n if choice == 1:\n search = input(\"Enter a wikipedia url: \")\n if search.startswith(\"https://en.wikipedia.org/wiki/\"):\n try:\n philosophyGame(search)\n except:\n print(\"No internet connection you dingus\")\n else:\n print(\"That's not a wikipedia url\")\n elif choice == 2:\n headers = {\n 'apikey': 'b9ee4b50-de6e-11e9-8e76-ebc82d7b87c0',\n }\n\n params = (\n ('q',input(\"Enter a search term: \")),\n ('location', 'Australia'),\n ('search_engine', 'google.com'),\n ('gl', 'AU'),\n ('hl', 'en'),\n ('num',100)\n )\n\n print(\"Searching google...\")\n try:\n response = requests.get('https://app.zenserp.com/api/v2/search', headers=headers, params=params)\n results = response.json()\n\n for result in results['organic']:\n if 'url' in result and result['url'].startswith('https://en.wikipedia.org/wiki/'):\n wiki = result['url']\n break\n try:\n print(\"Found a wiki page:\",wiki)\n philosophyGame(wiki)\n except:\n print(\"There were no wikipedia pages in the first 100 results\")\n\n except:\n print(\"No internet connection you dingus\")\n \n elif choice == 3:\n try:\n philosophyGame(\"https://en.wikipedia.org/wiki/Special:Random\")\n except:\n print(\"No internet connection you dingus\")\n elif choice == 4:\n print(\"Goodbye!\")\n break\n","repo_name":"jmang00/philosophy-game","sub_path":"philosophy-game.py","file_name":"philosophy-game.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"22105006210","text":"#------------------------------------------#\n# Developed by: Arjan\n# Adapted by: Thiago Piovesan\n# Video link: https://youtu.be/XOFrvzWFM7Y\n#------------------------------------------#\n# Library importation:\nfrom dash import Dash, html\nfrom dash_bootstrap_components.themes import BOOTSTRAP\n\nfrom src.components.layout import create_layout\n\ndef main() -> None:\n app = Dash(external_stylesheets=[BOOTSTRAP])\n app.title = \"Medal dashboard\"\n app.layout = create_layout(app)\n app.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ThiagoPiovesan/Dash---Data-Visualization","sub_path":"part1-GettingStarted/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"35154005154","text":"from sys import stdin\nfrom operator import itemgetter\nimport re\n\ninp = [i.strip() for i in stdin.readlines()]\n\n# Find the second when lights are closest together\n# nums = [[int(i) for i in re.findall(r'-?\\d+', l)] for l in inp]\n# dif = list()\n# for i in range(20000):\n# minx = min(x + i * vx for (x, y, vx, vy) in nums)\n# maxx = max(x + i * vx for (x, y, vx, vy) in nums)\n# miny = min(y + i * vy for (x, y, vx, vy) in nums)\n# maxy = max(y + i * vy for (x, y, vx, vy) in nums)\n# dif.append(maxx-minx+maxy-miny)\n# print(dif.index(min(dif)))\n# print(min(dif))\n\nsecond = 10605\ncoords = [[tuple(map(int, j)) for j in re.compile('=<\\s?(.*?),\\s?(.*?)>').findall(i)] for i in inp]\npos, velo = list(), list()\nfor i in coords:\n pos.append(list(i[0]))\n velo.append(i[1])\n\nfor i in range(len(pos)):\n pos[i][0] += velo[i][0]*second\n pos[i][1] += velo[i][1]*second\nmaxi = max(max(pos, key=itemgetter(1)))\nmini = min(min(pos, key=itemgetter(1)))\nfor i in range(mini-mini, maxi*2):\n for j in range(mini-mini, maxi*2):\n if [j,i] in pos:\n print('#', end='')\n else:\n print('.', end='')\n print()","repo_name":"tterb/advent-of-code","sub_path":"2018/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"38516550886","text":"from typing import List, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.declarations.function_contract import FunctionContract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass MultipleInitUpgradeable(AbstractDetector):\n \"\"\"\n Openzeppelin Multiple Upgradable Initializer Calls Detector\n \"\"\"\n\n ARGUMENT = \"multiple-init-upgradeable\"\n HELP = \"Multiple upgradeable initializer calls detector.\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://docs.openzeppelin.com/contracts/4.x/upgradeable#multiple-inheritance\"\n\n WIKI_TITLE = \"Multiple Initializer Calls\"\n WIKI_DESCRIPTION = \"Initializer functions are not linearized by the compiler like constructors. Because of this, each __{ContractName}_init function embeds the linearized calls to all parent initializers. As a consequence, calling two of these init functions can potentially initialize the same contract twice.\"\n\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\n import \"@openzeppelin/contracts-upgradeable/token/ERC20/SafeERC20Upgradeable.sol\";\n import \"@openzeppelin/contracts-upgradeable/token/ERC20/ERC20PausableUpgradeable.sol\";\n import \"@openzeppelin/contracts-upgradeable/token/ERC20/ERC20BurnableUpgradeable.sol\";\n import \"@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol\";\n\n contract SomeToken is\n ERC20Upgradeable,\n ERC20PausableUpgradeable,\n ERC20BurnableUpgradeable\n // OwnableUpgradeable\n {\n function initialize() public initializer {\n __ERC20_init(\"Some Token\", \"CX\");\n __ERC20Pausable_init();\n }\n }\n```\n\"\"\"\n WIKI_RECOMMENDATION = \"Ensure not more than one upgradeable _init() called. 
\"\n\n # Check the Initialize function inside find how many function calls ends with _init()\n @staticmethod\n def detect_init_func(func: FunctionContract) -> bool:\n \"\"\"Detect if the function is calling multiple _init functions\"\"\"\n if not func.name == \"initialize\":\n return False\n \n calls = [c.name for c in func.internal_calls]\n if len([call for call in calls if call.endswith(\"_init\")]) <= 1:\n return False\n\n return True\n \n # Check contract name ends with Upgradeable\n def count_upgradable_inheritances(self, contract):\n count = 0\n for inherited_contract in contract.inheritance:\n if inherited_contract.name.endswith(\"Upgradeable\"):\n count += 1\n return count\n \n def detect_multiple_init(self, contract: Contract) -> List[FunctionContract]:\n ret = []\n for f in contract.functions_declared:\n # If initialize() calls more than one _init() and has more than one Upgradeable inheritance\n if self.detect_init_func(f) and self.count_upgradable_inheritances(contract) > 1:\n ret.append(f)\n return ret\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect the functions with multiple _init calls\"\"\"\n results = []\n for c in self.contracts:\n functions = self.detect_multiple_init(c)\n for func in functions:\n\n info: DETECTOR_INFO = [func, \" calls multiple _init functions\\n\"]\n\n res = self.generate_result(info)\n\n results.append(res)\n\n return results\n\n","repo_name":"Erengonen/slither-multiple-init-detector","sub_path":"slither/detectors/examples/multiple_init_upgradeable.py","file_name":"multiple_init_upgradeable.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"23698320051","text":"'''\r\nCreated on Oct 20, 2018\r\n\r\n@author: Jordan Deuley\r\n'''\r\n\r\nimport random\r\nimport time\r\nfrom pynput.keyboard import Key\r\n\r\nremoveLetters = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.\"\r\n\r\ndocHoward = str(\"Unknown Man\")\r\n\r\ntraitPoints = 25\r\n\r\n\r\ndef lineSkip():\r\n print(\"\")\r\n\r\n\r\ndef wakeUp():\r\n print(\"Hello?\")\r\n lineSkip()\r\n time.sleep(2)\r\n print(\"......\")\r\n lineSkip()\r\n time.sleep(2)\r\n print(\"Wake up!\")\r\n lineSkip()\r\n time.sleep(3)\r\n\r\n\r\ndef userName():\r\n print(\"About time you woke up.\")\r\n lineSkip()\r\n time.sleep(2)\r\n playerName = input(\"So what is your name? \")\r\n if playerName == \"B3T4 T3ST3R\":\r\n print(\"No you can't be him/her?\")\r\n print(\"You are a Beta Tester!\")\r\n print(\"Here take your stuff.\")\r\n lineSkip()\r\n time.sleep(1)\r\n print(\"......\")\r\n time.sleep(1)\r\n lineSkip()\r\n print(\"Your name is \" + playerName + \"? That is an odd name.\")\r\n time.sleep(3)\r\n return playerName\r\n\r\n\r\ndef createStrength():\r\n createStrength = input(\"How strong would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createStrength.isdigit() == True:\r\n while createStrength.isdigit() == True:\r\n if int(createStrength) > 10 or int(createStrength) < 0:\r\n print(\"ERROR 1\")\r\n print(\"You used too many points\")\r\n createStrength = input(\"How strong would you say you are? From a scale 1-10.\" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n elif int(createStrength) <= 10 and int(createStrength) <= traitPoints:\r\n print(\"Excellent.\")\r\n break\r\n elif int(createStrength) > 10 or int(createStrength) > traitPoints:\r\n print(\"You can't beat the system, try again\")\r\n createStrength = input(\"How strong would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n elif createStrength.isdigit() == False:\r\n while createStrength.isdigit() == False: \r\n print(\"ERROR 2\")\r\n print(\"User input error.\")\r\n createStrength = input(\"How strong would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createStrength.isdigit() == False:\r\n print(\"You can't beat the system, try again\")\r\n createStrength = input(\"How strong would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n return createStrength\r\n\r\n\r\ndef createSpeed():\r\n createSpeed = input(\"How fast would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createSpeed.isdigit() == True:\r\n while createSpeed.isdigit() == True:\r\n if int(createSpeed) > 10 or int(createSpeed) < 0:\r\n print(\"ERROR 1\")\r\n print(\"You used too many points\")\r\n createSpeed = input(\"How fast would you say you are? From a scale 1-10.\" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n elif int(createSpeed) <= 10 and int(createSpeed) <= traitPoints:\r\n print(\"Excellent.\")\r\n break\r\n elif int(createSpeed) > 10 or int(createSpeed) > traitPoints:\r\n print(\"You can't beat the system, try again\")\r\n createSpeed = input(\"How fast would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. 
\")\r\n elif createSpeed.isdigit() == False:\r\n while createSpeed.isdigit() == False: \r\n print(\"ERROR 2\")\r\n print(\"User input error.\")\r\n createSpeed = input(\"How fast would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createSpeed.isdigit() == False:\r\n print(\"You can't beat the system, try again\")\r\n createSpeed = input(\"How fast would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n return createSpeed\r\n \r\n\r\ndef createSmarts():\r\n createSmarts = input(\"How smart would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createSmarts.isdigit() == True:\r\n while createSmarts.isdigit() == True:\r\n if int(createSmarts) > 10 or int(createSmarts) < 0:\r\n print(\"ERROR 1\")\r\n print(\"You used too many points\")\r\n createSmarts = input(\"How smart would you say you are? From a scale 1-10.\" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n elif int(createSmarts) <= 10 and int(createSmarts) <= traitPoints:\r\n print(\"Excellent.\")\r\n break\r\n elif int(createSmarts) > 10 or int(createSmarts) > traitPoints:\r\n print(\"You can't beat the system, try again\")\r\n createSmarts = input(\"How smart would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n elif createSmarts.isdigit() == False:\r\n while createSmarts.isdigit() == False: \r\n print(\"ERROR 2\")\r\n print(\"User input error.\")\r\n createSmarts = input(\"How smart would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n if createSmarts.isdigit() == False:\r\n print(\"You can't beat the system, try again\")\r\n createSmarts = input(\"How smart would you say you are? From a scale 1-10. \" + \"You have \" + str(traitPoints) + \" trait points remaining. \")\r\n return createSmarts\r\n\r\n\r\ndef createStats():\r\n createStats = int(userSmarts) + int(userSpeed) + int(userStrength)\r\n return createStats","repo_name":"jman413/cautious-tribble","sub_path":"projectUnknownStats.py","file_name":"projectUnknownStats.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33263545857","text":"def minion_game():\n string = input()\n stuart = 0\n kevin = 0\n for j in range(len(string)):\n k = len(string) - j\n if string[j] == 'A' or string[j] == 'E' or string[j] == 'I' or string[j] == 'O' or string[j] == 'U':\n stuart += k\n else:\n kevin += k\n\n if stuart == kevin:\n return \"Draw\"\n elif stuart > kevin:\n return \"Kevin \" + str(stuart)\n else:\n return \"Stuart \" + str(kevin)\n\n\nprint(minion_game())\n","repo_name":"SaidazimovaAziza/hackerrank","sub_path":"solutions/minion_game.py","file_name":"minion_game.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38321797728","text":"#!/usr/bin/python\n\n# Validate a JSON document against a JSON Schema\n\nfrom jsonschema import validate\nimport argparse, json\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--schema\", help=\"JSON Schema file name\", required=True)\n parser.add_argument(\"-i\", \"--instance\", help=\"JSON instance file name\", required=True)\n args = parser.parse_args()\n\n json_schema_file_name = args.schema\n json_instance_file_name = args.instance\n\n json_schema_file = open(json_schema_file_name)\n json_schema = json.load(json_schema_file)\n\n json_instance_file = open(json_instance_file_name)\n json_instance = json.load(json_instance_file)\n\n validate(json_instance, json_schema)\n","repo_name":"metadatacenter/cedar-util","sub_path":"scripts/python/archive/jsvalid.py","file_name":"jsvalid.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"24119638753","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n\n low = 0\n\n cols = len(matrix[0])\n high = len(matrix) * len(matrix[0]) - 1\n\n while low <= high:\n\n mid = (low + high) // 2\n r, c = divmod(mid, cols)\n\n val = matrix[r][c]\n\n if val == target:\n return True\n\n if val > target:\n high = mid - 1\n\n else:\n low = mid + 1\n\n return False","repo_name":"munagekar/cp","sub_path":"leetcode/00074.py","file_name":"00074.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2843032565","text":"import tensorflow as tf\n\nfrom enemy import Enemy, SmarterEnemy\nfrom model_wrapper import ModelWrapper\nfrom structs import Result, Encodings3, Encodings4, Encodings5, Encodings\nfrom game import Game, BLOCK_SIZE\n\n\ndef evaluate(agent, grid_size, board_size, encodings, rotated, num_games, enemy_constructor=None, ui=False, interval=0.1):\n game = Game(encodings, update_interval=interval,\n board_size=board_size, ui=ui, with_enemy=True)\n if enemy_constructor:\n enemy = enemy_constructor(game.enemy, encodings)\n else:\n enemy = agent\n\n results = {Result.WIN: 0, Result.DRAW: 0, Result.LOSE: 0}\n\n for i in range(num_games):\n game.reset()\n if (i % (num_games // 100) == 0):\n print(f'games played: {i}')\n while not game.has_ended():\n state = game.state\n\n grid_state = game.grid(grid_size)\n agent_action = agent(grid_state, game.player.direction, rotated)\n\n if enemy_constructor:\n enemy_action = enemy(state)\n else:\n grid_state = game.grid(grid_size, center='enemy')\n enemy_action = agent(grid_state, game.enemy.direction, rotated)\n\n _, has_ended, result = game.step(\n agent_action, enemy_action, wait=ui)\n\n if has_ended:\n if result == Result.WIN.value:\n results[Result.WIN] += 1\n elif result == Result.DRAW.value:\n results[Result.DRAW] += 1\n elif result == Result.LOSE.value:\n results[Result.LOSE] += 1\n\n return results\n\n\ndef evaluate_without_enemy(agent, grid_size, board_size, encodings, rotated, ui=False, interval=0.1):\n game = Game(encodings, update_interval=interval,\n board_size=board_size, ui=ui)\n\n total_reward = 0\n\n for j in range(board_size[1]):\n for i in range(board_size[0]):\n game.reset(player_init_pos=(i * BLOCK_SIZE, j * BLOCK_SIZE))\n reward = 0\n while not game.has_ended():\n grid_state = game.grid(grid_size)\n agent_action = agent(\n grid_state, game.player.direction, rotated)\n\n _, has_ended, _ = game.step(agent_action, None, wait=ui)\n\n if has_ended:\n total_reward += reward\n else:\n reward += 1\n\n return total_reward / (board_size[0] * board_size[1])\n\n\nif __name__ == \"__main__\":\n game = Game(Encodings3, with_enemy=True)\n\n enemy = SmarterEnemy(game.enemy, Encodings3)\n model = tf.keras.models.load_model('model.h5', compile=False)\n agent = ModelWrapper(model)\n\n game.run_agent_vs_enemy(agent, enemy, False, grid_size=5)\n","repo_name":"nnyx7/Tron","sub_path":"tron.py","file_name":"tron.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"75095320461","text":"#search tweets that contains the content that user searches\nimport os\nimport tweepy\nimport requests, json\n\n\n# this function will search tweets by content\ndef getByContent(api, keyword, no_retweet):\n # user can input multiple keywords represent by a string, like \"banana apple orange\"\n #split keywords and save in a list\n keylist = keyword.split()\n myWord = []\n # if user does not want to see retweets\n if no_retweet == True:\n for key in keylist:\n # get keywords one by one\n myWord[key] = \"keyword -filter:retweets\"\n search = tweepy.Cursor(api.search, q = myWord[key], lang = \"en\", since = date_since)\n # get location and username of the user who tweet the content\n for i in search:\n result = [i.user.screen_name, i.user.location]\n print(result)\n # if user wants to see retweets\n else:\n for key in keylist:\n # get keyword one by one\n myWord[key] = \"keyword\"\n search = tweepy.Cursor(api.search, q = myWord[key], lang = \"en\", since = date_since)\n # get location and username of the user who tweet the content\n for i in search:\n result = [i.user.screen_name, i.user.location]\n print(result)\n\n\n\n\n","repo_name":"ChujunQi/Ec601_P2","sub_path":"TwitterAPI/searchContent.py","file_name":"searchContent.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38053327654","text":"from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask import flash # use flash to store validation messages\n\nclass Message:\n DB = 'ohana_rideshare_schema' # databse name\n def __init__(self, data) -> None:\n self.id = data['id']\n self.message = data['message']\n self.created_at = data['created_at']\n self.updated_at = data['id']\n self.ride = None\n self.user = None\n \n # CRUD\n # CREATE\n @classmethod\n def send_message(cls, data): # add message\n query = \"\"\" INSERT INTO messages (message, ride_id, user_id)\n VALUES (%(message)s, %(ride_id)s, %(user_id)s)\"\"\"\n return connectToMySQL(cls.DB).query_db(query, data)\n ","repo_name":"jejcode/dojo_ohana_rideshare","sub_path":"flask_app/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31808220886","text":"from kivymd.app import MDApp\nfrom kivy.uix.gridlayout import GridLayout\n\n\ndef get_ingridients(m):\n nitro=str(10 * m / 1000)\n salt=str(15 * m / 1000)\n starts=str(0.5 * m / 1000)\n sugar=str(5 * m / 1000)\n time=str(round(m / 500 * 2))\n\n return {'salt': salt, \n 'nitro': nitro, \n 'starts': starts, \n 'sugar': sugar, \n 'time': time}\n\nclass Container(GridLayout):\n \n def calculate(self):\n try:\n mass=int(self.text_input.text)\n except:\n mass=0\n\n i = get_ingridients(mass)\n\n self.salt.text = i.get('salt') + ' +5'\n self.nitro.text = i.get('nitro')\n self.starts.text = i.get('starts')\n self.sugar.text = i.get('sugar')\n self.time.text = i.get('time')\n\nclass MyApp(MDApp):\n title='Q Recepie'\n def build(self):\n def __init__(**kwargs):\n self.theme_cls.theme_style = \"Light\"\n super().__init__(**kwargs)\n return Container()\n\nif __name__ == '__main__':\n MyApp().run()\n ","repo_name":"Sotemy/kivy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"11010614937","text":"import xlrd\nimport numpy as np\nfrom sklearn.cross_validation import train_test_split\nimport pandas as pd\n\n\ndef loadData():\n df = pd.read_excel('2014 and 2015 CSM dataset.xlsx')\n dataSetDF = df[['Ratings', 'Budget', 'Screens', 'Sequel']]\n # 'Sentiment', 'Views', 'Likes', 'Dislikes', 'Comments',\n dataSetDF = dataSetDF.dropna(how='any')\n cols = dataSetDF.shape[1]\n X = dataSetDF.iloc[:, 1:cols]\n y = dataSetDF.iloc[:, 0]\n\n X_norm = (X - X.min()) / (X.max() - X.min())\n X_norm.insert(0, 'ones', 1)\n\n return X_norm,y\n\n\ndef computeCost(X, y, theta):\n inner = np.power(((X * theta.T) - y), 2)\n return np.sum(inner) / (2 * len(X))\n\ndef gradientDescent(X, y, theta, alpha, iters):\n temp = np.matrix(np.zeros(theta.shape))\n parameters = int(theta.ravel().shape[1])\n cost = np.zeros(iters)\n\n for i in range(iters):\n error = (X * theta.T) - y\n\n for j in range(parameters):\n term = np.multiply(error, X[:,j])\n temp[0,j] = theta[0,j] - ((alpha / len(X)) * np.sum(term))\n\n theta = temp\n cost[i] = computeCost(X, y, theta)\n\n return theta, cost\n\n\nif __name__ == '__main__':\n # 去掉了前两列以及 Gross的数据 当前返回的为np.array\n X,y = loadData()\n # 来处理训练集和测试集\n x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n # trainSet, testSet = train_test_split(dataSet, test_size=0.2)\n X = np.matrix(x_train.values)\n y = np.matrix(y_train.values)\n cols = X.shape[1]\n theta = np.matrix(np.zeros((1, cols)))\n # 1!!!!!!!!\n print(theta)\n\n print(X.shape, theta.shape, y.shape)\n cost = computeCost(X, y, theta)\n\n alpha = 0.01\n iters = 5\n\n # 执行梯度下降算法\n g, cost = gradientDescent(X, y, theta, alpha, iters)\n print(g)\n print(np.shape(x_test))\n result = sum(x_test[0]*g)\n print(result)\n print(y_test[0])\n\n # print(result)\n\n","repo_name":"Mereder/logistic","sub_path":"gradientDescent.py","file_name":"gradientDescent.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33886797417","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# File : client_thread.py\n# Author : bssthu\n# Project : rtk_checker\n# Description : \n# \n\nimport socket\nimport threading\nimport time\nfrom rtk_check import log\n\nBUFFER_SIZE = 4096\n\n\nclass ClientThread(threading.Thread):\n \"\"\"从差分源服务器接收数据的线程\"\"\"\n\n def __init__(self, server_ip, server_port, got_data_cb):\n \"\"\"构造函数\n\n Args:\n server_ip: 差分源服务器IP地址\n server_port: 差分源服务器端口\n got_data_cb: 接收到数据包时调用的回调函数\n \"\"\"\n super().__init__()\n self.server_ip = server_ip\n self.server_port = server_port\n self.got_data_cb = got_data_cb\n self.rcv_count = 0\n self.running = True\n\n def run(self):\n \"\"\"线程主函数\n\n 循环运行,建立连接、接收数据,并在连接出错时重连。\n \"\"\"\n log.info('client thread: start, %s:%d' % (self.server_ip, self.server_port))\n while self.running:\n try:\n self.receive_data()\n except Exception as e:\n log.error('client thread error: %s' % e)\n time.sleep(3)\n log.info('client thread: bye')\n\n def receive_data(self):\n \"\"\"建立连接并循环接收数据\n\n 在超时时重连,在出错时返回。\n \"\"\"\n client = self.connect()\n log.info('client thread: connected')\n timeout_count = 0\n while self.running:\n try:\n # 接收数据\n data = client.recv(BUFFER_SIZE)\n # 连接失败的处理\n if len(data) == 0:\n raise RuntimeError('socket connection broken')\n # 收到数据后的处理\n self.rcv_count += 1\n log.debug('rcv %d bytes. id: %d' % (len(data), self.rcv_count))\n self.got_data_cb(data, self.rcv_count)\n timeout_count = 0\n except socket.timeout:\n # 超时处理,超时5次时主动重连\n # 超时时间短是为了在需要时能快速退出\n timeout_count += 1\n if timeout_count >= 5:\n timeout_count = 0\n client = self.reconnect(client)\n log.debug('client timeout, reconnect')\n try:\n client.close()\n except socket.error:\n pass\n except Exception as e:\n log.error('client exception when close: %s' % e)\n\n def connect(self):\n \"\"\"尝试建立连接并设置超时参数\"\"\"\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.settimeout(10)\n try:\n client.connect((self.server_ip, self.server_port))\n except socket.timeout as e:\n raise socket.timeout('%s when connect' % e)\n client.settimeout(3)\n return client\n\n def reconnect(self, client):\n \"\"\"重连 socket\"\"\"\n try:\n client.close()\n except:\n log.error('client exception when close.')\n return self.connect()\n","repo_name":"gautodev/rtk_check","sub_path":"rtk_check/client_thread.py","file_name":"client_thread.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"35111852925","text":"class Solution:\n def totalNQueens(self, n: int) -> int:\n self.cnt = 0\n\n # 初始化棋盘\n board = [['.'] * n for _ in range(n)]\n\n # 放置 第一行\n self.backtrack(board, 0)\n\n return self.cnt\n\n def backtrack(self, board, row):\n # 最后一行 结束\n if row == len(board):\n # 一种方式\n self.cnt += 1\n return\n\n for col in range(len(board)):\n # 是否可放置\n if not self.valid(board, row, col):\n continue\n\n # 选择\n board[row][col] = 'Q'\n self.backtrack(board, row + 1)\n # 取消选择\n board[row][col] = '.'\n\n @staticmethod\n def valid(board, row, col):\n \"\"\"判罚是否可放置皇后\"\"\"\n\n # 列\n for i in range(row + 1):\n if board[i][col] == 'Q':\n return False\n\n # 右上\n r, c = row - 1, col + 1\n while r >= 0 and c < len(board):\n if board[r][c] == 'Q':\n return False\n r -= 1\n c += 1\n\n # 左上\n r, c = row - 1, col - 1\n while r >= 0 and c >= 0:\n if board[r][c] == 'Q':\n return False\n r -= 1\n c -= 1\n return True\n","repo_name":"xiaoTaoist/LeetCode","sub_path":"python/52. N 皇后 II.py","file_name":"52. N 皇后 II.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"30586476686","text":"# To use this code, make sure you\n#\n# import json\n#\n# and then, to convert JSON from a string, do\n#\n# result = users_response_from_dict(json.loads(json_string))\n\nfrom dataclasses import dataclass\nfrom typing import Optional, Any, List, TypeVar, Type, cast, Callable\n\n\nT = TypeVar(\"T\")\n\n\ndef from_str(x: Any) -> str:\n assert isinstance(x, str)\n return x\n\n\ndef from_none(x: Any) -> Any:\n assert x is None\n return x\n\n\ndef from_union(fs, x):\n for f in fs:\n try:\n return f(x)\n except:\n pass\n assert False\n\n\ndef from_int(x: Any) -> int:\n assert isinstance(x, int) and not isinstance(x, bool)\n return x\n\n\ndef from_bool(x: Any) -> bool:\n assert isinstance(x, bool)\n return x\n\n\ndef to_class(c: Type[T], x: Any) -> dict:\n assert isinstance(x, c)\n return cast(Any, x).to_dict()\n\n\ndef from_list(f: Callable[[Any], T], x: Any) -> List[T]:\n assert isinstance(x, list)\n return [f(y) for y in x]\n\n\n@dataclass\nclass Errors:\n description: Optional[str] = None\n code: Optional[int] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Errors':\n assert isinstance(obj, dict)\n description = from_union([from_str, from_none], obj.get(\"description\"))\n code = from_union([from_int, from_none], obj.get(\"code\"))\n return Errors(description, code)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"description\"] = from_union([from_str, from_none], self.description)\n result[\"code\"] = from_union([from_int, from_none], self.code)\n return result\n\n\n@dataclass\nclass Address:\n street_address: Optional[str] = None\n locality: Optional[str] = None\n region: Optional[str] = None\n postal_code: Optional[str] = None\n country: Optional[str] = None\n primary: Optional[bool] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Address':\n assert isinstance(obj, dict)\n street_address = from_union([from_str, from_none], obj.get(\"streetAddress\"))\n locality = from_union([from_str, from_none], obj.get(\"locality\"))\n region = from_union([from_str, from_none], obj.get(\"region\"))\n postal_code = from_union([from_str, from_none], obj.get(\"postalCode\"))\n country = from_union([from_str, from_none], obj.get(\"country\"))\n primary = from_union([from_bool, from_none], obj.get(\"primary\"))\n return Address(street_address, locality, region, postal_code, country, primary)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"streetAddress\"] = from_union([from_str, from_none], self.street_address)\n result[\"locality\"] = from_union([from_str, from_none], self.locality)\n result[\"region\"] = from_union([from_str, from_none], self.region)\n result[\"postalCode\"] = from_union([from_str, from_none], self.postal_code)\n result[\"country\"] = from_union([from_str, from_none], self.country)\n result[\"primary\"] = from_union([from_bool, from_none], self.primary)\n return result\n\n\n@dataclass\nclass Email:\n value: Optional[str] = None\n primary: Optional[bool] = None\n type: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Email':\n assert isinstance(obj, dict)\n value = from_union([from_str, from_none], obj.get(\"value\"))\n primary = from_union([from_bool, from_none], obj.get(\"primary\"))\n type = from_union([from_str, from_none], obj.get(\"type\"))\n return Email(value, primary, type)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"value\"] = from_union([from_str, from_none], self.value)\n result[\"primary\"] = from_union([from_bool, from_none], self.primary)\n result[\"type\"] = 
from_union([from_str, from_none], self.type)\n return result\n\n\n@dataclass\nclass Group:\n value: Optional[str] = None\n display: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Group':\n assert isinstance(obj, dict)\n value = from_union([from_str, from_none], obj.get(\"value\"))\n display = from_union([from_str, from_none], obj.get(\"display\"))\n return Group(value, display)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"value\"] = from_union([from_str, from_none], self.value)\n result[\"display\"] = from_union([from_str, from_none], self.display)\n return result\n\n\n@dataclass\nclass Meta:\n created: Optional[str] = None\n location: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Meta':\n assert isinstance(obj, dict)\n created = from_union([from_str, from_none], obj.get(\"created\"))\n location = from_union([from_str, from_none], obj.get(\"location\"))\n return Meta(created, location)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"created\"] = from_union([from_str, from_none], self.created)\n result[\"location\"] = from_union([from_str, from_none], self.location)\n return result\n\n\n@dataclass\nclass Name:\n given_name: Optional[str] = None\n family_name: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Name':\n assert isinstance(obj, dict)\n given_name = from_union([from_str, from_none], obj.get(\"givenName\"))\n family_name = from_union([from_str, from_none], obj.get(\"familyName\"))\n return Name(given_name, family_name)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"givenName\"] = from_union([from_str, from_none], self.given_name)\n result[\"familyName\"] = from_union([from_str, from_none], self.family_name)\n return result\n\n\n@dataclass\nclass Photo:\n value: Optional[str] = None\n type: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Photo':\n assert isinstance(obj, dict)\n value = from_union([from_str, from_none], obj.get(\"value\"))\n type = from_union([from_str, from_none], obj.get(\"type\"))\n return Photo(value, type)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"value\"] = from_union([from_str, from_none], self.value)\n result[\"type\"] = from_union([from_str, from_none], self.type)\n return result\n\n\n@dataclass\nclass Manager:\n pass\n\n @staticmethod\n def from_dict(obj: Any) -> 'Manager':\n assert isinstance(obj, dict)\n return Manager()\n\n def to_dict(self) -> dict:\n result: dict = {}\n return result\n\n\n@dataclass\nclass UrnScimSchemasExtensionEnterprise10:\n manager: Optional[Manager] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'UrnScimSchemasExtensionEnterprise10':\n assert isinstance(obj, dict)\n manager = from_union([Manager.from_dict, from_none], obj.get(\"manager\"))\n return UrnScimSchemasExtensionEnterprise10(manager)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"manager\"] = from_union([lambda x: to_class(Manager, x), from_none], self.manager)\n return result\n\n\n@dataclass\nclass UrnScimSchemasExtensionSlackGuest10:\n type: Optional[str] = None\n expiration: Optional[str] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'UrnScimSchemasExtensionSlackGuest10':\n assert isinstance(obj, dict)\n type = from_union([from_str, from_none], obj.get(\"type\"))\n expiration = from_union([from_str, from_none], obj.get(\"expiration\"))\n return UrnScimSchemasExtensionSlackGuest10(type, expiration)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"type\"] = from_union([from_str, 
from_none], self.type)\n result[\"expiration\"] = from_union([from_str, from_none], self.expiration)\n return result\n\n\n@dataclass\nclass Resource:\n schemas: Optional[List[str]] = None\n id: Optional[str] = None\n external_id: Optional[str] = None\n meta: Optional[Meta] = None\n user_name: Optional[str] = None\n nick_name: Optional[str] = None\n name: Optional[Name] = None\n display_name: Optional[str] = None\n profile_url: Optional[str] = None\n title: Optional[str] = None\n timezone: Optional[str] = None\n active: Optional[bool] = None\n emails: Optional[List[Email]] = None\n photos: Optional[List[Photo]] = None\n groups: Optional[List[Group]] = None\n addresses: Optional[List[Address]] = None\n phone_numbers: Optional[List[Email]] = None\n roles: Optional[List[Email]] = None\n urn_scim_schemas_extension_enterprise_10: Optional[UrnScimSchemasExtensionEnterprise10] = None\n urn_scim_schemas_extension_slack_guest_10: Optional[UrnScimSchemasExtensionSlackGuest10] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'Resource':\n assert isinstance(obj, dict)\n schemas = from_union([lambda x: from_list(from_str, x), from_none], obj.get(\"schemas\"))\n id = from_union([from_str, from_none], obj.get(\"id\"))\n external_id = from_union([from_str, from_none], obj.get(\"externalId\"))\n meta = from_union([Meta.from_dict, from_none], obj.get(\"meta\"))\n user_name = from_union([from_str, from_none], obj.get(\"userName\"))\n nick_name = from_union([from_str, from_none], obj.get(\"nickName\"))\n name = from_union([Name.from_dict, from_none], obj.get(\"name\"))\n display_name = from_union([from_str, from_none], obj.get(\"displayName\"))\n profile_url = from_union([from_str, from_none], obj.get(\"profileUrl\"))\n title = from_union([from_str, from_none], obj.get(\"title\"))\n timezone = from_union([from_str, from_none], obj.get(\"timezone\"))\n active = from_union([from_bool, from_none], obj.get(\"active\"))\n emails = from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get(\"emails\"))\n photos = from_union([lambda x: from_list(Photo.from_dict, x), from_none], obj.get(\"photos\"))\n groups = from_union([lambda x: from_list(Group.from_dict, x), from_none], obj.get(\"groups\"))\n addresses = from_union([lambda x: from_list(Address.from_dict, x), from_none], obj.get(\"addresses\"))\n phone_numbers = from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get(\"phoneNumbers\"))\n roles = from_union([lambda x: from_list(Email.from_dict, x), from_none], obj.get(\"roles\"))\n urn_scim_schemas_extension_enterprise_10 = from_union([UrnScimSchemasExtensionEnterprise10.from_dict, from_none], obj.get(\"urn:scim:schemas:extension:enterprise:1.0\"))\n urn_scim_schemas_extension_slack_guest_10 = from_union([UrnScimSchemasExtensionSlackGuest10.from_dict, from_none], obj.get(\"urn:scim:schemas:extension:slack:guest:1.0\"))\n return Resource(schemas, id, external_id, meta, user_name, nick_name, name, display_name, profile_url, title, timezone, active, emails, photos, groups, addresses, phone_numbers, roles, urn_scim_schemas_extension_enterprise_10, urn_scim_schemas_extension_slack_guest_10)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"schemas\"] = from_union([lambda x: from_list(from_str, x), from_none], self.schemas)\n result[\"id\"] = from_union([from_str, from_none], self.id)\n result[\"externalId\"] = from_union([from_str, from_none], self.external_id)\n result[\"meta\"] = from_union([lambda x: to_class(Meta, x), from_none], self.meta)\n result[\"userName\"] = 
from_union([from_str, from_none], self.user_name)\n result[\"nickName\"] = from_union([from_str, from_none], self.nick_name)\n result[\"name\"] = from_union([lambda x: to_class(Name, x), from_none], self.name)\n result[\"displayName\"] = from_union([from_str, from_none], self.display_name)\n result[\"profileUrl\"] = from_union([from_str, from_none], self.profile_url)\n result[\"title\"] = from_union([from_str, from_none], self.title)\n result[\"timezone\"] = from_union([from_str, from_none], self.timezone)\n result[\"active\"] = from_union([from_bool, from_none], self.active)\n result[\"emails\"] = from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.emails)\n result[\"photos\"] = from_union([lambda x: from_list(lambda x: to_class(Photo, x), x), from_none], self.photos)\n result[\"groups\"] = from_union([lambda x: from_list(lambda x: to_class(Group, x), x), from_none], self.groups)\n result[\"addresses\"] = from_union([lambda x: from_list(lambda x: to_class(Address, x), x), from_none], self.addresses)\n result[\"phoneNumbers\"] = from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.phone_numbers)\n result[\"roles\"] = from_union([lambda x: from_list(lambda x: to_class(Email, x), x), from_none], self.roles)\n result[\"urn:scim:schemas:extension:enterprise:1.0\"] = from_union([lambda x: to_class(UrnScimSchemasExtensionEnterprise10, x), from_none], self.urn_scim_schemas_extension_enterprise_10)\n result[\"urn:scim:schemas:extension:slack:guest:1.0\"] = from_union([lambda x: to_class(UrnScimSchemasExtensionSlackGuest10, x), from_none], self.urn_scim_schemas_extension_slack_guest_10)\n return result\n\n\n@dataclass\nclass UsersResponse:\n total_results: Optional[int] = None\n items_per_page: Optional[int] = None\n start_index: Optional[int] = None\n schemas: Optional[List[str]] = None\n resources: Optional[List[Resource]] = None\n errors: Optional[Errors] = None\n\n @staticmethod\n def from_dict(obj: Any) -> 'UsersResponse':\n assert isinstance(obj, dict)\n total_results = from_union([from_int, from_none], obj.get(\"totalResults\"))\n items_per_page = from_union([from_int, from_none], obj.get(\"itemsPerPage\"))\n start_index = from_union([from_int, from_none], obj.get(\"startIndex\"))\n schemas = from_union([lambda x: from_list(from_str, x), from_none], obj.get(\"schemas\"))\n resources = from_union([lambda x: from_list(Resource.from_dict, x), from_none], obj.get(\"Resources\"))\n errors = from_union([Errors.from_dict, from_none], obj.get(\"Errors\"))\n return UsersResponse(total_results, items_per_page, start_index, schemas, resources, errors)\n\n def to_dict(self) -> dict:\n result: dict = {}\n result[\"totalResults\"] = from_union([from_int, from_none], self.total_results)\n result[\"itemsPerPage\"] = from_union([from_int, from_none], self.items_per_page)\n result[\"startIndex\"] = from_union([from_int, from_none], self.start_index)\n result[\"schemas\"] = from_union([lambda x: from_list(from_str, x), from_none], self.schemas)\n result[\"Resources\"] = from_union([lambda x: from_list(lambda x: to_class(Resource, x), x), from_none], self.resources)\n result[\"Errors\"] = from_union([lambda x: to_class(Errors, x), from_none], self.errors)\n return result\n\n\ndef users_response_from_dict(s: Any) -> UsersResponse:\n return UsersResponse.from_dict(s)\n\n\ndef users_response_to_dict(x: UsersResponse) -> Any:\n return to_class(UsersResponse, 
x)\n","repo_name":"Beckley93/SlackStarterBot","sub_path":".pythonlibs/lib/python3.10/site-packages/slack_types/scim_api/v1/users_response.py","file_name":"users_response.py","file_ext":"py","file_size_in_byte":14877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"69944209742","text":"from django.shortcuts import render\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom users.models import Tenant, PropertyManager\nfrom django.template import loader\nfrom .models import Lease\n\n# Create your views here.\ndef UploadLease(request):\n if request.method == \"POST\":\n if request.user.is_authenticated:\n if request.user.is_propertymanager:\n pm1 = PropertyManager.objects.get(user=request.user)\n id = request.POST.get('tenant',None)\n tenant = Tenant.objects.get(id = id)\n document = request.FILES['document']\n lease = Lease(tenant=tenant,pm=pm1,lease=document)\n lease.save()\n html = \"\"\n content = loader.render_to_string('sharedoc/documents.html')\n upper,lower = content.split('',1)\n upper += html\n upper += lower\n return HttpResponse(upper)\n else:\n if request.user.is_authenticated:\n pm1 = PropertyManager.objects.get(user=request.user)\n return render(request,'sharedoc/documents.html',{'pm':pm1})\n else:\n return HttpResponseRedirect('/login')\n\ndef ViewLeases(request):\n if request.user.is_authenticated:\n if request.user.is_propertymanager:\n pm1 = PropertyManager.objects.get(user=request.user)\n return render(request,'sharedoc/viewdocuments.html',{'pm':pm1})\n else:\n tenant = Tenant.objects.get(user=request.user)\n lease = Lease.objects.get(tenant=tenant)\n return render(request,'sharedoc/viewdocuments.html',{'tenant':tenant, 'lease':lease})\n else:\n return HttpResponseRedirect('/login')\n","repo_name":"imd15/LeaseLord","sub_path":"BackEnd/LeaseLordWeb/document/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8965147070","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom dataclasses import dataclass\nfrom rio_control_node.msg import Joystick_Status, Robot_Status\nfrom ck_ros_msgs_node.msg import HMI_Signals\nfrom ck_utilities_py_node.joystick import Joystick\n\n@dataclass\nclass Params:\n drivetrain_fwd_back : float = 0\n drivetrain_left_right : float = 0\n drivetrain_swerve_percent_fwd_vel : float = 0\n drivetrain_swerve_direction : float = 0\n drivetrain_swerve_percent_angular_rot : float = 0\n drivetrain_quickturn : bool = False\n drivetrain_brake : bool = False\n gauge_axis_id: int = -1\n elevator_vertical_axis_id: int = -1\n claw_open_button_id: int = -1\n\nparams = Params()\n\nhmi_pub = rospy.Publisher(name=\"/HMISignals\", data_class=HMI_Signals, queue_size=10, tcp_nodelay=True)\n\ndrive_joystick = Joystick(0)\narm_joystick = Joystick(1)\nbb1_joystick = Joystick(2)\nbb2_joystick = Joystick(3)\n\nis_auto = False\n\ndef robot_status_callback(msg : Robot_Status):\n global is_auto\n is_auto = (msg.robot_state == msg.AUTONOMOUS)\n\ndef joystick_callback(msg : Joystick_Status):\n global is_auto\n global hmi_pub\n global drive_joystick\n global arm_joystick\n global bb1_joystick\n global bb2_joystick\n global params\n Joystick.update(msg)\n\n hmi_update_msg = HMI_Signals()\n\n hmi_update_msg.gauge_value = int(1500 + (1500 * drive_joystick.getRawAxis(params.gauge_axis_id)))\n hmi_update_msg.elevator_vertical = float(drive_joystick.getRawAxis(params.elevator_vertical_axis_id))\n hmi_update_msg.claw_open = bool(drive_joystick.getButton(params.claw_open_button_id))\n\n hmi_pub.publish(hmi_update_msg)\n\n\ndef init_params():\n global params\n params.gauge_axis_id = rospy.get_param(\"gauge_axis_id\", -1)\n params.elevator_vertical_axis_id = rospy.get_param(\"elevator_vertical_axis_id\", -1)\n params.claw_open_button_id = rospy.get_param(\"claw_open_button_id\", -1)\n\n\ndef ros_main(node_name):\n rospy.init_node(node_name)\n init_params()\n rospy.Subscriber(name=\"/JoystickStatus\", data_class=Joystick_Status, callback=joystick_callback, queue_size=1, tcp_nodelay=True)\n rospy.Subscriber(name=\"/RobotStatus\", data_class=Robot_Status, callback=robot_status_callback, queue_size=1, tcp_nodelay=True)\n rospy.spin()","repo_name":"frcteam195/hmi_agent_simulation_robot_py_node","sub_path":"src/hmi_agent_node/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26883906969","text":"# This prints a different message depending on whether the\n# program is executed during December or not.\n\nmonth = input(\"Please enter the month: \")\n\n# The following line will only ever execute during December\nif month == 12:\n print(\"It's christmas time!\")\n#elif month == 8:\n # print(\"Happy Birthday , Frank it's August!\")\nelse:\n # If it’s not December, this line is executed.\n print(\"Just another ordinary day...\")\n\n","repo_name":"electronsandbits/python-learning","sub_path":"Beecher/month.py","file_name":"month.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20603894687","text":"\"\"\"\nExport the typer apps we use throughout our codebase.\n\nHaving this in a single file allows multiple files to provide subcommands\nfor the same CLI application. So we can put deployment related stuff under\ndeployer.py, debug related stuff under debug.py, etc\n\"\"\"\nimport typer\n\n# The typer app to which all subcommands are attached\n# Disable 'pretty' exception handling\napp = typer.Typer(pretty_exceptions_show_locals=False)\ngenerate_app = typer.Typer(pretty_exceptions_show_locals=False)\ncilogon_client_app = typer.Typer(pretty_exceptions_show_locals=False)\ndebug_app = typer.Typer(pretty_exceptions_show_locals=False)\nexec_app = typer.Typer(pretty_exceptions_show_locals=False)\ngrafana_app = typer.Typer(pretty_exceptions_show_locals=False)\nvalidate_app = typer.Typer(pretty_exceptions_show_locals=False)\n\napp.add_typer(\n generate_app,\n name=\"generate\",\n help=\"Generate various types of assets. It currently supports generating files related to billing, \"\n \"new dedicated clusters, helm upgrade strategies and resource allocation.\",\n)\napp.add_typer(\n cilogon_client_app,\n name=\"cilogon-client\",\n help=\"Manage cilogon clients for hubs' authentication.\",\n)\napp.add_typer(\n exec_app,\n name=\"exec\",\n help=\"Execute a shell in various parts of the infra. It can be used for poking around, or debugging issues.\",\n)\napp.add_typer(\n debug_app,\n name=\"debug\",\n help=\"Debug issues by accessing different components and their logs\",\n)\napp.add_typer(grafana_app, name=\"grafana\", help=\"Manages Grafana related workflows.\")\napp.add_typer(\n validate_app,\n name=\"validate\",\n help=\"Validate configuration files such as helm chart values and cluster.yaml files.\",\n)\n","repo_name":"AIDEA775/2i2c-infrastructure","sub_path":"deployer/cli_app.py","file_name":"cli_app.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"}
+{"seq_id":"74562992462","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom src.cct import cct_2\nimport matplotlib.pyplot as plt\n\ncifar10_mean, cifar10_std = [0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize(cifar10_mean, cifar10_std)])\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\nbatch_size = 4\n\n\ndef imshow(images, labels, predicted_labels=None):\n # Using torchvision to make first grid of the images\n img = torchvision.utils.make_grid(images)\n\n # Inverting the normalization\n img = img.permute(1, 2, 0).mul(torch.tensor(cifar10_std))\n img += torch.tensor(cifar10_mean)\n\n # Plotting the grid\n fig, ax = plt.subplots(figsize=(6, 24))\n plt.imshow(img)\n\n if predicted_labels is not None:\n # Outputing the predicted labels\n ax.set_xlabel('Predicted labels', fontsize=18, labelpad=12)\n ax.set_xticks(torch.arange(len(images)) * 35 + 20)\n ax.set_xticklabels([classes[predicted_labels[j]]\n for j in range(len(images))], fontsize=14)\n\n # Outputing the ground truth labels\n gax = ax.secondary_xaxis('top')\n gax.set_xlabel('Ground truth', fontsize=18, labelpad=12)\n gax.set_xticks(torch.arange(len(images)) * 35 + 20)\n gax.set_xticklabels([classes[labels[j]]\n for j in range(len(images))], fontsize=14)\n plt.show()\n\n\ntrainset = torchvision.datasets.CIFAR10(root='./cifar10', train=True,\n download=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=2)\n\ntestset = torchvision.datasets.CIFAR10(root='./cifar10', train=False,\n download=False, transform=transform)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=2)\n\ndataiter = iter(trainloader)\ntest_dataiter = iter(testloader)\nsaved_file = \"image.pt\"\ntransformer = cct_2(img_size=32,\n num_classes=10,\n positional_embedding='learnable',\n n_conv_layers=2,\n kernel_size=3,\n plot=True,\n saved_file=saved_file)\n\ntransformer.load_state_dict(torch.load('checkpoint.pth'))\nimages, labels = test_dataiter.next()\ntorch.save(images[0], saved_file)\ntorch.save(labels[0], \"labels.pt\")\nimages[0] = torch.load(saved_file)\nlabels[0] = torch.load(\"labels.pt\")\n\n\n\n#torch.save(images[0], \"exampleImage.pt\")\nimages[0] = torch.load(\"images/exampleImage.pt\")\n_, predicted = torch.max(transformer(images), 1)\nimshow(images, labels, predicted)\n","repo_name":"tomkark/SOT_ViT","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"15684425443","text":"# %%\nfrom __future__ import annotations\nimport numpy as np\nimport numpy.random as rand\nimport numpy.typing as npt\nimport math\nfrom abc import ABC, abstractmethod\nfrom collections import deque\nfrom ice.recipe import recipe\nfrom ice.apis.openai import openai_complete\nfrom ice.agents.openai import OpenAIAgent, Agent\nfrom ice.trace import TracedABC\n\nfrom typing import Generic, TypeVar, Tuple, Callable, List, Dict\n\n# %%\nRT = TypeVar('RT')\nIT = TypeVar('IT')\n# OT = TypeVar('OT')\nUpdateRule = Callable[[npt.NDArray[np.float32], int, float, float, float], npt.NDArray[np.float32]]\n\nLOG_0 = -1e6\n\n\"\"\"\nLike ice.agent, but complete_logprobs also returns logprobs\n\"\"\"\nclass LogprobsAgent(Agent):\n @abstractmethod\n async def complete_logprobs(self, prompt : str, stop : str|None = None) -> Tuple[str, List[Tuple[npt.NDArray[np.float32], List[str], int]]]:\n pass\n\n# Mostly a wrapper oaround OpenAI Agent. We need to grab the logprobs for complete though, \n# so we can't use the complete method of vanilla OpenAI Agent complete. \nclass WrappedICEAgent(OpenAIAgent, LogprobsAgent):\n def __init__(self):\n super().__init__()\n async def complete_logprobs(self, prompt: str, stop: str | None = None) -> Tuple[str, List[Tuple[npt.NDArray[np.float32], List[str], int]]]:\n response = await openai_complete(prompt, stop, temperature = 0.7, logprobs=5)\n text : str= response['choices'][0]['text']\n if 'completion_tokens' in response['usage']: \n completion_length : int = response['usage']['completion_tokens'] + 1\n else: \n completion_length = 1\n tokens : List[str]= response['choices'][0]['logprobs']['tokens'][:completion_length]\n token_logprobs : List[float] = response['choices'][0]['logprobs']['token_logprobs'][:completion_length]\n logprob_dicts : List[Dict[str, float]]= response['choices'][0]['logprobs']['top_logprobs'][:completion_length]\n for token, token_logprob, logprob_dict in zip(tokens, token_logprobs, logprob_dicts):\n logprob_dict[token] = token_logprob\n steps = [(np.fromiter([logprob_dict[key] for key in list(logprob_dict)], dtype = np.float32), list(logprob_dict), list(logprob_dict).index(token)) for token, logprob_dict in zip(tokens, logprob_dicts)]\n return(text, steps)\n\n\"\"\"\nRepresents a search tree to search through possible evaluations of a non-deterministic program \nwith return type RT.\n\"\"\"\nclass SearchTreeNode(ABC, Generic[RT]):\n \"\"\"\n Should play out a program until the end, and then accumulate the logprobs used to choose \n the chosen playout and the (unnormalized) desired log-prob scores from, eg, \n adding the log-prob from the prior supplied by gpt-3 logprobs during generation\n and the log-likelihoods generated by verifiers. \n Should recursively calculate updates along the way too.\n return:\n (desired log-prob score, log-prob, result, update)\n\n seq represents a sequence of choices used to force rollouts to use an already explored path through the search tree.\n predict/complete nodes currently ignore seq.\n seq can be a partial path. 
\n \"\"\"\n @abstractmethod\n async def rolloutAndUpdate(self, rule : UpdateRule, agent: LogprobsAgent, seq : deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n pass\n\nclass TracedSearchTreeWrapper(SearchTreeNode[RT], TracedABC):\n def __init__(self, wrapped : SearchTreeNode[RT]):\n self.wrapped = wrapped\n async def rolloutAndUpdate(self, rule: UpdateRule, agent: LogprobsAgent, seq: deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n score, logprob, result, update = await self.wrapped.rolloutAndUpdate(rule, agent, seq)\n return (score, logprob, result, TracedSearchTreeWrapper(update))\n\nclass Leaf(SearchTreeNode[RT]):\n def __init__(self, result : RT) -> None:\n self.result = result\n async def rolloutAndUpdate(self, rule : UpdateRule, agent: LogprobsAgent, seq : deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n return (0.0, 0.0, self.result, self)\n\nclass ScoreNode(SearchTreeNode[RT]):\n def __init__(self, score : float, child : SearchTreeNode[RT]):\n self.score = score\n self.child = child\n async def rolloutAndUpdate(self, rule : UpdateRule, agent : LogprobsAgent, seq : deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n score, logprob, result, update = await self.child.rolloutAndUpdate(rule, agent, seq)\n return (score + self.score, logprob, result, ScoreNode(self.score, update))\n\nclass ChoiceNode(SearchTreeNode[RT]):\n def __init__(self, scores : npt.NDArray[np.float32], logprobs : npt.NDArray[np.float32], children : List[SearchTreeNode[RT]], visits : float = 1):\n self.scores = scores\n self.logprobs = logprobs\n self.children = children\n self.visits = visits\n async def rolloutAndUpdate(self, rule : UpdateRule, agent : LogprobsAgent, seq : deque[int] = deque([]))-> Tuple[float, float, RT, SearchTreeNode[RT]]:\n if seq:\n idx = seq.popleft()\n else: \n idx : int = rand.choice(len(self.logprobs), None, p = np.exp(self.logprobs)/np.sum(np.exp(self.logprobs)))\n score, logprob, result, update = await self.children[idx].rolloutAndUpdate(rule, agent, seq)\n score += self.scores[idx]\n logprob += self.logprobs[idx]\n nlogprobs = rule(self.logprobs, idx, score, logprob, self.visits)\n nchildren = self.children.copy()\n nchildren[idx] = update\n return (score, logprob, result, ChoiceNode(self.scores, nlogprobs, nchildren, self.visits + 1))\n\n# Program is a continuation that takes the result of a GPT-3 completion request \n# and chooses what to do next (eg, answer extraction or running a verifier)\nProgram = Callable[[IT], SearchTreeNode[RT]]\n\n''' no longer used\nclass PredictNode(SearchTreeNode[RT]):\n def __init__(self, prompt : str, program : Program[str, RT]):\n self.prompt = prompt\n self.program = program\n async def rolloutAndUpdate(self, rule: UpdateRule, agent : LogprobsAgent, seq : deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n logits, outputs = await agent.predict(context = self.prompt)\n children = [self.program(output) for output in outputs]\n return await ChoiceNode(logits, logits, children).rolloutAndUpdate(rule, agent)\n'''\n\nclass CompleteNode(SearchTreeNode[RT]):\n def __init__(self, prompt : str, prefix : str, program : Program[str, RT], stop : str | None = None):\n self.prompt = prompt\n self.program = program\n self.stop = stop\n self.prefix = prefix\n async def rolloutAndUpdate(self, rule: UpdateRule, agent: LogprobsAgent, seq : deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n result, steps = await 
agent.complete_logprobs(self.prompt + self.prefix, self.stop)\n prefix = self.prefix\n nodes : List[SearchTreeNode[RT]]= []\n prev : Tuple[ChoiceNode[RT], int] | None = None\n seq = deque([])\n for step in steps:\n logprobs, child_strs, child = step\n seq.append(child)\n children : List[SearchTreeNode[RT]] = [CompleteNode(self.prompt, prefix + child_str, self.program, self.stop) for child_str in child_strs]\n this_node = ChoiceNode[RT](logprobs, logprobs, children)\n if prev is not None: \n prev_node, prev_child_idx = prev\n prev_node.children[prev_child_idx] = this_node\n prev = (this_node, child)\n nodes.append(this_node)\n prefix = prefix + cleanUnicode(child_strs[child])\n if prev is not None:\n prev_node, prev_child_idx = prev\n prev_node.children[prev_child_idx] = self.program(self.prefix + result)\n return await nodes[0].rolloutAndUpdate(rule, agent, seq)\n else:\n return await self.program(self.prefix + result).rolloutAndUpdate(rule, agent, seq)\n\nclass ClassifyNode(SearchTreeNode[RT]):\n def __init__(self, prompt : str, choices : Tuple[str, ...], program : Program[Dict[str, float], RT]):\n self.prompt = prompt\n self.choices = choices\n self.program = program\n async def rolloutAndUpdate(self, rule: UpdateRule, agent: LogprobsAgent, seq: deque[int] = deque([])) -> Tuple[float, float, RT, SearchTreeNode[RT]]:\n cats = (await agent.classify(prompt = self.prompt, choices = self.choices))[0]\n return await self.program(cats).rolloutAndUpdate(rule, agent)\n\n\n\"\"\"\n# Adaptive Importance Sampling.\n\nUpdate rule designed so that the logprobs should converge to scores such that sampling from the search tree leads follows the correct posterior distribution.\n\nLet z be the choice of child, y be the choice of subsequent children, and z the event we're conditioning/scoring on. \nLet q_z, q_y|z, q_x|yz denote the known values for p(z), p(y|z), and p(x|y,z). \n(ie, q_z is carried by the score fields of this choice node, q_y|z the product of the score fields of downstream choice nodes,\nand q_x is the product of the score fields of downstream score nodes)\nLet p_z denote the probabilities associated with each child. \nWe desire that p_z converges to p(x|z)p(z) = sum_y p(x|y,z)p(y|z)p(z) = q_z(sum_y q_y|z q_x|yz)\nso that when normalized the p_z represent p(z|x).\nDenote this desired value as q_z|x\n\nLet 1_z denote the vector where the z'th entry is 1 and the others 0. \nWe note that sampling y,z using p_z and p_y|z, \n$E[1_z q_z q_y|z q_x|yz / (p_z p_y|z)] \n= sum_z p_z 1_z q_z / p_z E[q_y|z q_x|yz / p_y|z]\n= sum_z 1_z q_z sum_y p_y|z q_y|z q_x|yz /p_y|z \n= sum_z 1_z q_z|x\n= (q_0|x, q_1|x, ... , q_n|x)$\nwhich is the desired value we want the probs to converge to. \n\nWe thus calculate our tree by sampling the above value and averaging \nit into our probability weights. \n\nThis should reduce our variance compared to rejection sampling over time. 
\n\"\"\"\ndef mcrule(logprobs : npt.NDArray[np.float32], idx : int, score : float, logprob : float, visits : float) -> npt.NDArray[np.float32]:\n nweights = np.zeros_like(logprobs)\n nweights[idx] = math.exp(score - logprob)\n return np.log(visits * np.exp(logprobs) + nweights) - np.log(visits+1)\n\nEOT = '<|endoftext|>'\n\ndef cleanUnicode(string : str) -> str:\n if string.startswith('bytes:'):\n return string[6:].encode('utf-8').decode('unicode_escape')\n return string\ndef checkRepeatWhitespace(string : str) -> bool:\n return (len(string) > 3) and string[3:].isspace()\n\n''' no longer used\ndef generateStepR(prompt : str, stop : str | None, generated : str, prev_token : str) -> SearchTreeNode[str]:\n generated = generated + cleanUnicode(prev_token)\n if prev_token == EOT or (stop is not None and prev_token == stop) or checkRepeatWhitespace(generated):\n return Leaf(generated)\n return PredictNode(prompt + generated, generate(prompt, stop, generated))\ndef generate(prompt : str, stop : str | None, generated : str = \"\") -> Program[str, str]:\n return (lambda prev_token: generateStepR(prompt, stop, generated, prev_token))\ndef generateTree(prompt :str, stop : str | None) -> SearchTreeNode[str]:\n return PredictNode(prompt, generate(prompt, stop, \"\"))\n\n'''\n\n'''\nno longer used\ndef condition_on_yes(retNode : SearchTreeNode[RT]) -> Program[dict[str,float],RT]:\n def program(cats : dict[str,float]) -> SearchTreeNode[RT]:\n nonlocal retNode\n return ScoreNode(math.log(cats[' yes']) if cats[' yes'] != 0 else LOG_0, retNode)\n return program\n'''\n\n'''\nA program that answers a Yes/No question about a text, conditions on that question being 'Yes', and returns the text.\n'''\n\n''' no longer used\ndef answer_and_score(question : str) -> Program[str, str]:\n def answer_context_and_score(context : str) -> SearchTreeNode[str]:\n nonlocal question\n prompt = \\\n f\"\"\"Background text:\n \n {context}\n\n Answer the following question about the background text above: \n\n {question}\n\n Say \"Answer: Yes\" or \"Answer: No\"\n\n Answer:\n \"\"\"\n return ClassifyNode(prompt, (' Yes', ' No'), condition_on_yes(Leaf(context)))\n return answer_context_and_score\n'''","repo_name":"spagovir/ice-cot-verifier-inference","sub_path":"search_tree.py","file_name":"search_tree.py","file_ext":"py","file_size_in_byte":12276,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"19468362359","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n#from pyspark.sql.types import *\n\nimport sys\nimport datetime\n\nif __name__ == \"__main__\":\n print(\"PySpark Mysql Application Started ...\")\n\n if len(sys.argv) == 2:\n get_data_date = sys.argv[1]\n print(\"get_data_date: \" + get_data_date)\n else:\n print(\"Failed, provide the get_data_date to start.\")\n exit(1)\n\n '''spark = SparkSession \\\n .builder \\\n .appName(\"PySpark Mysql\") \\\n .master(\"local[*]\") \\\n .config(\"spark.jars\", \"file:///C://Users//Thiruppathi//PycharmProjects//pyspark_mysql_demo//mysql-connector-java-5.1.46.jar\") \\\n .config(\"spark.executor.extraClassPath\", \"file:///C://Users//Thiruppathi//PycharmProjects//pyspark_mysql_demo//mysql-connector-java-5.1.46.jar\") \\\n .config(\"spark.executor.extraLibrary\", \"file:///C://Users//Thiruppathi//PycharmProjects//pyspark_mysql_demo//mysql-connector-java-5.1.46.jar\") \\\n .config(\"spark.driver.extraClassPath\", \"file:///C://Users//Thiruppathi//PycharmProjects//pyspark_mysql_demo//mysql-connector-java-5.1.46.jar\") \\\n .enableHiveSupport()\\\n .getOrCreate()'''\n\n spark = SparkSession \\\n .builder \\\n .appName(\"PySpark Mysql Demo\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\n spark.sparkContext.setLogLevel(\"ERROR\")\n\n if get_data_date is None:\n current_datetime = datetime.datetime.now()\n start_date = current_datetime.strftime(\"%Y-%m-%d\")\n print(\"start_date: \" + start_date)\n print(type(current_datetime))\n get_datetime = current_datetime + datetime.timedelta(days=-1)\n get_data_date = get_datetime.strftime(\"%Y-%m-%d\")\n print(\"get_data_date: \" + get_data_date)\n\n mysql_db_driver_class = \"com.mysql.jdbc.Driver\"\n table_name = \"cc_transaction_tbl\"\n host_name = \"localhost\"\n port_no = str(3306)\n user_name = \"root\"\n password = \"root\"\n database_name = \"ecommerce_db\"\n\n mysql_select_query = None\n mysql_select_query = \"(select * from \" + table_name + \" where date_format(transaction_datetime, '%Y-%m-%d') = '\" \\\n + get_data_date + \"') as cc_transaction_tbl\"\n\n mysql_jdbc_url = \"jdbc:mysql://\" + host_name + \":\" + port_no + \"/\" + database_name\n\n print(\"Printing JDBC Url: \" + mysql_jdbc_url)\n\n trans_detail_tbl_data_df = spark.read.format(\"jdbc\") \\\n .option(\"url\", mysql_jdbc_url) \\\n .option(\"driver\", mysql_db_driver_class) \\\n .option(\"dbtable\", mysql_select_query) \\\n .option(\"user\", user_name) \\\n .option(\"password\", password) \\\n .load()\n\n trans_detail_tbl_data_df.show(2, False)\n\n hdfs_output_path = \"/user/hadoop/data/ecommerce_data/transactional_detail\" + \"/\" + get_data_date\n print(\"Printing hdfs_output_path: \" + hdfs_output_path)\n\n trans_detail_tbl_data_df\\\n .write \\\n .option(\"timestampFormat\", \"yyyy-MM-dd HH:mm:ss\") \\\n .csv(hdfs_output_path, header=True, sep=\",\")\n\n print(\"PySpark Mysql Demo Completed.\")","repo_name":"thirupathikumar/pyspark_sample","sub_path":"pyspark_examples/pyspark/pyspark_read_from_mysql.py","file_name":"pyspark_read_from_mysql.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14164747686","text":"import data\nfrom profiteer import database_f, cli_f, sync_q, sync_f, user, error, user_log\n\ntable_list = [\n user.User.table_info,\n error.Error.table_info,\n user_log.UserLog.table_info,\n] + data.sync_tables\n\ndef main(fix = False, show_fixes=False, print_output=True):\n output = []\n \n # This allows for easy swapping\n if print_output:\n pr = print\n else:\n pr = output.append\n \n cursor = database_f.get_cursor()\n \n if fix: pr(cli_f.shell_text(\"\\n''Checking and fixing tables''\"))\n else: pr(cli_f.shell_text(\"\\n''Checking tables''\"))\n \n for table_info in table_list:\n pr(sync_f.check_table(cursor, table_info, fix, show_fixes))\n \n if fix:\n pr(\"\\nCommited changes\\n\")\n \n if pr == output.append:\n try:\n return \"\\n\".join(output)\n except Exception as e:\n print(output)\n raise\n else:\n return \"\"\n","repo_name":"Teifion/Profiteer","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"1119748708","text":"from random import *\nfrom termcolor import colored\n\n\nmandarin_int = [\"líng\", \"yī\", \"èr\", \"sān\", \"sì\", \"wǔ\", \"liù\", \"qī\", \"bā\", \"jiǔ\"]\nmandarin_measures = {\n 3: \"bǎi\",\n 4: \"qīan\",\n 5: \"wàn\"\n}\n\nmeasure_colours = {\n 3: \"green\",\n 4: \"yellow\",\n 5: \"red\"\n}\n\n\ndef print_num_measure(num_str):\n if int(num_str) == 0:\n return\n if int(num_str[0]) != 0 or int(num_str[1]) != 0:\n if int(num_str[0]) == 2:\n print(\"liǎng\", end=\" \")\n else:\n print(mandarin_int[int(num_str[0])], end=\" \")\n if int(num_str[0]) != 0:\n print(colored(mandarin_measures[len(num_str)], measure_colours[len(num_str)]), end=\" \")\n if len(num_str) > 3:\n print_num_measure(num_str[1-len(num_str):])\n else:\n print_2_digit(num_str[-2:], False)\n\n\ndef generate_number():\n size = randint(1, 8)\n return round(random() * pow(10, size))\n\n\ndef print_2_digit(num_str, is_only_2):\n if int(num_str) == 0:\n return\n if int(num_str[0]) != 1 or not is_only_2:\n print(mandarin_int[int(num_str[0])], end=\" \")\n if int(num_str[0]) != 0:\n print(colored(\"shí\", \"blue\"), end=\" \")\n if int(num_str[1]) != 0:\n print(mandarin_int[int(num_str[1])], end=\" \")\n\n\ndef get_mandarin(num):\n num_str = str(num)\n\n if len(num_str) == 1:\n print(mandarin_int[int(num_str)], end=\"\")\n elif len(num_str) == 2:\n print_2_digit(num_str, True)\n elif len(num_str) <= 5:\n print_num_measure(num_str)\n else:\n get_mandarin(num_str[:-4])\n if int(num_str[0]) != 0:\n print(colored(\"wàn\", \"red\"), end=\" \")\n print_num_measure(num_str[-4:])\n\n\nif __name__ == '__main__':\n while 1:\n num = generate_number()\n if len(str(num)) > 6:\n print(str(num)[:-6], end=\",\")\n print(str(num)[-6:-3], end=\",\")\n print(str(num)[-3:], end=\"\")\n elif len(str(num)) > 4:\n print(str(num)[:-3], end=\",\")\n print(str(num)[-3:], end=\"\")\n else:\n print(num, end=\"\")\n input()\n get_mandarin(num)\n print(\"\")\n input()\n","repo_name":"SarahBornais/chinese_numbers_practice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"704415249","text":"#!/usr/bin/python3\n\nimport os\nimport json\nfrom BaseController import *\n\nclass ControllerFactory:\n def __init__(self, mockData = False):\n self.mockData = mockData\n self.controllers = {}\n return\n\n def scanDir(self,jsonDirectory = \"saved-game\"):\n jsonFiles = []\n for directory in os.walk(jsonDirectory):\n for fileName in directory[2]: # the file name list\n if (fileName[-5:] == '.json'): # load .json files explicitly\n f = fileName[:-5] # cut .json extension\n jsonFiles.append(f)\n jsonFiles.sort()\n return jsonFiles\n\n def createController(self,name='base'):\n \"\"\" Return Real or Mock Controllers By Name \"\"\"\n\n if self.mockData:\n return {\n 'alive': AliveController('mock-data', 'alive'),\n 'characters': CharacterController('mock-data', 'characters'),\n 'inventory': InventoryController('mock-data', 'inventory'),\n 'temp': TempController('mock-data', 'temp'),\n 'worldRooms' : WorldRoomsController('mock-data', 'worldRooms')\n }.get(name, BaseController('mock-data', 'base'))\n\n return {\n 'alive': AliveController(),\n 'characters': CharacterController(),\n 'inventory': InventoryController(),\n 'temp': TempController(),\n 'worldRooms' : WorldRoomsController()\n }.get(name, BaseController('mock-data', 'base'))\n\n def initGame(self, jsonDirectory = \"saved-game\"):\n files = self.scanDir(jsonDirectory)\n for fileName in files:\n self.controllers[fileName] = self.createController(fileName)\n\n return self.controllers\n","repo_name":"starvagrant/deprecatedgitcrystals","sub_path":"draft/ControllerFactory.py","file_name":"ControllerFactory.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"36713156126","text":"\"\"\"\n시간복잡도가 중요한 문제는 아님\n십진수를 다른 n진수로 변환하는 방법을 안다면 크게 어렵지 않은 문제\n역시 문자열 처리하는 과정이 들어감.\n\"\"\"\n\n\ndef solution(n, t, m, p):\n numbers = \"0123456789ABCDEF\"\n s, num, turn, answer = \"\", 0, p - m, \"\"\n for _ in range(t):\n turn += m\n while True:\n if len(s) < turn:\n temp = num\n number = \"\"\n while True:\n temp, left = divmod(temp, n)\n number = str(numbers[left]) + number\n if not temp:\n break\n s += number\n num += 1\n else:\n break\n answer += s[turn - 1]\n return answer","repo_name":"kasta12/algorithm_test","sub_path":"python/카카오_n진수 게임.py","file_name":"카카오_n진수 게임.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21934328403","text":"def func(string):\r\n str=''\r\n for i in range(len(string)-1,-1,-1):\r\n str=str+string[i]\r\n\r\n if str == string:\r\n print(\"It's a palindrome\")\r\n else:\r\n print(\"It's not a palindrome\")\r\n\r\nstring=input(\"Enter any word: \")\r\nfunc(string)","repo_name":"sumairlaghari/AllAssignments","sub_path":"AssignmentsNo5/As5.4.py","file_name":"As5.4.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38406683398","text":"\"\"\"Module containing the Behavioral model class\"\"\"\nimport copy as cp\nimport numpy as np\nfrom .agent import Agent\n\n\nclass BehavioralModel:\n \"\"\"A class used to represent the behavioral beh_model\n\n Attributes\n ----------\n agent : object\n Object of class agent\n a_t : array_like\n Action value in trial t\n \"\"\"\n p_a_giv_h: np.ndarray # likelihood function of action giv history and tau\n rvs: np.ndarray\n log_likelihood: float = np.nan\n action_t = np.nan # agent action\n\n def __init__(self, tau_gen: float, agent_object: Agent):\n\n self.agent: Agent = agent_object\n self.tau = tau_gen # decision noice parameter\n\n def eval_p_a_giv_tau(self):\n \"\"\"Evaluate conditional probability distribution of actions given the\n history of actions and observations and tau\n aka. likehood of this tau\"\"\"\n self.p_a_giv_h = np.exp((1 / self.tau) * self.agent.valence_t) / sum(\n np.exp((1 / self.tau) * self.agent.valence_t))\n\n def eval_rvs(self):\n \"\"\"Evaluate action according to sample from multinomial distribution\"\"\"\n rng = np.random.default_rng()\n self.rvs = rng.multinomial(1, self.p_a_giv_h)\n\n def return_action(self):\n \"\"\"let behavioral model return action value given agent's decision.\"\"\"\n # probability action given decision of 1\n if (np.isnan(self.tau) or self.tau == 0):\n self.action_t = cp.deepcopy(self.agent.decision_t)\n\n else:\n self.eval_p_a_giv_tau()\n self.eval_rvs()\n action_index = self.rvs.argmax()\n self.action_t = self.agent.a_s1[action_index]\n\n return self.action_t\n\n def eval_p_a_giv_h_this_action(self, this_action):\n \"\"\"Evaluate the conditional probability of this action given the\n history of actions and observations and tau aka. log likelihood of this\n tau\"\"\"\n self.log_likelihood = float(np.log(\n self.p_a_giv_h[\n np.where(self.agent.a_s1 == this_action)[0][0]]\n ))\n","repo_name":"belindamef/treasure-hunt","sub_path":"code/utilities/modelling.py","file_name":"modelling.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"25186361025","text":"from flask import Flask, render_template\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n list={\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"}\r\n d=dict(好きな色='紫',好きな科目='科目',好きなゲーム='FPS')\r\n flag=True\r\n return render_template('index.html',list=list,d=d,flag=flag)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(port=8080)","repo_name":"yukis4n/psemi_flask","sub_path":"env/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"37061852916","text":"\n\"\"\"\n* In the 20×20 grid below, four numbers along a diagonal line have been marked in red.\n* \n* The product of these numbers is 26 × 63 × 78 × 14 = 1788696.\n* What is the greatest product of four adjacent numbers in the same direction(up, down, left, right, or diagonally) in the 20×20 grid?\n\"\"\"\n\nimport random\nimport numpy as np\n\n# create a grid\ndef createGrid(n, p):\n rows, cols = (n, p)\n grid = []\n for i in range(rows):\n col = []\n for j in range(cols):\n col.append(random.randint(1, 9))\n grid.append(col)\n print(col)\n\n return grid\n\n# largest product of adjacent n numbers in a row k\ndef largestProductInRow(grid,n,k):\n lprod = 1\n for i in range(0,len(grid)-n+1):\n product = 1\n for j in range(i,i+n):\n product *= grid[k][j]\n if product>lprod:\n lprod = product\n return lprod\n\n# largest product of adjacent n numbers in a column k\ndef largestProductInCol(grid, n, k):\n lprod = 1\n for i in range(0,len(grid[0])-n+1):\n product = 1\n for j in range(i,i+n):\n product *= grid[j][k]\n if product>lprod:\n lprod = product\n return lprod\n\n\n# extract diagonals from grid\ndef extractDiagonalsFromGrid(grid):\n\n diags = []\n rows = len(grid)\n cols = len(grid[0])\n indexRow = min([rows,cols])\n indexCol = max([rows, cols])\n\n if rows>cols:\n grid = np.array(grid).T\n print(grid)\n\n # j for row \n # i for col\n # principal dial: 1,1 - 2,2 - i,i === i=0,lastIndex\n\n for j in range(0,indexRow):\n diag = []\n for i in range(0, indexRow-j):\n try:\n diag.append(grid[i+j][i])\n except:\n print('out of range '+[i+j, i])\n finally:\n continue\n diags.append(diag)\n # print(diag)\n\n for j in range(1, indexCol):\n diag = []\n for i in range(0, indexCol-j):\n try:\n diag.append(grid[i][j+i])\n except:\n print('out of range '+[i+j, i])\n finally:\n continue\n diags.append(diag)\n # print(diag)\n return diags\n\ndef extractAllDiagonalsFromGrid(grid):\n diags = []\n diags.append(extractDiagonalsFromGrid(grid))\n diags.append(extractDiagonalsFromGrid(reverseGrid(grid)))\n return diags\n\ndef reverseGrid(grid):\n rGrid = []\n for i in range(len(grid)-1,-1,-1):\n rGrid.append(grid[i])\n\n return rGrid\n \n# largest product of adjacent n numbers in a diagonal k\ndef largestProductInDiag(grid,n):\n diags = extractAllDiagonalsFromGrid(grid)\n lprod = 1\n for i in range(0,1):\n for j in range(0,len(diags[i])):\n product = 1\n for k in range(0,len(diags[i][j])):\n if len(diags[i][j])==n:\n product *= diags[i][j][k]\n if product>lprod:\n lprod = product\n\n return lprod\n\ndef largestProductInGrid(grid,n):\n\n pDiag = largestProductInDiag(grid, n)\n\n pCol = 1\n for i in range(0,len(grid)):\n if largestProductInCol(grid, n, i) > pCol:\n pCol = largestProductInCol(grid, n, i)\n\n pRow = 1\n for j in range(0, len(grid[0])):\n if largestProductInRow(grid, n, j) > pRow:\n pRow = largestProductInRow(grid, n, j)\n\n theLargest = [pDiag,pRow,pCol]\n\n return max(theLargest)\n\n\nprint(largestProductInGrid(createGrid(10, 10), 4))\n","repo_name":"m-housni/coding-challenges","sub_path":"D2/largestProductInGrid.py","file_name":"largestProductInGrid.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"20808773260","text":"\ndef smallerNumbersThanCurrent(nums):\n \n smaller_num = []\n for i in range(len(nums)):\n j=0\n num = 0\n while j < len(nums):\n if nums[j] < nums[i]:\n num +=1\n j+=1\n smaller_num.append(num)\n \n return smaller_num\n\nprint(smallerNumbersThanCurrent([8,1,2,2,3]))","repo_name":"HiwotTadesse/Competitive-Programming","sub_path":"smallerNumbersThanCurrent.py","file_name":"smallerNumbersThanCurrent.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"10768964351","text":"import typing\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..core.config import DatasetName\nfrom ..core.dataset import (assign_sorted_coord, check_multi_index_aligned, filter_aggregate, pivot_multi_index_level,\n quantile_slice, stacked_histogram, subset_xs)\nfrom ..core.plot import (AALineDataView, BarPlotDataView, BenchmarkPlot, BenchmarkSubPlot, BenchmarkTable,\n HistPlotDataView, LegendInfo, Scale, ScatterPlotDataView, Symbols, TableDataView)\n\n\nclass SetBoundsDistribution(BenchmarkSubPlot):\n \"\"\"\n Draw kernel (static) csetbounds distribution by size of enforced bounds.\n \"\"\"\n def __init__(self, plot):\n super().__init__(plot)\n self.bounds_stats = self.get_dataset(DatasetName.KERNEL_CSETBOUNDS_STATS)\n assert self.bounds_stats is not None, \"Can not find required dataset\"\n\n def get_stats_df(self):\n raise NotImplementedError(\"Must override\")\n\n def get_legend_info(self):\n df = self.get_stats_df()\n datasets = df.index.get_level_values(\"dataset_id\").unique()\n legend_info = self.build_legend_by_dataset()\n # Only use the datasets in the dataframe\n legend_index = legend_info.info_df.index\n avail_labels = legend_index[legend_index.isin(datasets)]\n return LegendInfo(legend_info.info_df.reindex(avail_labels))\n\n\nclass SetBoundsSimpleDistribution(SetBoundsDistribution):\n def generate(self, fm, cell):\n \"\"\"\n Generate interleaved histograms with one set of bars for each dataset, so\n that we have side-by-side comparison of the buckets.\n \"\"\"\n df = self.get_stats_df()\n nviews = len(df.index.get_level_values(\"dataset_id\").unique())\n # Determine buckets we are going to use\n min_size = max(df[\"size\"].min(), 1)\n max_size = max(df[\"size\"].max(), 1)\n log_buckets = range(int(np.log2(min_size)), int(np.log2(max_size)) + 1)\n buckets = [2**i for i in log_buckets]\n\n # Build histograms for each dataset\n view = HistPlotDataView(df, x=\"size\", buckets=buckets, bucket_group=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n cell.x_ticks = buckets\n cell.x_config.label = \"Bounds size (bytes)\"\n cell.x_config.scale = Scale(\"log\", base=2)\n cell.yleft_config.label = \"#csetbounds\"\n cell.add_view(view)\n\n\nclass KernelBoundsDistribution(SetBoundsSimpleDistribution):\n def get_cell_title(self):\n return \"Kernel bounds histogram\"\n\n def get_stats_df(self):\n df = self.bounds_stats.merged_df\n return df[df[\"src_module\"] == \"kernel\"]\n\n\nclass ModulesBoundsDistribution(SetBoundsSimpleDistribution):\n def get_cell_title(self):\n return \"Modules bounds histogram\"\n\n def get_stats_df(self):\n df = self.bounds_stats.merged_df\n return df[df[\"src_module\"] != \"kernel\"]\n\n\nclass KernelBoundsDistributionByKind(SetBoundsDistribution):\n def get_cell_title(self):\n return \"Kernel bounds by kind\"\n\n def get_stats_df(self):\n df = self.bounds_stats.merged_df\n return df[df[\"src_module\"] == \"kernel\"]\n\n def get_kind_legend_info(self):\n df = self.get_stats_df()\n kind = df[\"kind\"].unique()\n dsid = df.index.unique(\"dataset_id\")\n index = pd.MultiIndex.from_product([dsid, kind], names=[\"dataset_id\", \"kind\"])\n legend_df = pd.DataFrame(index=index)\n legend_df[\"labels\"] = np.array(map(lambda k: k.name, kind))\n legend_df[\"colors\"] = LegendInfo.gen_colors(legend_df, \"Paired\", groupby=\"kind\")\n return LegendInfo(legend_df)\n\n def generate(self, fm, cell):\n \"\"\"\n Generate interleaved and stacked histograms.\n Horizontal bars are created for each datasets, stacked bars are generated\n 
for each setbounds kind.\n \"\"\"\n df = self.get_stats_df()\n nviews = len(df.index.get_level_values(\"dataset_id\").unique())\n # Determine buckets we are going to use\n min_size = max(df[\"size\"].min(), 1)\n max_size = max(df[\"size\"].max(), 1)\n log_buckets = range(int(np.log2(min_size)), int(np.log2(max_size)) + 1)\n buckets = [2**i for i in log_buckets]\n\n # Build the stacked histogram dataframe\n hist_df = stacked_histogram(df, group=\"dataset_id\", stack=\"kind\", data_col=\"size\", bins=buckets)\n\n view = BarPlotDataView(hist_df, x=\"bin_start\", yleft=\"count\", bar_group=\"dataset_id\", stack_group=\"kind\")\n\n # Build histograms for each dataset\n view.legend_info = self.get_kind_legend_info()\n cell.x_config.ticks = buckets\n cell.x_config.label = \"Bounds size (bytes)\"\n cell.x_config.scale = Scale(\"log\", base=2)\n cell.yleft_config.label = \"#csetbounds\"\n cell.add_view(view)\n\n\nclass KernelStructStatsPlot(BenchmarkSubPlot):\n \"\"\"\n Base class for kernel struct size plots\n \"\"\"\n def __init__(self, plot):\n super().__init__(plot)\n self.struct_stat = self.get_dataset(DatasetName.KERNEL_STRUCT_STATS)\n assert self.struct_stat is not None, \"Can not find required dataset\"\n\n def get_df_no_baseline(self):\n # Omit baseline as we are looking at deltas\n sel = (self.struct_stat.merged_df.index.get_level_values(\"dataset_id\") != self.benchmark.uuid)\n return self.struct_stat.merged_df[sel]\n\n def get_df(self):\n return self.struct_stat.merged_df\n\n def get_agg_df(self):\n return self.struct_stat.agg_df\n\n def get_legend_info(self):\n legend = self.build_legend_by_dataset()\n return legend.assign_colors_hsv(\"dataset_id\", h=(0.1, 0.9), s=(0.4, 0.9), v=(0.6, 1))\n\n\nclass KernelStructSizeHist(KernelStructStatsPlot):\n def get_median_line_legend(self):\n legend = {\n uuid: \"median \" + str(bench.instance_config.name)\n for uuid, bench in self.benchmark.merged_benchmarks.items()\n }\n legend[self.benchmark.uuid] = f\"median {self.benchmark.instance_config.name}(*)\"\n index = pd.Index(legend.keys(), name=\"dataset_id\")\n return LegendInfo.from_index(index, cmap_name=\"Greys\", labels=legend.values(), color_range=(0.5, 1))\n\n def get_hist_column(self):\n raise NotImplementedError(\"Must override\")\n\n def get_df_selector(self):\n return None\n\n def build_buckets(self, df):\n # Determine bucket sizes\n hcol = self.get_hist_column()\n min_size = df[hcol].min()\n max_size = df[hcol].max()\n if np.sign(min_size) == np.sign(max_size) or min_size == 0 or max_size == 0:\n interval = sorted(np.abs((min_size, max_size)))\n log_min, log_max = np.ceil(np.log2(np.maximum(interval, 1))).astype(int)\n buckets = [2**i for i in range(log_min, log_max + 1)]\n if min_size < 0:\n buckets = list(-1 * np.array(buckets))\n else:\n neg_min = abs(min_size)\n neg_log = np.ceil(np.log2(neg_min)).astype(int)\n pos_log = np.ceil(np.log2(max_size)).astype(int)\n neg_buckets = [-2**i for i in range(0, neg_log + 1)]\n pos_buckets = [2**i for i in range(0, pos_log + 1)]\n buckets = neg_buckets + pos_buckets\n return sorted(buckets)\n\n def generate(self, fm, cell):\n df = self.get_df()\n agg_df = self.get_agg_df()\n hcol = self.get_hist_column()\n buckets = self.build_buckets(df)\n\n # Build histogram\n view = HistPlotDataView(df, x=hcol, buckets=buckets, bucket_group=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n # Add help lines for the median struct size\n line_cols = [hcol + (\"median\", )]\n view = 
AALineDataView(agg_df, vertical=line_cols)\n view.style.line_style = \"dashed\"\n view.legend_info = self.get_median_line_legend()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n cell.x_config.label = \"Size (bytes)\"\n cell.x_config.scale = Scale(\"symlog\", base=2, lintresh=1, linscale=0.25)\n cell.x_config.ticks = buckets\n cell.x_config.limits = (min(buckets), max(buckets))\n cell.yleft_config.label = \"# structs\"\n\n\nclass KernelStructSizeDistribution(KernelStructSizeHist):\n \"\"\"\n Draw kernel structure size shift in distribution\n \"\"\"\n def get_cell_title(self):\n return \"Kernel structure size distribution\"\n\n def get_hist_column(self):\n return (\"size\", \"sample\")\n\n\nclass KernelStructSizeOverhead(KernelStructSizeHist):\n def get_cell_title(self):\n return \"Kernel structure size overhead\"\n\n def get_hist_column(self):\n return (\"size\", \"delta_baseline\")\n\n def get_df(self):\n df = super().get_df()\n # take the non-baseline delta values only\n baseline = df.index.get_level_values(\"dataset_id\") == self.benchmark.uuid\n return df[~baseline]\n\n def get_agg_df(self):\n df = super().get_agg_df()\n # take the non-baseline delta values only\n baseline = df.index.get_level_values(\"dataset_id\") == self.benchmark.uuid\n return df[~baseline]\n\n def generate(self, fm, cell):\n super().generate(fm, cell)\n cell.x_config.label = \"size delta (bytes)\"\n cell.yleft_config.scale = Scale(\"log\", base=10)\n\n\nclass KernelStructSizeRelOverhead(KernelStructSizeHist):\n def get_cell_title(self):\n return \"Kernel structure size overhead\"\n\n def get_hist_column(self):\n return (\"size\", \"norm_delta_baseline\")\n\n def get_df(self):\n df = super().get_df()\n # take the non-baseline delta values only\n baseline = df.index.get_level_values(\"dataset_id\") == self.benchmark.uuid\n df = df[~baseline].copy()\n df[self.get_hist_column()] *= 100\n return df\n\n def get_agg_df(self):\n df = super().get_agg_df()\n # take the non-baseline delta values only\n baseline = df.index.get_level_values(\"dataset_id\") == self.benchmark.uuid\n df = df[~baseline].copy()\n df[self.get_hist_column() + (\"median\", )] *= 100\n return df\n\n def build_buckets(self, df):\n min_val = df[self.get_hist_column()].min()\n max_val = df[self.get_hist_column()].max()\n min_val = int(np.floor(min_val / 10) * 10)\n max_val = int(np.ceil(max_val / 10) * 10)\n buckets = range(min_val, max_val + 10, 10)\n return buckets\n\n def generate(self, fm, cell):\n super().generate(fm, cell)\n cell.x_config.label = \"% size delta\"\n cell.x_config.scale = Scale(\"linear\")\n cell.yleft_config.scale = Scale(\"log\", base=10)\n\n\nclass KernelStructSizeCachelineFixableOverhead(KernelStructStatsPlot):\n \"\"\"\n Show kernel structure size overhead in terms of number of cache lines.\n We only show structures for which the size overhead causes the structure to grow by\n more than one cache line and for which removing the padding would fix the issue.\n \"\"\"\n def get_cell_title(self):\n return \"Fixable overhead larger than 64-byte cache lines\"\n\n def generate(self, fm, cell):\n all_df = self.get_df()\n baseline_df = all_df.xs(self.benchmark.uuid, level=\"dataset_id\").copy()\n other_df = self.get_df_no_baseline().copy()\n cacheline_size = 64\n dsize_col = (\"size\", \"delta_baseline\")\n size_col = (\"size\", \"sample\")\n\n def count_cachelines(v):\n return np.ceil(float(v) / cacheline_size)\n\n baseline_df[\"size_ncachelines\"] = baseline_df[size_col].map(count_cachelines)\n 
other_df[\"size_ncachelines\"] = other_df[size_col].map(count_cachelines)\n other_df[\"size_fixup_ncachelines\"] = (other_df[size_col] -\n other_df[(\"total_pad\", \"delta_baseline\")]).map(count_cachelines)\n _, aligned_baseline = other_df.align(baseline_df)\n aligned_baseline = aligned_baseline.reorder_levels(other_df.index.names).sort_index()\n changed_cachelines = other_df[\"size_ncachelines\"] > aligned_baseline[\"size_ncachelines\"]\n fixable_cachelines = other_df[\"size_fixup_ncachelines\"] == aligned_baseline[\"size_ncachelines\"]\n sel = (changed_cachelines & fixable_cachelines)\n # We need to keep the frame aligned when selecting, otherwise we will shuffle the bar data\n view_df = filter_aggregate(other_df, sel, by=[\"dataset_id\"], how=\"any\").copy()\n view_df[\"x\"] = assign_sorted_coord(view_df, sort=[dsize_col], group_by=[\"dataset_id\"], ascending=False)\n\n view = BarPlotDataView(view_df, x=\"x\", yleft=[dsize_col], bar_group=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n cell.x_config.label = \"struct name\"\n cell.x_config.ticks = view_df[\"x\"].unique()\n cell.x_config.tick_labels = view_df.index.get_level_values(\"name\").unique()\n cell.x_config.tick_rotation = 90\n cell.yleft_config.label = Symbols.DELTA + \"size (bytes)\"\n\n\nclass KernelStructHighOverhead(KernelStructStatsPlot):\n def get_df(self):\n # Omit baseline as we are looking at deltas\n sel = (self.struct_stat.merged_df.index.get_level_values(\"dataset_id\") != self.benchmark.uuid)\n return self.struct_stat.merged_df[sel]\n\n def get_legend_info(self):\n legend_base = self.build_legend_by_dataset()\n if len(self.columns) > 1:\n legend_merge = {}\n for col, desc in zip(self.columns, self.columns_desc):\n legend_merge[col] = legend_base.map_label(lambda l: desc + \" \" + l)\n legend_info = LegendInfo.combine(\"column\", legend_merge)\n legend_info.remap_colors(\"Paired\", group_by=\"dataset_id\")\n else:\n legend_info = legend_base.map_label(lambda l: self.columns_desc[0] + \" \" + l)\n legend_info.remap_colors(\"Paired\")\n return legend_info\n\n def get_high_overhead_df(self, quantile, maxbar):\n df = self.get_df()\n ngroups = len(df.index.get_level_values(\"dataset_id\").unique())\n max_entries = int(maxbar / ngroups)\n match = df[df[self.columns] >= df[self.columns].quantile(quantile)]\n if len(match) > maxbar:\n self.logger.warning(\"capping high delta entries to %d, %dth percentile contains %d\", maxbar, quantile * 100,\n len(match) / ngroups)\n # Actually compute the frame. 
Note that this may have slightly more entries than maxbar\n high_df = quantile_slice(df, self.columns, quantile, max_entries, [\"dataset_id\"])\n return high_df\n\n def generate(self, fm, cell):\n high_df = self.get_high_overhead_df(0.9, 50)\n high_df[\"x\"] = assign_sorted_coord(high_df, sort=self.columns, group_by=[\"dataset_id\"], ascending=False)\n\n view = BarPlotDataView(high_df, x=\"x\", yleft=self.columns, bar_group=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n cell.x_config.label = \"struct name\"\n cell.x_config.ticks = high_df[\"x\"].unique()\n cell.x_config.tick_labels = high_df.index.get_level_values(\"name\").unique()\n cell.x_config.tick_rotation = 90\n\n\nclass KernelStructPaddingOverhead(KernelStructHighOverhead):\n columns = [(\"total_pad\", \"delta_baseline\")]\n columns_desc = [Symbols.DELTA + \"padding\"]\n\n def get_cell_title(self):\n return \"Top kernel struct padding overhead\"\n\n def generate(self, fm, cell):\n super().generate(fm, cell)\n cell.yleft_config.label = Symbols.DELTA + \"padding (bytes)\"\n\n\nclass KernelStructNestedPaddingOverhead(KernelStructHighOverhead):\n columns = [(\"nested_total_pad\", \"delta_baseline\")]\n columns_desc = [Symbols.DELTA + \"nested padding\"]\n\n def get_cell_title(self):\n return \"Top kernel cumulative struct padding overhead\"\n\n def generate(self, fm, cell):\n super().generate(fm, cell)\n cell.yleft_config.label = Symbols.DELTA + \"padding (bytes)\"\n\n\nclass KernelStructNestedPackedSizeOverhead(KernelStructHighOverhead):\n \"\"\"\n Measure the size difference in structures, considering only the member size, as if the structure\n was packed.\n \"\"\"\n columns = [(\"nested_packed_size\", \"delta_baseline\")]\n columns_desc = [Symbols.DELTA + \"nested packed size\"]\n\n def get_cell_title(self):\n return \"Kernel struct packed size delta\"\n\n def generate(self, fm, cell):\n super().generate(fm, cell)\n cell.yleft_config.label = Symbols.DELTA + \"packed size (bytes)\"\n\n\nclass KernelStructPaddingOverheadTable(KernelStructHighOverhead):\n columns = [(\"total_pad\", \"delta_baseline\"), (\"nested_total_pad\", \"delta_baseline\")]\n columns_desc = [Symbols.DELTA + \"padding\", Symbols.DELTA + \"nested padding\"]\n\n def get_legend_info(self):\n return None\n\n def get_cell_title(self):\n return \"Top kernel struct padding overhead\"\n\n def generate(self, fm, cell):\n high_df = self.get_high_overhead_df(0.9, np.inf)\n view = TableDataView(high_df, columns=self.columns)\n view.legend_info = self.get_legend_info()\n view.legend_level = high_df.index.names\n cell.add_view(view)\n\n\nclass KernelStructPointerDensity(KernelStructStatsPlot):\n def get_cell_title(self):\n return \"Kernel struct pointer density\"\n\n def generate(self, fm, cell):\n df = self.struct_stat.merged_df.copy()\n ptr_count_col = (\"ptr_count\", \"sample\")\n m_count_col = (\"member_count\", \"sample\")\n\n df[\"ptr_density\"] = (df[ptr_count_col] / df[m_count_col]) * 100\n buckets = range(0, 101, 5)\n\n view = HistPlotDataView(df, x=\"ptr_density\", buckets=buckets, bucket_group=\"dataset_id\")\n view.legend_info = self.build_legend_by_dataset()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n cell.x_config.label = \"% pointer density\"\n cell.x_config.ticks = buckets\n cell.yleft_config.label = \"# structs\"\n\n\nclass KernelPaddingDistribution(KernelStructStatsPlot):\n \"\"\"\n Histogram showing the structure padding distribution.\n \"\"\"\n def 
get_cell_title(self):\n return \"Kernel struct padding distribution\"\n\n def generate(self, fm, cell):\n df = self.struct_stat.merged_df.copy()\n pad_col = (\"total_pad\", \"sample\")\n max_bucket = int(np.log2(df[pad_col].max())) + 1\n buckets = [2**i for i in range(max_bucket)]\n\n view = HistPlotDataView(df, x=pad_col, buckets=buckets, bucket_group=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n\n cell.x_config.label = \"padding (bytes)\"\n cell.x_config.ticks = buckets\n cell.x_config.scale = Scale(\"log\", base=2)\n cell.yleft_config.label = \"# structs\"\n\n\nclass KernelPaddingScatter(KernelStructStatsPlot):\n \"\"\"\n Scatter plot showing the structure padding delta, without showing the structure names\n to better display the distribution of structure sizes.\n \"\"\"\n def get_cell_title(self):\n return \"Kernel struct padding scatter distribution\"\n\n def generate(self, fm, cell):\n df = self.struct_stat.merged_df.copy()\n df[\"x\"] = df.groupby(\"dataset_id\").cumcount()\n pad_col = (\"total_pad\", \"delta_baseline\")\n\n view = ScatterPlotDataView(df, x=\"x\", yleft=pad_col, group_by=\"dataset_id\")\n view.legend_info = self.get_legend_info()\n view.legend_level = [\"dataset_id\"]\n cell.add_view(view)\n cell.x_config.label = \"struct\"\n cell.x_config.ticks = [df[\"x\"].min(), df[\"x\"].max()]\n cell.x_config.tick_labels = []\n cell.yleft_config.label = Symbols.DELTA + \"padding (bytes)\"\n\n\nclass KernelStaticInfoPlot(BenchmarkPlot):\n \"\"\"\n Report subobject bounds and struct statistics for the kernel\n \"\"\"\n require = {DatasetName.KERNEL_CSETBOUNDS_STATS, DatasetName.KERNEL_STRUCT_STATS}\n name = \"kernel-static-stats\"\n\n subplots = [\n KernelBoundsDistribution,\n ModulesBoundsDistribution,\n KernelBoundsDistributionByKind,\n KernelStructSizeDistribution,\n KernelStructSizeOverhead,\n KernelStructSizeRelOverhead,\n KernelStructSizeCachelineFixableOverhead,\n KernelStructPaddingOverhead,\n KernelStructNestedPaddingOverhead,\n KernelStructNestedPackedSizeOverhead,\n KernelStructPointerDensity,\n KernelPaddingDistribution,\n KernelPaddingScatter,\n ]\n\n def get_plot_name(self):\n return \"Kernel compile-time stats\"\n\n\nclass KernelStructSizeLargeOverhead(KernelStructStatsPlot):\n \"\"\"\n Show the structures responsible for the largest overhead\n \"\"\"\n def get_col(self):\n raise NotImplementedError(\"Must override\")\n\n def generate(self, fm, cell):\n df = self.struct_stat.merged_df\n col = self.get_col()\n # Get range high 10%\n high_thresh = df[col].quantile(0.9)\n cond = (df[col] >= high_thresh)\n high_df = df[cond]\n if \"iteration\" in df.index.names:\n high_df = high_df.droplevel(\"iteration\")\n assert high_df.index.is_unique, \"Non unique index?\"\n\n legend_info = self.get_legend_info()\n # Currently only use the legend_info to map labels to dataset_id\n show_df = legend_info.map_labels_to_level(high_df, \"dataset_id\", axis=0)\n show_df = pivot_multi_index_level(show_df, \"dataset_id\")\n # sort by highest delta\n sort_cols_sel = None\n for i, level_value in enumerate(col):\n sel = (show_df.columns.get_level_values(i) == level_value)\n if sort_cols_sel is None:\n sort_cols_sel = sel\n else:\n sort_cols_sel = sort_cols_sel & sel\n sort_cols = show_df.columns[sort_cols_sel].to_list()\n show_df.sort_values(by=sort_cols, ascending=False, inplace=True)\n\n show_cols = show_df.columns.get_level_values(\"metric\").isin(self.struct_stat.data_columns())\n col_idx = 
show_df.columns[show_cols]\n view = TableDataView(show_df, columns=col_idx)\n # force no legend coloring, have not set it up yet\n view.legend_info = None\n cell.add_view(view)\n\n\nclass KernelStructSizeLargeRelOverhead(KernelStructSizeLargeOverhead):\n def get_cell_title(self):\n return \"Large % overhead (>90th percentile)\"\n\n def get_col(self):\n return (\"size\", \"norm_delta_baseline\")\n\n\nclass KernelStructSizeLargeAbsOverhead(KernelStructSizeLargeOverhead):\n def get_cell_title(self):\n return \"Large absolute overhead (>90th percentile)\"\n\n def get_col(self):\n return (\"size\", \"delta_baseline\")\n\n\nclass PAHoleTable(BenchmarkSubPlot):\n \"\"\"\n Produce tabular output to provide information similar to what pahole generates.\n Struct members are on the Y axis. Datasets are pivoted to the columns axis.\n \"\"\"\n def __init__(self, plot):\n super().__init__(plot)\n self.struct_stats = self.get_dataset(DatasetName.KERNEL_STRUCT_STATS)\n self.member_stats = self.get_dataset(DatasetName.KERNEL_STRUCT_MEMBER_STATS)\n\n def get_cell_title(self):\n return \"Kernel pahole\"\n\n def build_table_legend(self, view_df) -> typing.Tuple[LegendInfo, pd.DataFrame]:\n \"\"\"\n Generate the legend for the pivoted table.\n We color rows in alternate colors and highlight the padding members.\n \"\"\"\n stripe_index = view_df.groupby(\"name\").ngroup() % 2\n # Insert the legend keys into the view frame\n view_df[\"color_stripe\"] = stripe_index\n\n # Build legend info\n legend_index = pd.Index([0, 1], name=\"color_stripe\")\n legend = LegendInfo.from_index(legend_index, [\"\"] * len(legend_index))\n legend.info_df[\"colors\"] = LegendInfo.gen_colors(legend.info_df,\n mapname=\"Greys\",\n groupby=[\"color_stripe\"],\n color_range=(0.0, 0.2))\n return legend, [\"color_stripe\"]\n\n def generate(self, fm, cell):\n struct_df = self.struct_stats.merged_df\n member_df = self.member_stats.merged_df\n if \"iteration\" in struct_df.index.names:\n struct_df = struct_df.droplevel(\"iteration\")\n if \"iteration\" in member_df.index.names:\n member_df = member_df.droplevel(\"iteration\")\n\n pahole_df = self.member_stats.gen_pahole_table()\n # Ensure things are still aligned\n assert check_multi_index_aligned(pahole_df, [\"name\", \"src_file\", \"src_line\", \"member_index\"])\n\n # Combine the member size (or padding size) with the member name, for ease of reading\n pahole_df[\"member_name\"] = pahole_df[\"member_name\"] + \" (\" + pahole_df[\"member_size\"].map(\n lambda s: f\"{s:.0f}\") + \")\"\n\n # Pivot the dataset_id level to the columns. 
We only care about the member_name and member_size\n # columns at this point\n pahole_columns = [\"member_name\"]\n legend_info = self.build_legend_by_dataset()\n view_df = pahole_df[pahole_columns]\n view_df = legend_info.map_labels_to_level(view_df, \"dataset_id\", axis=0)\n view_df = pivot_multi_index_level(view_df, \"dataset_id\")\n\n # Generate the table legend and associated columns\n table_legend, legend_levels = self.build_table_legend(view_df)\n\n col_sel = view_df.columns.get_level_values(\"metric\").isin(pahole_columns)\n view = TableDataView(view_df, columns=view_df.columns[col_sel])\n view.legend_info = table_legend\n view.legend_level = legend_levels\n cell.add_view(view)\n\n\nclass KernelStaticInfoTables(BenchmarkTable):\n \"\"\"\n Report tables of struct statistics with large deltas for the kernel\n \"\"\"\n require = {DatasetName.KERNEL_STRUCT_STATS, DatasetName.KERNEL_STRUCT_MEMBER_STATS}\n name = \"kernel-static-tables\"\n subplots = [\n KernelStructSizeLargeRelOverhead,\n KernelStructSizeLargeAbsOverhead,\n PAHoleTable,\n ]\n\n def get_plot_name(self):\n return \"Kernel compile-time detailed stats\"\n","repo_name":"qwattash/cheri-benchplot","sub_path":"pycheribenchplot/kernel_static/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":26224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
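The cache-line analysis above feeds struct sizes through a count_cachelines helper defined earlier in the module. A minimal sketch of the "fixable" test it drives, assuming 64-byte cache lines; the helper and constant here are illustrative reimplementations, not the module's own:

import math

CACHELINE_SIZE = 64  # assumed line size; the real module may parameterize this

def count_cachelines(size_bytes: int) -> int:
    # Number of cache lines needed to hold a struct of the given size.
    return math.ceil(size_bytes / CACHELINE_SIZE) if size_bytes > 0 else 0

def is_cacheline_fixable(size: int, pad_delta: int, baseline_size: int) -> bool:
    # Fixable: the struct grew past a cache-line boundary, but shedding the
    # extra padding (pad_delta) would restore the baseline footprint.
    grew = count_cachelines(size) > count_cachelines(baseline_size)
    fixed = count_cachelines(size - pad_delta) == count_cachelines(baseline_size)
    return grew and fixed

assert is_cacheline_fixable(size=72, pad_delta=16, baseline_size=60)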
+{"seq_id":"72809432797","text":"import os\nfrom query_helper import NewsQueryHelper, LocalQueryHelper\n\nNEWS_API_KEY = os.environ.get(\"NEWS_API_KEY\")\nWEATHER_API_KEY = os.environ.get(\"WEATHER_API_KEY\")\n\nif NEWS_API_KEY is None:\n print(\"Please provide news API key\")\n exit()\n\nif WEATHER_API_KEY is None:\n print(\"Please provide weather API key\")\n exit()\n\nUNITS = \"imperial\"\nLANGUAGE = \"en\"\n\nEXPORT_TO_JSON = True\n\nLOCAL_QUERIES = [\n LocalQueryHelper(\n city=\"Boston\",\n state=\"MA\",\n zipcode=\"02460\",\n country=\"us\",\n units=UNITS),\n LocalQueryHelper(\n city=\"Seattle\",\n state=\"WA\",\n zipcode=\"98122\",\n country=\"us\",\n units=UNITS),\n ]\n\n# All sources can be found programmatically using https://newsapi.org/docs/endpoints/sources\nNEWS_SOURCES = [\n\"abc-news\",\n\"al-jazeera-english\",\n\"ars-technica\",\n\"breitbart-news\",\n\"business-insider\",\n\"cbs-news\",\n\"cnn\",\n\"crypto-coins-news\",\n\"fox-news\",\n\"google-news\",\n\"hacker-news\",\n\"medical-news-today\",\n\"msnbc\",\n\"national-geographic\",\n\"nbc-news\",\n\"new-scientist\",\n\"next-big-future\",\n\"reuters\",\n\"techcrunch\",\n\"the-american-conservative\",\n\"the-verge\",\n\"the-wall-street-journal\",\n\"usa-today\"]\n\nNEWS_QUERIES = [\n NewsQueryHelper(\n query_name=\"All\",\n query=None,\n sources=NEWS_SOURCES,\n category=None,\n country=None,\n language=LANGUAGE,\n article_limit=20\n )\n]","repo_name":"gabriel-ross/PaperBoy","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40960311160","text":"from abstraction import Abstraction\nfrom Automata import Automata,State\nfrom copy import deepcopy\nimport os\nimport json\n\nclass AutomataManager():\n def __init__(self,abstraction):\n self.automata = Automata(abstraction)\n\n self.abstraction = abstraction\n self.automata._Automata__setConsideredAttributes(self.abstraction.getConsideredAttributes())\n self.automata._Automata__setIgnoredAttributes(self.abstraction.getIgnoredAttributes())\n\n def newTrace(self):\n self.trace = []\n self.prevStateID = -1\n self.currentStateID = -1\n self.currentState = None\n self.edgeState = None\n\n def setParentPath(self,parentPath):\n self.parentPath = parentPath\n\n def getAutomata(self):\n return self.automata\n\n def getTrace(self):\n return self.trace\n\n def getCurrentState(self):\n return self.currentState\n\n def saveAutomata(self,automataDir):\n print(\"Save automata.\")\n\n # clean the automata directory first\n for file in os.listdir(automataDir):\n targetFile = os.path.join(automataDir,file)\n if os.path.isfile(targetFile):\n os.remove(targetFile)\n\n # dump states informations\n for state in self.automata.states:\n statePath = os.path.join(automataDir,\"state\"+str(state.ID)+\".json\")\n data = {'stateID':state.ID}\n data[\"stateType\"] = state.Type\n data[\"stateXMLs\"] = state.XMLs\n data[\"stateMoves\"] = state.Moves\n data[\"stateParent\"] = state.parent\n with open(statePath, \"w\") as outfile:\n json.dump(data, outfile, indent=4)\n\n # dump edges informations\n edgePath = os.path.join(automataDir,\"edges.json\")\n edges = list(self.automata.edges)\n data = {'edges':edges}\n with open(edgePath, \"w\") as outfile:\n json.dump(data, outfile, indent=4)\n\n '''\n This function can load only one already existed automata as a current automata.\n '''\n def loadAutomata(self,automataDir,algo):\n for root,dirnames,filenames in os.walk(automataDir):\n for file in filenames:\n if file.startswith(\"state\"):\n stateFile = os.path.join(root,file)\n state_json = open(stateFile)\n stateData = json.load(state_json)\n state_json.close()\n\n # new a state\n if stateData[\"stateType\"] == \"View\":\n # get first XML in stateXMLs\n # ex: David-exp1\\net.mandaria.tippytipper\\version1\\abstraction1\\session9\\traceSet\\1\\uidump0.xml\n xml = self.getXMLbyStepID(stateData[\"stateXMLs\"][0],algo)\n\n newState = self.automata._Automata__generateState(xml)\n\n if stateData[\"stateType\"] == \"Action\":\n # get first Move in stateMoves\n # ex: David-exp1\\net.mandaria.tippytipper\\version1\\abstraction1\\session9\\traceSet\\1\\move1.json\n move = self.getMovebyStepID(stateData[\"stateMoves\"][0],algo)\n\n newState = self.automata._Automata__generateState(move)\n\n newState.ID = stateData[\"stateID\"]\n newState.Type = stateData[\"stateType\"]\n newState.Moves = stateData[\"stateMoves\"]\n newState.XMLs = stateData[\"stateXMLs\"]\n newState.parent = stateData[\"stateParent\"]\n\n # add new state into automata\n self.automata.states.append(newState)\n\n # update the stateNum of the automata\n self.automata.stateNum += 1\n\n elif file.startswith(\"edges\"):\n edgeFile = os.path.join(root,file)\n edge_json = open(edgeFile)\n edgeData = json.load(edge_json)\n edge_json.close()\n\n self.automata.edges = edgeData[\"edges\"]\n\n #return a duplicated automata\n return deepcopy(self.automata)\n\n '''\n This function can be called repeatedly, keep adding new state in current automata\n '''\n def loadSession(self,sessionDir):\n # sessionDir is 
'David\\\\net.mandaria.tippytipper\\\\version1\\\\abstraction1\\\\session1\\\\traceSet'\n sessionNum = int( os.path.split(os.path.split(sessionDir)[0])[1].replace(\"session\",\"\"))\n abstractionNum = int( os.path.split(os.path.split(os.path.split(sessionDir)[0])[0])[1].replace(\"abstraction\",\"\"))\n versionNum = int( os.path.split(os.path.split(os.path.split(os.path.split(sessionDir)[0])[0])[0])[1].replace(\"version\",\"\"))\n\n for root,dirnames,filenames in os.walk(sessionDir):\n if dirnames != []:\n # each session enter once\n # dirnames = [1,10,2,3,4,5,6,7,8,9]\n # totalTrace = [1,2,3,4,5,6,7,8,9,10]\n totalTrace = sorted([int(x) for x in dirnames])\n for traceNum in totalTrace:\n step = 0\n while os.path.isfile(os.path.join(root,str(traceNum),\"uidump\"+str(step)+\".xml\")):\n print(\"trace = \",str(traceNum),\"step = \",str(step))\n stepID = (versionNum,abstractionNum,sessionNum,int(traceNum),step)\n xml = os.path.join(root,str(traceNum),\"uidump\"+str(step)+\".xml\")\n if step == 0 : # handle the initial state in each session\n self.updateAutomataByRestart(xml,stepID)\n else:\n move = os.path.join(root,str(traceNum),\"move\"+str(step)+\".json\")\n action = self.automata._Automata__generateAction(move)\n if action[0] == \"restart\":\n self.updateAutomataByRestart(xml,stepID)\n else:\n self.updateAutomataByAction(action,stepID)\n self.updateAutomataByXML(xml,stepID)\n step += 1\n\n def getXMLbyStepID(self,stepID,algo):\n if algo == \"SELabeler\":\n versionNum,abstractionNum,sessionNum,traceNum,stepNum = stepID\n xml = os.path.join(self.parentPath,\"version\"+str(versionNum),\"abstraction\"+str(abstractionNum),\\\n \"labeledTrace\",\"traceSet\",str(traceNum),\"uidump\"+str(stepNum)+\".xml\")\n else:\n versionNum,abstractionNum,sessionNum,traceNum,stepNum = stepID\n xml = os.path.join(self.parentPath,\"version\"+str(versionNum),\"abstraction\"+str(abstractionNum),\\\n \"session\"+str(sessionNum),\"traceSet\",str(traceNum),\"uidump\"+str(stepNum)+\".xml\")\n return xml\n\n def getMovebyStepID(self,stepID,algo):\n if algo == \"SELabeler\":\n versionNum,abstractionNum,sessionNum,traceNum,stepNum = stepID\n move = os.path.join(self.parentPath,\"version\"+str(versionNum),\"abstraction\"+str(abstractionNum),\\\n \"labeledTrace\",\"traceSet\",str(traceNum),\"move\"+str(stepNum)+\".json\")\n else:\n versionNum,abstractionNum,sessionNum,traceNum,stepNum = stepID\n move = os.path.join(self.parentPath,\"version\"+str(versionNum),\"abstraction\"+str(abstractionNum),\\\n \"session\"+str(sessionNum),\"traceSet\",str(traceNum),\"move\"+str(stepNum)+\".json\")\n return move\n\n def updateAutomataByRestart(self,xml,stepID,memInfo):\n self.edgeState = None\n self.currentState = self.automata._Automata__generateState(xml)\n tempViewList = self.currentState.viewList\n\n self.currentStateID = self.automata._Automata__addState(self.currentState,stepID) # view state\n for state in self.automata.states:\n if state.ID == self.currentStateID:\n self.currentState = state\n\n # 1. update current viewList\n self.currentState.viewList = tempViewList\n\n # 2. 
get memory information\n self.currentState.totalMemory = memInfo.pssTotal\n\n self.currentState.ID = self.currentStateID\n self.trace.append(self.currentState)\n\n return self.currentStateID\n\n\n def updateAutomataByXML(self,xml,stepID,memInfo):\n self.currentState = self.automata._Automata__generateState(xml)\n tempViewList = self.currentState.viewList\n\n if self.edgeState == None: # the beginning state\n self.currentStateID = self.automata._Automata__addState(self.currentState,stepID) # view state\n self.currentState.ID = self.currentStateID\n\n # get memory information\n self.currentState.totalMemory = memInfo.pssTotal\n\n self.trace.append(self.currentState)\n\n else: # the rest states, need to update edgeState's fromState and toState\n self.prevStateID = self.currentStateID\n self.currentStateID = self.automata._Automata__addState(self.currentState,stepID) # view state\n for state in self.automata.states:\n if state.ID == self.currentStateID:\n self.currentState = state\n\n # 1. update current viewList\n self.currentState.viewList = tempViewList\n\n # 2. get memory information\n self.currentState.totalMemory = memInfo.pssTotal\n\n self.automata._Automata__addEdge(self.prevStateID,self.currentStateID)\n self.trace.append(self.currentState)\n\n return self.currentStateID\n\n #print(\"shortest path = \"+str(self.automata.getShortestPath(self.automata.initialState.ID,self.currentState.ID)))\n\n def updateAutomataByAction(self,action,stepID):\n self.edgeState = State()\n self.edgeState.Type = \"Action\"\n self.edgeState.action = action\n self.prevStateID = self.currentStateID\n self.edgeState.parent = self.prevStateID\n self.currentStateID = self.automata._Automata__addState(self.edgeState,stepID) # action state\n self.edgeState.ID = self.currentStateID\n print(\" add edge prev = \",self.prevStateID,\" curr = \",self.currentStateID)\n self.automata._Automata__addEdge(self.prevStateID,self.currentStateID)\n self.trace.append(self.edgeState)\n\n return self.currentStateID\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"behappycc/VTAAT2015","sub_path":"code/AutomataManager.py","file_name":"AutomataManager.py","file_ext":"py","file_size_in_byte":10380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
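The stepID tuples threaded through the code above encode (version, abstraction, session, trace, step), and getXMLbyStepID simply rebuilds the on-disk path from them. A worked example with a hypothetical parent path, following the path scheme shown in the module's own comments:

import os

parentPath = "David-exp1/net.mandaria.tippytipper"  # hypothetical root
stepID = (1, 1, 9, 1, 0)  # (version, abstraction, session, trace, step)
versionNum, abstractionNum, sessionNum, traceNum, stepNum = stepID
xml = os.path.join(parentPath, "version" + str(versionNum),
                   "abstraction" + str(abstractionNum),
                   "session" + str(sessionNum), "traceSet", str(traceNum),
                   "uidump" + str(stepNum) + ".xml")
print(xml)  # .../version1/abstraction1/session9/traceSet/1/uidump0.xml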
+{"seq_id":"22778090103","text":"from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nimport re\n\ndef chunk_text(text, chunk_size=512):\n \"\"\"Split the text into chunks of size `chunk_size`.\"\"\"\n words = text.split(' ')\n chunks = [' '.join(words[i:i+chunk_size]) for i in range(0, len(words), chunk_size)]\n return chunks\n\ndef remove_duplicates(text):\n # Split the text into sentences\n sentences = re.split('(?<=[.!?]) +', text)\n \n # Compute TF-IDF vectors for each sentence\n vectorizer = TfidfVectorizer().fit_transform(sentences)\n \n # Compute pairwise cosine similarity between the sentences\n similarity_matrix = cosine_similarity(vectorizer)\n \n # Find pairs of sentences where the similarity score is above the threshold\n similar_pairs = np.argwhere(similarity_matrix > 0.8)\n \n # Get the indices of sentences to remove\n indices_to_remove = [pair[1] for pair in similar_pairs if pair[0] != pair[1]]\n \n # Remove similar sentences\n unique_sentences = [sentence for i, sentence in enumerate(sentences) if i not in indices_to_remove]\n \n return ' '.join(unique_sentences)\n \nclass TextSummarization:\n tokenizer = AutoTokenizer.from_pretrained('t5-base', model_max_length=512)\n model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', return_dict=True)\n\n def summarize_text_t5(self, text : str):\n chunks = chunk_text(text)\n summaries = []\n\n for chunk in chunks:\n inputs = self.tokenizer.encode(\"summarize: \" + chunk,\n return_tensors='pt',\n max_length=512,\n truncation=True)\n\n summary_ids = self.model.generate(inputs, max_length=512, min_length=80, length_penalty=5., num_beams=2)\n summary = self.tokenizer.decode(summary_ids[0])\n\n # Remove the special tokens\n summary = summary.replace('', '').replace('', '').strip()\n\n summaries.append(summary)\n\n summaries_nodup = remove_duplicates(' '.join(summaries))\n\n # Check if the last sentence ends with . ! or ?\n sentences = re.split('(?<=[.!?]) +', summaries_nodup)\n\n # capitalize sentences\n sentences = [sentence.capitalize() for sentence in sentences]\n\n if sentences[-1][-1] not in ['.', '!', '?']:\n final_summary = ' '.join(sentences[:-1])\n else:\n final_summary = ' '.join(sentences)\n\n return final_summary\n \nif __name__ == \"__main__\":\n import argparse\n\n # Create the parser\n parser = argparse.ArgumentParser(description='Summarize a text.')\n\n # Add the arguments\n parser.add_argument('text', type=str, help='Text to summarize.')\n\n # Parse the arguments\n args = parser.parse_args()\n\n # handle arguments\n if len(args.text) < 80:\n raise Exception(\"Text's length must be longer or equal to 80 characters.\")\n\n summarizer = TextSummarization()\n\n print(summarizer.summarize_text_t5(args.text))","repo_name":"leo-cb/wikipedia-article-summarization","sub_path":"summarize_t5.py","file_name":"summarize_t5.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"33909560150","text":"from flask import Flask\nfrom ext import *\nfrom routes import bp\n\ndef create_app(config=\"settings\") -> Flask:\n\n app = Flask(__name__)\n\n app.config.from_object(config)\n init_extensions(app)\n init_routes(app)\n\n return app\n\ndef init_routes(app: Flask):\n\n app.register_blueprint(bp)\n\ndef init_extensions(app):\n\n db.init_app(app)\n db.app = app\n ma.init_app(app)\n migrate.init_app(app, db)\n","repo_name":"fastfists/DesignChallenges","sub_path":"Devify/server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"36711351252","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport sklearn.svm\nimport sklearn.ensemble\nimport sklearn.model_selection\nimport sklearn.feature_selection\nimport tqdm\nimport multiprocessing\nimport functools\nimport h5py\nimport datetime\n\nfrom utils import readSessions\nfrom utils.cachedDataFrame import cachedDataFrame\n\n#The bins that the decoder needs to distinguish\nselectedLabels = [\"mC2L-\", \"mC2R-\", \"mL2C-\", \"mR2C-\", \"pC2L-\", \"pC2R-\",\n \"dL2C-\", \"pL2Co\", \"pL2Cr\", \"dR2C-\", \"pR2Co\", \"pR2Cr\"]\n\ndef _crossValScore(X, Y):\n svm = sklearn.svm.SVC(kernel=\"linear\", cache_size=2000)\n trainX, testX, trainY, testY = sklearn.model_selection.train_test_split(X, Y, test_size=0.2, stratify=Y)\n svm.fit(trainX, trainY)\n predicted = svm.predict(testX)\n accuracy = np.mean(predicted == testY)\n return accuracy\n\ndef _testRealAndShuffled(i, realX, realY, shuffledX, shuffledY, nNeurons):\n np.random.seed(np.random.randint(1000000)+i) #Seed each process differently\n neurons = np.random.choice(realX.shape[1], nNeurons, replace=False)\n realScore = _crossValScore(realX[neurons], realY)\n shuffledScore = _crossValScore(shuffledX[neurons], shuffledY)\n return (i, realScore, shuffledScore)\n\ndef _prepareTrials(deconv, lfa):\n avgSig = deconv.groupby(lfa.actionNo).mean()\n labels = lfa.groupby(\"actionNo\").label.first()\n validTrials = np.logical_and(avgSig.notna().all(axis=1), labels.isin(selectedLabels))\n X = avgSig[validTrials]\n Y = labels[validTrials]\n return X, Y\n\ndef _testSameAndNextDay(i, realX, realY, shuffledX, shuffledY, nextX, nextY):\n np.random.seed(np.random.randint(1000000)+i)\n trainX, testX, trainY, testY = sklearn.model_selection.train_test_split(realX, realY,\n test_size=0.2, stratify=realY)\n svm = sklearn.svm.SVC(kernel=\"linear\").fit(trainX, trainY)\n fromAcc = np.mean(svm.predict(testX) == testY)\n toAcc = np.mean(svm.predict(nextX) == nextY)\n \n trainX, testX, trainY, testY = sklearn.model_selection.train_test_split(shuffledX, shuffledY,\n test_size=0.2, stratify=shuffledY)\n svm = sklearn.svm.SVC(kernel=\"linear\").fit(trainX, trainY)\n shuffledFromAcc = np.mean(svm.predict(testX) == testY)\n shuffledToAcc = np.mean(svm.predict(nextX) == nextY)\n \n return (i, fromAcc, toAcc, shuffledFromAcc, shuffledToAcc)\n\ndef _dateDiff(fromDate, toDate):\n fromDate = datetime.datetime.strptime(fromDate, \"%y%m%d\")\n toDate = datetime.datetime.strptime(toDate, \"%y%m%d\")\n return (toDate-fromDate).days\n\n@cachedDataFrame(\"decodeWithIncreasingNumberOfNeurons.pkl\")\ndef decodeWithIncreasingNumberOfNeurons(dataFile):\n nShufflesPerNeuronNum = 10\n with multiprocessing.Pool(5) as pool:\n res = []\n for sess in readSessions.findSessions(dataFile, task=\"2choice\"):\n deconv = sess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n lfa = sess.labelFrameActions(reward=\"sidePorts\")\n if len(deconv) != len(lfa): continue\n shuffledLfa = sess.shuffleFrameLabels(switch=False)[0]\n realX, realY = _prepareTrials(deconv, lfa)\n shuffledX, shuffledY = _prepareTrials(deconv, shuffledLfa)\n with tqdm.tqdm(total=int(realX.shape[1]/5)*nShufflesPerNeuronNum, desc=str(sess)) as t:\n for nNeurons in range(5, realX.shape[1], 5):\n fcn = functools.partial(_testRealAndShuffled, realX=realX, realY=realY,\n shuffledX=shuffledX, shuffledY=shuffledY, nNeurons=nNeurons)\n for scores in pool.imap(fcn, range(nShufflesPerNeuronNum)):\n res.append((str(sess), 
sess.meta.task, nNeurons)+scores)\n t.update(1)\n return pd.DataFrame(res, columns=[\"session\", \"task\", \"nNeurons\", \"i\", \"realAccuracy\", \"shuffledAccuracy\"])\n\n#def _calcMI(X, Y):\n# mi = list()\n# actionsAsInts = Y.astype(\"category\").cat.codes.values.reshape(-1, 1)\n# for i in range(X.shape[1]):\n# mi.append(sklearn.feature_selection.mutual_info_regression(actionsAsInts,\n# X[i],\n# discrete_features=True,\n# n_neighbors=3)[0])\n# return np.array(mi)\n\n\ndef _launchCrossValScore(i, X, Y):\n np.random.seed(np.random.randint(1000000)+i)\n return i, _crossValScore(X, Y)\n\n@cachedDataFrame(\"decodeSortedByMI.pkl\")\ndef decodeWithSortedNeurons(dataFile):\n nShufflesPerNeuronNum = 10\n with multiprocessing.Pool(5) as pool:\n res = []\n for sess in readSessions.findSessions(dataFile, task=\"2choice\"):\n deconv = sess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n lfa = sess.labelFrameActions(reward=\"sidePorts\")\n if len(deconv) != len(lfa): continue\n X, Y = _prepareTrials(deconv, lfa)\n mutualInformation = sklearn.feature_selection.mutual_info_classif(X, Y)\n ascending = np.argsort(mutualInformation)\n descending = ascending[::-1]\n N = min(201, X.shape[1])\n with tqdm.tqdm(total=int((N-1)/5)*nShufflesPerNeuronNum*2, desc=str(sess)) as t:\n for nNeurons in range(5, N, 5):\n for ordering in (\"ascending\", \"descending\"):\n selectedNeurons = ascending[:nNeurons] if ordering==\"ascending\" else descending[:nNeurons]\n fcn = functools.partial(_launchCrossValScore, X=X[selectedNeurons], Y=Y)\n for i, score in pool.imap(fcn, range(nShufflesPerNeuronNum)):\n res.append((str(sess), sess.meta.task, nNeurons, i, ordering, score))\n t.update(1)\n return pd.DataFrame(res, columns=[\"session\", \"task\", \"nNeurons\", \"i\", \"ordering\", \"accuracy\"])\n\n@cachedDataFrame(\"decodeConfusion.pkl\")\ndef decodingConfusion(dataFile):\n confMats = []\n for sess in readSessions.findSessions(dataFile, task=\"2choice\"):\n deconv = sess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n lfa = sess.labelFrameActions(reward=\"sidePorts\")\n if len(deconv) != len(lfa): continue\n realX, realY = _prepareTrials(deconv, lfa)\n for i in tqdm.trange(5, desc=str(sess)):\n trainX, testX, trainY, testY = sklearn.model_selection.train_test_split(realX, realY,\n test_size=0.2, stratify=realY)\n svm = sklearn.svm.SVC(kernel=\"linear\").fit(trainX, trainY)\n pred = svm.predict(testX)\n m = sklearn.metrics.confusion_matrix(testY, pred)\n m = pd.DataFrame(m, index=svm.classes_, columns=svm.classes_)\n m = m.rename_axis(index=\"true\", columns=\"predicted\").unstack()\n m = m.rename(\"occurences\").reset_index()\n m[\"sess\"] = str(sess)\n m[\"i\"] = i\n m[\"nNeurons\"] = deconv.shape[1]\n confMats.append(m)\n return pd.concat(confMats)\n\n@cachedDataFrame(\"decodingAcrossDays.pkl\")\ndef decodingAcrossDays(dataFile, alignmentFile):\n alignmentStore = h5py.File(alignmentFile, \"r\")\n with multiprocessing.Pool(5) as pool:\n acrossDaysResult = []\n for genotype in alignmentStore[\"data\"]:\n for animal in alignmentStore[\"data/{}\".format(genotype)]:\n for fromDate in alignmentStore[\"data/{}/{}\".format(genotype, animal)]:\n fromSess = next(readSessions.findSessions(dataFile, animal=animal, date=fromDate))\n fromTask = fromSess.meta.task\n if fromTask == \"openField\": continue\n fromDeconv = fromSess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n fromLfa = fromSess.labelFrameActions(reward=\"sidePorts\")\n if len(fromDeconv) != len(fromLfa): continue\n suffledLfa = 
fromSess.shuffleFrameLabels(switch=False)[0]\n fromX, fromY = _prepareTrials(fromDeconv, fromLfa)\n shuffledX, shuffledY = _prepareTrials(fromDeconv, suffledLfa)\n for toDate in alignmentStore[\"data/{}/{}/{}\".format(genotype, animal, fromDate)]:\n if toDate <= fromDate: continue\n match = alignmentStore[\"data/{}/{}/{}/{}/match\".format(genotype, animal, fromDate, toDate)][()]\n\n toSess = next(readSessions.findSessions(dataFile, animal=animal, date=toDate))\n toTask = toSess.meta.task\n if toTask == \"openField\": continue\n toDeconv = toSess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n toLfa = toSess.labelFrameActions(reward=\"sidePorts\")\n if len(toDeconv) != len(toLfa): continue\n\n if _dateDiff(fromDate, toDate) <= 0: continue\n toX, toY = _prepareTrials(toDeconv, toLfa)\n\n fcn = functools.partial(_testSameAndNextDay, realX=fromX[match[:,0]], realY=fromY,\n shuffledX=shuffledX[match[:,0]], shuffledY=shuffledY,\n nextX=toX[match[:,1]], nextY=toY)\n for scores in tqdm.tqdm(pool.imap(fcn, range(5)), total=5, desc=\"{} to {}\".format(fromSess, toDate)):\n acrossDaysResult.append((genotype, animal, fromDate, toDate,\n fromTask, toTask, match.shape[0])+scores)\n columns=[\"genotype\", \"animal\", \"fromDate\", \"toDate\", \"fromTask\",\n \"toTask\", \"nNeurons\", \"i\", \"sameDayScore\", \"nextDayScore\",\n \"sameDayShuffled\", \"nextDayShuffled\"]\n return pd.DataFrame(acrossDaysResult, columns=columns)\n\n\ndef decodeMovementProgress(dataFile, label=\"mR2C-\"):\n @cachedDataFrame(\"decodeMovementProgress_{}.pkl\".format(label[:4]))\n def cachedVersion():\n return _decodeMovementProgress(dataFile, label)\n return cachedVersion()\n\ndef _decodeMovementProgress(dataFile, label):\n allSess = []\n for sess in readSessions.findSessions(dataFile, task=\"2choice\"):\n for shuffle in (False, True):\n if shuffle:\n lfa = sess.shuffleFrameLabels(switch=False)[0]\n else:\n lfa = sess.labelFrameActions(reward=\"sidePorts\")\n deconv = sess.readDeconvolvedTraces(rScore=True).reset_index(drop=True)\n if len(lfa) != len(deconv): continue\n if deconv.isna().any().any(): continue #TODO: Fix this\n X = deconv[lfa.label==label]\n Y = lfa.actionProgress[lfa.label==label]\n\n actionNos = lfa.actionNo[lfa.label==label]\n\n XactionNo = X.set_index(actionNos).sort_index()\n YactionNo = pd.Series(Y.values, index=XactionNo.index)\n\n splitter = sklearn.model_selection.KFold(5, shuffle=True)\n uniqueActionNos = actionNos.unique()\n\n for trainInd, testInd in tqdm.tqdm(splitter.split(uniqueActionNos), total=5, desc=str(sess)):\n trainActionNos = uniqueActionNos[trainInd]\n testActionNos = uniqueActionNos[testInd]\n trainX = XactionNo.loc[trainActionNos]\n trainY = YactionNo.loc[trainActionNos]\n testX = XactionNo.loc[testActionNos]\n testY = YactionNo.loc[testActionNos]\n\n classifier = sklearn.linear_model.LinearRegression()\n classifier.fit(trainX, trainY)\n pred = classifier.predict(testX)\n allSess.append(pd.DataFrame({'true': testY, 'predicted': pred, 'sess': str(sess),\n 'nNeurons': X.shape[1], 'nTrials': len(uniqueActionNos),\n 'shuffle': shuffle, 'label': label}))\n return pd.concat(allSess)\n\ndef get_centers(rois):\n # find pixel of maximum intensity in each mask; use as neuron center\n centers = np.array(np.unravel_index(np.array([np.argmax(roi) for roi in rois]),\n rois.shape[1:]))\n centers = centers[::-1].T\n 
return(centers)","repo_name":"wegmor/striatum-2choice","sub_path":"analysisDecoding.py","file_name":"analysisDecoding.py","file_ext":"py","file_size_in_byte":12522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
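At its core, _crossValScore is one stratified train/test split scored with a linear SVM; here is the same recipe run standalone on synthetic data (labels drawn from selectedLabels, features pure noise, so accuracy should hover near chance):

import numpy as np
import sklearn.svm
import sklearn.model_selection

rng = np.random.default_rng(0)
X = rng.normal(size=(120, 10))                  # 120 trials x 10 neurons (synthetic)
Y = np.repeat(["mC2L-", "mC2R-", "mL2C-"], 40)  # three of the selected labels

trainX, testX, trainY, testY = sklearn.model_selection.train_test_split(
    X, Y, test_size=0.2, stratify=Y)
svm = sklearn.svm.SVC(kernel="linear").fit(trainX, trainY)
accuracy = np.mean(svm.predict(testX) == testY)
print(accuracy)  # ~chance (0.33) here, since the features carry no label information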
+{"seq_id":"75061651358","text":"import discord\r\nimport asyncio\r\nimport typing\r\nimport itertools\r\nfrom collections import Counter\r\nimport random\r\nimport re\r\nimport errors\r\nimport datetime\r\nfrom discord.ext import commands, menus\r\nfrom discord.ext.commands.cooldowns import BucketType\r\nfrom discord.ext.menus.views import ViewMenuPages\r\n\r\ndef setup(client):\r\n client.add_cog(Mod(client))\r\n\r\nclass ServerBansEmbedPage(menus.ListPageSource):\r\n def __init__(self, data, guild):\r\n self.data = data\r\n self.guild = guild\r\n super().__init__(data, per_page=20)\r\n \r\n async def format_page(self, menu, entries):\r\n offset = menu.current_page * self.per_page\r\n colors = [0x910023, 0xA523FF]\r\n color = random.choice(colors)\r\n bans = await self.guild.bans()\r\n embed = discord.Embed(title=f\"{self.guild}'s bans ({len(bans)})\", description=\"\\n\".join(f'{i+1}. {v}' for i, v in enumerate(entries, start=offset)), timestamp=discord.utils.utcnow(), color=color)\r\n return embed\r\n\r\nclass Mod(commands.Cog):\r\n \"<:staff:858326975869485077> | Moderation commands\"\r\n def __init__(self, client):\r\n self.client = client\r\n \r\n @staticmethod\r\n async def do_removal(ctx: commands.Context, limit: int, predicate, *, before=None, after=None, bulk: bool = True):\r\n if limit > 2000:\r\n return await ctx.send(f'Too many messages to search given ({limit}/2000)')\r\n\r\n async with ctx.typing():\r\n if before is None:\r\n before = ctx.message\r\n else:\r\n before = discord.Object(id=before)\r\n\r\n if after is not None:\r\n after = discord.Object(id=after)\r\n\r\n try:\r\n deleted = await ctx.channel.purge(limit=limit, before=before, after=after, check=predicate, bulk=bulk)\r\n except discord.Forbidden:\r\n return await ctx.send('I do not have permissions to delete messages.')\r\n except discord.HTTPException as e:\r\n return await ctx.send(f'Error: {e} (try a smaller search?)')\r\n\r\n spammers = Counter(m.author.display_name for m in deleted)\r\n deleted = len(deleted)\r\n messages = [f'{deleted} message{\" was\" if deleted == 1 else \"s were\"} removed.']\r\n if deleted:\r\n messages.append('')\r\n spammers = sorted(spammers.items(), key=lambda t: t[1], reverse=True)\r\n messages.extend(f'**{name}**: {count}' for name, count in spammers)\r\n\r\n to_send = '\\n'.join(messages)\r\n\r\n if len(to_send) > 2000:\r\n await ctx.send(f'Successfully removed {deleted} messages.', delete_after=10, reply=False)\r\n else:\r\n await ctx.send(to_send, delete_after=10, reply=False)\r\n \r\n @commands.command(help=\"Cleans up the bots messages.\")\r\n async def cleanup(self, ctx, amount : int=25):\r\n if amount > 25:\r\n \r\n if not ctx.channel.permissions_for(ctx.author).manage_messages:\r\n return await ctx.send(\"You must have `manage_messages` permission to perform a search greater than 25\")\r\n \r\n if not ctx.channel.permissions_for(ctx.me).manage_messages:\r\n return await ctx.send(\"I need the `manage_messages` permission to perform a search greater than 25\")\r\n\r\n if ctx.channel.permissions_for(ctx.me).manage_messages:\r\n prefix = tuple(await self.client.get_pre(self.client, ctx.message))\r\n bulk = True\r\n\r\n def check(msg):\r\n return msg.author == ctx.me or msg.content.startswith(prefix)\r\n else:\r\n bulk = False\r\n\r\n def check(msg):\r\n return msg.author == ctx.me\r\n\r\n await self.do_removal(ctx, predicate=check, bulk=bulk, limit=amount)\r\n\r\n @commands.command(help=\"Gets the current guild's list of bans\")\r\n # 
@commands.has_permissions(ban_members=True)\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, ban_members=True)\r\n @commands.cooldown(1, 5, commands.BucketType.user)\r\n async def bans(self, ctx, id : int=None):\r\n if id:\r\n guild = self.client.get_guild(id)\r\n if not guild:\r\n return await ctx.send(\"I couldn't find that server. Make sure the ID you entered was correct.\")\r\n else:\r\n guild = ctx.guild\r\n \r\n guildBans = await guild.bans()\r\n bans = []\r\n \r\n if not guildBans:\r\n raise errors.NoBannedMembers\r\n \r\n for ban in guildBans:\r\n \r\n bans.append(f\"[{ban.user}](https://discord.com/users/{ban.user.id} \\'Name: {ban.user.name}\\nID: {ban.user.id}\\nDiscriminator: #{ban.user.discriminator}') | [Hover for reason](https://discord.com/ '{ban.reason}')\")\r\n \r\n paginator = ViewMenuPages(source=ServerBansEmbedPage(bans,guild), clear_reactions_after=True)\r\n page = await paginator._source.get_page(0)\r\n kwargs = await paginator._get_kwargs_from_page(page)\r\n if paginator.build_view():\r\n paginator.message = await ctx.send(embed=kwargs['embed'],view = paginator.build_view())\r\n else:\r\n paginator.message = await ctx.send(embed=kwargs['embed'])\r\n await paginator.start(ctx)\r\n\r\n @commands.command()\r\n async def punish(self, ctx):\r\n valid_punishments = ['kick', 'ban']\r\n\r\n def check(m: discord.Message): # m = discord.Message.\r\n return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id\r\n\r\n message = await ctx.send(\"What do you want the punishment to be? (kick/ban)\")\r\n\r\n try:\r\n punishment = await self.client.wait_for(event='message', timeout=15, check=check)\r\n except asyncio.TimeoutError:\r\n await message.delete()\r\n await ctx.message.delete(delay=5.0)\r\n return await ctx.send(\"It's been over 15 seconds, please try again by doing `-punish`\", delete_after=5.0)\r\n else:\r\n await ctx.send(\"Who do you want me to punish?\")\r\n\r\n try:\r\n member = await self.client.wait_for(event='message', timeout=15, check=check)\r\n except asyncio.TimeoutError:\r\n await message.delete()\r\n await ctx.message.delete(delay=5.0)\r\n return await ctx.send(\"It's been over 15 seconds, please try again by doing `-punish`\", delete_after=5.0)\r\n else:\r\n await ctx.send(\"What's the reason for this punishment?\")\r\n\r\n try:\r\n reason = await self.client.wait_for(event='message', timeout=15, check=check)\r\n except asyncio.TimeoutError:\r\n await message.delete()\r\n await ctx.message.delete(delay=5.0)\r\n return await ctx.send(\"It's been over 15 seconds, please try again by doing `-punish`\", delete_after=5.0)\r\n else:\r\n await ctx.send(f\"{punishment.content} {member.content} {reason.content}\")\r\n\r\n punishment = punishment.content\r\n member = commands.MemberConverter().convert(ctx, member.content)\r\n reason = reason.content\r\n\r\n if punishment not in valid_punishments:\r\n return await ctx.send(f\"That's not a valid punishment. Please try again by doing `-punish`.\")\r\n\r\n if reason > 500:\r\n return await ctx.send(f\"Your reason has exceeded the 500-character limit. 
Please try again by doing `-punish`\")\r\n\r\n await member.send(f'punishment: {punishment}\\nmember: {member}\\n{reason}')\r\n\r\n @commands.command(help=\"Announces a message in a specified channel\")\r\n @commands.check_any(commands.has_permissions(manage_messages=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True)\r\n async def announce(self, ctx, channel : discord.TextChannel, *, message):\r\n channelid = channel.id\r\n channel = self.client.get_channel(channelid)\r\n\r\n try:\r\n await ctx.message.delete()\r\n except:\r\n pass\r\n\r\n await channel.send(message)\r\n\r\n @commands.command(help=\"Bans the person you mention\")\r\n @commands.check_any(commands.has_permissions(ban_members=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, ban_members=True)\r\n async def ban(self, ctx, member : discord.Member, *, reason=None):\r\n if member.id == ctx.author.id:\r\n return await ctx.repl(\"You can't ban yourself!\")\r\n\r\n if isinstance(member, discord.Member):\r\n if member.top_role >= ctx.me.top_role:\r\n return await ctx.send(ctx, \"I cannot ban that member. Try moving my role to the top.\")\r\n\r\n if reason == None or reason > 500:\r\n reason = \"Reason was not provided or it exceeded the 500-character limit.\"\r\n await ctx.send(f\"Successfully banned `{member}` for `{reason}`\")\r\n\r\n try:\r\n await member.message(f\"You have been banned from {ctx.guild}\\nReason: {reason}\")\r\n await member.ban(reason=reason)\r\n\r\n except:\r\n return await member.ban(reason=reason)\r\n\r\n @commands.command(help=\"Kicks the person you mention\")\r\n @commands.check_any(commands.has_permissions(kick_members=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, kick_members=True)\r\n async def kick(self, ctx, member : discord.Member, *, reason=None):\r\n if member.id == ctx.author.id:\r\n return await ctx.repl(\"You can't kick yourself!\")\r\n\r\n if isinstance(member, discord.Member):\r\n if member.top_role >= ctx.me.top_role:\r\n return await ctx.send(ctx, \"I cannot kick that member. 
Try moving my role to the top.\")\r\n\r\n if reason == None or reason > 500:\r\n reason = \"Reason was not provided or it exceeded the 500-character limit.\"\r\n await ctx.send(f\"Successfully kicked `{member}` for `{reason}`\")\r\n\r\n try:\r\n await member.message(f\"You have been kicked from {ctx.guild}\\nReason: {reason}\")\r\n await member.kick(reason=reason)\r\n\r\n except:\r\n return await member.kick(reason=reason)\r\n\r\n @commands.command(help=\"Bulk deletes a certain amount of messages\", aliases=['cls', 'clr'])\r\n @commands.check_any(commands.has_permissions(manage_messages=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_messages=True)\r\n async def purge(self, ctx, amount : int, channel : discord.TextChannel=None):\r\n if amount > 1000:\r\n return await ctx.send(\"Amount cannot be more than 1000.\")\r\n\r\n if channel == None:\r\n channel = ctx.channel\r\n\r\n text = 'messages'\r\n if amount == 1:\r\n text = 'message'\r\n\r\n await ctx.message.delete()\r\n await channel.purge(limit=amount)\r\n await ctx.send(f\"Successfully deleted `{amount}` {text} in {channel.mention}.\", delete_after=5.0)\r\n\r\n\r\n @commands.command(help=\"Changes the slowmode of a channel\", aliases=['sm', 'slowm', 'slowness'])\r\n @commands.check_any(commands.has_permissions(manage_channels=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_channels=True)\r\n async def slowmode(self, ctx, number : int, channel : discord.TextChannel=None):\r\n if number > 21600:\r\n return await ctx.send(\"Number cannot be more than 21600.\")\r\n\r\n if channel == None:\r\n channel = ctx.channel\r\n\r\n await channel.edit(slowmode_delay=number, reason=f'Changed by `{ctx.author}` using command')\r\n await ctx.send(f\"Successfully changed the slowmode of {channel.mention} to `{number}`.\", delete_after=5.0)\r\n await ctx.message.delete()\r\n\r\n @commands.command(help=\"Creates a new role\", aliases=['create_role', 'addrole', 'add_role',' newrole', 'new_role'])\r\n @commands.check_any(commands.has_permissions(manage_roles=True), commands.is_owner())\r\n @commands.bot_has_permissions(send_messages=True, embed_links=True, manage_roles=True)\r\n async def createrole(self, ctx, color : discord.Color, *, name):\r\n server = ctx.guild\r\n\r\n await server.create_role(name=name, color=color, reason=f'Made by `{ctx.author}` using command')\r\n await ctx.send(f\"Successfully created a role called `{name}` with the color being `{color}`.\")\r\n","repo_name":"someone782/Stealth-Bot","sub_path":"cogs/mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":12674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
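do_removal accepts any message predicate, so new purge variants stay short; a sketch of a hypothetical extra command built on the same pattern as cleanup above:

@commands.command(help="Removes messages that contain embeds.")
@commands.has_permissions(manage_messages=True)
async def embeds(self, ctx, amount: int = 25):
    # Same delegation as cleanup(): define a predicate, let do_removal
    # handle deletion, error reporting, and the per-author summary.
    def check(msg):
        return len(msg.embeds) > 0

    await self.do_removal(ctx, amount, check)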
+{"seq_id":"3947062526","text":"import asyncio\nimport json\nfrom datetime import datetime\nimport socket\nimport argparse\nfrom time import time\n\nimport websockets\n\nfrom vmsfilter.path_store import PathStorage\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', dest='hostile_areas', help='path for vector file with hostile areas polygons',\n required=True)\n parser.add_argument('-b', dest='habitat_areas', help='path for vector file with habitat areas polygons',\n required=True)\n parser.add_argument('-g', dest='ignore_areas', help='path for vector file with ignore areas polygons',\n required=True)\n parser.add_argument('-e', dest='eyes_ws_path', help='websocket path to the eyes server (without the /vms/... path)',\n required=True)\n parser.add_argument('-t', dest='targeter_tcp_ip', help='tcp ip of the targeter server', required=True)\n parser.add_argument('-p', dest='targeter_tcp_port', type=int, help='tcp port of the targeter server', required=True)\n parser.add_argument('-l', required=False, type=float, dest='time_to_lose', default=None,\n help='time to lose a path, in seconds (optional)')\n args = parser.parse_args()\n\n hostile_areas_path = args.hostile_areas\n habitat_area_path = args.habitat_areas\n ignore_areas_path = args.ignore_areas\n\n eyes_path = args.eyes_ws_path\n targeter_path = args.targeter_tcp_ip, args.targeter_tcp_port\n\n time_to_lose = args.time_to_lose\n\n path_store = PathStorage()\n path_store.load_areas(hostile_areas_path, habitat_area_path, ignore_areas_path)\n print(path_store.hostile_areas)\n print(path_store.ignore_areas)\n print(path_store.habitat_area)\n\n\n async def connect(path):\n while True:\n try:\n async with websockets.connect(path + \"/vms/VmsStatus/SetSystemState\", timeout=5) as ws:\n print('connected to eyes')\n index = 0\n while ws:\n message = await ws.recv()\n index += 1\n print(f\"{datetime.now().time()}: from eyes msg_num: {index}\")\n data = json.loads(message)\n moi = data.get('Moving Objects Info')\n if not moi:\n continue\n objects = moi['Objects']\n print(f\"{datetime.now().time()}: got {len(objects)} new sightings!\")\n for obj in objects:\n path_store.add_object(obj)\n\n mark = data.get('VmsMatchingState', {}).get('VmsRegistrationMark')\n if mark:\n path_store.additional_info['VmsRegistrationMark'] = mark\n except (ConnectionRefusedError, TimeoutError, socket.timeout) as e:\n print(f'connection to eyes refused, retrying... error: {e!r}')\n await asyncio.sleep(2)\n continue\n except (ConnectionError, Exception) as e:\n print(f'connection to eyes closed unexpectedly, retrying... 
error: {e!r}')\n await asyncio.sleep(2)\n continue\n\n\n async def con(path):\n while True:\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n # Connect to server and send data\n sock.settimeout(5)\n sock.connect(path)\n sock.settimeout(0.1)\n print('connected to targeter')\n cur_target_id = -1\n while True:\n try:\n r = sock.recv(1024)\n except (socket.timeout, BlockingIOError):\n got = False\n else:\n got = len(r) > 0\n\n if got:\n # definetly retarget\n print('retargeting')\n target = path_store.get_most_suspicious()\n if not target:\n cur_target_id = -1\n else:\n cur_target_id = target.id\n else:\n # maybe retarget\n while True:\n timeout_retarget = False\n if time_to_lose is not None and cur_target_id != -1:\n data = path_store.data_for(cur_target_id)\n t = time()\n last_seen = data.get('time')\n if last_seen and (t - last_seen) > time_to_lose:\n timeout_retarget = True\n\n if cur_target_id == -1 or timeout_retarget:\n t = path_store.get_most_suspicious()\n if t:\n cur_target_id = t.id\n else:\n break\n else:\n break\n\n data = path_store.data_for(cur_target_id)\n additional_info = path_store.additional_info\n d = {'id': cur_target_id, **data, **additional_info}\n sock.send(bytes(json.dumps(d) + '\\n', 'utf-8'))\n await asyncio.sleep(0.15)\n except (ConnectionRefusedError, TimeoutError, socket.timeout):\n print('connection to targeter refused, retrying...')\n await asyncio.sleep(2)\n continue\n except (ConnectionError, Exception):\n print('connection to targeter closed unexpectedly, retrying...')\n await asyncio.sleep(2)\n continue\n\n\n asyncio.get_event_loop().run_until_complete(asyncio.gather(\n con(targeter_path),\n connect(eyes_path)\n ))\n asyncio.get_event_loop().run_forever()\n","repo_name":"bentheiii/vmsfilter","sub_path":"vmsfilter/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
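Both connection loops above repeat the same connect/process/sleep-and-retry shape; distilled into a reusable sketch (retry_forever is a hypothetical helper, not part of the module):

import asyncio

async def retry_forever(connect_once, delay=2):
    # connect_once: a coroutine function that services one connection
    # until it fails; any connection-level error triggers a delayed retry.
    while True:
        try:
            await connect_once()
        except (ConnectionError, OSError, asyncio.TimeoutError) as e:
            print(f"connection lost, retrying... error: {e!r}")
            await asyncio.sleep(delay)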
+{"seq_id":"74925371358","text":"import matplotlib.pyplot as plt\r\nimport planegeometry.geometry as geom\r\nimport seaborn as sb\r\n\r\n# starting circles ####\r\nc0 = geom.Circle((0,0), 3) # the exterior circle\r\nn = 3\r\ncircles0 = geom.SteinerChain(c0, n, phi = 0.25, shift = 0)[\"circles\"]\r\n\r\n# construct the inversions ####\r\ninversions = [None]*(n+1)\r\nfor i in range(n):\r\n inversions[i] = geom.Inversion.from_fixing_three_circles(\r\n c0, circles0[i], circles0[(i+1) % n]\r\n )\r\ninversions[n] = geom.Inversion.from_swapping_two_circles(\r\n c0, circles0[n]\r\n)\r\n\r\n# first generation of children\r\ncircles1 = []\r\nfor i in range(n):\r\n ip1 = (i+1) % n\r\n for j in range(n+1): #(j in (1L:(n+1L))[-c(i,ip1)]){\r\n if j != i and j != ip1:\r\n circle = inversions[i].invert_circle(circles0[j])\r\n circles1.append((circle, i))\r\n\r\n\r\n# function to construct the \"children\" ####\r\ndef children(inversions, circles1):\r\n m = len(inversions)\r\n n = len(circles1) \r\n circles2 = [] \r\n for i in range(n):\r\n k = circles1[i][1]\r\n for j in range(m):\r\n if j != k:\r\n circle = inversions[j].invert_circle(circles1[i][0])\r\n circles2.append((circle, j))\r\n return circles2\r\n\r\n# construct children ####\r\ndepth = 5\r\nallCircles = [None]*depth\r\nallCircles[0] = circles0\r\nallCircles[1] = circles1\r\nfor i in range(depth)[2:]:\r\n allCircles[i] = children(inversions, allCircles[i-1])\r\nfor i in range(depth)[1:]:\r\n allCircles[i] = [c[0] for c in allCircles[i]]\r\n\r\n# plot ####\r\ncolors = sb.color_palette(palette=\"bright\", n_colors=depth)\r\nfigure, axes = plt.subplots(figsize=(10, 10))\r\naxes.set_aspect(1)\r\ndef draw_circle(C, color, fill=True):\r\n axes.add_artist(\r\n plt.Circle(\r\n C.center, C.radius, fill=fill, facecolor=color, \r\n edgecolor=\"black\", linewidth=2\r\n )\r\n )\r\ndraw_circle(c0, \"black\", False)\r\nfor i in range(depth):\r\n for circ in allCircles[i]:\r\n draw_circle(circ, colors[i])\r\nplt.title(\"Apollonian gasket\", fontdict = {\"fontsize\": 40})\r\nplt.xlim(-4, 4)\r\nplt.ylim(-4, 4)\r\nplt.axis(\"off\")\r\nplt.show()\r\n","repo_name":"stla/PyPlaneGeometry","sub_path":"examples/ApollonianGasket.py","file_name":"ApollonianGasket.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"8089804975","text":"from nltk.corpus import stopwords,wordnet,words\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk import pos_tag\r\nimport nltk\r\nimport re\r\nfrom nltk.tokenize import MWETokenizer,word_tokenize\r\nimport string\r\nimport json\r\n\r\nclass TextToWord:\r\n def __init__(self,text):\r\n self.text = text\r\n\r\n # 获取单词的词性\r\n def get_wordnet_pos(self,tag):\r\n if tag == 'NNP':\r\n return 'NNP'\r\n elif tag.startswith('J'):\r\n return wordnet.ADJ\r\n elif tag.startswith('V'):\r\n return wordnet.VERB\r\n elif tag.startswith('N'):\r\n return wordnet.NOUN\r\n elif tag.startswith('R'):\r\n return wordnet.ADV\r\n else:\r\n return None\r\n\r\n def removeUseless(self,text):\r\n #1.去除数字 2.去除url 3.去除非英文字符\r\n #pattern = re.compile(r'([1-9]\\d*\\.?\\d*)|(0\\.\\d*[1-9])')\r\n pattern1 = re.compile(r\"((http|ftp|https)://)(([a-zA-Z0-9\\._-]+\\.[a-zA-Z]{2,6})|([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\\&%_\\./-~-]*)?\")\r\n pattern2 = re.compile(r'[^A-Za-z \\n[(.*\\\\!:;\\')*?\\]]*')\r\n #text_nonumber = re.sub(pattern,'',text)\r\n text_nourl = re.sub(pattern1,'',text) # 去掉url\r\n text_no_cli = re.sub(r'\\$.*','',text_nourl) # 去掉指令\r\n text_no_punc = re.sub(r'[^\\w\\s]',' ',text_no_cli) # 去除标点\r\n text_done = re.sub(pattern2,'',text_no_punc) # 去掉非英文字符\r\n return text_done\r\n\r\n def lower(self,tokenlist):\r\n token_list = []\r\n for w in tokenlist:\r\n token_list.append(w.lower())\r\n return token_list\r\n\r\n def processText(self):\r\n stop_words = set(stopwords.words('english'))\r\n # tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\r\n\r\n\r\n tokenizer = MWETokenizer([('web', 'framework'), ('file', 'system'), ('command', 'line')]) # 针对短语\r\n text_process = TextToWord.removeUseless(self,self.text)\r\n # text_token = TextToWord.lower(self,tokenizer.tokenize(text_process)) # 转成 token 不应该先lower,因为pos 的时候会根据大写判断 专有名词\r\n text_token = tokenizer.tokenize(word_tokenize(text_process))\r\n text_token = TextToWord.lower(self,text_token)\r\n filtered_sentence = [w for w in text_token if not w in stop_words]\r\n # print(filtered_sentence)\r\n\r\n tagged_sent = pos_tag(filtered_sentence) # 获取单词词性\r\n wnl = WordNetLemmatizer()\r\n text_processed = []\r\n custom_dictionary = ['fs']\r\n for tag in tagged_sent:\r\n if tag[0] in custom_dictionary:\r\n wordnet_pos = 'NNP'\r\n else:\r\n wordnet_pos = TextToWord.get_wordnet_pos(self,tag[1]) or wordnet.NOUN\r\n if wordnet_pos == 'NNP': # 专有名词不进行还原\r\n text_processed.append(tag[0])\r\n else:\r\n text_processed.append(wnl.lemmatize(tag[0], pos=wordnet_pos)) # 词形还原\r\n\r\n with open('C:/Users/Admin/Documents/我的坚果云/NPM_Cate/material/delete-words',encoding=\"utf-8\") as files:\r\n delete_words_list = files.read()\r\n delete_words = delete_words_list.split()\r\n\r\n # with open('C:/Users/Admin/Documents/我的坚果云/NPM_Cate/material/uninformative-words.json', 'r') as json_file:\r\n # uninformative_words = json.load(json_file)\r\n #\r\n # delete_words = set(delete_words + uninformative_words)\r\n\r\n text_processed = [w for w in text_processed if not w in delete_words]\r\n\r\n\r\n return text_processed # 最后小写化\r\n\r\nif __name__==\"__main__\":\r\n sentence = \"# 1-liners cli\\n\\n\\n \\n
\\n\\n\\n Copy common util functions to clipboard, courtesy of 1-liners \\n
\\n\\n## Quick Start\\n\\n```bash\\n$ npx 1-liners-cli\\n```\\n\\n## Keep It Around\\n\\n```bash\\n$ npm install -g 1-liners-cli # Install globally\\n$ 1-liners\"\r\n text2word = TextToWord(sentence)\r\n print(text2word.processText())\r\n\r\n\r\n","repo_name":"clutchyu/Categorizing-npm-packages","sub_path":"readme/text2word.py","file_name":"text2word.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"18163742071","text":"from bs4 import BeautifulSoup\nfrom requests import RequestException\n\nfrom exceptions import ParserFindTagException\n\nGET_RESPONSE_LOG_ERROR = 'Возникла ошибка при загрузке страницы {url}'\nTAG_NOT_FOUND_LOG_ERROR = 'Не найден тег {tag} {attrs}'\nELEMENTS_NOT_FOUND_LOG_ERROR = 'Не найдены элементы по запросу: {expression}'\n\n\nclass DelayedLogger:\n \"\"\"Класс для отложного логирования пойманных ошибок.\"\"\"\n\n def __init__(self):\n self.__messages = []\n\n def add_message(self, message):\n self.__messages.append(message)\n\n def log(self, logger):\n for error_message in self.__messages:\n logger(error_message)\n\n\ndef get_response(session, url):\n try:\n response = session.get(url)\n response.encoding = 'utf-8'\n return response\n except RequestException:\n raise ConnectionError(\n GET_RESPONSE_LOG_ERROR.format(url=url)\n )\n\n\ndef get_soup(session, url, features='lxml'):\n return BeautifulSoup(\n get_response(session, url).text,\n features=features\n )\n\n\ndef find_tag(soup, tag, attrs=None):\n searched_tag = soup.find(\n tag, attrs={} if attrs is None else attrs\n )\n if searched_tag is None:\n raise ParserFindTagException(\n TAG_NOT_FOUND_LOG_ERROR.format(tag=tag, attrs=attrs)\n )\n return searched_tag\n\n\ndef select_elements(soup, expression, single_tag=False):\n selected = (\n soup.select_one(expression) if single_tag else soup.select(expression)\n )\n if not selected:\n raise ParserFindTagException(\n ELEMENTS_NOT_FOUND_LOG_ERROR.format(expression=expression)\n )\n return selected\n","repo_name":"DoeryMK/bs4_parser_pep","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"826576444","text":"# -*- coding: utf-8 -*-\n\nfrom Products.Five import BrowserView\n\n\nclass FieldEditView(BrowserView):\n \"\"\"\n This manage methods of CU1/CU2 specific features fields view\n \"\"\"\n def __init__(self, context, request):\n super(BrowserView, self).__init__(context, request)\n self.context = context\n self.request = request\n\n def getFieldIds(self):\n licence_config = self.context.getLicenceConfig()\n vocname = self.request['vocname']\n specificfeatures = getattr(licence_config, vocname)\n spf_id = self.request['spf_id']\n vocterm = getattr(specificfeatures, spf_id)\n return vocterm.getRelatedFields()\n","repo_name":"IMIO/Products.urban","sub_path":"src/Products/urban/browser/fieldoverlay_edit.py","file_name":"fieldoverlay_edit.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"20371382071","text":"from aiogram import Bot, Dispatcher, types\nfrom aiogram.fsm.storage.memory import MemoryStorage\n\nfrom config_data.config import Config, load_config\n\n\nconfig: Config = load_config()\n\nbot = Bot(token=config.tg_bot.token)\nstorage = MemoryStorage()\ndp = Dispatcher(bot=bot, storage=storage)\n\n\nADMIN_CHAT_ID = config.tg_bot.admin_chat_id\n\n\n@dp.message()\nasync def send_to_admin(message: types.Message, text: str, parse_mode):\n await bot.send_message(chat_id=ADMIN_CHAT_ID, text=text, parse_mode=parse_mode)\n","repo_name":"connectoid/chatgpt-dalle2-bot","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"37593867664","text":"def substring_search(string, substring):\n for j in range(len(string) - len(substring) + 1): # позиции дальше не вместят подстроку по длине\n for k in range(len(substring)):\n if (string[j + k] == substring[k] and k == len(substring) - 1):\n return True\n elif (string[j + k] != substring[k]):\n break\n return False\n\ndef string_to_massive(H1, W1, string): # эта функция превратит строку в двумерный массив\n total_list = []\n current_list = []\n for i in range(H1):\n current_list = string.split(' ')[i]\n total_list.append(current_list)\n current_list = []\n return total_list\n\ndef TankRush(H1, W1, S1, H2, W2, S2):\n\n TwoDMap = string_to_massive(H1, W1, S1) # на входе программы подаются строки, поэтому сначала превратим их в двумерные массивы\n TwoDTanks = string_to_massive(H2, W2, S2)\n\n length_map = len(S1) # определим длины масиивов в символах, не во вложенных массивах\n length_tanks = len(S2)\n amount_of_entries = [] # в этом массиве сохраним количество вхождений для каждого элемента S2, в виде числа\n for i in range(len(TwoDTanks)):\n amount_of_entries.append(0) # заполним нолями массив\n\n if length_tanks > length_map: # ecли массив для поиска больше карты - сразу вернем False\n return False\n\n for i in range(H1):\n for j in range(H2):\n if substring_search(TwoDMap[i], TwoDTanks[j]):\n amount_of_entries[j] = amount_of_entries[j] + 1\n\n for i in range(W2):\n if amount_of_entries[i] == 0:\n return False\n\n return True\n","repo_name":"rmakarenko/TheRevenant","sub_path":"tank.py","file_name":"tank.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"14444198411","text":"from reto.argumentos import operacion\nfrom reto.kwargumentos import directorio1\n\nsuma = operacion('+',2,3,4)\nmultiplicacion = operacion('*',1,3,4)\nprint(suma)\nprint(multiplicacion)\n\ndirectorio1(Richie='12345', Daniela = '0987')\nhelp(operacion)","repo_name":"beduExpert/B1-Programacion-Con-Python-2020","sub_path":"Sesion-03/Reto-04/reto_04.py","file_name":"reto_04.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"es","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"}
+{"seq_id":"27304789824","text":"import itertools\nimport random\nimport numpy as np\nimport param\n\nfrom ... import aux, reg\nfrom ...aux import AttrDict, SuperList\nfrom . import crawler, turner, crawl_bend_interference, intermitter, sensor, feeder, memory, basic\nfrom ...param import class_defaults, NestedConf, class_objs\nfrom .. import deb, agents\n\n__all__ = [\n 'BrainModule',\n 'BrainModuleDB',\n 'LarvaModuleDB',\n 'SpaceDict',\n 'moduleDB',\n]\n\n\nclass BrainModule(NestedConf):\n ModeShortNames = AttrDict({'realistic': 'RE', 'square': 'SQ', 'gaussian': 'GAU', 'constant': 'CON',\n 'default': 'DEF', 'neural': 'NEU', 'sinusoidal': 'SIN', 'nengo': 'NENGO',\n 'phasic': 'PHI', 'branch': 'BR', 'osn': 'OSN', 'RL': 'RL', 'MB': 'MB'})\n\n mID = param.String(default=None, doc='The unoique ID of the module')\n color = param.Color(default=None, doc='The background color when plotting module tables')\n dict = param.Dict(default=aux.AttrDict(), doc='A dictionary of implemented modes as classes')\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.excluded = [basic.Effector, 'phi', 'name']\n self.default_dict = AttrDict(\n {mode: class_defaults(A=self.dict[mode], excluded=self.excluded) for mode in self.modes if not isinstance(self.dict[mode], dict)})\n\n @property\n def parent_class(self):\n return aux.common_ancestor_class(list(self.dict.values()))\n\n @property\n def modes(self):\n return self.dict.keylist\n\n @property\n def short_modes(self):\n return SuperList([self.ModeShortNames[m] for m in self.modes])\n\n def get_class(self, mode):\n if mode in self.short_modes:\n mode = [k for k in self.modes if self.ModeShortNames[k] == mode][0]\n if mode in self.modes:\n return self.dict[mode]\n else:\n return None\n\n def build_module(self, conf, **kwargs):\n if conf is not None and 'mode' in conf:\n C = self.get_class(conf.mode)\n if C is not None:\n return C(**{k: conf[k] for k in conf if k != 'mode'}, **kwargs)\n return None\n\n def module_conf(self, mode=None, include_mode=True, **kwargs):\n if mode in self.short_modes:\n mode = [k for k in self.modes if self.ModeShortNames[k] == mode][0]\n if mode in self.default_dict:\n d=self.default_dict[mode]\n d.update_existingdict(kwargs)\n if include_mode:\n d['mode'] = mode\n return d\n else:\n return None\n\n def module_objects(self, mode=None, excluded=None):\n if excluded is None:\n excluded = self.excluded\n C = self.get_class(mode=mode)\n if C is not None:\n return class_objs(A=C, excluded=excluded)\n\n else:\n return AttrDict()\n\n def module_pars(self, **kwargs):\n return self.module_objects(**kwargs).keylist\n\n def as_entry(self, d):\n return AttrDict({f'brain.{self.mID}': d})\n\n\nclass BrainModuleDB(NestedConf):\n BrainModuleModes = AttrDict({\n 'crawler': {\n 'constant': crawler.Crawler,\n 'gaussian': crawler.GaussOscillator,\n 'square': crawler.SquareOscillator,\n 'realistic': crawler.PhaseOscillator,\n 'nengo': basic.NengoEffector\n\n },\n 'interference': {\n 'default': crawl_bend_interference.DefaultCoupling,\n 'square': crawl_bend_interference.SquareCoupling,\n 'phasic': crawl_bend_interference.PhasicCoupling\n },\n 'turner': {\n 'neural': turner.NeuralOscillator,\n 'sinusoidal': turner.SinTurner,\n 'constant': turner.ConstantTurner,\n 'nengo': basic.NengoEffector\n },\n 'intermitter': {\n 'default': intermitter.Intermitter,\n 'branch': intermitter.BranchIntermitter\n },\n 'feeder': {\n 'default': feeder.Feeder,\n 'nengo': basic.NengoEffector\n\n },\n 'olfactor': {\n 'default': sensor.Olfactor,\n 'osn': 
sensor.OSNOlfactor,\n },\n 'toucher': {\n 'default': sensor.Toucher,\n },\n 'windsensor': {\n 'default': sensor.Windsensor,\n },\n 'thermosensor': {\n 'default': sensor.Thermosensor,\n },\n 'memory': {\n 'RL': {'olfaction': memory.RLOlfMemory, 'touch': memory.RLTouchMemory},\n 'MB': {'olfaction': memory.RemoteBrianModelMemory, 'touch': memory.RemoteBrianModelMemory}\n },\n # 'memory': {\n # 'RL': memory.RLmemory,\n # 'MB': memory.RemoteBrianModelMemory\n # },\n })\n\n BrainModuleColors = AttrDict({\n 'crawler': 'lightcoral',\n 'turner': 'indianred',\n 'interference': 'lightsalmon',\n 'intermitter': '#a55af4',\n 'olfactor': 'palegreen',\n 'windsensor': 'plum',\n 'thermosensor': 'plum',\n 'toucher': 'pink',\n 'feeder': 'pink',\n 'memory': 'pink',\n })\n\n def __init__(self, **kwargs):\n self.LocoModsBasic = SuperList(['crawler', 'turner', 'interference', 'intermitter'])\n self.LocoMods = SuperList(['crawler', 'turner', 'interference', 'intermitter', 'feeder'])\n self.SensorMods = SuperList(['olfactor', 'toucher', 'windsensor', 'thermosensor'])\n self.BrainMods = self.BrainModuleModes.keylist\n self.brainDB = AttrDict(\n {k: BrainModule(mID=k, dict=self.BrainModuleModes[k], color=self.BrainModuleColors[k]) for k in\n self.BrainMods})\n\n super().__init__(**kwargs)\n\n def mod_modes(self, k, short=False):\n if k not in self.BrainMods:\n return None\n else:\n if short:\n return self.brainDB[k].short_modes\n else:\n return self.brainDB[k].modes\n\n def build_module(self, mID=None, conf=None, **kwargs):\n return self.brainDB[mID].build_module(conf=conf, **kwargs) if mID in self.BrainMods else None\n\n def build_modules(self, mIDs, conf, **kwargs):\n return AttrDict(\n {mID: self.build_module(mID=mID, conf=conf[mID] if mID in conf else None, **kwargs) for mID in mIDs})\n\n def build_locomodules(self, conf, **kwargs):\n return self.build_modules(mIDs=self.LocoMods, conf=conf, **kwargs)\n\n def build_sensormodules(self, conf, **kwargs):\n return self.build_modules(mIDs=self.SensorMods, conf=conf, **kwargs)\n\n def module_conf(self, mID=None, mode=None, as_entry=True, **kwargs):\n M = self.brainDB[mID]\n conf = M.module_conf(mode=mode, **kwargs) if mID in self.BrainMods else None\n return M.as_entry(conf) if as_entry else conf\n\n def module_objects(self, mID=None, mode=None, as_entry=True, **kwargs):\n M = self.brainDB[mID]\n objs = M.module_objects(mode=mode, **kwargs) if mID in self.BrainMods else AttrDict()\n return M.as_entry(objs) if as_entry else objs\n\n def modules_objects(self, mIDs, conf, as_entry=True, **kwargs):\n C = AttrDict(\n {mID: self.module_objects(mID, conf[mID] if mID in conf else AttrDict(), as_entry=False, **kwargs) for mID\n in mIDs})\n return AttrDict({f'brain.{mID}': C[mID] for mID in C}).flatten() if as_entry else C\n\n def module_pars(self, **kwargs):\n return self.module_objects(**kwargs).flatten().keylist\n\n def modules_pars(self, **kwargs):\n return self.modules_objects(**kwargs).keylist\n\n def brainConf(self, ms={}, mkws={}):\n C = AttrDict()\n for k in self.BrainMods:\n C[k] = self.brainDB[k].module_conf(mode=ms[k] if k in ms else None, **mkws[k] if k in mkws else {})\n C.nengo = (C.crawler is not None and C.crawler.mode == 'nengo')\n return C\n\n def mcolor(self, k):\n return self.brainDB[k].color if k in self.BrainMods else None\n\n def mod_combs(self, ks, short=False, to_return='yield'):\n ks = ks.existing(self.BrainMods)\n x = itertools.product(*[self.mod_modes(k, short=short) for k in ks])\n if to_return == 'yield':\n return x\n elif to_return == 'list':\n 
return list(x)\n\n def parent_class(self, k):\n return self.brainDB[k].parent_class if k in self.BrainMods else None\n\n def get_memory_class(self, mode, modality):\n try:\n return self.brainDB['memory'].dict[mode][modality]\n except:\n return None\n\n def memory_kws(self, mode='RL', modality='olfaction',as_entry=True, **kwargs):\n A=self.get_memory_class(mode, modality)\n if A is not None:\n c=class_defaults(A=A, excluded=['dt'],included={'mode': mode, 'modality': modality}, **kwargs)\n return AttrDict({'brain.memory':c}) if as_entry else c\n else:\n return None\n\n def build_memory_module(self, conf, **kwargs):\n if conf is not None and 'mode' in conf and 'modality' in conf:\n A = self.get_memory_class(conf.mode, conf.modality)\n if A is not None:\n return A(**{k: conf[k] for k in conf if k not in ['mode', 'modality']}, **kwargs)\n return None\n\n def detect_brainconf_modes(self, m):\n return AttrDict({k: m[k].mode if (k in m and 'mode' in m[k]) else None for k in self.BrainMods})\n\n\nclass LarvaModuleDB(BrainModuleDB):\n LarvaModuleColors = AttrDict({\n 'body': 'lightskyblue',\n 'physics': 'lightsteelblue',\n 'energetics': 'lightskyblue',\n 'DEB': 'lightskyblue',\n 'gut': 'lightskyblue',\n 'Box2D': 'lightcoral',\n 'sensorimotor': 'lightcoral',\n })\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.ModuleColorDict = AttrDict(**self.BrainModuleColors, **self.LarvaModuleColors)\n self.LarvaModsBasic = SuperList(['body', 'physics'])\n self.LarvaModsOptional = SuperList(['energetics', 'sensorimotor', 'Box2D'])\n self.LarvaMods = self.LarvaModsBasic + self.LarvaModsOptional\n self.AllModules = self.BrainMods + self.LarvaMods\n self.LarvaModsConfDict = AttrDict({\n 'body': self.body_kws,\n 'physics': self.physics_kws,\n 'energetics': self.energetics_kws,\n 'sensorimotor': self.sensorimotor_kws,\n 'Box2D': self.Box2D_kws\n })\n self.LarvaModsDefaultDict = AttrDict({k : f() for k,f in self.LarvaModsConfDict.items()})\n\n\n def sensorimotor_kws(self, **kwargs):\n return class_defaults(agents.ObstacleLarvaRobot, excluded=[agents.LarvaRobot], **kwargs)\n\n def energetics_kws(self, gut_kws={}, DEB_kws={}):\n return AttrDict({\n 'DEB': class_defaults(deb.DEB, excluded=[deb.DEB_model, 'substrate', 'id'], **DEB_kws),\n 'gut': class_defaults(deb.Gut, **gut_kws)\n })\n\n def body_kws(self, **kwargs):\n return class_defaults(agents.LarvaSegmented,\n excluded=[agents.OrientedAgent, 'vertices', 'base_vertices', 'width', 'guide_points',\n 'segs'],\n **kwargs)\n\n def physics_kws(self, **kwargs):\n return class_defaults(agents.BaseController, **kwargs)\n\n def Box2D_kws(self, **kwargs):\n d = AttrDict({\n 'joint_types': {\n 'friction': {'N': 0, 'args': {}},\n 'revolute': {'N': 0, 'args': {}},\n 'distance': {'N': 0, 'args': {}}\n }})\n return d.update_existingnestdict(kwargs)\n\n def larvaConf(self, ms={}, mkws={}):\n C = AttrDict({'brain': self.brainConf(ms=ms, mkws=mkws)})\n for k, c in self.LarvaModsDefaultDict.items():\n if k in self.LarvaModsOptional :\n if k not in mkws:\n C[k] = None\n continue\n if k not in mkws:\n mkws[k] = {}\n C[k] = c.update_existingnestdict(mkws[k])\n return C\n\n\nmoduleDB = LarvaModuleDB()\n\n\nclass SpaceDict(NestedConf):\n base_model = reg.conf.Model.confID_selector()\n space_mkeys = param.ListSelector(default=[], objects=moduleDB.AllModules,\n label='keys of modules to include in space search',\n doc='Keys of the modules where the optimization parameters are')\n Pmutation = param.Magnitude(default=0.3, step=0.01, label='mutation probability',\n 
doc='Probability of mutation for each agent in the next generation')\n Cmutation = param.Magnitude(default=0.1, step=0.01, label='mutation coeficient',\n doc='Fraction of allowed parameter range to mutate within')\n\n init_mode = param.Selector(default='random', objects=['random', 'model', 'default'],\n label='mode of initial generation', doc='Mode of initial generation')\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.mConf0 = reg.conf.Model.getID(self.base_model)\n self.space_objs = self.build()\n self.space_ks = self.space_objs.keylist\n self.parclasses = AttrDict({p: self.parclass(p) for p in self.space_ks})\n # for p in self.space_ks:\n # self.param.add_parameter(p, self.space_objs[p])\n\n def build(self):\n D = AttrDict()\n for k in self.space_mkeys:\n xx = self.mConf0.brain[k]\n if xx is not None:\n A = moduleDB.brainDB[k][xx.mode]\n Aobjs = class_objs(A, excluded=[basic.Effector, 'phi', 'name'])\n for p, obj in Aobjs.items():\n if p in xx:\n obj.default = xx[p]\n D[f'brain.{k}.{p}'] = obj\n return D\n\n def obj_attr(self, k, flat=True):\n if flat:\n return AttrDict({p: getattr(obj, k) if hasattr(obj, k) else None for p, obj in self.space_objs.items()})\n else:\n return AttrDict(\n {obj.name: getattr(obj, k) if hasattr(obj, k) else None for p, obj in self.space_objs.items()})\n\n def obj_min_max_value(self, p):\n obj = self.space_objs[p]\n v = obj.default\n if isinstance(v, tuple):\n if v[0] is None:\n v = v[1]\n elif v[1] is None:\n v = v[0]\n else:\n v = np.mean([v[0], v[1]])\n min, max = obj.bounds if hasattr(obj, 'bounds') else (None, None)\n step = obj.step if hasattr(obj, 'step') else None\n try:\n return param._get_min_max_value(min, max, value=v, step=step)\n except:\n return None\n\n @property\n def defaults(self):\n return self.obj_attr('default')\n\n def parclass(self, p):\n obj = self.space_objs[p]\n c = type(obj)\n\n def check(k):\n m = getattr(param, k)\n ms = [m] + m.__subclasses__()\n return c in ms or issubclass(c, m)\n\n valid = [k for k in ['Magnitude', 'Integer', 'Number', 'Selector', 'Boolean', 'Range', 'Dict'] if check(k)]\n return valid[0]\n\n def randomize(self):\n g = self.defaults\n for p in self.space_ks:\n v = g[p]\n obj = self.space_objs[p]\n cl = self.parclasses[p]\n if cl in ['Selector']:\n g[p] = random.choice(obj.objects)\n elif cl in ['Boolean']:\n g[p] = random.choice([True, False])\n else:\n vmin, vmax = obj.bounds\n if None in (vmin, vmax):\n vmin0, vmax0, vv = self.obj_min_max_value(p)\n else:\n vmin0, vmax0 = vmin, vmax\n\n if cl in ['Range']:\n vnew = random.uniform(vmin0, vmax0)\n vnew2 = random.uniform(vmin0, vmax0)\n g[p] = (np.clip(vnew, a_min=vmin, a_max=vmax), np.clip(vnew2, a_min=vmin, a_max=vmax))\n elif cl in ['Integer']:\n g[p] = obj.crop_to_bounds(random.randint(vmin0, vmax0))\n else:\n g[p] = obj.crop_to_bounds(random.uniform(vmin0, vmax0))\n\n def mutate(self, g):\n for p in self.space_ks:\n v = g[p]\n if random.random() < self.Pmutation:\n obj = self.space_objs[p]\n cl = self.parclasses[p]\n if cl in ['Selector']:\n g[p] = random.choice(obj.objects)\n elif cl in ['Boolean']:\n g[p] = random.choice([True, False])\n else:\n if v is not None:\n if hasattr(obj, 'step') and obj.step is not None:\n vr = obj.step * 5\n else:\n vmin, vmax = obj.bounds\n if None in (vmin, vmax):\n vmin, vmax, vv = self.obj_min_max_value(p)\n vr = np.abs(vmax - vmin) * 0.5\n s = self.Cmutation * vr\n if cl in ['Range']:\n vmin, vmax = obj.bounds\n g[p] = (np.clip(random.gauss(v[0], s), a_min=vmin, a_max=vmax),\n 
np.clip(random.gauss(v[1], s), a_min=vmin, a_max=vmax))\n\n elif cl in ['Integer']:\n g[p] = obj.crop_to_bounds(int(random.gauss(v, s)))\n else:\n g[p] = obj.crop_to_bounds(random.gauss(v, s))\n\n return g\n\n def create_first_generation(self, N):\n m = self.init_mode\n if m == 'default':\n return [self.defaults] * N\n elif m == 'model':\n return [AttrDict({k: self.mConf0.flatten()[k] for k in self.space_ks})] * N\n elif m == 'random':\n return [self.randomize() for i in range(N)]\n else:\n raise ValueError('Not implemented')\n\n\n@reg.funcs.stored_conf(\"Model\")\ndef Model_dict():\n MD = moduleDB\n LMs = MD.LocoModsBasic\n\n def olf_kws(g={'Odor': 150.0}, mode='default', **kwargs):\n return MD.module_conf(mID='olfactor', mode=mode, gain_dict=g, **kwargs)\n\n E = {}\n\n def new(id, id0, kws={}):\n try:\n E[id] = E[id0].new_dict(kws)\n except:\n pass\n\n def extend(id0, pref=None):\n if pref is None:\n pref=id0\n def new0(id, kws={}):\n new(id=id, id0=id0, kws=kws)\n\n for sg, g in zip(['', '0', '_x2'], [{'Odor': 150.0}, {'Odor': 0.0}, {'CS': 150.0, 'UCS': 0.0}]):\n for sb, br in zip(['', '_brute'], [False, True]):\n idd = f'{pref}_navigator{sg}{sb}'\n o = olf_kws(g=g, brute_force=br)\n new0(idd, o)\n for k in ['RL', 'MB']:\n new0(f'{idd}_{k}', {**o, **MD.memory_kws(k)})\n\n for ss, eeb in zip(['', '_max'], [0.5, 0.9]):\n f = AttrDict({**MD.module_conf(mID='feeder', mode='default'), 'brain.intermitter.feed_bouts': True,\n 'brain.intermitter.EEB': eeb})\n new0(f'{pref}{ss}_feeder', f)\n for sg, g in zip(['', '0', '_x2'], [{'Odor': 150.0}, {'Odor': 0.0}, {'CS': 150.0, 'UCS': 0.0}]):\n idd = f'{pref}{ss}_forager{sg}'\n o = olf_kws(g=g)\n new0(idd, {**o, **f})\n for k in ['RL', 'MB']:\n new0(f'{idd}_{k}', {**o, **f, **MD.memory_kws(k)})\n\n for mm in [f'{pref}_avg', f'{pref}_var', f'{pref}_var2']:\n if mm in reg.conf.Model.confIDs:\n E[mm] = reg.conf.Model.getID(mm)\n\n for id, (Tm, ImM) in zip(['Levy', 'NEU_Levy', 'NEU_Levy_continuous'],\n [('SIN', 'DEF'), ('NEU', 'DEF'), ('NEU', None)]):\n E[id] = MD.larvaConf(ms=AttrDict(zip(LMs, ['CON', Tm, 'DEF', ImM])),\n mkws={'interference': {'attenuation': 0.0}, 'intermitter': {'run_mode': 'exec'}})\n extend(id0=id)\n\n for mms in MD.mod_combs(LMs, short=True):\n kws={'ms': AttrDict(zip(LMs, mms)), 'mkws' : {'interference': {'attenuation': 0.1, 'attenuation_max': 0.6}} if mms[\n 2] != 'DEF' else {}}\n if 'NENGO' in mms:\n if list(mms) != ['NENGO','NENGO','SQ','DEF']:\n continue\n id='nengo_explorer'\n E[id] = MD.larvaConf(**kws)\n extend(id0=id, pref='nengo')\n else:\n id = \"_\".join(mms)\n E[id] = MD.larvaConf(**kws)\n if mms[0] == 'RE' and mms[3] == 'DEF':\n extend(id0=id)\n if mms[1] == 'NEU' and mms[2] == 'PHI':\n for idd in ['navigator', 'navigator_x2', 'forager', 'forager0', 'forager_x2', 'max_forager', 'max_forager0',\n 'forager_RL', 'forager0_RL', 'max_forager_RL', 'max_forager0_RL',\n 'forager_MB', 'forager0_MB', 'max_forager_MB', 'max_forager0_MB',\n 'feeder', 'max_feeder']:\n E[idd] = E[f'{id}_{idd}']\n E['explorer'] = E[id]\n E['RLnavigator'] = E[f'{id}_navigator_RL']\n\n for id, dd in zip(['imitator', 'zebrafish', 'thermo_navigator', 'OSNnavigator', 'OSNnavigator_x2'],\n [{'body.Nsegs': 11},\n {'body.body_plan': 'zebrafish_larva', 'Box2D': {'joint_types': {\n 'revolute': {'N': 1, 'args': {'maxMotorTorque': 10 ** 5, 'motorSpeed': 1}}}}},\n MD.module_conf(mID='thermosensor', mode='default'),\n olf_kws(mode='osn'),\n olf_kws({'CS': 150.0, 'UCS': 0.0}, mode='osn')]):\n new(id, 'explorer', dd)\n for ss, kkws in zip(['', '_2', '_brute'], [{}, 
{'touch_sensors': [0, 2]}, {'brute_force': True}]):\n new(f'toucher{ss}', 'explorer', MD.module_conf(mID='toucher', mode='default', **kkws))\n new(f'RLtoucher{ss}', f'toucher{ss}', MD.memory_kws(modality='touch'))\n for id, gd in zip(['follower-R', 'follower-L', 'gamer', 'gamer-5x'], [{'Left_odor': 150.0, 'Right_odor': 0.0},\n {'Left_odor': 0.0, 'Right_odor': 150.0},\n {'Flag_odor': 150.0, 'Left_base_odor': 0.0,\n 'Right_base_odor': 0.0},\n {'Flag_odor': 150.0, 'Left_base_odor': 0.0,\n 'Right_base_odor': 0.0, 'Left_odor': 0.0,\n 'Right_odor': 0.0}\n ]):\n new(id, 'forager', {'brain.olfactor.gain_dict': gd})\n\n new('immobile', 'navigator', {'brain.crawler': None, 'brain.turner': None,\n 'brain.intermitter': None, 'brain.interference': None,\n **MD.module_conf(mID='toucher', mode='default')})\n new('obstacle_avoider', 'navigator', {'sensorimotor': MD.sensorimotor_kws()})\n\n for id in ['explorer', 'navigator', 'feeder', 'forager']:\n new(f'{id}_sample', id, {k: 'sample' for k in MD.module_pars(mID='crawler', mode='RE')})\n\n for sp, k_abs, eeb in zip(['rover', 'sitter'], [0.8, 0.4], [0.67, 0.37]):\n en_ws = MD.energetics_kws(gut_kws={'k_abs': k_abs}, DEB_kws={'species': sp})\n en_ws2 = {**en_ws, 'brain.intermitter.EEB': eeb}\n new(f'{sp}_explorer', 'explorer', en_ws)\n new(f'{sp}_navigator', 'navigator', en_ws)\n new(f'{sp}_feeder', 'feeder', en_ws2)\n new(f'{sp}_forager', 'forager', en_ws2)\n new(sp, f'{sp}_feeder')\n\n return E\n","repo_name":"bagjohn/larvaworld_autoversioning","sub_path":"src/larvaworld/lib/model/modules/module_modes.py","file_name":"module_modes.py","file_ext":"py","file_size_in_byte":24031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
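`SpaceDict.mutate` above perturbs each selected parameter with probability `Pmutation`, drawing from a Gaussian whose spread is `Cmutation` times the allowed range and cropping back into bounds. A standalone sketch of that scheme; the parameter names and bounds below are invented for illustration:

```python
import random

P_MUTATION, C_MUTATION = 0.3, 0.1  # defaults used by SpaceDict above
bounds = {'freq': (0.5, 2.5), 'amp': (0.0, 1.0)}  # hypothetical search space

def mutate(genome):
    for key, (lo, hi) in bounds.items():
        if random.random() < P_MUTATION:
            sigma = C_MUTATION * (hi - lo)          # fraction of the range
            value = random.gauss(genome[key], sigma)
            genome[key] = min(max(value, lo), hi)   # crop back into bounds
    return genome

print(mutate({'freq': 1.0, 'amp': 0.5}))
```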
+{"seq_id":"74124194718","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('freebasics', '0006_change_site_url_field_type'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='freebasicscontroller',\n name='postgres_db_url',\n field=models.TextField(null=True, blank=True),\n ),\n ]\n","repo_name":"praekeltfoundation/mc2-freebasics","sub_path":"freebasics/migrations/0007_freebasicscontroller_postgres_db_url.py","file_name":"0007_freebasicscontroller_postgres_db_url.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"30583313355","text":"from typing import Union,List,Callable\nimport cv2\nimport numpy as np\nimport calibration\nfrom projectTypes import *\nimport math\n\n\n\n\ndef getFrameGenerator(videoPath:str,calibrationPath:str,imageWidth:int) -> Callable:\n cap = cv2.VideoCapture(videoPath)\n CAM,DIST = calibration.load_coefficients(calibrationPath)\n\n def frameGenerator() -> Union[np.ndarray,None]:\n ret, frame = cap.read()\n if frame is not None:\n frame = cv2.undistort(frame,CAM,DIST)#Undistort\n\n w ,h = frame.shape[0],frame.shape[1] #Resize to width with Aspect Ratio\n sf = imageWidth/w\n dim = [int(h*sf),int(w*sf)]\n frame = cv2.resize(frame,dim)\n return frame\n return None\n\n return frameGenerator\n\ndef getPossibleMarkers(frame:np.ndarray ,HSVrange:HSVRange) -> List[List[int]]:\n threshed = getColourMask(frame,HSVrange)\n cnts = cv2.findContours(threshed, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n posMarkerPoints = []\n for cnt in cnts:\n posMarkerPoints.append(get_cnt_centre(cnt))\n \n return posMarkerPoints\n\n\n\ndef getColourMask(frame:np.ndarray ,HSVrange:HSVRange) -> np.ndarray:\n\n KERNEL = np.ones((5,5),np.uint8)\n hsvframe = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n threshed = cv2.inRange(hsvframe, HSVrange.lower, HSVrange.upper)\n threshed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, KERNEL)\n threshed = cv2.morphologyEx(threshed, cv2.MORPH_OPEN, KERNEL)\n threshed = cv2.dilate(threshed,KERNEL,iterations=1)\n\n\n return threshed\n\ndef get_cnt_centre(cnt:np.ndarray) -> List[int]:\n M = cv2.moments(cnt)\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return [cX,cY]\n return [0,0]\n\ndef get_hough_sets(markerPoints:list,frame:np.ndarray) -> List[List[List[int]]]:\n #INEFFICENT FUNCTION WORKS FOR NOW TODO\n\n #Add points to blank image\n blank_image = np.zeros((frame.shape[0],frame.shape[1],1), np.uint8)\n for marker in markerPoints: \n cX,cY = marker\n blank_image[cY][cX] = 255\n \n\n\n lines = cv2.HoughLines(blank_image, 2, np.pi/180,3)\n \n if lines is not None:\n print('len lines',len(lines))\n else:\n print('Hough lines in None')\n\n lineSets = []\n if lines is not None:\n for r_theta in lines:\n arr = np.array(r_theta[0], dtype=np.float64)\n rho, theta = arr\n closePoints = getPointsCloseToLine(rho,theta,5,markerPoints)\n if len(closePoints)>=1:\n potLine = []\n for point in closePoints:\n point=list(point)\n potLine.append(point)\n lineSets.append(potLine)\n return lineSets\n\n\ndef draw_hough_sets(frame,lineSets):\n for line in lineSets:\n cv2.line(frame,line[0],line[-1],(200,90,255),2)\n return frame\n\ndef getPointsCloseToLine(rho:float,theta:float,distance:float,points:List[List[int]]) -> List[List[int]]:\n if theta != 0:\n p1 = [0,rho/math.sin(theta)]\n p2 =[1000,(rho-1000*math.cos(theta))/math.sin(theta)]\n \n\n pt1 = np.asarray(p1)\n pt2 = np.asarray(p2)\n pt3 = np.asarray(points)\n\n d=np.cross(pt2-pt1,pt3-pt1)/np.linalg.norm(pt2-pt1)\n npPoints = np.asarray(points)\n\n indices = np.where(abs(d) 2번 반복\n# 8. 다시 물고기의 수를 조절\n# 9. 
Lay the tanks out in a single row on the floor\n\nfrom collections import deque\n\nn, k = map(int, input().split())\n# up, down, left, right\ndirections = [(-1, 0), (1, 0), (0, -1), (0, 1)]\nboard = [deque(list(map(int, input().split())))]\n\nresult = 0\n\ndef get_diff(board) :\n get_q = board[0]\n return max(get_q) - min(get_q)\n\ndef rotate_stack(arr) :\n while True :\n if len(arr) > len(arr[0]) - len(arr[-1]) :\n break\n blocks = []\n r = len(arr)\n c = len(arr[-1])\n\n for i in range(r) :\n temp_q = deque()\n for _ in range(c) :\n temp_q.append(arr[i].popleft())\n blocks.append(temp_q)\n\n arr = [arr[0]]\n rotated = rotate_90(blocks)\n for row in rotated :\n arr.append(deque(row))\n\n return arr\n\n\ndef rotate_90(block) :\n temp = [[0] * len(block) for _ in range(len(block[0]))]\n for i in range(len(block[0])) :\n for j in range(len(block)) :\n temp[i][j] = block[j][len(block[0])-1-i]\n return temp\n\ndef fix_fish(arr) :\n dp = [[0] * len(arr[x]) for x in range(len(arr))]\n for x in range(len(arr)) :\n for y in range(len(arr[x])) :\n for dir in directions :\n nx = x + dir[0]\n ny = y + dir[1]\n\n if 0 <= nx < len(arr) and 0 <= ny < len(arr[nx]) :\n # if the current cell holds more fish than the adjacent cell\n if arr[x][y] > arr[nx][ny] :\n diff = (arr[x][y] - arr[nx][ny]) // 5\n if diff >= 1 :\n dp[x][y] -= diff\n else : # if the current cell holds fewer fish than the adjacent cell\n diff = (arr[nx][ny] - arr[x][y]) // 5\n if diff >= 1 :\n dp[x][y] += diff\n for i in range(len(arr)) :\n for j in range(len(arr[i])) :\n arr[i][j] += dp[i][j]\n\ndef make_one_line(arr) :\n temp = deque()\n for i in range(len(arr[-1])) :\n for j in range(len(arr)) :\n temp.append(arr[j][i])\n\n for i in range(len(arr[-1]), len(arr[0])) :\n temp.append(arr[0][i])\n\n return [temp]\n\ndef half_rotation(arr) :\n temp = deque()\n for i in range(n // 2) :\n temp.append(arr[0].popleft())\n rotated = rotate_180([temp])\n arr += rotated\n\n left = []\n for i in range(2) :\n data = deque()\n for j in range(n // 4) :\n data.append(arr[i].popleft())\n left.append(data)\n rotated = rotate_180(left)\n arr += rotated\n\ndef rotate_180(arr) :\n temp = []\n for i in reversed(range(len(arr))) :\n arr[i].reverse()\n temp.append(arr[i])\n\n return temp\n\n\nwhile True :\n # get the difference in fish count between the fullest and the emptiest tank\n diff = get_diff(board)\n if diff <= k :\n print(result)\n break\n\n # step 1\n min_value = min(board[0])\n for i in range(len(board[0])) :\n if board[0][i] == min_value :\n board[0][i] += 1\n\n # step 2\n value = board[0].popleft()\n board.append(deque([value]))\n # step 3\n board = rotate_stack(board)\n # step 5\n fix_fish(board)\n # step 6\n board = make_one_line(board)\n # step 7\n half_rotation(board)\n # step 8\n fix_fish(board)\n # step 9\n board = make_one_line(board)\n\n result += 1\n \n'''\n1. Define the directions (up, down, left, right) used when adjusting the fish counts and store them in the directions list.\n\n2. Read the tank state into deque() objects, and repeat the steps below until the difference in fish count between the fullest and the emptiest tank is at most k.\n - (step 1) Take the smallest value among the tanks as min_value, then check each tank and add 1 to every tank whose fish count equals min_value.\n - (step 2) Pop the leftmost tank, assign it to value, wrap it in deque() and append it back to the board list.\n - (step 3) Rotate the board list 90 degrees clockwise with rotate_stack() and stack it on top of the tanks on the floor.\n - (step 5) Adjust the fish counts with the fix_fish() function. Note that the adjustments are made simultaneously, so the board list must not be modified in place; the work has to go through a temporary list.\n - (step 6) Put the tanks (board) into a single row with the make_one_line() function.\n - (step 7) Use the half_rotation() function to split the n tanks in half and rotate the left part 180 degrees, twice.\n - (step 8) Adjust the fish counts again with the fix_fish() function.\n - (step 9) Lay the board list out in a single row on the floor again with the make_one_line() function.\n - Once step 9 is complete, increase the tidy-up count (result) by 1.\n - When the difference in fish counts becomes at most k, print result and stop the loop.\n \n3. The rotate_stack() function, which rotates the list 90 degrees clockwise and stacks it on the tanks on the floor, works as follows.\n - If the number of rows of the given arr list is larger than the length of the floor row minus the cells being rotated, the rightmost tank would have no floor tank beneath it, so break.\n - Otherwise, pop the cells to be rotated and append them to the temp_q list.\n - Each time a row of tanks has been extracted, append the temp_q list to the blocks list.\n - Rotate the assembled blocks list 90 degrees clockwise with the rotate_90() function.\n - Take the rows of the rotated list one by one, append them to the arr list, and when the while loop ends return the arr list.\n\n4. The fix_fish() function, which adjusts the fish counts, works as follows.\n - Define a temporary list (dp).\n - At each position, check the four adjacent directions; if the current cell is larger than an adjacent cell, take the difference in fish counts between the two tanks divided by 5 and subtract it from the fish count of the current cell.\n - Conversely, if the current cell is smaller than the adjacent cell, subtract it from the fish count of the adjacent tank.\n - Once every position has been compared with its neighbours and the counts updated, add dp[i][j] to position arr[i][j].\n\n5. The make_one_line() function, which lays the tanks out in a row, works as follows.\n - Using the column count of the last row of arr as the columns and the row count of arr as the rows, append arr[j][i] to temp.\n - Then append the tanks remaining on the floor (arr[0][i]) to the temp list as well. When done, return [temp].\n\n6. The half_rotation() function, which splits the tanks in half, and the rotate_180() function, which turns them 180 degrees, work as follows.\n - In a loop, pop the n // 2 leftmost elements of the arr list and append them to temp.\n - Rotate those n // 2 elements 180 degrees with the rotate_180() function. \n - Append the rotated list (rotated) to the arr list.\n - Split again by n // 4, rotate 180 degrees, and append the rotated left list to the arr list.\n\n'''\n","repo_name":"unie2/PythonCodingTest-Practice","sub_path":"BOJ/어항 정리.py","file_name":"어항 정리.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
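One detail worth checking in the record above is the index mapping in `rotate_90`, `temp[i][j] = block[j][cols - 1 - i]`, which turns an r-by-c block into a c-by-r one. A standalone sanity check on a 2x3 block:

```python
# Standalone copy of the rotate_90 index mapping used in the record above.
def rotate_90(block):
    rows, cols = len(block), len(block[0])
    temp = [[0] * rows for _ in range(cols)]
    for i in range(cols):
        for j in range(rows):
            temp[i][j] = block[j][cols - 1 - i]
    return temp

print(rotate_90([[1, 2, 3],
                 [4, 5, 6]]))  # -> [[3, 6], [2, 5], [1, 4]]
```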
+{"seq_id":"851305570","text":"from header import *\n\n\n# script_convert_3d_pos_to_map_pos\n# Input: pos1 = 3d_pos, pos2 = map_size_pos\n# Output: pos0 = map_pos\nconvert_3d_pos_to_map_pos = (\n\t\"convert_3d_pos_to_map_pos\",\n\t\t\t[(set_fixed_point_multiplier, 1000),\n\t\t\t\t(position_transform_position_to_local, pos3, pos2, pos1),\n\t\t\t\t(position_get_x, \":agent_x_pos\", pos3),\n\t\t\t\t(position_get_y, \":agent_y_pos\", pos3),\n\t\t\t\t(val_div, \":agent_x_pos\", \"$g_battle_map_scale\"),\n\t\t\t\t(val_div, \":agent_y_pos\", \"$g_battle_map_scale\"),\n\t\t\t\t(set_fixed_point_multiplier, 1000),\n\t\t\t\t(store_sub, \":map_x\", 980, \"$g_battle_map_width\"),\n\t\t\t\t(store_sub, \":map_y\", 730, \"$g_battle_map_height\"),\n\t\t\t\t(val_add, \":agent_x_pos\", \":map_x\"),\n\t\t\t\t(val_add, \":agent_y_pos\", \":map_y\"),\n\t\t\t\t(position_set_x, pos0, \":agent_x_pos\"),\n\t\t\t\t(position_set_y, pos0, \":agent_y_pos\"),\n\t\t])\n\n\t\t\n# script_store_movement_order_name_to_s1\n# Input: arg1 = team_no, arg2 = class_no\n# Output: s1 = order_name\nstore_movement_order_name_to_s1 = (\n\t\"store_movement_order_name_to_s1\",\n\t\t\t[(store_script_param_1, \":team_no\"),\n\t\t\t\t(store_script_param_2, \":class_no\"),\n\t\t\t\t(team_get_movement_order, \":cur_order\", \":team_no\", \":class_no\"),\n\t\t\t\t(try_begin),\n\t\t\t\t\t(eq, \":cur_order\", mordr_hold),\n\t\t\t\t\t(str_store_string, s1, \"@Holding\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_follow),\n\t\t\t\t\t(str_store_string, s1, \"@Following\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_charge),\n\t\t\t\t\t(str_store_string, s1, \"@Charging\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_advance),\n\t\t\t\t\t(str_store_string, s1, \"@Advancing\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_fall_back),\n\t\t\t\t\t(str_store_string, s1, \"@Falling Back\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_stand_closer),\n\t\t\t\t\t(str_store_string, s1, \"@Standing Closer\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_spread_out),\n\t\t\t\t\t(str_store_string, s1, \"@Spreading Out\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", mordr_stand_ground),\n\t\t\t\t\t(str_store_string, s1, \"@Standing\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(str_store_string, s1, \"@N/A\"),\n\t\t\t\t(try_end),\n\t\t])\n\t\t\n# script_store_riding_order_name_to_s1\n# Input: arg1 = team_no, arg2 = class_no\n# Output: s1 = order_name\nstore_riding_order_name_to_s1 = (\n\t\"store_riding_order_name_to_s1\",\n\t\t\t[(store_script_param_1, \":team_no\"),\n\t\t\t\t(store_script_param_2, \":class_no\"),\n\t\t\t\t(team_get_riding_order, \":cur_order\", \":team_no\", \":class_no\"),\n\t\t\t\t(try_begin),\n\t\t\t\t\t(eq, \":cur_order\", rordr_free),\n\t\t\t\t\t(str_store_string, s1, \"@Free\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", rordr_mount),\n\t\t\t\t\t(str_store_string, s1, \"@Mount\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", rordr_dismount),\n\t\t\t\t\t(str_store_string, s1, \"@Dismount\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(str_store_string, s1, \"@N/A\"),\n\t\t\t\t(try_end),\n\t\t])\n\t\t\n# script_store_weapon_usage_order_name_to_s1\n# Input: arg1 = team_no, arg2 = class_no\n# Output: s1 = order_name\nstore_weapon_usage_order_name_to_s1 = (\n\t\"store_weapon_usage_order_name_to_s1\",\n\t\t\t[(store_script_param_1, \":team_no\"),\n\t\t\t\t(store_script_param_2, \":class_no\"),\n\t\t\t\t(team_get_weapon_usage_order, \":cur_order\", 
\":team_no\", \":class_no\"),\n\t\t\t\t(team_get_hold_fire_order, \":cur_hold_fire\", \":team_no\", \":class_no\"),\n\t\t\t\t(try_begin),\n\t\t\t\t\t(eq, \":cur_order\", wordr_use_any_weapon),\n\t\t\t\t\t(eq, \":cur_hold_fire\", aordr_fire_at_will),\n\t\t\t\t\t(str_store_string, s1, \"@Any Weapon\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", wordr_use_blunt_weapons),\n\t\t\t\t\t(eq, \":cur_hold_fire\", aordr_fire_at_will),\n\t\t\t\t\t(str_store_string, s1, \"@Blunt Weapons\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", wordr_use_any_weapon),\n\t\t\t\t\t(eq, \":cur_hold_fire\", aordr_hold_your_fire),\n\t\t\t\t\t(str_store_string, s1, \"str_hold_fire\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(eq, \":cur_order\", wordr_use_blunt_weapons),\n\t\t\t\t\t(eq, \":cur_hold_fire\", aordr_hold_your_fire),\n\t\t\t\t\t(str_store_string, s1, \"str_blunt_hold_fire\"),\n\t\t\t\t(else_try),\n\t\t\t\t\t(str_store_string, s1, \"@N/A\"),\n\t\t\t\t(try_end),\n\t\t])\n\t\t","repo_name":"admiralnelson/modded_modded_1257ad","sub_path":"script/functions/battle_ui.py","file_name":"battle_ui.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"38218770680","text":"# -*- coding: utf-8 -*-\n'''\n:Author: huangzehua@corp.netease.com\n:Create Date: 2021/12/29\n:file: some easy decorators\n'''\n\n\nimport inspect\nfrom FuncHandle import MockFuncManager\nfrom functools import wraps\n\nclass DecoratorManger(object):\n\t@staticmethod\n\tdef GetAllDecorators():\n\t\tres = {}\n\t\tfor varName, varObj in globals().iteritems():\n\t\t\tif varName.startswith(\"_\"):\n\t\t\t\tcontinue\n\t\t\tif inspect.isclass(varObj):\n\t\t\t\tfor attrName, attrValue in varObj.__dict__.iteritems():\n\t\t\t\t\tif attrName.startswith(\"_\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif attrName == \"GetAllDecorators\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif attrName == \"DebugLocalDecorator\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif isinstance(attrValue, staticmethod) or isinstance(attrValue, classmethod):\n\t\t\t\t\t\tres[attrName] = getattr(varObj, attrName, None)\n\t\treturn res\n\nclass DebugDecoratorBase(object):\n\t\"\"\"\n\t静态的修饰表示开始\n\t\"\"\"\n\t@staticmethod\n\tdef DebugFunc(func, LOGGER):\n\t\tLOGGER.append(\"DebugFuncStart\")\n\t\treturn func\n\n\nclass DebugLocalDecorator(DebugDecoratorBase):\n\t\"\"\"\n\t获取函数对应的locals\n\t\"\"\"\n\t@staticmethod\n\tdef DebugLocal(func, eventCallBack):\n\t\t@wraps(func if not (isinstance(func, staticmethod) or isinstance(func, classmethod)) else func.__func__)\n\t\tdef MockCallFunc(*args, **kwargs):\n\t\t\twarpFunc = MockFuncManager.GetWarpFunc(func, eventCallBack)\n\t\t\t# print \"funcfuncfunc\",func,func.__name__\n\t\t\t# print \"warpFuncwarpFuncwarpFunc\",warpFunc,warpFunc.__name__\n\t\t\tres = warpFunc(*args, **kwargs)\n\t\t\treturn res\n\t\treturn MockCallFunc\n\n\t@staticmethod\n\tdef DebugNewFunc(func, eventCallBack):\n\t\tnewFunc = MockFuncManager.GetWarpFunc(func, eventCallBack)\n\t\t# print \"funcfuncfunc\",func,func.__name__\n\t\t# print \"warpFuncwarpFuncwarpFunc\",newFunc,newFunc.__name__\n\t\treturn newFunc\n","repo_name":"whzhfly/mock_debug","sub_path":"FuncDecoratorHelper/FunctionDecorators.py","file_name":"FunctionDecorators.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"6062518826","text":"\"\"\"Work with files in Python\"\"\"\n\n\nfile = open(\n r\"/home/sergey/projects/learning_python/python/api.txt\", encoding=\"utf-8\"\n) # чтение файла при указании полного пути.\n# encoding=\"utf-8\" необходимо использовать при работе с кириллицей\n\n# file1 = open(\"api.txt\")\n# print(file1.read())\n\nprint(\n file.read(3)\n) # Прочитать только первые три символа. Программа запоминает место, где остановилась.\nprint(file.read(3)) # Прочитать следующие три символа\n\nprint(file.readline()) # Прочитать одну строку\nprint(file.readline()) # Прочитать следующую строку\n\nprint(\n file.read()\n) # При запуске прочитает весь файл до конца и далее чтение файла станет не доступно,\n# так как файл является итерируемым объектом.\n\n# NB!!! При работе с файлами необходимо закрывать файл при окончании работы с ним!!!\n# Если не закрыть файл, то при работе с ним, существует риск того, что память, которую\n# занимал файл при работе с ним не освободится.\n\nfile.close()\n\n# Однако при работе с файлом во время обработки может произойти ошибка и до метода объекта\n# file.close() интерпретатор может не дойти\n\n# По этому существует два варианта работы с файлами: через вызов try exept и через\n# менеджер контекста\n\n\n# С помощью цикла for возможно обойти все строки файла, т. к. файл является итерируемым объектом\nfile = open(r\"/home/sergey/projects/learning_python/python/api.txt\", encoding=\"utf-8\")\nfor row in file:\n print(row)\n\nfile.close()\n\n# Побуквенный обход каждой строки\nfile = open(r\"/home/sergey/projects/learning_python/python/api.txt\", encoding=\"utf-8\")\nfor row in file:\n for letter in row:\n print(letter)\n\nfile.close()\n\n# Метод создание списка из файла, элентами которого будут строки из файла\nfile = open(r\"/home/sergey/projects/learning_python/python/api.txt\", encoding=\"utf-8\")\nlst = file.readlines()\nprint(lst)\n\nfile.close()\n\n# Режим записи в файл. По умолчанию при открытии файла запись в него невозможна, т.к. файл открывается\n# в режиме только для чтения.\n\nfile_copy = open(\n r\"/home/sergey/projects/learning_python/python/api copy.txt\", \"r\", encoding=\"utf-8\"\n)\n# Параметр 'r' - read, который если не указывать будет по умолчанию дает права только на чтение.\n# file4.write(\"helo\") # выдаст ошибку, т.к. файл открыт только для чтения. io.UnsupportedOperation: not writable\n\nfile_copy.close()\n\nfile_copy = open(\n r\"/home/sergey/projects/learning_python/python/api copy.txt\", \"w\", encoding=\"utf-8\"\n)\n# При замене параметра на 'r' на 'w', файл становится доступным для перезаписи.\nfile_copy.write(\"helo\") # Полностью заменяет содержимое файла\n\nfile_copy.close()\n\nfile_copy = open(\n r\"/home/sergey/projects/learning_python/python/api copy.txt\", \"a\", encoding=\"utf-8\"\n)\nfile_copy.write(\" World\") # 'a' - режим запист в конец файла без перезаписи\n\n# Одновременная работа в нескольких режимах невозможна.\n# Существует режим 'a+', который позволяет одновременно работать в режиме записи в конец файла и чтении.\n\nfile_copy.close()\n\n# Менеджер контекста - специальная конструкция управления внешними ресурсами в Python\n# Внешние ресурсы: база данных, блокировки, файлы, сессии на сайтах итд.\n\n# Ко всем этим ресурсам мыдолжны вначале подключиться, а затем отключиться\n# Менеджер контекста контролирует, что нужно делать, когда мы получаем доступ к ресурсу, и что\n# нужно делать, когда нам этот ресурс уже не нужен\n\n# # NB!!! 
При работе с файлами необходимо закрывать файл при окончании работы с ним!!!\n# Однако при работе с файлом во время обработки может произойти ошибка и до метода объекта\n# file.close() интерпретатор может не дойти\n\nwith open( # open - в данном случае объект менеджера контекста\n \"/home/sergey/projects/learning_python/python/password.txt\", \"w\", encoding=\"utf-8\"\n) as f:\n f.write(\"123\")\n f.write(\"hello\")\nprint(\"end\")\n# f.write(\" World!!!\") #ValueError: I/O operation on closed file.\n\n# После блока with менеджер контекста автоматически закрывает файл\n\n# Не каждая функция поддерживает менеджер контекста, например функция print()\n# with print(\"Hello\") as f:\n# pass # AttributeError: __enter__ (у функции print не реализован такой магический метод)\n# Соответственно не каждый объект может быть объектом менеджера контекста\n\n# При этом можно создать свой собственный объект для менеджера контекста\n\n\n#\n","repo_name":"sergeymaksheev/leaning_python","sub_path":"python/2_function_open.py","file_name":"2_function_open.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
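The tutorial above closes by noting that you can create your own context manager object; a minimal sketch of the `__enter__`/`__exit__` protocol the `with` statement drives:

```python
class ManagedResource:
    def __enter__(self):
        print('acquire resource')
        return self  # bound to the name after `as`

    def __exit__(self, exc_type, exc_value, traceback):
        print('release resource')  # runs even if the block raised
        return False  # do not suppress exceptions

with ManagedResource() as resource:
    print('working with', resource)
```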
+{"seq_id":"43189075691","text":"import json\n\nimport jwt\nfrom aiohttp import web\nfrom aiohttp.web_middlewares import middleware\n\nfrom app.user import manager\n\nsecret = \"секретный секрет\"\n\n\n@middleware\nasync def protect_jwt(request, handler):\n\n white_list = ['/logs', '/get_token']\n if request.path not in white_list:\n token = request.headers.get('Authorization', None)\n if token:\n try:\n id_user = jwt.decode(token.split()[1], secret, algorithms=[\"HS256\"])\n request['user'] = id_user\n user_model = await manager.get_user(request)\n if user_model:\n request['user'].update({'user_model': user_model})\n return await handler(request)\n except jwt.exceptions.PyJWTError:\n return web.Response(body=json.dumps({'error_message': 'token is not valid'}), status=401)\n\n return web.Response(body=json.dumps({'error_message': 'token not found'}), status=401)\n\n return await handler(request)\n\n\nasync def get_token(request: web.Request):\n try:\n user_name = request.rel_url.query.get('user_name')\n if user_name:\n request['user'] = {'user_name': user_name}\n user_model = await manager.add_user(request)\n jwt_token = jwt.encode({\"user_id\": user_model.id}, secret, algorithm=\"HS256\")\n return web.Response(body=json.dumps({'token': 'Bearer '+jwt_token}), status=201)\n except Exception as _ex:\n raise _ex\n\n\n\n\n","repo_name":"Mesheryakof/neuron","sub_path":"middleware/jwt_protect.py","file_name":"jwt_protect.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"71031924639","text":"from __future__ import print_function\nimport os\nimport subprocess\nimport json\nimport time\nimport sys\nimport getopt\nfrom sys import stdout\nfrom subprocess import check_output\nfrom collections import defaultdict\nfrom __builtin__ import str\n\n\nCONF_FILE = './stomp.json'\nPOLICY = ['simple_policy_ver1', 'simple_policy_ver2', 'simple_policy_ver3', 'simple_policy_ver4', 'simple_policy_ver5']\nSTDEV_FACTOR = [ 0.01, 0.10, 0.50] # percentages\nARRIVE_SCALE = [ 1.0, 1.50, 2.0] # percentages\n\n#CONF_FILE = './stomp.json'\n#POLICY = ['simple_policy_ver1', 'simple_policy_ver2', 'simple_policy_ver3', 'simple_policy_ver4', 'simple_policy_ver5']\n#STDEV_FACTOR = [ 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] # percentages\n#ARRIVE_SCALE = [ 0.1, 0.2 , 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2] # percentages\n\n\ndef usage_and_exit(exit_code):\n stdout.write('\\nusage: run_all.py [--help] [--verbose] [--csv-out] [--save-stdout] [--pre-gen-tasks] [--arrival-trace] [--input-trace] [--user-input-trace]\\n\\n')\n sys.exit(exit_code)\n\n\n\ndef main(argv):\n\n try:\n opts, args = getopt.getopt(argv,\"hvcspaiu\",[\"help\", \"verbose\", \"csv-out\", \"save-stdout\", \"pre-gen-tasks\", \"arrival-trace\", \"input-trace\", \"user-input-trace\"])\n except getopt.GetoptError:\n usage_and_exit(2)\n\n verbose = False\n save_stdout = False\n pre_gen_tasks = False\n use_arrival_trace = False\n use_input_trace = False\n use_user_input_trace = False\n do_csv_output = False\n out_sep = '\\t'\n\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage_and_exit(0)\n elif opt in (\"-v\", \"--verbose\"):\n verbose = True\n elif opt in (\"-c\", \"--csv-out\"):\n do_csv_output = True\n out_sep = ','\n elif opt in (\"-s\", \"--save-stdout\"):\n save_stdout = True\n elif opt in (\"-p\", \"--pre-gen-tasks\"):\n pre_gen_tasks = True\n elif opt in (\"-a\", \"--arrival-trace\"):\n use_arrival_trace = True\n elif opt in (\"-i\", \"--input-trace\"):\n use_input_trace = True\n elif opt in (\"-u\", \"--user-input-trace\"):\n use_user_input_trace = True\n else:\n stdout.write('\\nERROR: Unrecognized input parameter %s\\n' % opt)\n usage_and_exit(3)\n \n if (use_arrival_trace and use_input_trace):\n stdout.write('\\nERROR: Cannot specify both arrival-trace and input-trace\\n')\n usage_and_exit(4)\n\n if (use_arrival_trace and use_user_input_trace):\n stdout.write('\\nERROR: Cannot specify both arrival-trace and user-input-trace\\n')\n usage_and_exit(4)\n\n if (use_user_input_trace and use_input_trace):\n stdout.write('\\nERROR: Cannot specify both use_user-input-trace and input-trace\\n')\n usage_and_exit(4)\n\n # Simulation directory\n sim_dir = time.strftime(\"sim_%d%m%Y_%H%M%S\")\n if os.path.exists(sim_dir):\n shutil.rmtree(sim_dir)\n os.makedirs(sim_dir)\n\n # This dict is used to temporarily hold the output from the\n # different runs. 
Everything is dumped to files later on.\n sim_output = {}\n\n start_time = time.time()\n num_executions = 0\n first_time = True\n\n # We open the JSON config file and update the corresponding\n # parameters directly in the stomp_params dicttionary\n with open(CONF_FILE) as conf_file:\n stomp_params = json.load(conf_file)\n\n stomp_params['general']['working_dir'] = os.getcwd() + '/' + sim_dir\n\n\n ###############################################################################################\n # MAIN LOOP\n for arr_scale in ARRIVE_SCALE:\n\n sim_output[arr_scale] = {}\n stomp_params['simulation']['arrival_time_scale'] = arr_scale\n\n for policy in POLICY:\n sim_output[arr_scale][policy] = {}\n\n for stdev_factor in STDEV_FACTOR:\n\n sim_output[arr_scale][policy][stdev_factor] = {}\n sim_output[arr_scale][policy][stdev_factor]['avg_resp_time'] = {}\n sim_output[arr_scale][policy][stdev_factor]['avg_resp_time_global'] = {}\n sim_output[arr_scale][policy][stdev_factor]['avg_wait_time_global'] = {}\n\n\n ###########################################################################################\n # Update the simulation configuration by updating\n # the specific parameters in the input JSON data\n stomp_params['simulation']['sched_policy_module'] = 'policies.' + policy \n for task in stomp_params['simulation']['tasks']:\n # Set the stdev for the service time\n for server, mean_service_time in stomp_params['simulation']['tasks'][task]['mean_service_time'].items():\n stdev_service_time = (stdev_factor*mean_service_time)\n stomp_params['simulation']['tasks'][task]['stdev_service_time'][server] = stdev_service_time\n\n stomp_params['general']['basename'] = 'policy:' + policy \\\n + '__stdev_factor:' + str(stdev_factor)\n conf_str = json.dumps(stomp_params)\n\n ###########################################################################################\n # Create command and execute the simulation\n\n command = ['./stomp_main.py' \n + ' -j \\'' + conf_str + '\\''\n ]\n\n command_str = ' '.join(command)\n\n if (pre_gen_tasks):\n command_str = command_str + ' -p'\n\n if (use_arrival_trace):\n if (policy == POLICY[0]) and (stdev_factor == STDEV_FACTOR[0]):\n command_str = command_str + ' -g generated_arrival_trace.trc'\n else:\n command_str = command_str + ' -a generated_arrival_trace.trc'\n\n if (use_input_trace):\n if (policy == POLICY[0]):\n command_str = command_str + ' -g generated_trace_stdf_' + str(stdev_factor) + '.trc'\n else:\n command_str = command_str + ' -i generated_trace_stdf_' + str(stdev_factor) + '.trc'\n\n if (use_user_input_trace):\n command_str = command_str + ' -i ../user_traces/user_gen_trace_stdf_' + str(stdev_factor) + '.trc'\n\n if (verbose):\n print('Running', command_str)\n\n sys.stdout.flush()\n output = subprocess.check_output(command_str, stderr=subprocess.STDOUT, shell=True)\n\n if (save_stdout):\n fh = open(sim_dir + '/run_stdout_' + policy + \"_arr_\" + str(arr_scale) + '_stdvf_' + str(stdev_factor) + '.out', 'w')\n\n ###########################################################################################\n # Parse the output line by line\n output_list = output.splitlines()\n i = 0\n for i in range(len(output_list)):\n if (save_stdout):\n fh.write('%s\\n' % (output_list[i]))\n if output_list[i].strip() == \"Response time (avg):\":\n line = output_list[i+1].strip()\n (key, value) = line.split(':')\n sim_output[arr_scale][policy][stdev_factor]['avg_resp_time_global'][key.strip()] = value.strip().split(' ')[0]\n for j in range(i+1, len(output_list)):\n line = 
output_list[j]\n if not line.strip():\n break\n (key, value) = line.split(':')\n sim_output[arr_scale][policy][stdev_factor]['avg_resp_time'][key.strip()] = value.strip()\n #sys.stdout.write('Set sim_output[%s][%s][%s][%s][%s] = %s\\n' % (arr_scale, policy, stdev_factor, 'avg_resp_time', key.strip(), value.strip()))\n elif output_list[i].strip() == \"Waiting time (avg):\":\n line = output_list[i+1].strip()\n (key, value) = line.split(':')\n sim_output[arr_scale][policy][stdev_factor]['avg_wait_time_global'][key.strip()] = value.strip().split(' ')[0]\n elif output_list[i].strip() == \"Histograms:\":\n line = output_list[i+1]\n histogram = line.split(':')[1]\n sim_output[arr_scale][policy][stdev_factor]['queue_size_hist'] = histogram.strip()\n\n elif \"Total simulation time:\" in output_list[i].strip():\n elems = output_list[i].split(\":\")\n #stdout.write('HERE: %s : %s : %s\\n' % (str(policy), str(stdev_factor), elems[1]))\n #stdout.write('%s\\n' % (output_list[i].strip()))\n #sys.stdout.flush()\n sim_output[arr_scale][policy][stdev_factor]['total_sim_time'] = elems[1]\n\n if (save_stdout):\n fh.close()\n num_executions += 1\n time.sleep(1)\n\n\n ###############################################################################################\n # Dump outputs to files\n\n # Average respose time\n if (do_csv_output):\n fh = open(sim_dir + '/avg_resp_time.csv', 'w')\n else:\n fh = open(sim_dir + '/avg_resp_time.out', 'w')\n for arr_scale in ARRIVE_SCALE:\n fh.write('Arrival_Scale %lf\\n' % arr_scale)\n for policy in sorted(sim_output[arr_scale].iterkeys()):\n fh.write('%s\\n' % policy)\n first_time = True\n for stdev_factor in sorted(sim_output[arr_scale][policy].iterkeys()):\n if first_time:\n # Print header\n fh.write(' Arr_scale%s Policy%s Stdev_Factor%s' % (out_sep, out_sep, out_sep))\n for key in sorted(sim_output[arr_scale][policy][stdev_factor]['avg_resp_time'].iterkeys()):\n fh.write('%s%s%s%s%s' % (key, out_sep, out_sep, out_sep, out_sep))\n fh.write('\\n')\n first_time = False\n # Print values\n fh.write(' %s%s%s%s%s%s' % (str(arr_scale), out_sep, policy, out_sep, str(stdev_factor), out_sep))\n for key in sorted(sim_output[arr_scale][policy][stdev_factor]['avg_resp_time'].iterkeys()):\n tl = sim_output[arr_scale][policy][stdev_factor]['avg_resp_time'][key].split()\n for tt in tl:\n fh.write('%s%s' % (tt, out_sep))\n fh.write('\\n')\n fh.write('\\n\\n')\n fh.close()\n\n # Average waiting time\n if (do_csv_output):\n fh = open(sim_dir + '/avg_wait_time.csv', 'w')\n else:\n fh = open(sim_dir + '/avg_wait_time.out', 'w')\n for arr_scale in ARRIVE_SCALE:\n fh.write('Arrival_Scale %lf\\n' % arr_scale)\n for policy in sorted(sim_output[arr_scale].iterkeys()):\n fh.write('%s\\n' % policy)\n first_time = True\n for stdev_factor in sorted(sim_output[arr_scale][policy].iterkeys()):\n if first_time:\n # Print header\n fh.write(' Arr_scale%s Policy%s Stdev_Factor%sAvg. Response Time (global)%sAvg. 
Waiting Time (global)\\n' % (out_sep, out_sep, out_sep, out_sep))\n first_time = False\n # Print values\n fh.write(' %s%s%s%s%s%s%s%s%s\\n' % (str(arr_scale), out_sep,\n policy, out_sep,\n str(stdev_factor), out_sep,\n sim_output[arr_scale][policy][stdev_factor]['avg_resp_time_global']['global'], out_sep,\n sim_output[arr_scale][policy][stdev_factor]['avg_wait_time_global']['global']))\n fh.write('\\n\\n')\n fh.close()\n\n # Queue size histogram\n if (do_csv_output):\n fh = open(sim_dir + '/queue_size_hist.csv', 'w')\n else:\n fh = open(sim_dir + '/queue_size_hist.out', 'w')\n for arr_scale in ARRIVE_SCALE:\n fh.write('Arrival_Scale %lf\\n' % arr_scale)\n for policy in sorted(sim_output[arr_scale].iterkeys()):\n fh.write('%s\\n' % policy)\n fh.write(' Arr_Scale%sStdev_Factor%sQueue_Histogram\\n' % (out_sep, out_sep))\n for stdev_factor in sorted(sim_output[arr_scale][policy].iterkeys()):\n fh.write(' %s%s%s%s' % (str(arr_scale), out_sep, str(stdev_factor), out_sep))\n tl = sim_output[arr_scale][policy][stdev_factor]['queue_size_hist'].replace(',',' ').split()\n for tt in tl:\n fh.write('%s%s' % (tt, out_sep))\n fh.write('\\n')\n fh.write('\\n\\n')\n fh.close()\n\n # Total Simulation Time\n if (do_csv_output):\n fh = open(sim_dir + '/total_sim_time.csv', 'w')\n else:\n fh = open(sim_dir + '/total_sim_time.out', 'w')\n for arr_scale in ARRIVE_SCALE:\n fh.write('Arrival_Scale %lf\\n' % arr_scale)\n for policy in sorted(sim_output[arr_scale].iterkeys()):\n fh.write('%s\\n' % policy)\n fh.write(' Arr_Scale%sStdev_Factor%sTotal_Sim_Time\\n' % (out_sep, out_sep))\n for stdev_factor in sorted(sim_output[arr_scale][policy].iterkeys()):\n fh.write(' %s%s%s%s' % (str(arr_scale), out_sep, str(stdev_factor), out_sep))\n tl = sim_output[arr_scale][policy][stdev_factor]['total_sim_time'].replace(',',' ').split()\n for tt in tl:\n fh.write('%s%s' % (tt, out_sep))\n fh.write('\\n')\n fh.write('\\n\\n')\n fh.close()\n\n\n elapsed_time = time.time() - start_time\n stdout.write('%d configurations executed in %.2f secs.\\nResults written to %s\\n' % (num_executions, elapsed_time, sim_dir))\n\n\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"IBM/stomp","sub_path":"utils/run_all.py","file_name":"run_all.py","file_ext":"py","file_size_in_byte":14259,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"51"}
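The sweep above works by mutating fields of the loaded JSON config and handing the whole document to the simulator as a quoted `-j` argument. A compact sketch of that pattern; the config keys mirror the ones touched in the record, and nothing is actually executed here:

```python
import json

# Hypothetical slice of the stomp.json structure modified in the sweep above.
conf = {'simulation': {'arrival_time_scale': 1.0, 'sched_policy_module': ''}}

for arr_scale in (1.0, 1.5, 2.0):
    conf['simulation']['arrival_time_scale'] = arr_scale
    conf['simulation']['sched_policy_module'] = 'policies.simple_policy_ver1'
    conf_str = json.dumps(conf)
    command_str = "./stomp_main.py -j '" + conf_str + "'"
    print(command_str)  # run_all.py passes strings like this to check_output
```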
+{"seq_id":"20615373018","text":"a, b = list(map(int, input().split()))\n\ndistancia, variavel_a, variavel_b, voltas = 0, 0, 0, 0\n\nwhile distancia < b:\n variavel_a += a\n variavel_b += b\n distancia = variavel_b - variavel_a\n voltas += 1\n\nprint(voltas)\n","repo_name":"mayara-canaver/Competitive-Programming","sub_path":"URI Online Judge - Beecrowd/Python 3/1467 - Zerinho ou Um - Python 3.py","file_name":"1467 - Zerinho ou Um - Python 3.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"it","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"}
+{"seq_id":"3823206849","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 12 17:58:24 2017\n\n@author: jonathanliono\n\"\"\"\n\nimport csv\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod\n\n\n# defining object for data stream\nclass CsvDsStream(object):\n def __init__(self,\n thefile,\n hasheader=True,\n ignored_headers=[],\n pick_only_headers=[],\n read_filters={},\n trailing_f_name=''):\n self.f = open(thefile, newline='')\n reader = csv.reader(self.f)\n self.filename = thefile\n self.headers = next(reader)\n self.filestream = reader\n self.headrow = None\n self.featureheaders = None\n self.ignored_headers = ignored_headers\n self.pick_only_headers = pick_only_headers\n self.read_filters = read_filters\n self.trailing_f_name = trailing_f_name\n self.window_buffer = [] # format of object is {'t':time, 'row': row}\n self.active = True\n self.t_key = None\n self.c_key = None\n\n def time_key(self):\n return self.headers[0] if self.t_key is None else self.headers[self.t_key]\n\n def class_label_key(self):\n return self.headers[len(self.headers) - 1] if self.c_key is None else self.headers[self.c_key]\n\n def set_time_key(self, time_key_to_set):\n isset = False\n for k, i in enumerate(self.headers):\n if i == time_key_to_set:\n self.t_key = k\n isset = True\n break\n\n return isset\n\n def set_class_label_key(self, class_label_key_to_set):\n isset = False\n for k, i in enumerate(self.headers):\n if i == class_label_key_to_set:\n self.c_key = k\n isset = True\n break\n\n return isset\n\n def next(self):\n nextrow = next(self.filestream)\n kv = {}\n for k, i in enumerate(self.headers):\n kv[i] = nextrow[k]\n\n return kv\n\n def close_stream(self):\n self.active = False\n self.f.close()\n\n def head_row(self):\n if self.headrow is None:\n self.headrow = self.next()\n\n return self.headrow\n\n def is_inactive(self):\n return not self.active\n\n def clear_head(self):\n self.headrow = None\n\n def feature_headers(self):\n if self.featureheaders is None:\n self.featureheaders = []\n for index, key_header in enumerate(self.headers):\n if (key_header != self.class_label_key()\n and key_header != self.time_key()\n and key_header not in self.ignored_headers):\n if len(self.pick_only_headers) > 0:\n if key_header in self.pick_only_headers:\n self.featureheaders.append(key_header)\n else:\n self.featureheaders.append(key_header)\n\n return self.featureheaders\n\n def add_itemrow_to_window_buffer(self, time, row, label):\n self.window_buffer.append({'t': time, 'row': row, 'label': label})\n\n def get_rows_from_window_buffer_after(self, time):\n rows = []\n labels = []\n for i in range(0, len(self.window_buffer)):\n row = self.window_buffer[i]\n if row['t'] >= time:\n rows.append(row['row'])\n labels.append(row['label'])\n\n return rows, labels\n\n def remove_rows_in_window_buffer_before(self, time):\n rows = []\n for i in range(0, len(self.window_buffer)):\n row = self.window_buffer[i]\n if row['t'] >= time:\n rows.append(row)\n\n self.window_buffer = rows\n\n\nclass DStreamFeatureConstructionInstanceLvlProxy(metaclass=ABCMeta):\n @abstractmethod\n def construct(self, instance_row): pass\n\n\nclass DStreamManager(object):\n def __init__(self):\n self.streams = []\n self.streams_func_summary_features = []\n self.streams_feature_constructions_instance_lvl = []\n self.starttime = None\n self.windowsize = None\n self.windowstep = None\n\n def is_inactive(self):\n isallinactive = True\n for stream in self.streams:\n if not stream.is_inactive():\n isallinactive = 
False\n                break\n\n        return isallinactive\n\n    def register(self, ds_stream, rules=None, f_constructions=None):\n        \"\"\"\n\n        :param ds_stream: data stream\n        :param rules: key-value pairs; the key \"default\" names the default rule, and each value should be an object with the attributes func_per_feature and func_per_feature_headers\n        :param f_constructions: key-value pairs for constructing new features from the features of the ds_stream param.\n            The key should be the name of the new constructed feature and the value the construction function\n            (applied per instance row with np.array()); its return value becomes the newly constructed column\n        \"\"\"\n        self.streams.append(ds_stream)\n        self.streams_func_summary_features.append(rules)\n        self.streams_feature_constructions_instance_lvl.append(f_constructions)\n\n    def define_temporal_sliding_window(self, windowsize=100, step=100):\n        self.windowsize = np.float128(windowsize)\n        self.windowstep = np.float128(step)\n        self.starttime = None\n\n    def temporal_segmentation(self, func_per_feature=None, func_per_feature_headers=None):\n        try:\n            self.windowsize\n            self.windowstep\n            self.starttime\n        except (AttributeError, NameError):\n            print(\"NOT DEFINED\")\n            return\n\n        if self.starttime is None:\n            for stream in self.streams:\n                time = np.float128(stream.head_row()[stream.time_key()])\n                if self.starttime is None or self.starttime > time:\n                    self.starttime = np.float128(time)\n                time = None\n\n        # set expected end time\n        endtime = self.starttime + self.windowsize\n        # print('Start time: ')\n        # print(repr(self.starttime))\n        # print('End time: ')\n        # print(endtime)\n        row_headers = []\n        row_result = []\n        row_result_labels = []\n        all_class_labels = []  # just for keeping track of all occurrences of labels.\n        for idx_stream, stream in enumerate(self.streams):\n            cachedrows, cachedlabels = stream.get_rows_from_window_buffer_after(self.starttime)\n            if len(cachedrows) == 0:\n                clmncnt = len(stream.feature_headers())\n                if self.streams_feature_constructions_instance_lvl[idx_stream] is not None \\\n                        and len(self.streams_feature_constructions_instance_lvl[idx_stream]) > 0:\n                    clmncnt += len(self.streams_feature_constructions_instance_lvl[idx_stream])\n                stream_array = np.array(cachedrows).reshape(0, clmncnt)\n            else:\n                stream_array = np.array(cachedrows)\n            stream_classlabel_array = cachedlabels\n            itemrow = None\n\n            if stream.is_inactive():\n                continue\n\n            while itemrow is None:\n                try:\n                    itemrow = stream.head_row()\n                except StopIteration:\n                    stream.active = False\n                    break\n                time = np.float128(itemrow[stream.time_key()])\n\n                if time >= self.starttime and time < endtime:\n                    row = []\n                    labels = []\n\n                    # filter mechanism\n                    if len(stream.read_filters) > 0:\n                        should_continue = False\n                        for fkey, fval in stream.read_filters.items():\n                            if itemrow[fkey] != fval:\n                                should_continue = True\n                                break\n\n                        if should_continue:\n                            itemrow = None\n                            stream.clear_head()\n                            continue\n\n                    for key_header in stream.headers:\n                        if key_header == stream.class_label_key():\n                            labels.append(itemrow[key_header])\n                        elif (key_header != stream.time_key()\n                                and key_header not in stream.ignored_headers):\n                            if len(stream.pick_only_headers) > 0:\n                                if key_header in stream.pick_only_headers:\n                                    row.append(np.float128(itemrow[key_header] if itemrow[key_header] else np.nan))\n                            else:\n                                row.append(np.float128(itemrow[key_header] if itemrow[key_header] else np.nan))\n\n                    if self.streams_feature_constructions_instance_lvl[idx_stream] is not None \\\n                            and len(self.streams_feature_constructions_instance_lvl[idx_stream]) > 0:\n                        for fckey, fcfunc in 
self.streams_feature_constructions_instance_lvl[idx_stream].items():\n if fcfunc is not None:\n # row.append(fcfunc.construct(itemrow)) # for implementation of proxy abstract class\n row.append(fcfunc(itemrow))\n else:\n row.append(np.nan)\n\n stream_array = np.append(stream_array, np.array([row]), axis=0)\n unique, pos = np.unique(np.array(labels), return_inverse=True)\n counts = np.bincount(pos)\n maxpos = counts.argmax()\n majoritylabel = unique[maxpos]\n stream_classlabel_array.append(majoritylabel)\n all_class_labels.append(majoritylabel)\n stream.add_itemrow_to_window_buffer(time, row, majoritylabel)\n itemrow = None\n stream.clear_head()\n\n if stream.is_inactive():\n continue\n\n rules = self.streams_func_summary_features[idx_stream]\n\n if rules is not None:\n defaultrule = rules['default']\n customrulekeys = list(rules['custom'].keys()) if 'custom' in rules else []\n if defaultrule['func_per_feature'] is not None and defaultrule['func_per_feature_headers'] is not None:\n feature_headers = list(stream.feature_headers()) # get a copy of feature headers rather than modifying it.\n if self.streams_feature_constructions_instance_lvl[idx_stream] is not None \\\n and len(self.streams_feature_constructions_instance_lvl[idx_stream]) > 0:\n feature_headers += list(self.streams_feature_constructions_instance_lvl[idx_stream].keys())\n\n for index, key_header in enumerate(feature_headers):\n incustomrule = any(key_header in s for s in customrulekeys)\n feature_matrix = stream_array[:, index]\n\n summary = defaultrule['func_per_feature'](feature_matrix) if not incustomrule else \\\n rules['custom'][key_header]['func_per_feature'](feature_matrix)\n row_result.extend(summary)\n headernamestouse = defaultrule['func_per_feature_headers']() if not incustomrule else \\\n rules['custom'][key_header]['func_per_feature_headers']()\n for header_summary in headernamestouse:\n trailingname = '' if stream.trailing_f_name == '' else '_' + str(stream.trailing_f_name)\n row_headers += [key_header + trailingname + '_' + header_summary]\n\n else:\n if func_per_feature is not None and func_per_feature_headers is not None:\n feature_headers = stream.feature_headers()\n if self.streams_feature_constructions_instance_lvl[idx_stream] is not None \\\n and len(self.streams_feature_constructions_instance_lvl[idx_stream]) > 0:\n feature_headers += list(self.streams_feature_constructions_instance_lvl[idx_stream].keys())\n\n for index, key_header in enumerate(feature_headers):\n feature_matrix = stream_array[:, index]\n summary = func_per_feature(feature_matrix)\n row_result.extend(summary)\n for header_summary in func_per_feature_headers():\n trailingname = '' if stream.trailing_f_name == '' else '_' + str(stream.trailing_f_name)\n row_headers += [key_header + trailingname + '_' + header_summary]\n\n stream_classlabel_array = list(filter(''.__ne__, stream_classlabel_array))\n if len(stream_classlabel_array) > 0:\n unique, pos = np.unique(np.array(stream_classlabel_array), return_inverse=True)\n counts = np.bincount(pos)\n maxpos = counts.argmax()\n majoritylabel = unique[maxpos]\n row_result_labels.append(majoritylabel)\n\n self.starttime = self.starttime + self.windowstep\n for stream in self.streams:\n stream.remove_rows_in_window_buffer_before(self.starttime)\n\n if len(row_result_labels) > 0:\n unique, pos = np.unique(np.array(row_result_labels), return_inverse=True)\n counts = np.bincount(pos)\n maxpos = counts.argmax()\n majoritylabel = unique[maxpos]\n if isinstance(majoritylabel, list):\n majoritylabel = 
majoritylabel[0]\n else:\n majoritylabel = ''\n\n # check row result before returning\n if (len(row_result) == 0 or all(np.isnan(value) for value in row_result)) and majoritylabel == '':\n if self.is_inactive():\n raise TimeoutError('no more to segment')\n else:\n return None, row_headers, None, []\n\n return row_result, row_headers, majoritylabel, all_class_labels\n","repo_name":"cruiseresearchgroup/OPTWIN","sub_path":"code/python/windowing/sliding_window.py","file_name":"sliding_window.py","file_ext":"py","file_size_in_byte":14289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
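A hypothetical usage sketch for the classes above (the CSV file name and window sizes are assumptions, not from the repo): register one CsvDsStream with a default summary rule, then pull windows until every stream is drained.

```python
import numpy as np

stream = CsvDsStream('sensor.csv')  # time column first, class label last (the defaults)
manager = DStreamManager()
manager.register(stream, rules={'default': {
    'func_per_feature': lambda col: [np.nanmean(col)],   # one summary value per feature
    'func_per_feature_headers': lambda: ['mean'],        # and its header suffix
}})
manager.define_temporal_sliding_window(windowsize=100, step=50)

while True:
    try:
        row, headers, label, _ = manager.temporal_segmentation()
    except TimeoutError:   # raised once every stream is inactive and drained
        break
```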
+{"seq_id":"7954124139","text":"\n\"\"\"\nGaudenz Halter\nUniversity of Zurich\nJune 2018\n\n\"\"\"\nimport numpy as np\n\nfrom vian.core.data.interfaces import IAnalysisJob, VisualizationTab, ParameterWidget, DataSerialization, TimelineDataset\nfrom vian.core.container.project import MOVIE_DESCRIPTOR, BaseProjectEntity, VIANProject, SEGMENT\nfrom vian.core.container.analysis import IAnalysisJobAnalysis\n\nfrom vian.core.analysis.color.palette_extraction import *\nfrom vian.core.container.hdf5_manager import vian_analysis\nfrom vian.core.visualization.palette_plot import *\n\nimport librosa\nfrom vian.core.analysis.misc import preprocess_frame\n\n\"\"\"\narray Structure: \n\n d = np.zeros(shape=1)\n\n\"\"\"\n\n\n@vian_analysis\nclass OpticalFlowAnalysis(IAnalysisJob):\n def __init__(self, resolution=30):\n super(OpticalFlowAnalysis, self).__init__(\"Optical Flow\", [MOVIE_DESCRIPTOR],\n menu = IAnalysisJob.M_MOVEMENT,\n dataset_name=\"OpticalFlow\",\n dataset_shape=(1,),\n dataset_dtype=np.float16,\n author=\"Gaudenz Halter\",\n version=\"1.0.0\",\n multiple_result=False,\n data_serialization=DataSerialization.HDF5_SINGLE)\n self.resolution = resolution\n\n def prepare(self, project: VIANProject, targets: List[BaseProjectEntity], fps, class_objs=None):\n \"\"\"\n This function is called before the analysis takes place. Since it is in the Main-Thread, we can access our project,\n and gather all data we need.\n\n \"\"\"\n fps = project.movie_descriptor.fps\n targets, args = super(OpticalFlowAnalysis, self).prepare(project, targets, fps, class_objs)\n self.movie_path = project.movie_descriptor.movie_path\n self.margins = project.movie_descriptor.get_letterbox_rect(as_coords=True)\n return args\n\n def process(self, argst, sign_progress):\n args, sign_progress = super(OpticalFlowAnalysis, self).process(argst, sign_progress)\n print(argst)\n # Signal the Progress\n sign_progress(0.0)\n\n movie_path = self.movie_path\n margins = self.margins\n\n cap = cv2.VideoCapture(movie_path)\n length = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n start = 0\n stop = int(length)\n prvs = None\n\n magnitudes = np.zeros(shape=int(np.ceil(stop / self.resolution)))\n idx = 0\n for i in range(start, stop, self.resolution):\n sign_progress((i - start) / ((stop - start) + 1))\n\n cap.set(cv2.CAP_PROP_POS_FRAMES, i)\n ret, frame = cap.read()\n\n if frame is None:\n break\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if margins is not None:\n frame = frame[margins[1]:margins[3], margins[0]:margins[2]]\n\n preprocess_frame(frame, self.max_width)\n\n if prvs is None:\n prvs = frame\n\n flow = cv2.calcOpticalFlowFarneback(prvs, frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n\n magnitudes[idx] = np.mean(mag)\n idx += 1\n\n prvs = frame\n\n magnitudes[magnitudes == np.inf] = 0\n magnitudes = np.nan_to_num(magnitudes)\n\n result = IAnalysisJobAnalysis(\n name=\"Optical Flow\",\n results=magnitudes,\n analysis_job_class=self.__class__,\n parameters=dict(resolution=self.resolution),\n container=None\n )\n\n return result\n\n def modify_project(self, project: VIANProject, result: IAnalysisJobAnalysis, main_window=None):\n \"\"\"\n This Function will be called after the processing is completed.\n Since this function is called within the Main-Thread, we can modify our project here.\n \"\"\"\n\n super(OpticalFlowAnalysis, self).modify_project(project, result, main_window)\n\n def get_preview(self, analysis: IAnalysisJobAnalysis):\n \"\"\"\n This should return the Widget that is 
shown in the Inspector when the analysis is selected\n \"\"\"\n w = QWidget()\n lt = QGridLayout(w)\n w.setLayout(lt)\n\n lt.addWidget(QLabel(\"Lab:\"), 0, 0)\n lbl1 = QLabel(str(analysis.get_adata()['color_lab']).replace(\"[\", \"(\").replace(\"]\", \")\"))\n lt.addWidget(lbl1, 0, 1)\n\n lt.addWidget(QLabel(\"RGB:\"), 1, 0)\n lbl2 = QLabel(str(analysis.get_adata()['color_bgr'][::-1]).replace(\"[\", \"(\").replace(\"]\", \")\"))\n lt.addWidget(lbl2, 1, 1)\n\n lt.addWidget(QLabel(\"LCH:\"), 2, 0)\n lbl3 = QLabel(str(lab_to_lch(analysis.get_adata()['color_lab'])).replace(\"[\", \"(\").replace(\"]\", \")\"))\n lt.addWidget(lbl3, 2, 1)\n\n view = EGraphicsView(w)\n view.set_image(numpy_to_pixmap(np.array(([[analysis.get_adata()['color_bgr']] * 100] * 25)).astype(np.uint8)))\n lt.addWidget(view, 3, 0, 1, 2)\n return w\n\n def get_visualization(self, analysis, result_path, data_path, project, main_window):\n \"\"\"\n This function should show the complete Visualization\n \"\"\"\n return []\n\n def get_timeline_datasets(self, analysis, project) -> List[TimelineDataset]:\n ms_to_idx = 1000 / (project.movie_descriptor.fps / self.resolution)\n\n return [\n TimelineDataset(\"Optical Flow (mean)\", analysis.get_adata(), ms_to_idx)\n ]\n\n def get_hdf5_description(self):\n #TODO\n return dict(\n title=\"Average Color Values\",\n description=\"Contains a list of average color values. \",\n color_space=\"CIELab, BGR\",\n dimensions=\"1st: index of the feature vector \\\\ \"\n \" [0]: Average Value: Luminance (CIELab) {0.0, ..., 100.0 }\\\\\"\n \" [1] Average Value: A-Channel (CIELab) {-128.0, ..., 128.0}\\\\\"\n \" [2] Average Value: B-Channel (CIELab) {-128.0, ..., 128.0}\\\\\"\n \" [3] Average Value: B-Channel (BGR) {0, ..., 255}\\\\\"\n \" [4] Average Value: G-Channel (BGR) {0, ..., 255}\\\\\"\n \" [5] Average Value: R-Channel (BGR) {0, ..., 255}\\\\\"\n \" [6] Average Value: Luebbe Saturation (BGR) {0, ..., 1.0}, \"\n \"(Deprecated, this will be removed at some point)\\\\\"\n \" [7] Average Value: Experimental Saturation (BGR) {0, ..., 1.0}, \"\n \"(Deprecated, this will be removed at some point)\\\\\"\n )\n\n def to_hdf5(self, data):\n return data\n\n def from_hdf5(self, db_data):\n return db_data\n\n\n","repo_name":"FilmColors/VIAN","sub_path":"vian/core/analysis/motion/optical_flow.py","file_name":"optical_flow.py","file_ext":"py","file_size_in_byte":6844,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"51"}
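The heart of process() is the mean Farneback flow magnitude between consecutive frames. A stand-alone illustration on synthetic frames (so it runs without a video file; the parameter tuple is the same one the analysis passes):

```python
import cv2
import numpy as np

prev = np.random.randint(0, 255, (120, 160), dtype=np.uint8)
curr = np.roll(prev, 3, axis=1)  # shift 3 px right -> uniform horizontal motion

flow = cv2.calcOpticalFlowFarneback(prev, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
print('mean flow magnitude:', float(np.mean(mag)))  # roughly the 3 px shift
```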
+{"seq_id":"71358588319","text":"# -*- coding: utf-8 -*-\n__author__ = 'lihe '\n__description__ = '''\n'''\n\nimport os\nimport sys\nimport socket\nimport time\nimport uuid\nimport random\n\napp_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(app_root)\n\nimport paho.mqtt.publish as publish\nimport paho.mqtt.client as mqtt_client\nfrom paho.mqtt.client import MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING, MQTT_LOG_ERR, MQTT_LOG_DEBUG\nfrom logzero import logger as log\n\n__all__ = [\n 'Publisher',\n 'Topic',\n 'TopicConsumer',\n 'push_to_topic',\n]\n\n'''topic encapsulation'''\n\n\nclass Topic(object):\n \"\"\"\n init ``topic`` configs\n\n .. code:: python\n\n # required\n conf = {\n \"hostname\": \"127.0.0.1\",\n \"port\": 1883,\n \"username\": \"\",\n \"password\": \"\"\n }\n\n \"\"\"\n\n def __init__(self, conf=None):\n conf = conf if conf else {}\n self.conf = {\n 'hostname': conf.get('hostname', 'localhost'),\n 'port': conf.get('port', 1883),\n 'auth': {\n 'username': conf.get('username', 'admin'),\n 'password': conf.get('password', 'admin'),\n }\n }\n\n def run(self, handler=None):\n pass\n\n\nclass Publisher(Topic):\n \"\"\"\n simple single publish mode, no persist connection required\n\n \"\"\"\n\n def run(self, dat=None):\n \"\"\"\n publish\n\n - e.g.:\n\n .. code:: python\n\n t = {\n 'hostname': '192.168.1.2',\n 'port': 1883,\n 'username': 'admin',\n 'password': 'admin',\n }\n Publisher(t).run({'payload': json.dumps(t)}) if pub else TopicConsumer(t).run()\n\n :param dat:\n :type dat:\n :return:\n :rtype:\n \"\"\"\n conf = self.conf\n dat = dat if dat else {}\n publish.single(topic=dat.get('topic', 'test_topic'),\n payload=dat.get('payload', 'just a test topic payload'),\n qos=dat.get('qos', 0),\n retain=dat.get('retain', False),\n hostname=conf['hostname'],\n port=conf['port'],\n auth=conf['auth'])\n\n\nclass TopicConsumer(Topic):\n \"\"\"\n ``topic`` consumer\n\n .. 
code:: python\n\n def topic_test(pub=True):\n t = {\n 'hostname': '192.168.1.2',\n 'port': 1883,\n 'username': 'admin',\n 'password': 'admin',\n }\n Publisher(t).run({'payload': json.dumps(t)}) if pub else TopicConsumer(t).run()\n\n \"\"\"\n\n def __init__(self, conf=None, loop_to=1.0):\n super().__init__(conf)\n self.loop_to = loop_to\n\n def run(self, handler=None):\n while True:\n try:\n conf = self.conf\n handler = handler if handler else {}\n client_ = mqtt_client.Client(client_id=getattr(handler, 'cid', hex(uuid.getnode())),\n clean_session=getattr(handler, 'clean_session', True))\n client_.username_pw_set(conf['auth']['username'], conf['auth']['password'])\n client_.connect(conf['hostname'], conf['port'])\n client_.on_log = getattr(handler, 'on_log', _on_log)\n client_.on_connect = getattr(handler, 'on_connect', _on_connect)\n client_.on_message = getattr(handler, 'on_message', _on_message)\n client_.on_disconnect = getattr(handler, 'on_disconnect', _on_disconnect)\n client_.loop_forever(timeout=self.loop_to)\n except socket.error as err:\n # if failed connect to server, will retry periodically until connected\n log.error('socket error: {}'.format(err))\n # wait period 2s\n time.sleep(1.5 + random.random())\n continue\n except Exception as ee:\n log.error('topic error: {}'.format(ee))\n break\n\n\ndef push_to_topic(dst, dat, qos=0, retain=False, cfgs=None):\n cfg_amqt = {\n 'hostname': cfgs['hostname'],\n 'port': cfgs['port'],\n 'username': cfgs['username'],\n 'password': cfgs['password'],\n }\n\n msg = {\n 'topic': dst,\n 'payload': dat,\n 'qos': qos,\n 'retain': retain,\n }\n try:\n Publisher(cfg_amqt).run(msg)\n return True\n except Exception as err:\n log.error('push to amqt: {}'.format(err))\n return False\n\n\n''' topic default functions '''\n\n\ndef _on_log(client, userdata, level, buf):\n if level == MQTT_LOG_INFO:\n head = 'INFO'\n elif level == MQTT_LOG_NOTICE:\n head = 'NOTICE'\n elif level == MQTT_LOG_WARNING:\n head = 'WARN'\n elif level == MQTT_LOG_ERR:\n head = 'ERR'\n elif level == MQTT_LOG_DEBUG:\n head = 'DEBUG'\n else:\n head = level\n log.info('%s: %s' % (head, buf))\n\n\ndef _on_connect(client, userdata, flags, rc):\n \"\"\" default on connection callback \"\"\"\n log.debug('[MQTT] Connected with result code ' + str(rc))\n client.subscribe('test_topic', qos=2)\n\n\ndef _on_message(client, userdata, msg):\n \"\"\" default on message callback \"\"\"\n log.debug(msg.payload)\n\n\ndef _on_disconnect(client, userdata, rc):\n if rc != 0:\n log.debug('Unexpected disconnection %s' % rc)\n","repo_name":"coghost/iamq","sub_path":"amq/amq.py","file_name":"amq.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
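A hedged usage sketch for push_to_topic(); the broker address, credentials and topic are placeholders, not values from the repo:

```python
cfgs = {
    'hostname': '127.0.0.1',
    'port': 1883,
    'username': 'admin',
    'password': 'admin',
}
ok = push_to_topic('sensors/temperature', '{"value": 21.5}', qos=1, cfgs=cfgs)
print('published' if ok else 'publish failed')  # False if the broker is unreachable
```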
+{"seq_id":"15306862536","text":"from nh_sys.settings.base import (\n ALLOWED_HOSTS,\n CORS_ALLOWED_ORIGIN_REGEXES,\n CORS_ALLOWED_ORIGINS,\n CORS_ORIGIN_WHITELIST,\n)\n\nDEBUG = True\n\nALLOWED_HOSTS += [\n \".herokuapp.com\",\n \"https://biomedsys-production.up.railway.app\",\n \".railway.app\",\n]\n\nCORS_ALLOWED_ORIGINS += [\n \"https://nh_sys-fend-pwa.herokuapp.com\",\n \"https://nh_sys-proxy.herokuapp.com\",\n \"https://nh-sys.netlify.app\",\n]\n\nCORS_ALLOWED_ORIGIN_REGEXES += [\n r\"^(http?:\\/\\/)?((localhost)|(127\\.0\\.0\\.1)):3\\d{3}\",\n r\"^(http?:\\/\\/)?((localhost)|(127\\.0\\.0\\.1)):5\\d{3}\",\n r\"^https:\\/\\/nh-sys-*\",\n \"https://biomedsys-production.up.railway.app\",\n r\"^https:\\/\\/biomedsys-*\",\n r\"(^|^[^:]+:\\/\\/|[^\\.]+\\.)nh-sys\\.co\\.ke\",\n]\nCORS_ORIGIN_WHITELIST += [\n \"https://biomedsys-production.up.railway.app\",\n # add any other domains or subdomains that you want to allow here\n]\n","repo_name":"EdwinAtieno/biomed_sys","sub_path":"nh_sys/settings/staging.py","file_name":"staging.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"3832325834","text":"import datetime\nimport re\n\nfrom crawler.management.commands.helpers.base_scrapper import BaseScrapper\nfrom crawler.models import RestaurantScraperConfig\nfrom crawler.scrapers.beautifulsoup_wrapper import BeautifulSoupWrapper\nfrom crawler.scrapers.restaurant_duplicates_checker import RestaurantDuplicatesChecker\nfrom restaurants.models import Restaurant\n\n\nclass MenuPrahaRestaurantParser():\n def __init__(self, url):\n self.soup = BeautifulSoupWrapper(url)\n\n def get_name(self):\n text = self.soup.find('h1')\n\n if text is None:\n return None\n\n return text.text\n\n def get_address(self):\n address_wrapper = self.soup.find('h3', itemprop='address')\n\n if address_wrapper is None:\n return None\n\n street = address_wrapper.find('span', itemprop='streetAddress').text\n city = address_wrapper.find('span', itemprop='addressLocality').text\n\n map_link = address_wrapper.find('a')['href']\n\n gps = re.search(r'([0-9]{1,2}\\.[0-9]+,[0-9]{1,2}\\.[0-9]+)', map_link)\n\n if gps is None:\n gps = (None, None)\n else:\n gps = gps.group(1).split(',') if ',' in gps.group(1) else (None, None)\n\n return {\n \"address\": street + \", \" + city,\n \"gps\": {\n \"lat\": float(gps[0]) if gps[0] is not None else None,\n \"lng\": float(gps[1])if gps[1] is not None else None\n }\n }\n\n def get_restaurant(self):\n return {\n \"name\": self.get_name(),\n \"locality\": self.get_address(),\n }\n\n\nclass Command(BaseScrapper):\n help = 'Scrape recipes websites and save new recipes'\n\n def handle(self, *args, **kwargs):\n soup = BeautifulSoupWrapper(\"https://menupraha.cz/sitemap.xml\")\n\n checker = RestaurantDuplicatesChecker()\n\n urls = [restaurant_url.text for restaurant_url in soup.select('loc')]\n urls = list(filter(lambda x: re.search(r'restaurace/[0-9]+', x), urls))\n\n for url in urls:\n parser = MenuPrahaRestaurantParser(url)\n details = parser.get_restaurant()\n\n if details is None:\n continue\n\n if checker.already_exists(details['name'],\n (details['locality']['gps']['lat'], details['locality']['gps']['lng'])):\n print(details['name'] + ' se zda ze existuje')\n continue\n\n restaurant = Restaurant(\n name=details['name'],\n menu_url=url,\n address=details['locality']['address'],\n gps_lat=details['locality']['gps']['lat'],\n gps_lng=details['locality']['gps']['lng']\n )\n restaurant.save()\n\n print(details['name'])\n\n config = RestaurantScraperConfig(\n restaurant=restaurant,\n scraper_parameters=url,\n next_visit=datetime.date.today(),\n next_visit_interval=1,\n menu_scraper='daily_menu.MenuPraha'\n )\n\n config.save()\n\n # return\n","repo_name":"michalkvacek/at-chutna","sub_path":"daily_menu/crawler/management/commands/menupraha_restaurants.py","file_name":"menupraha_restaurants.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"20916276420","text":"# coding: utf-8\nfrom tkinter import *\nimport copy\nimport random\nimport pickle\nimport os.path\n\n\ndef createSpace(rida, veerg):\n taustavarv = root.cget('bg')\n Vahe = Text(root, height=2, width=5, background=taustavarv, bd=0)\n Vahe.grid(row=rida, column=veerg, padx=0, pady=0)\n\n\ndef createBoard():\n leftBoardStartCol = 1\n rightBoardStartCol = boardWidth + 2\n createSpace(1, 0)\n # vasakpoolne mänguväli\n # sinna paigutad alguses oma laevad\n # praegune loogika: kõigepealt viiene laev, siis neljane, kolmene, kahene\n for i in range(boardWidth):\n for j in range(boardWidth):\n button = Button(root, font=\"Times 13\", height=1, width=3, bg = boardButton)\n button.grid(row=j + 1, column=i + leftBoardStartCol, padx=0, pady=0)\n button.configure(command=lambda i=i, j=j: pressButtonLeft(j, i))\n buttonsLeft[(j, i)] = button\n\n createSpace(1, leftBoardStartCol + boardWidth)\n\n # parempoolne mänguväli, sealt hakkad vastase laevu otsima hiljem\n for i in range(boardWidth):\n for j in range(boardWidth):\n button = Button(root, font=\"Times 13\", height=1, width=3, bg = boardButton)\n button.grid(row=j + 1, column=i + rightBoardStartCol, padx=0, pady=0)\n button.configure(command=lambda i=i, j=j: pressButtonRight(j, i), state='disabled')\n buttonsRight[(j, i)] = button\n\n createSpace(1, rightBoardStartCol + boardWidth)\n\n # print(buttonsLeft) # Debug\nfrom random import randint\n\ndef randomise():\n global player6, player5, player4, player3, player2\n boats = 6\n while(boats >= 2):\n randomX = randint(0, 9)\n randomY = randint(0, 9)\n direction = randint(1, 4)\n zeroPoint = (randomX, randomY)\n #if not zeroPoint in playerAllBoatsCoordinates:\n # if (direction == 1):\n # print(\"\")\n\ndef pressButtonLeft(row, col):\n global player6, player5, player4, player3, player2\n button = buttonsLeft[(row, col)] # see nupp mida vajutati\n\n if button['text'] == \"\" and (row, col) not in forbiddenButtonsForBoats: # kui nuppu veel ei ole ära märgistatud\n button.configure(text=\"O\", bg=\"black\")\n current.append((row, col))\n elif button['text'] == \"O\" and (row, col) not in playerAllBoatsCoordinates:\n # kui on märgistatud aga veel ei kuulu laevale -> siis lubame muuta (kui kuulub juba laevale, ei lase enam muuta) võib hiljem edasi arendada\n button.configure(text=\"\", bg= boardButton)\n current.remove((row, col))\n\n if len(player6) == 0 and isBoat(6):\n # kõigepelt viiene laev (ja viiest laeva veel ei ole)\n mangijaTeade.configure(text='Position your ship of length 5. ')\n for b in current:\n buttonsLeft[b].configure(background = color2) # märgistab nupud teise värviliseks\n player6 = copy.deepcopy(current) # salvestab laeva punktid\n for e in player6:\n playerAllBoatsCoordinates.append(e)\n current.clear()\n\n if len(player6) != 0 and len(player5) == 0 and isBoat(5):\n # kõigepelt viiene laev (ja viiest laeva veel ei ole)\n mangijaTeade.configure(text='Position your ship of length 4. ')\n for b in current:\n buttonsLeft[b].configure(background = color2) # märgistab nupud teise värviliseks\n player5 = copy.deepcopy(current) # salvestab laeva punktid\n for e in player5:\n playerAllBoatsCoordinates.append(e)\n current.clear()\n elif len(player6) != 0 and len(player5) != 0 and len(player4) == 0 and isBoat(4):\n # siis neljane laev (eeldusel, et viiene on juba olemas)\n mangijaTeade.configure(text='Position your ship of length 3. 
')\n for b in current:\n buttonsLeft[b].configure(background = color2)\n player4 = copy.deepcopy(current)\n for e in player4:\n playerAllBoatsCoordinates.append(e)\n current.clear()\n elif len(player6) != 0 and len(player5) != 0 and len(player4) != 0 and len(player3) == 0 and isBoat(3):\n mangijaTeade.configure(text='Position your ship of length 2. ')\n for b in current:\n buttonsLeft[b].configure(background = color2)\n player3 = copy.deepcopy(current)\n for e in player3:\n playerAllBoatsCoordinates.append(e)\n current.clear()\n elif len(player6) != 0 and len(player5) != 0 and len(player4) != 0 and len(player3) != 0 and len(player2) == 0 and isBoat(2):\n for b in current:\n buttonsLeft[b].configure(background = color2)\n player2 = copy.deepcopy(current)\n for e in player2:\n playerAllBoatsCoordinates.append(e)\n current.clear()\n\n allBoatsAreChosen_EnableBombingMode()\n updateForbiddenButtonsForPlayer()\n\n\ndef updateForbiddenButtonsForPlayer():\n global playerAllBoatsCoordinates, forbiddenButtonsForBoats\n for elem in playerAllBoatsCoordinates:\n surroundingButtons = []\n zeroPoint = (elem[0] - 1, elem[1] - 1)\n for y in range(3):\n for x in range(3):\n point = (zeroPoint[0] + y, zeroPoint[1] + x)\n surroundingButtons.append(point)\n if not point in forbiddenButtonsForBoats:\n if point in buttonsLeft:\n forbiddenButtonsForBoats.append(point)\n buttonsLeft[point].configure(state='disabled')\n\n\ndef allBoatsAreChosen_EnableBombingMode():\n global playerBoats, playerAllBoatsCoordinates\n if len(player6) != 0 and len(player5) != 0 and len(player4) != 0 and len(player3) != 0 and len(player2) != 0:\n mangijaTeade.configure(text=' ')\n print(\"All set!\")\n playerBoats = [player6, player5, player4, player3, player2]\n for b in buttonsLeft:\n buttonsLeft[b].configure(state='disabled') # vasakpoolsele mänguväljale ei saa enam klikkida\n teade.configure(text='Bombs away!')\n for e in buttonsRight:\n buttonsRight[e].configure(state='normal') # lubame klikkimise parempoolsel mänguväljal\n for e in playerBoats:\n playerSunkedBoats[str(e)] = False\n\n\ndef isBoat(length):\n global current\n x_id = set()\n y_id = set()\n for e in current:\n x_id.add(e[0])\n y_id.add(e[1])\n if len(x_id) == 1: # kõikide x'ide koordinaadid on võrdsed\n min, max = getMaxMin(y_id)\n if max - min == (length - 1) and len(y_id) == length: # nupud on järjest ja õige pikkusega\n return True\n elif len(y_id) == 1: # kõikide y'ite koordinaadid on võrdsed\n min, max = getMaxMin(x_id)\n if max - min == (length - 1) and len(x_id) == length: # nupud on järjest ja õige pikkusega\n return True\n\n\ndef getMaxMin(list): # leiab listist maximumi ja minimumi\n min = 10\n max = -1\n for el in list:\n if int(el) < min:\n min = el\n if int(el) > max:\n max = el\n return min, max\n\n\ndef pressButtonRight(row, col):\n if (row, col) not in playerGuesses: # ei ole sinna kasti veel klikitud\n playerGuesses.append((row, col))\n # Lisame mängija pakkumise kaalutud väärtustega pakkumiste sõnastikku\n global guessCounter, allGuesses\n if ((row, col) not in allGuesses):\n allGuesses[(row, col)] = 0\n allGuesses[(row, col)] += guessWeights[guessCounter]\n guessCounter += 1\n # print(allGuesses) # Debug\n\n if (row, col) in AIAllBoatsCoordinates:\n global playerHitGuesses\n playerHitGuesses.append((row, col)) # Koordinaadi lisamine mängija jooksvate pihta saamiste hulka\n # print('playerHitGuesses:', playerHitGuesses) # Debug\n buttonsRight[(row, col)].configure(text=\"X\")\n buttonsRight[(row, col)].configure(background = color6) # Pihta läinud ruut 
saab teise tooni\n aiTeade.configure(text=\"HIT!\") # Teavitus\n checkDidBoatSunk()\n checkIfGameOver()\n # Kuna mängija sai AI laevale pihta, siis saab mängija uuesti pakkuda\n else:\n buttonsRight[(row, col)].configure(text=\"O\")\n buttonsRight[(row, col)].configure(background = color1) # Mööda läinud ruut värvitakse valgeks\n aiTeade.configure(text=\"MISS!\") # Teavitus\n # AI saab pakkuda ainult siis kui mängija pakkus mööda\n # checkIfGameOver()\n doAIMove()\n # Enne järgmist käiku kontrollime, kas mäng on läbi\n # checkIfGameOver() # Põhjustab AI võidu puhul topeltlõpetamise (sh. duubelread result.txt failis)???\n\n\ndef checkDidBoatSunk():\n for paat in AIBoats:\n if all(x in playerHitGuesses for x in paat):\n # kogu paat põhjas -> värvime ära\n for b in paat:\n buttonsRight[b].configure(background = color3)\n AISunkedBoats[str(paat)] = True # lisame uppunud laevade listi\n # Jooksvate pihtasaamiste kustutamine\n global playerHitGuesses\n playerHitGuesses.clear()\n aiTeade.configure(text=\"DOWN YOU GO!\")\n\n\ndef doAIMove():\n global AIGuesses, AIHitGuesses\n if len(AIHitGuesses) == 0:\n move = AIMove(coordinateFreqDict)\n else:\n move = AIPickNext(AIHitGuesses[-1], coordinateFreqDict)\n # print('Move: ', move) # Debug\n # print('AIHits: ', AIHitGuesses) # Debug\n AIGuesses.add(move)\n if move in playerAllBoatsCoordinates:\n buttonsLeft[move].configure(text=\"X\") # AI sai paadile pihta\n buttonsLeft[move].configure(background = color5)\n # Lisame pihta saanud koordinaadi tabamuste hulka\n AIHitGuesses.append(move)\n boatIsSunk = checkDidAIManageToSinkABoat()\n # Kontrollime, kas mäng on läbi\n checkIfGameOver()\n # AI saab uuesti pakkuda ja vastase pakkumiskord jääb vahele\n # Kui paat on põhjas, siis tühjendame edukad pakkumised ja valime järgmise populaarseima asukoha mänguväljalt\n if (boatIsSunk):\n AIHitGuesses.clear()\n doAIMove()\n else:\n buttonsLeft[move].configure(text=\"-\") # AI pani mööda\n buttonsLeft[move].configure(background = color1) # Mööda läinud ruut värvitakse valgeks\n\n\n# Pakutava koordinaadi juhuslik valimine\ndef pickMoveRandom():\n voimalikud = []\n for e in buttonsLeft.keys():\n if e not in AIGuesses:\n voimalikud.append(e)\n return random.choice(voimalikud)\n\n\ndef checkDidAIManageToSinkABoat():\n for paat in playerBoats:\n if all(x in AIHitGuesses for x in paat):\n # kogu paat põhjas -> värvime ära\n for b in paat:\n buttonsLeft[b].configure(background = color4)\n playerSunkedBoats[str(paat)] = True # lisame uppunud laevade listi\n # Lisame laeva ümbruses olevad nupud AI pakkumiste hulka, sest seal ei saa enam laevu olla\n global AIGuesses\n forbidden = getForbiddenButtonsForAI(paat)\n for coord in forbidden:\n AIGuesses.add(coord)\n return True\n return False\n\n\ndef checkIfGameOver():\n if playerWon():\n displayWin()\n elif playerLost():\n displayLoss()\n\ndef save(result):\n # statistika jaoks\n endOfGame(result)\n\ndef displayLoss():\n teade.configure(text=\"You lost...\")\n for b in buttonsRight:\n buttonsRight[b].configure(state='disabled')\n save('AIwon')\n\ndef displayWin():\n teade.configure(text=\"Congratulations, you won!\")\n for b in buttonsRight:\n buttonsRight[b].configure(state='disabled')\n save('AIlost')\n\ndef playerWon():\n all(x == True for x in AISunkedBoats.values())\n\ndef playerLost():\n return all(x == True for x in playerSunkedBoats.values())\n\ndef endOfGame(result):\n addPlayerBoats()\n saveCoordinateFreqDict()\n saveGuessesDict()\n\n\n# AI laevad: ruudud, mida kõige \"viimastena\" on pakutud. 
Selle jaoks tuleb varem mängija pakkumised sõnastikku salvestada,\n# kus hilisemad pakkumised saavad suuremad väärtused.\n# Alguspunkt kõige vähem pakutule ja siis vaatab edasi, et missugune ruut vasakult, paremalt, ülevalt või alt on järgmisena vähimalt pakutud.\ndef defineAIBoats():\n global AI2, AI3, AI4, AI5, AI6, AIBoats, AISunkedBoats\n\n AI6 = generateBoat(6)\n AI5 = generateBoat(5)\n AI4 = generateBoat(4)\n AI3 = generateBoat(3)\n AI2 = generateBoat(2)\n AIBoats = [AI6, AI5, AI4, AI3, AI2]\n\n for e in AIBoats:\n AISunkedBoats[str(e)] = False\n print(e) # Debug\n\n\ndef generateBoat(length):\n global allGuesses, AIAllBoatsCoordinates\n\n boat = []\n x_id = []\n y_id = []\n\n mostLessFrequent = lastChosen(allGuesses, boat)\n start = random.choice(mostLessFrequent)\n boat.append(start)\n startX = start[0]\n startY = start[1]\n x_id.append(startX)\n y_id.append(startY)\n\n forbidden = getForbiddenButtonsForAI(AIAllBoatsCoordinates)\n # Loendurid, mis jälgivad, et ei jääks tsüklisse kui ei leidu laeva jaoks vajalik arv vabu ruute\n progress_counter_prev = 0\n progress_counter = 0\n while (len(boat) != length):\n allSurroundingButtons = findSurroundingButtons(boat)\n\n for i in range(len(allSurroundingButtons)):\n e = random.choice(allSurroundingButtons)\n if e not in forbidden:\n x = e[0]\n y = e[1]\n x_id.append(x)\n y_id.append(y) # lisame listi\n if len(set(x_id)) == 1 or len(set(y_id)) == 1: # ja vaatame kas ikka on ühel joonel\n boat.append(e)\n progress_counter += 1\n break\n else:\n x_id.remove(x)\n y_id.remove(y)\n if progress_counter == progress_counter_prev: # Juhul kui jääb tsüklisse ja laeva jaoks ei leidu sobivat ruutu\n print('Cannot build a ship of , length', length, boat) # Debug\n return generateBoat(length) # Püüab uuesti laeva luua\n else:\n progress_counter_prev = copy.copy(progress_counter)\n for elem in boat:\n AIAllBoatsCoordinates.append(elem)\n return boat\n\n\ndef findSurroundingButtons(boat):\n surrounding = []\n for coordinate in boat:\n surroundingThatButton = surroundingButtonsGen(coordinate)\n for button in surroundingThatButton:\n if button not in surrounding and button not in boat:\n surrounding.append(button)\n return surrounding\n\n\ndef surroundingButtonsGen(elem):\n global buttonsRight\n list = [(elem[0] - 1, elem[1]), (elem[0] + 1, elem[1]),\n (elem[0], elem[1] - 1), (elem[0], elem[1] + 1)]\n result = []\n for e in list:\n if e in buttonsRight:\n result.append(e)\n return result\n\n\ndef getForbiddenButtonsForAI(lst):\n result = []\n for elem in lst:\n surroundingButtons = [(elem[0] - 1, elem[1]), (elem[0] + 1, elem[1]),\n (elem[0], elem[1] - 1), (elem[0], elem[1] + 1),\n (elem[0] - 1, elem[1] - 1), (elem[0] + 1, elem[1] + 1),\n (elem[0] + 1, elem[1] - 1), (elem[0] - 1, elem[1] + 1)]\n for surrounding in surroundingButtons:\n if not surrounding in result:\n if surrounding in buttonsLeft:\n result.append(surrounding)\n return result\n\n\ndef lastChosen(dict, buttonsAlreadyInBoat):\n forbidden = getForbiddenButtonsForAI(AIAllBoatsCoordinates)\n # võtab kõige viimasena pakutud buttonid ja vaatab ega nad juba keelatud ei ole (laevade läheduses)\n max = 0\n keys = [(0, 0)]\n for k in dict.keys():\n if k not in forbidden and k not in AIAllBoatsCoordinates and k not in buttonsAlreadyInBoat:\n # Otsime suurima väärtusega ehk kõige hiljem valituid kohti mängulaual\n if dict[k] > max:\n max = dict[k]\n keys = [k]\n elif dict[k] == max:\n keys.append(k)\n return keys\n\n\ndef findMostPickedSpot(dict):\n max = 0\n keys = []\n for k in dict.keys():\n if k not 
in AIGuesses:\n # Otsime suurima väärtusega ehk kõige hiljem valituid kohti mängulaual\n if dict[k] > max:\n max = dict[k]\n keys = [k]\n elif dict[k] == min:\n keys.append(k)\n return keys\n\n\n## AI pakub kõige sagedamini valitud asukohti. Kui kõige sagedasem on valitud, siis vaatab edasi,\n# et missugune ruut vasakult, paremalt, ülevalt või alt on järgmisena kõige rohkem täidetud olnud.\n# Kui laev on põhja lastud, siis vastavalt reeglitele - ümbritsevaid ruute enam ei pakuta.\ndef AIMove(dict):\n # Teeb esimese valiku nuppude seast, mida mängija on varem kõige rohkem laevade asukohaks valinud\n keys = findMostPickedSpot(dict)\n # Kontroll, kas mingi sobiv pakkumine on varasemate laevade asukohtade seast leitud\n if len(keys) == 0:\n # Juhul kui mängija laevad on kohtades, kus kunagi varem pole laevu olnud, tehakse juhuslik valik\n pickKey = pickMoveRandom()\n else:\n pickKey = random.choice(keys)\n return pickKey\n\n\ndef onTheSameRow(AIHits):\n return AIHits[0][0] == AIHits[1][0]\n\n\ndef findHitNeighbours(button):\n if len(AIHitGuesses) > 1:\n # Kontroll, kas asuvad samal real\n if onTheSameRow(AIHitGuesses):\n neighbours = [(button[0], button[1] - 1), (button[0], button[1] + 1)]\n else:\n # Asuvad samas veerus\n neighbours = [(button[0] - 1, button[1]), (button[0] + 1, button[1])]\n else:\n # Määratleme kõikvõimalikud naabrid läbi\n neighbours = [(button[0] - 1, button[1]), (button[0] + 1, button[1]),\n (button[0], button[1] - 1), (button[0], button[1] + 1)]\n return neighbours\n\n\ndef findAnotherPopularOption(pot, dict):\n max_key = pot[0]\n max_value = 0\n for el in pot:\n if (el in dict and dict[el] > max_value):\n max_key = el\n max_value = dict[el]\n return max_key\n\n\ndef findPotentialPicks(neighbours):\n potentials = []\n for coord in neighbours:\n if (coord in buttonsLeft and coord not in AIGuesses):\n potentials.append(coord)\n return potentials\n\n\n# Funktsioon kõrvalasuva asukoha pakkumiseks\ndef AIPickNext(button, dict):\n neighbours = findHitNeighbours(button)\n\n potentialPicks = findPotentialPicks(neighbours)\n\n if len(potentialPicks) == 0:\n return AIPickNext(AIHitGuesses[0], coordinateFreqDict)\n # Juhul kui on ainult üks võimalik koht\n elif len(potentialPicks) == 1:\n return potentialPicks[0]\n else:\n return findAnotherPopularOption(potentialPicks, dict)\n\n\ndef readCoordinateFreqDict():\n coordinateDict = pickle.load(open(\"playerBoatsCoordFrequencyDictFile.p\", \"rb\"))\n dict = {}\n for elem in coordinateDict.keys(): # convering back to tuples\n x = int(elem[0])\n y = int(elem[1])\n value = coordinateDict[elem]\n dict[(x, y)] = value\n return dict\n\n\ndef readGuessesDict():\n guessesDict = pickle.load(open(\"playerGuessesFrequencyDictFile.p\", \"rb\"))\n for elem in guessesDict.keys(): # convering back to tuples\n x = int(elem[0])\n y = int(elem[1])\n value = guessesDict.pop(elem)\n guessesDict[(x, y)] = value\n return guessesDict\n\n\n# Pärast mängu lõppu tuleb mängija poolt valitud laevade asukohtade ruudud salvestada \"populaarseimate\" ruutude hulka,\n# mida AI kasutab alates järgmisest mängust pommitamiseks.\ndef addPlayerBoats():\n global coordinateFreqDict\n for coordinate in playerAllBoatsCoordinates:\n if (coordinate not in coordinateFreqDict):\n coordinateFreqDict[coordinate] = 1\n else:\n coordinateFreqDict[coordinate] += 1\n # print(coordinateFreqDict) #Debug\n\n\ndef saveCoordinateFreqDict():\n global coordinateFreqDict\n pickle.dump(coordinateFreqDict, open(\"playerBoatsCoordFrequencyDictFile.p\", \"wb\"),\n 
protocol=pickle.HIGHEST_PROTOCOL)\n # print(coordinateFreqDict)\n\n\ndef saveGuessesDict():\n global allGuesses\n pickle.dump(allGuesses, open(\"playerGuessesFrequencyDictFile.p\", \"wb\"),\n protocol=pickle.HIGHEST_PROTOCOL)\n # print(allGuesses)\n\n\ndef start(state):\n global playerAllBoatsCoordinates, player6, player5, player4, player3, player2, playerBoats, playerSunkedBoats, forbiddenButtonsForBoats\n forbiddenButtonsForBoats = []\n playerAllBoatsCoordinates = []\n player6 = {}\n player5 = {}\n player4 = {}\n player3 = {}\n player2 = {}\n playerBoats = []\n playerSunkedBoats = {}\n\n global AIAllBoatsCoordinates, AISunkedBoats, playerGuesses, playerHitGuesses, guessCounter, AIGuesses, AIHitGuesses\n\n AIAllBoatsCoordinates = []\n AISunkedBoats = {}\n playerGuesses = []\n playerHitGuesses = []\n guessCounter = 0\n AIGuesses = set()\n AIHitGuesses = []\n\n global allGuesses, coordinateFreqDict\n # AI jaoks andmete sisse lugemine\n allGuesses = readGuessesDict()\n coordinateFreqDict = readCoordinateFreqDict()\n\n createBoard()\n defineAIBoats()\n\n teade.configure(text=\"Place your ships on the board!\")\n mangijaTeade.configure(text='Position a ship of length 6. ')\n aiTeade.configure(text=\" \")\n\ndef createFiles():\n # Loome AI jaoks vajalikud failid\n if not os.path.isfile(\"playerBoatsCoordFrequencyDictFile.p\"): # fails don't exist yet -> inizialize\n d = {}\n for i in range(boardWidth):\n for j in range(boardWidth):\n d[(j, i)] = 0\n pickle.dump(d, open(\"playerBoatsCoordFrequencyDictFile.p\", \"wb\"), protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(d, open(\"playerGuessesFrequencyDictFile.p\", \"wb\"), protocol=pickle.HIGHEST_PROTOCOL)\n\ndef setColors():\n # Määrame värvid\n global color1, color2, color3, color4, color5, color6, boardButton\n color1 = 'White'\n color2 = 'gray28'\n color3 = 'RoyalBlue2'\n color4 = 'red3'\n color5 = 'PaleGreen3'\n color6 = 'gray'\n boardButton = 'cornflower blue'\n\n# Kaalud mängija poolt pakutavate ruutude jaoks. 
Mida hiljem mängija ruudu mängulaualt valib, seda suurema väärtuse saab.\ndef createWeights():\n weights = []\n start = 0.1\n step = 0.1\n for i in range(100):\n weights.append(start)\n start += step\n return weights\n\ndef main():\n\n #listide loomine\n global boardWidth, buttonsLeft, buttonsRight, current\n boardWidth = 10\n buttonsLeft = {}\n buttonsRight = {}\n current = [] # viimased, mis mängija poolt valitud (saavad laeva punktideks, aga veel ei ole)\n\n # Väärtuste listi loomine hilisemaks mängija pakkumiste loendamiseks\n global guessWeights\n guessWeights = createWeights()\n\n # Akna loomine\n global root, teade, mangijaTeade, aiTeade\n root = Tk()\n root.wm_title(\"Battleships\")\n root.resizable(0, 0)\n\n minuLaevad = Label(root, text=\"Your ships\", font=\"Times 15 bold\").grid(row=0, column=1, padx=10, pady=8,\n columnspan=10)\n vastaseLaevad = Label(root, text=\"Enemy ships\", font=\"Times 15 bold\").grid(row=0, column=12, padx=10, pady=8,\n columnspan=10)\n teade = Label(root, text=\"Place your ships on the board.\", font=\"Times 13 bold\")\n teade.grid(row=12, column=1, padx=10, pady=8, columnspan=21)\n\n mangijaTeade = Label(root, text='Position a ship of length 5.', font=\"Times 13 bold\")\n mangijaTeade.grid(row=11, column=1, padx=10, pady=8, columnspan=10)\n\n aiTeade = Label(root, text=\" \", font=\"Times 13 bold\")\n aiTeade.grid(row=11, column=12, padx=10, pady=8, columnspan=10)\n\n loppNupp = Button(root, text=\"Restart\", command=lambda: start(False))\n loppNupp.grid(row=13, column=12, padx=10, pady=8, columnspan=3)\n\n loppNupp = Button(root, text=\"Randomise\", command=lambda: start(True))\n loppNupp.grid(row=13, column=8, padx=10, pady=8, columnspan=3)\n\n start(False)\n root.mainloop()\n\n\ncreateFiles()\nsetColors()\nmain()\n","repo_name":"Vossip/Artificial-Intelligence-UT-Course","sub_path":"Battleship/battleships.py","file_name":"battleships.py","file_ext":"py","file_size_in_byte":24077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
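The learning loop at the core of the AI, reduced to a minimal sketch (file name as in the game; it assumes createFiles() has produced the pickle once): accumulate every square the player used for ships, so the next game's AIMove() can target historically popular squares first.

```python
import pickle

with open("playerBoatsCoordFrequencyDictFile.p", "rb") as fh:
    freq = pickle.load(fh)
for coord in [(0, 0), (0, 1), (5, 5)]:   # hypothetical ship squares from one game
    freq[coord] = freq.get(coord, 0) + 1
with open("playerBoatsCoordFrequencyDictFile.p", "wb") as fh:
    pickle.dump(freq, fh, protocol=pickle.HIGHEST_PROTOCOL)
```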
+{"seq_id":"21303187265","text":"from __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom cloudengine import CloudProvider\nfrom cloudengine.google import GoogleAuth\n\nVIDEO_BUCKET = \"video-backup.arjancodes.com\"\nREGION = \"eu-west-1c\"\n\n\ndef create_cloud_provider(\n region: str = REGION, bucket_name: str = VIDEO_BUCKET\n) -> ACCloud:\n authentication = GoogleAuth(\"service_key.json\")\n cloud = CloudProvider(\n region=region,\n http_auth=authentication,\n secure=True,\n )\n return ACCloud(cloud, bucket_name)\n\n\n@dataclass\nclass ACCloud:\n cloud_provider: CloudProvider\n bucket_name: str\n\n def find_files(self, query: str, max_result: int) -> list[str]:\n response = self.cloud_provider.filter_by_query(\n bucket=self.bucket_name, query=query, max=max_result\n )\n return response[\"result\"][\"data\"][0]\n","repo_name":"saidvandeklundert/notes","sub_path":"principles and styles/composition over inheritance/cloud solution/solution_oo.py","file_name":"solution_oo.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"41103397639","text":"from __future__ import annotations\nfrom collections import defaultdict\nfrom collections.abc import Callable\nfrom typing import Any\nfrom aoc.common import log\n\n\nclass Instruction:\n _opcode: str\n _operands: tuple[Any, ...] | None\n\n def __init__(self, opcode: str, operands: tuple[Any, ...] | None) -> None:\n self._opcode = opcode\n self._operands = operands\n\n def __repr__(self) -> str:\n return (\n f\"Instruction[opcode: {self._opcode}, \"\n f\"operands: {self._operands}]\"\n )\n\n @classmethod\n def NOP(cls) -> Instruction:\n return Instruction(\"NOP\", None)\n\n @classmethod\n def JMP(cls, value: int) -> Instruction:\n return Instruction(\"JMP\", (value,))\n\n @classmethod\n def JIE(cls, register: str, value: int) -> Instruction:\n return Instruction(\"JIE\", (register, value))\n\n @classmethod\n def JI1(cls, register: str, value: int) -> Instruction:\n return Instruction(\"JI1\", (register, value))\n\n @classmethod\n def JN0(cls, register: str, value: str) -> Instruction:\n return Instruction(\"JN0\", (register, value))\n\n @classmethod\n def SET(cls, register: str, value: str) -> Instruction:\n return Instruction(\"SET\", (register, value))\n\n @classmethod\n def TGL(cls, register: str) -> Instruction:\n return Instruction(\"TGL\", (register,))\n\n @classmethod\n def ADD(cls, register: str, value: int) -> Instruction:\n return Instruction(\"ADD\", (register, value))\n\n @classmethod\n def SUB(cls, register: str, value: str) -> Instruction:\n return Instruction(\"SUB\", (register, value))\n\n @classmethod\n def MUL(cls, register: str, value: str) -> Instruction:\n return Instruction(\"MUL\", (register, value))\n\n @classmethod\n def DIV(cls, register: str, value: int) -> Instruction:\n return Instruction(\"DIV\", (register, value))\n\n @classmethod\n def MEM(cls, address: int, value: object) -> Instruction:\n return Instruction(\"MEM\", (address, value))\n\n @classmethod\n def OUT(cls, operand: str) -> Instruction:\n return Instruction(\"OUT\", (operand,))\n\n @property\n def opcode(self) -> str:\n return self._opcode\n\n @property\n def operands(self) -> tuple[Any, ...] 
| None:\n return self._operands\n\n @property\n def is_MUL(self) -> bool:\n return self._opcode == \"MUL\"\n\n\nclass Program:\n _instructions: list[Instruction]\n _memory: dict[int, object]\n _registers: dict[str, int | str]\n _instruction_pointer: int\n _inf_loop_treshold: int | None\n _output_consumer: Callable[[str], None] | None\n _cycles: int\n\n def __init__(\n self,\n instructions: list[Instruction],\n inf_loop_treshold: int | None = None,\n output_consumer: Callable[[str], None] | None = None,\n ) -> None:\n self._instructions = instructions\n self._inf_loop_treshold = inf_loop_treshold\n self._memory = dict[int, object]()\n self._registers = dict[str, int | str]()\n self._instruction_pointer = 0\n self._output_consumer = output_consumer\n self._cycles = 0\n\n @property\n def instructions(self) -> list[Instruction]:\n return self._instructions\n\n @property\n def inf_loop_treshold(self) -> int | None:\n return self._inf_loop_treshold\n\n @property\n def instruction_pointer(self) -> int:\n return self._instruction_pointer\n\n @property\n def memory(self) -> dict[int, object]:\n return self._memory\n\n @property\n def registers(self) -> dict[str, int | str]:\n return self._registers\n\n @property\n def cycles(self) -> int:\n return self._cycles\n\n @property\n def output_consumer(self) -> Callable[[str], None] | None:\n return self._output_consumer\n\n def null_operation(self) -> None:\n pass\n\n def set_register_value(self, register: str, value: int | str) -> None:\n self._registers[register] = value\n\n def move_instruction_pointer(self, value: int) -> int:\n self._instruction_pointer += value\n return self._instruction_pointer\n\n def set_memory_value(self, address: int, value: object) -> None:\n self._memory[address] = value\n\n def replace_instruction(self, idx: int, new_ins: Instruction) -> None:\n log(self.instructions)\n log(f\"replacing {self.instructions[idx]} with {new_ins}\")\n self._instructions[idx] = new_ins\n log(self.instructions)\n\n\nclass VirtualMachine:\n _instruction_set: dict[str, Callable[[Program, Instruction, Any], Any]]\n\n def __init__(self) -> None:\n self._instruction_set = {\n \"NOP\": self._nop,\n \"JMP\": self._jmp,\n \"JIE\": self._jie,\n \"JI1\": self._ji1,\n \"JN0\": self._jn0,\n \"SET\": self._set,\n \"TGL\": self._tgl,\n \"ADD\": self._add,\n \"SUB\": self._sub,\n \"MUL\": self._mul,\n \"DIV\": self._div,\n \"MEM\": self._mem,\n \"OUT\": self._out,\n }\n\n def _nop(\n self, program: Program, instruction: Instruction, ip: int\n ) -> None:\n program.null_operation()\n program.move_instruction_pointer(1)\n\n def _jmp(\n self, program: Program, instruction: Instruction, ip: int\n ) -> None:\n if instruction.operands is None:\n raise RuntimeError\n log(instruction.opcode + str(instruction.operands))\n (count, *_) = instruction.operands\n program.move_instruction_pointer(count)\n log(program.registers)\n\n def _jie(\n self, program: Program, instruction: Instruction, ip: int\n ) -> None:\n if instruction.operands is None:\n raise RuntimeError\n log(instruction.opcode + str(instruction.operands))\n (register, count) = instruction.operands\n if (\n register not in program.registers\n or program.registers[register] % 2 == 0\n ):\n program.move_instruction_pointer(count)\n else:\n program.move_instruction_pointer(1)\n log(program.registers)\n\n def _ji1(\n self, program: Program, instruction: Instruction, ip: int\n ) -> None:\n if instruction.operands is None:\n raise RuntimeError\n log(instruction.opcode + str(instruction.operands))\n (register, count) = 
instruction.operands\n        if register in program.registers and program.registers[register] == 1:\n            program.move_instruction_pointer(count)\n        else:\n            program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _jn0(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (test, count) = instruction.operands\n        if test.startswith(\"*\"):\n            if test[1:] in program.registers:\n                test = program.registers[test[1:]]\n            else:\n                test = 0\n        else:\n            test = int(test)\n        if count.startswith(\"*\"):\n            if count[1:] in program.registers:\n                count = program.registers[count[1:]]\n            else:\n                count = 0\n        else:\n            count = int(count)\n        if test != 0:\n            program.move_instruction_pointer(int(count))\n        else:\n            program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _set(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register, value) = instruction.operands\n        if str(value).startswith(\"*\"):\n            value = value[1:]\n            if value in program.registers:\n                program.set_register_value(\n                    register, int(program.registers[value])\n                )\n        else:\n            program.set_register_value(register, int(value))\n        program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _tgl(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register,) = instruction.operands\n        if register in program.registers:\n            idx = ip + int(program.registers[register])\n            if 0 <= idx < len(program.instructions):\n                new_ins = self._toggle_instruction(program.instructions[idx])\n                program.replace_instruction(idx, new_ins)\n        program.move_instruction_pointer(1)\n\n    def _toggle_instruction(self, instruction: Instruction) -> Instruction:\n        if instruction.operands is None:\n            raise RuntimeError\n        if instruction.opcode == \"ADD\" and instruction.operands[1] == 1:\n            return Instruction.ADD(instruction.operands[0], -1)\n        elif (\n            instruction.opcode == \"ADD\"\n            and instruction.operands[1] == -1\n            or instruction.opcode == \"TGL\"\n        ):\n            return Instruction.ADD(instruction.operands[0], 1)\n        elif instruction.opcode == \"JN0\":\n            op2 = str(instruction.operands[1])\n            return Instruction.SET(\n                op2[1:] if op2.startswith(\"*\") else op2,\n                str(instruction.operands[0]),\n            )\n        elif instruction.opcode == \"SET\":\n            op1 = str(instruction.operands[0])\n            return Instruction.JN0(\n                str(instruction.operands[1]),\n                op1 if op1.isnumeric() else \"*\" + op1,\n            )\n        else:\n            raise RuntimeError(\n                \"Cannot toggle instruction: \" + str(instruction)\n            )\n\n    def _add(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register, value) = instruction.operands\n        new_value = (\n            value\n            if register not in program.registers\n            else program.registers[register] + value\n        )\n        program.set_register_value(register, new_value)\n        program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _sub(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register, value) = instruction.operands\n        value = self._value(program, value)\n        new_value = (\n            value\n            if register not in program.registers\n            else program.registers[register] - value\n        )\n        program.set_register_value(register, new_value)\n        program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _div(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register, value) = instruction.operands\n        new_value = (\n            0\n            if register not in program.registers\n            else program.registers[register] // value\n        )\n        program.set_register_value(register, new_value)\n        program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _mul(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        log(instruction.opcode + str(instruction.operands))\n        (register, value) = instruction.operands\n        value = self._value(program, value)\n        new_value = (\n            value\n            if register not in program.registers\n            else program.registers[register] * value\n        )\n        program.set_register_value(register, new_value)\n        program.move_instruction_pointer(1)\n        log(program.registers)\n\n    def _mem(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        (address, value) = instruction.operands\n        program.set_memory_value(address, value)\n        program.move_instruction_pointer(1)\n\n    def _out(\n        self, program: Program, instruction: Instruction, ip: int\n    ) -> None:\n        if instruction.operands is None:\n            raise RuntimeError\n        (operand,) = instruction.operands\n        if operand.startswith(\"*\"):\n            operand = operand[1:]\n            if operand in program.registers:\n                operand = program.registers[operand]\n        else:\n            operand = int(operand)\n        if operand is not None and program.output_consumer is not None:\n            program.output_consumer(operand)\n        program.move_instruction_pointer(1)\n\n    def _value(self, program: Program, op: str) -> int | str:\n        if op.startswith(\"*\"):\n            return program.registers[op[1:]]\n        else:\n            return int(op)\n\n    def run_program(self, program: Program) -> None:\n        seen: defaultdict[int, int] = defaultdict(int)\n        while 0 <= program.instruction_pointer < len(program.instructions):\n            if program.inf_loop_treshold is not None:\n                seen[program.instruction_pointer] += 1\n            self.step(program)\n            if (\n                program.inf_loop_treshold is not None\n                and program.instruction_pointer in seen\n            ):\n                instruction_count = seen[program.instruction_pointer]\n                if instruction_count >= program.inf_loop_treshold:\n                    raise RuntimeError(\"Infinite loop!\")\n        log(\"Normal exit\")\n\n    def step(self, program: Program) -> None:\n        instruction = program.instructions[program.instruction_pointer]\n        if instruction.opcode not in self._instruction_set:\n            raise ValueError(\"Unsupported instruction: \" + instruction.opcode)\n        self._instruction_set[instruction.opcode](\n            program, instruction, program.instruction_pointer\n        )\n        program._cycles += 1\n","repo_name":"pareronia/adventofcode","sub_path":"src/main/python/aoc/vm.py","file_name":"vm.py","file_ext":"py","file_size_in_byte":14691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"28792833443","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/19 16:32\n# @Author : xiaoqing\n# @File : 5太平洋大西洋.py\n# @Software: PyCharm Community Edition\n\nclass Solution(object):\n def pacificAtlantic(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n m, n = len(matrix), len(matrix[0])\n def dfs(i, j, isarrive):\n if i == 0 or j == 0:\n # 可以到太平洋\n isarrive[0] = 1\n if i == m-1 or j == n-1:\n # 可以到大西洋\n isarrive[1] = 1\n if i > 0 and matrix[i][j] >= matrix[i-1][j]:\n return dfs(i-1, j, isarrive)\n if i < m - 1 and matrix[i][j] >= matrix[i+1][j]:\n return dfs(i+1, j, isarrive)\n if j > 0 and matrix[i][j] >= matrix[i][j-1]:\n return dfs(i, j-1, isarrive)\n if j < n - 1 and matrix[i][j] >= matrix[i][j+1]:\n return dfs(i, j+1, isarrive)\n return 0\n result = []\n for i in range(m):\n for j in range(n):\n isarrive = [0, 0]\n num = dfs(i, j, isarrive)\n if num == 2:\n result.append([i, j])\n return result\n\n\nclass Solution(object):\n def pacificAtlantic(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n if not matrix: return []\n\n r, c = len(matrix), len(matrix[0])\n if r == 1 and c == 1: return [[0, 0]]\n self.dp = [[False for _ in range(c)] for _ in range(r)]\n\n res = []\n\n for i in range(r):\n for j in range(c):\n self.pac, self.alt = False, False\n self.dfs(matrix, i, j, [])\n if self.pac and self.alt:\n res.append([i, j])\n self.dp[i][j] = True\n\n return res\n\n def dfs(self, board, i, j, rec):\n if self.dp[i][j]:\n self.pac, self.alt = True, True\n return\n if self.pac and self.alt: return\n if not (0 <= i <= len(board)) or not (0 <= j <= len(board[0])) or [i, j] in rec:\n return\n if i == 0 or j == 0:\n self.pac = True\n if i == len(board) - 1 or j == len(board[0]) - 1:\n self.alt = True\n\n rec.append([i, j])\n\n if i > 0 and board[i][j] >= board[i - 1][j]:\n self.dfs(board, i - 1, j, rec[:])\n if i < len(board) - 1 and board[i][j] >= board[i + 1][j]:\n self.dfs(board, i + 1, j, rec[:])\n if j > 0 and board[i][j] >= board[i][j - 1]:\n self.dfs(board, i, j - 1, rec[:])\n if j < len(board[0]) - 1 and board[i][j] >= board[i][j + 1]:\n self.dfs(board, i, j + 1, rec[:])\n\na = Solution()\nmat = [[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]]\nr = a.pacificAtlantic(mat)\nprint(r)","repo_name":"xiaoqing928/leetcode","sub_path":"搜索/5太平洋大西洋.py","file_name":"5太平洋大西洋.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"70926761777","text":"# Given the pickle file containing all the triples, creates a new vocabulary\n\nimport os\nimport argparse\nimport pickle\n\nimport synspace.vocabulary_utils\n\ndef pickle2txt(input_file, output_file):\n p = pickle.load(open(input_file, \"rb\"))\n with open(output_file, 'w') as out:\n for i in p:\n out.write('{}\\t{}\\t{}\\n'.format(*i))\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Creates a vocabulary')\n parser.add_argument('input_file', type=str,\n help='Pickle file containing all the tuples')\n\n args = parser.parse_args()\n return args\n\ndef main(args):\n # Creates a file that the vocabulary functions can handle\n in_txt = args.input_file + '.txt'\n pickle2txt(args.input_file, in_txt)\n\n # Call vocabulary functions\n dir_name = os.path.dirname(in_txt)\n synspace.vocabulary_utils.new_vocabulary([in_txt], dir_name, 0, 'split',\n False, None, 'word_triples',\n lambda line: \" \".join(line.split('\\t'))[:-1])\n\n # After the files have been created, I can call\n # w2i, i2w = synspace.vocabulary_utils.load_vocabulary(vocab_path)\n # to load the vocabulary, and I can call\n # w2v = reload_w2v(w2i)\n # to initialize a new `w2v` object. This is what I want to use to initialize\n # my Embedding layer.\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n\n","repo_name":"jcbgamboa/torch_language_model","sub_path":"synspace/create_vocabulary.py","file_name":"create_vocabulary.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"3259245843","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom . import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'timeonpage', views.TimeOnPageViewSet)\n\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'timeoncourse/', views.times_on_course, name=\"times\"),\n path(r'timeoncourse/overview/', views.general_times_overview_course, name=\"times-overview\"),\n path(r'timeoncourse/overview/detailed/', views.detailed_times_overview_course, name=\"times-overview-detailed\"),\n]\n","repo_name":"eol-uchile/edx-stats","sub_path":"back/times/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"30698093124","text":"import os.path as osp\nimport os\nfrom rdkit import Chem\nimport numpy as np\nfrom easydict import EasyDict\nfrom rdkit import Chem\nimport subprocess\nfrom rdkit.Chem.rdMolAlign import CalcRMS\nimport shutil\nimport re\nimport copy\n\ndef read_sdf(sdf_file, sanitize=False):\n supp = Chem.SDMolSupplier(sdf_file, sanitize=sanitize)\n mols_list = [i for i in supp]\n return mols_list\n\ndef write_sdf(mol_list,file, voice=False):\n writer = Chem.SDWriter(file)\n mol_cnt = 0\n for i in mol_list:\n try:\n writer.write(i)\n mol_cnt+=1\n except:\n pass\n writer.close()\n if voice: \n print('Write {} molecules to {}'.format(mol_cnt,file))\n\ndef set_mol_position(mol, pos):\n mol = copy.deepcopy(mol)\n for i in range(pos.shape[0]):\n mol.GetConformer(0).SetAtomPosition(i, pos[i].tolist())\n return mol \n\ndef sdf2centroid(sdf_file):\n supp = Chem.SDMolSupplier(sdf_file, sanitize=False)\n lig_xyz = supp[0].GetConformer().GetPositions()\n centroid_x = lig_xyz[:,0].mean()\n centroid_y = lig_xyz[:,1].mean()\n centroid_z = lig_xyz[:,2].mean()\n return centroid_x, centroid_y, centroid_z\n\ndef mol2centroid(mol2_file):\n mol = Chem.MolFromMol2File(mol2_file, sanitize=False)\n lig_xyz = mol.GetConformer().GetPositions()\n centroid_x, centroid_y, centroid_z = lig_xyz.mean(axis=0)\n return centroid_x, centroid_y, centroid_z\n\ndef get_result(docked_sdf, ref_mol=None):\n suppl = Chem.SDMolSupplier(docked_sdf,sanitize=False)\n results = []\n for i, mol in enumerate(suppl):\n if mol is None:\n continue\n line = mol.GetProp('REMARK').splitlines()[0].split()[2:]\n try:\n rmsd = CalcRMS(ref_mol, mol)\n except:\n rmsd = np.nan\n results.append(EasyDict({\n 'rdmol': mol,\n 'mode_id': i,\n 'affinity': float(line[0]),\n 'rmsd_lb': float(line[1]),\n 'rmsd_ub': float(line[2]),\n 'rmsd_ref': rmsd\n }))\n return results\n\ndef sdf2mol2(sdf_file, mol2_file=None, verbose=True):\n '''\n sdf_file: str\n mol2_file: str\n '''\n if mol2_file is None:\n mol2_file = sdf_file.replace('.sdf', '.mol2')\n if os.path.exists(mol2_file):\n return mol2_file\n \n command = f'obabel {sdf_file} -O {mol2_file}'\n result = subprocess.run(command, shell=True, capture_output=True, text=True)\n if verbose:\n if result.returncode == 0:\n print(f'Have been converted to mol2 file! 
{sdf_file}')\n else:\n print(result.stderr)\n return mol2_file\n\ndef pdb2mol2(pdb_file, out_file=None):\n '''\n SurfLex needs protein file in mol2 format\n '''\n if out_file is None:\n out_file = pdb_file.replace('.pdb', '.mol2')\n if os.path.exists(out_file):\n # print(f'{out_file}: Exists!')\n return out_file\n \n command = f'obabel {pdb_file} -O {out_file}'\n result = subprocess.run(command, shell=True, capture_output=True, text=True)\n if result.returncode == 0:\n print(f'{pdb_file} Have been converted to mol2 file!')\n else:\n print(result.stderr)\n return out_file\n\ndef rmfiles(*files, verbose=True):\n for file in files:\n try:\n os.remove(file)\n if verbose:\n print(f\"File {file} has been successfully removed.\")\n except FileNotFoundError:\n print(f\"File {file} not found.\")\n except Exception as e:\n print(f\"An error occurred while deleting {file}: {e}\")\n\ndef compute_rmsd_obrms(true_sdf, docked_sdf):\n command = f'obrms {docked_sdf} {true_sdf}'\n result = subprocess.run(command, shell=True, capture_output=True, text=True)\n outputs = result.stdout.split('\\n')\n rmsd_list = []\n for output in outputs[:-1]:\n rmsd_list.append(float(output.split()[-1]))\n return rmsd_list","repo_name":"HaotianZhangAI4Science/delete-protocol","sub_path":"docking/chem.py","file_name":"chem.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"23681970635","text":"import random\n\n\ndef mutFlipBit(variable, prob=0.5):\n \"\"\"Flip the value of the attributes of the input individual and return the\n mutant. The *individual* is expected to be a :term:`sequence` and the values of the\n attributes shall stay valid after the ``not`` operator is called on them.\n The *prob* argument is the probability of each attribute to be\n flipped. This mutation is usually applied on boolean individuals.\n\n :param variable: Decision Variable to be mutated.\n :param prob: Independent probability for each attribute to be flipped.\n :returns: A tuple of one variable.\n\n This function uses the :func:`~random.random` function from the python base\n :mod:`random` module.\n \"\"\"\n for i in xrange(len(variable)):\n # for i in xrange(variable.size):\n if random.random() < prob:\n variable[i] = type(variable[i])(not variable[i])\n\n return variable\n","repo_name":"QPanProjects/Surrogate-Model","sub_path":"surrogate/mutation/mutFlipBit.py","file_name":"mutFlipBit.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"28460518839","text":"from django.urls import path\nfrom . import views\n\napp_name = 'payment'\nurlpatterns = [\n path('pay/',views.AddTransection.as_view(),name='pay'),\n path('view/',views.ViewTransection.as_view(),name='view'),\n path('delete/',views.DeleteTransection.as_view(),name='delete'),\n path('api/balance',views.giveBalance,name='apibalance'),\n]\n","repo_name":"dilipj17/dairy-webstie","sub_path":"payment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"12163511610","text":"import warnings\n\nfrom alaska2.adabn import bn_update\n\nwarnings.simplefilter(\"ignore\", UserWarning)\nwarnings.simplefilter(\"ignore\", FutureWarning)\n\nimport argparse\nimport os\nimport pandas as pd\nimport numpy as np\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm import tqdm\n\nfrom collections import defaultdict\nfrom catalyst.utils import any2device\nfrom pytorch_toolbelt.utils import to_numpy, fs\nfrom pytorch_toolbelt.utils.catalyst import report_checkpoint\n\nfrom alaska2 import *\nfrom alaska2.submissions import sigmoid, parse_classifier_probas\n\n\ndef update_bn(model: nn.Module, dataset: Dataset, batch_size=1, workers=0):\n \"\"\"\n BatchNorm buffers update (if any).\n Performs 1 epochs to estimate buffers average using train dataset.\n :param loader: train dataset loader for buffers average estimation.\n :param model: model being update\n :return: None\n \"\"\"\n loader = DataLoader(\n dataset, batch_size=batch_size, num_workers=workers, shuffle=True, drop_last=True, pin_memory=True\n )\n bn_update(loader, model)\n\n\n@torch.no_grad()\ndef compute_oof_predictions(model, dataset: Dataset, batch_size=1, workers=0) -> pd.DataFrame:\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n model = model.eval()\n\n df = defaultdict(list)\n for batch in tqdm(\n DataLoader(\n dataset, batch_size=batch_size, num_workers=workers, shuffle=False, drop_last=False, pin_memory=True\n )\n ):\n batch = any2device(batch, device=\"cuda\")\n\n if INPUT_TRUE_MODIFICATION_FLAG in batch:\n y_trues = to_numpy(batch[INPUT_TRUE_MODIFICATION_FLAG]).flatten()\n df[INPUT_TRUE_MODIFICATION_FLAG].extend(y_trues)\n\n if INPUT_TRUE_MODIFICATION_TYPE in batch:\n y_labels = to_numpy(batch[INPUT_TRUE_MODIFICATION_TYPE]).flatten()\n df[INPUT_TRUE_MODIFICATION_TYPE].extend(y_labels)\n\n image_ids = batch[INPUT_IMAGE_ID_KEY]\n df[INPUT_IMAGE_ID_KEY].extend(image_ids)\n\n outputs = model(**batch)\n\n if OUTPUT_PRED_MODIFICATION_FLAG in outputs:\n df[OUTPUT_PRED_MODIFICATION_FLAG].extend(to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG]).flatten())\n\n if OUTPUT_PRED_MODIFICATION_TYPE in outputs:\n df[OUTPUT_PRED_MODIFICATION_TYPE].extend(to_numpy(outputs[OUTPUT_PRED_MODIFICATION_TYPE]).tolist())\n\n if OUTPUT_PRED_EMBEDDING in outputs:\n df[OUTPUT_PRED_EMBEDDING].extend(to_numpy(outputs[OUTPUT_PRED_EMBEDDING]).tolist())\n\n if OUTPUT_PRED_EMBEDDING_ARC_MARGIN in outputs:\n df[OUTPUT_PRED_EMBEDDING_ARC_MARGIN].extend(to_numpy(outputs[OUTPUT_PRED_EMBEDDING_ARC_MARGIN]).tolist())\n\n # Save also TTA predictions for future use\n if OUTPUT_PRED_MODIFICATION_FLAG + \"_tta\" in outputs:\n df[OUTPUT_PRED_MODIFICATION_FLAG + \"_tta\"].extend(\n to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG + \"_tta\"]).tolist()\n )\n\n if OUTPUT_PRED_MODIFICATION_TYPE + \"_tta\" in outputs:\n df[OUTPUT_PRED_MODIFICATION_TYPE + \"_tta\"].extend(\n to_numpy(outputs[OUTPUT_PRED_MODIFICATION_TYPE + \"_tta\"]).tolist()\n )\n\n df = pd.DataFrame.from_dict(df)\n return df\n\n\ndef score_predictions(predictions_fname):\n holdout_predictions = pd.read_csv(predictions_fname)\n\n print(predictions_fname)\n print(\n \"\\tbAUC\",\n alaska_weighted_auc(\n holdout_predictions[INPUT_TRUE_MODIFICATION_FLAG].values,\n holdout_predictions[OUTPUT_PRED_MODIFICATION_FLAG].apply(sigmoid).values,\n ),\n )\n\n print(\n \"\\tcAUC\",\n alaska_weighted_auc(\n holdout_predictions[INPUT_TRUE_MODIFICATION_FLAG].values,\n 
holdout_predictions[OUTPUT_PRED_MODIFICATION_TYPE].apply(parse_classifier_probas).values,\n ),\n )\n\n\n@torch.no_grad()\ndef main():\n # Give no chance to randomness\n torch.manual_seed(0)\n np.random.seed(0)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"checkpoint\", type=str, nargs=\"+\")\n parser.add_argument(\"-dd\", \"--data-dir\", type=str, default=os.environ.get(\"KAGGLE_2020_ALASKA2\"))\n parser.add_argument(\"-b\", \"--batch-size\", type=int, default=1)\n parser.add_argument(\"-w\", \"--workers\", type=int, default=0)\n parser.add_argument(\"-d4\", \"--d4-tta\", action=\"store_true\")\n parser.add_argument(\"-hv\", \"--hv-tta\", action=\"store_true\")\n parser.add_argument(\"-f\", \"--force-recompute\", action=\"store_true\")\n parser.add_argument(\"-oof\", \"--need-oof\", action=\"store_true\")\n parser.add_argument(\"-emb\", \"--need-embedding\", action=\"store_true\")\n parser.add_argument(\"-adabn\", \"--adabn\", action=\"store_true\")\n\n args = parser.parse_args()\n\n checkpoint_fnames = args.checkpoint\n data_dir = args.data_dir\n batch_size = args.batch_size\n workers = args.workers\n\n d4_tta = args.d4_tta\n hv_tta = args.hv_tta\n force_recompute = args.force_recompute\n need_embedding = args.need_embedding\n adabn = args.adabn\n\n outputs = [OUTPUT_PRED_MODIFICATION_FLAG, OUTPUT_PRED_MODIFICATION_TYPE]\n suffix = (\n (\"_w_emb\" if need_embedding else \"\")\n + (\"_adabn\" if adabn else \"\")\n + (\"_flip_hv_tta\" if hv_tta else \"\")\n + (\"_d4_tta\" if d4_tta else \"\")\n )\n\n for checkpoint_fname in checkpoint_fnames:\n model, checkpoints, required_features = ensemble_from_checkpoints(\n [checkpoint_fname], strict=True, outputs=outputs, activation=None, tta=None, need_embedding=need_embedding\n )\n\n report_checkpoint(checkpoints[0])\n model = model.cuda()\n if hv_tta:\n model = wrap_model_with_tta(model, \"flip-hv\", inputs=required_features, outputs=outputs).eval()\n elif d4_tta:\n model = wrap_model_with_tta(model, \"d4\", inputs=required_features, outputs=outputs).eval()\n\n if args.need_oof:\n fold = checkpoints[0][\"checkpoint_data\"][\"cmd_args\"][\"fold\"]\n _, valid_ds, _ = get_datasets(data_dir, fold=fold, features=required_features)\n\n oof_predictions_csv = fs.change_extension(checkpoint_fname, f\"_oof_predictions{suffix}.csv\")\n if force_recompute or not os.path.exists(oof_predictions_csv):\n oof_predictions = compute_oof_predictions(model, valid_ds, batch_size=batch_size, workers=workers)\n oof_predictions.to_csv(oof_predictions_csv, index=False)\n print(f\"OOF score ({suffix})\")\n score_predictions(oof_predictions_csv)\n\n # Holdout\n holdout_ds = get_holdout(data_dir, features=required_features)\n holdout_predictions_csv = fs.change_extension(checkpoint_fname, f\"_holdout_predictions{suffix}.csv\")\n if force_recompute or not os.path.exists(holdout_predictions_csv):\n if adabn:\n update_bn(model, holdout_ds, batch_size=batch_size // torch.cuda.device_count(), workers=workers)\n holdout_predictions = compute_oof_predictions(model, holdout_ds, batch_size=batch_size, workers=workers)\n holdout_predictions.to_csv(holdout_predictions_csv, index=False)\n print(f\"Holdout score ({suffix})\")\n score_predictions(holdout_predictions_csv)\n\n # Test\n test_ds = get_test_dataset(data_dir, features=required_features)\n test_predictions_csv = fs.change_extension(checkpoint_fname, f\"_test_predictions{suffix}.csv\")\n if force_recompute or not 
os.path.exists(test_predictions_csv):\n if adabn:\n update_bn(model, test_ds, batch_size=batch_size // torch.cuda.device_count(), workers=workers)\n test_predictions = compute_oof_predictions(model, test_ds, batch_size=batch_size, workers=workers)\n test_predictions.to_csv(test_predictions_csv, index=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BloodAxe/Kaggle-2020-Alaska2","sub_path":"oof_predictions.py","file_name":"oof_predictions.py","file_ext":"py","file_size_in_byte":7934,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"57"}
+{"seq_id":"39742505273","text":"\"\"\"Lilly PAINS pattern to score for various assays\n\nThis uses the substructure tool to match for known PAINS patterns. Matches\nare not counted.\n\"\"\"\n\n__all__ = [\"LillyPAINS\"]\nimport os\nimport csv\nimport re\nimport operator\nimport shlex\nfrom dataclasses import dataclass\nfrom typing import List, Dict\nimport logging\n\nimport numpy as np\n\nfrom reinvent_plugins.normalize import normalize_smiles\nfrom ..run_program import run_command\nfrom ..component_results import ComponentResults\nfrom ..add_tag import add_tag\n\nlogger = logging.getLogger(\"reinvent\")\n\nLILLY_HOME = \"LILLY_MOL_ROOT\"\nACCEPTED_STEM = \"accepted\"\nTSUB_CMD = (\n \"{topdir}/bin/Linux/tsubstructure -E autocreate -b -u -i smi -o smi -A D -m - -m QDT \"\n \"-n {accepted} -q F:{topdir}/data/queries/PAINS/queries_latest -\"\n)\nSCORES_FILENAME = os.path.join(os.path.dirname(__file__), \"pains_scores.csv\")\nKNOWN_ASSAYS = [\n \"Alpha\",\n \"ELISA\",\n \"FB\",\n \"FP\",\n \"FRET\",\n \"SPA\",\n \"OverallActivityEnrichment\",\n \"QCEnrichment\",\n \"Alpha_HS\",\n \"ELISA_HS\",\n \"FB_HS\",\n \"FP_HS\",\n \"FRET_HS\",\n \"SPA_HS\",\n \"HSEnrichment\",\n \"TotalScore\",\n]\n\n\n@add_tag(\"__parameters\")\n@dataclass\nclass Parameters:\n assay: List[str]\n\n\n@add_tag(\"__component\")\nclass LillyPAINS:\n def __init__(self, params: Parameters):\n if any([assay not in KNOWN_ASSAYS for assay in params.assay]):\n raise RuntimeError(f\"{__name__}: one or more assays not in {', '.join(KNOWN_ASSAYS)}\")\n\n self.assays = params.assay\n\n if LILLY_HOME not in os.environ:\n raise RuntimeError(f\"{__name__}: {LILLY_HOME} not in environment\")\n\n self.pains_scores = read_scores_from_csv(SCORES_FILENAME)\n tsub_cmd = TSUB_CMD.format(topdir=os.environ[LILLY_HOME], accepted=ACCEPTED_STEM)\n self.tsub_cmd = shlex.split(tsub_cmd)\n\n self.smiles_type = \"lilly_smiles\"\n\n @normalize_smiles\n def __call__(self, smilies: List[str]) -> np.array:\n headers = self.pains_scores[\"_headers\"]\n idx = [headers.index(assay_name) - 1 for assay_name in headers if assay_name in self.assays]\n\n smilies_ids = [f\"{smiles} ID{num}\\n\" for num, smiles in enumerate(smilies)]\n result = run_command(self.tsub_cmd, input=\"\\n\".join(smilies_ids))\n scores = parse_output(result.stdout, idx, self.pains_scores, len(smilies))\n\n return ComponentResults(scores)\n\n\ndef read_scores_from_csv(filename) -> Dict[str, List[int]]:\n \"\"\"Read the Lilly PAINS scores from a CSV file\"\"\"\n\n scores = {}\n\n with open(filename, \"r\") as cfile:\n reader = csv.reader(cfile)\n scores[\"_headers\"] = next(reader, None)\n\n for row in reader:\n name = row[0]\n assay_scores = row[1:]\n scores[name] = [int(score) for score in assay_scores]\n\n return scores\n\n\n# \\1 SMILES ID\n# \\2 number of SMARTS pattern matches\n# \\3 name of pattern matched\nREJECTED_PATTERN = re.compile(r\".*? 
ID(\\d+) \\((\\d+) matches to '(.*)'\\)\")\nACCEPTED_PATTERN = re.compile(r\".* ID(\\d+)\")\n\n\ndef parse_output(\n lines: str, idx: List[int], pains_scores: Dict[str, List[int]], nsmilies: int\n) -> List[np.ndarray[float]]:\n \"\"\"Parse the output from tsubstructure and extract the desired columns.\"\"\"\n\n if len(idx) > 1:\n get_rows = lambda row: [float(item) for item in operator.itemgetter(*idx)(row)]\n else:\n get_rows = lambda row: [float(name_scores[idx[0]])]\n\n rows = {} # collect PAINS hits\n\n # FIXME: handle multiple matches\n for line in lines.splitlines():\n match = re.match(REJECTED_PATTERN, line)\n ID, _, name = match.groups()\n ID = int(ID)\n\n # Lookup groups\n name_scores = pains_scores[name]\n rows[ID] = get_rows(name_scores)\n\n accepted_ids = []\n\n with open(f\"{ACCEPTED_STEM}.smi\", \"r\") as accepted:\n for line in accepted:\n match = re.match(ACCEPTED_PATTERN, line)\n ID = int(match.group(1))\n accepted_ids.append(ID)\n\n for i in range(nsmilies):\n if i not in rows.keys():\n rows[i] = np.full(len(idx), np.nan)\n\n if i in accepted_ids:\n rows[i] = np.full(len(idx), 0.0)\n\n if len(rows) != nsmilies:\n logger.warning(f\"{__name__}: Processed only {len(rows)} of {nsmilies} SMILES\")\n\n sorted_rows = {k: rows[k] for k in sorted(rows)}\n\n scores = []\n\n for col in zip(*sorted_rows.values()):\n scores.append(np.array(col))\n\n return scores\n","repo_name":"MolecularAI/REINVENT4","sub_path":"contrib/reinvent_plugins/components/Lilly/comp_lilly_pains.py","file_name":"comp_lilly_pains.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"57"}
+{"seq_id":"43667309363","text":"\"\"\" Test the main particle routines.\"\"\"\nfrom particle_model import Timestepping\nfrom particle_model import Particles\nfrom particle_model import ParticleBase\nfrom particle_model import IO\nfrom particle_model import Collision\nfrom particle_model import DragModels\nfrom particle_model import System\n\nimport vtk\nimport numpy\n\nclass dc(object):\n def __init__(self):\n \"\"\"Mock data cache.\"\"\"\n self.get = lambda x,y: None\n\nclass temp_cache(object):\n \n def __init__(self, fname='rightward_0.vtu', ldir='particle_model/tests/data'):\n \"\"\"Mock temporal cache.\"\"\"\n\n self.fname = fname\n self.ldir = ldir\n self.cache = dc()\n\n def get(self, infile, name):\n return infile.GetPointData().GetArray(name)\n \n\n def __call__(self, time):\n \"\"\"Factory function to mock a temporal cache.\"\"\"\n del time\n reader = vtk.vtkXMLUnstructuredGridReader()\n reader.SetFileName(self.ldir+'/'+self.fname)\n reader.Update()\n\n locator = vtk.vtkCellLocator()\n locator.SetDataSet(reader.GetOutput())\n locator.BuildLocator()\n\n return ([[0.0, self.fname, reader.GetOutput(), locator],\n [1.0, self.fname, reader.GetOutput(), locator]], 0.0,\n [['Velocity', 'Pressure'], ['Velocity', 'Pressure']])\n\n\nBOUNDARY = IO.BoundaryData('particle_model/tests/data/rightward_boundary.vtu')\nBOUNDARY3D = IO.BoundaryData('particle_model/tests/data/cube_boundary.vtu')\nSYSTEM = System.System(BOUNDARY, coeff=1.0, temporal_cache=temp_cache(),\n rho=1.0e3, )\nSYSTEM3D = System.System(BOUNDARY3D, coeff=1.0,\n temporal_cache=temp_cache('cube_0.vtu'))\n\nMESH = IO.GmshMesh()\nMESH.read('particle_model/tests/data/Structured.msh')\nMESH3D = IO.GmshMesh()\nMESH3D.read('particle_model/tests/data/Structured_cube.msh')\n\nPAR0 = ParticleBase.PhysicalParticle(diameter=1.0e32,rho=1.0)\nPAR1 = ParticleBase.PhysicalParticle(diameter=100.0e-4,rho=1.0e3)\n\ndef test_tests():\n \"\"\" Test test structure with a minimal test.\"\"\"\n assert 1\n\ndef test_base_particle_initialization():\n \"\"\"Test basic particle initialization\"\"\"\n from numpy import zeros\n\n pres = zeros(3)\n vel = zeros(3)\n\n part = ParticleBase.ParticleBase(pres, vel)\n\n assert all(part.pos == pres) and all(part.vel == vel)\n\n\n\ndef test_basic_particle_bucket_initialization():\n \"\"\" Test initializing a particle bucket.\"\"\"\n from numpy import zeros\n\n num = 10\n\n pres = zeros((num, 3))\n vel = zeros((num, 3))\n\n part = Particles.ParticleBucket(pres, vel)\n\n assert part\n\n\ndef test_particle_bucket_step_do_nothing():\n \"\"\" Test initializing a full particle bucket.\"\"\"\n from numpy import zeros\n\n bndc = IO.BoundaryData('particle_model/tests/data/boundary_circle.vtu')\n system = System.System(bndc, base_name='particle_model/tests/data/circle')\n\n num = 1\n\n pres = zeros((num, 3))\n vel = zeros((num, 3))\n\n bucket = Particles.ParticleBucket(pres, vel, 0.0, delta_t=0.5,\n system=system)\n\n bucket.run(5.0, write=False)\n\n assert bucket.time == 5.0\n assert all(bucket.particles[0].pos == 0.0)\n assert all(bucket.particles[0].vel == 0.0)\n\n\ndef test_picker_constant():\n \"\"\"Test vtk picker.\"\"\"\n\n part = Particles.Particle((0, 0), system=SYSTEM)\n fluid_velocity, grad_p = part.picker((0.5, 0.5, 0.0), 0.0)\n\n assert all(fluid_velocity == numpy.array((1.0, 0.0, 0.0)))\n assert all(grad_p == numpy.array((0.0, 0.0, 0.0)))\n\n\ndef test_picker_linear(tmpdir):\n \"\"\"Test vtk picker.\"\"\"\n\n pos = ((0.5, 0.5, 0.0),\n (0.25, 0.75, 0.0))\n\n err = numpy.array((1.0e-8, 1.0e-8, 1.0e-8))\n fname = 
tmpdir.join('linear.vtu').strpath\n\n print(fname)\n\n def vel(pos):\n \"\"\"Fluid velocity\"\"\"\n return numpy.array((pos[0], pos[1], 0))\n\n def pres(pos):\n \"\"\"Fluid pressure\"\"\"\n return pos[0]\n\n IO.make_unstructured_grid(MESH, vel, pres, 0.0, fname)\n\n system = System.System(temporal_cache=temp_cache('linear.vtu',\n tmpdir.strpath))\n\n part = Particles.Particle((0, 0), system=system)\n\n for point in pos:\n\n fluid_velocity, grad_p = part.picker(point, 0.0)\n\n assert all(abs(fluid_velocity - vel(point)) < err)\n assert all(grad_p == numpy.array((1.0, 0.0, 0.0)))\n\ndef test_picker_linear_3d(tmpdir):\n \"\"\"Test vtk picker in 3D.\"\"\"\n\n pos = ((0.5, 0.5, 0.5),\n (0.25, 0.75, 0.25))\n\n err = numpy.array((1.0e-8, 1.0e-8, 1.0e-8))\n fname = tmpdir.join('linear3D.vtu').strpath\n\n print(fname)\n\n def vel(pos):\n \"\"\" Fluid velocity\"\"\"\n return numpy.array((pos[0], pos[1], pos[2]))\n\n def pres(pos):\n \"\"\" Fluid pressure\"\"\"\n return pos[0]\n\n IO.make_unstructured_grid(MESH3D, vel, pres, 0.0, fname)\n\n system = System.System(temporal_cache=temp_cache('linear3D.vtu',\n tmpdir.strpath))\n\n part = Particles.Particle((0, 0), system=system)\n\n for point in pos:\n\n fluid_velocity, grad_p = part.picker(point, 0.0)\n\n assert all(abs(fluid_velocity - vel(point)) < err)\n assert all(grad_p == numpy.array((1.0, 0.0, 0.0)))\n\n\n\ndef test_step_constant_velocity():\n \"\"\"Test single step at constant velocity.\"\"\"\n\n for method in Timestepping.methods:\n\n pos = numpy.array((0.5, 0.5, 0.0))\n vel = numpy.array((1.0, 0.0, 0.0))\n\n part = Particles.Particle((pos, vel), delta_t=0.1, parameters=PAR0,\n system=SYSTEM)\n print(method)\n part.update(method=method)\n assert all(part.pos == numpy.array((0.6, 0.5, 0.0)))\n assert part.time == 0.1\n part.update()\n assert all(part.pos == numpy.array((0.7, 0.5, 0.0)))\n\n\ndef test_step_spin_up_turbulent_drag():\n \"\"\"Test turbulent drag function\"\"\"\n\n pos = numpy.array((0.1, 0.5, 0.0))\n vel = numpy.array((0.0, 0.0, 0.0))\n\n phys_par = ParticleBase.PhysicalParticle(drag=DragModels.turbulent_drag,\n rho=1.0e3)\n\n part = Particles.Particle((pos, vel), delta_t=0.001,\n system=SYSTEM,\n parameters=phys_par)\n part.update(method=\"RungeKutta4\")\n assert all(abs(part.pos - numpy.array((0.100345, 0.5, 0))) < 1.e-8)\n assert part.time == 0.001\n\ndef test_step_spin_up_transitional_drag():\n \"\"\" Test transitional drag function.\"\"\"\n\n pos = numpy.array((0.1, 0.5, 0.0))\n vel = numpy.array((0.0, 0.0, 0.0))\n\n phys_par = ParticleBase.PhysicalParticle(drag=DragModels.transitional_drag,\n rho=1.0e3)\n\n part = Particles.Particle((pos, vel), delta_t=0.001,\n system=SYSTEM,\n parameters=phys_par)\n part.update(method=\"RungeKutta4\")\n assert all(abs(part.pos - numpy.array((0.10373956, 0.5, 0))) < 1.e-8)\n assert part.time == 0.001\n\ndef test_stokes_terminal_velocity():\n \"\"\"Test stokes terminal\"\"\"\n\n bndc = IO.BoundaryData('particle_model/tests/data/boundary_circle.vtu')\n system = System.System(bndc, base_name='particle_model/tests/data/circle',\n gravity=numpy.array((0.0, -1.0, 0.0)),\n rho=0.0, viscosity=1.0)\n diameter = 1e-3\n delta_t = 1.0e-8\n\n par = ParticleBase.PhysicalParticle(diameter=diameter,\n drag=DragModels.stokes_drag,\n rho=1.0)\n\n pos = numpy.zeros((1, 3))\n vel = numpy.zeros((1, 3))\n\n bucket = Particles.ParticleBucket(pos, vel, 0.0, delta_t=delta_t,\n parameters=par,\n system=system)\n\n bucket.run(100*delta_t, write=False, method=\"RungeKutta4\")\n assert abs(bucket.time - 100*delta_t) < 
1.0e-8\n assert all(abs(bucket.particles[0].vel\n - numpy.array((0,\n -1.0/18./system.viscosity*par.diameter**2,\n 0))) < 1.e-8)\n\ndef test_step_head_on_collision():\n \"\"\" Test a head-on collision.\"\"\"\n\n pos = numpy.array((0.9995, 0.5, 0.0))\n vel = numpy.array((1.0, 0.0, 0.0))\n\n part = Particles.Particle((pos, vel), delta_t=0.001, parameters=PAR0,\n system=SYSTEM)\n part.update(method=\"ForwardEuler\")\n assert all(abs(part.pos - numpy.array((0.9995, 0.5, 0.0))) < 1.0e-8)\n assert all(part.vel == numpy.array((-1., 0., 0.)))\n assert part.time == 0.001\n\n assert len(part.collisions) == 1\n assert all(part.collisions[0].pos == numpy.array((1., 0.5, 0.)))\n assert part.collisions[0].time == 0.0005\n assert all(part.collisions[0].vel == numpy.array((1., 0., 0.)))\n assert part.collisions[0].angle == numpy.pi/2.0\n\ndef test_diagonal_collision():\n \"\"\"Test a collision at an angle\"\"\"\n\n pos = numpy.array((0.9995, 0.4995, 0.0))\n vel = numpy.array((1.0, 1.0, 0.0))\n\n part = Particles.Particle((pos, vel), delta_t=0.001, parameters=PAR0,\n system=SYSTEM)\n part.update()\n assert all(abs(part.pos - numpy.array((0.9995, 0.5005, 0))) < 1.0e-8)\n assert all(part.vel == numpy.array((-1., 1.0, 0.0)))\n assert part.time == 0.001\n\n assert len(part.collisions) == 1\n assert all(part.collisions[0].pos - numpy.array((1., 0.5, 0.)) < 1.0e-8)\n assert part.collisions[0].time - 0.0005 < 1e-8\n assert all(part.collisions[0].vel == numpy.array((1., 1., 0.)))\n assert part.collisions[0].angle - numpy.pi / 4.0 < 1e-10\n\ndef test_diagonal_collision_3D():\n \"\"\"Test a collision at an angle\"\"\"\n\n pos = numpy.array((0.9995, 0.4995, 0.4995))\n vel = numpy.array((1.0, 1.0, 1.0))\n\n part = Particles.Particle((pos, vel), delta_t=0.001, parameters=PAR0,\n system=SYSTEM3D)\n part.update()\n assert all(abs(part.pos - numpy.array((0.9995, 0.5005, 0.5005))) < 1.0e-8)\n assert all(part.vel == numpy.array((-1., 1.0, 1.0)))\n assert part.time == 0.001\n\n assert len(part.collisions) == 1\n assert all(part.collisions[0].pos - numpy.array((1., 0.5, 0.5)) < 1.0e-8)\n assert part.collisions[0].time - 0.0005 < 1e-8\n assert all(part.collisions[0].vel == numpy.array((1., 1., 1.)))\n assert part.collisions[0].angle - numpy.pi / 4.0 < 1e-10\n\n\ndef test_gyre_collision():\n \"\"\"Regression test for Mclaury coefficient\"\"\"\n\n bndg = IO.BoundaryData('particle_model/tests/data/gyre_boundary.vtu')\n system = System.System(bndg, coeff=1.0,\n temporal_cache=temp_cache('gyre_0.vtu'))\n\n from math import pi\n\n pos = numpy.array((0.8, 0.45, 0.0))\n vel = numpy.array((2.0 * pi, 0.0, 0.0))\n\n part = Particles.Particle((pos, vel), delta_t=0.001, parameters=PAR1,\n system=system)\n\n for i in range(100):\n del i\n part.update(method=\"AdamsBashforth2\")\n\n assert part.pos[0] < 1.0\n assert part.pos[1] < 1.0\n assert part.pos[0] > 0.0\n assert part.pos[1] > 0.0\n\n assert len(part.collisions) == 1\n assert part.collisions[0].pos[0] == 1.0\n assert abs(Collision.mclaury_mass_coeff(part.collisions[0]) - 16.444037345317486) < 1.0e-8\n\n\n\n\ndef test_coefficient_of_restitution():\n \"\"\"Test of coefficient of restitution parameter.\"\"\"\n\n pos = numpy.array((0.95, 0.5, 0.0))\n vel = numpy.array((1.0, 0.0, 0.0))\n\n system = System.System(BOUNDARY, coeff=0.5, temporal_cache=temp_cache())\n\n part = Particles.Particle((pos, vel), delta_t=0.1, parameters=PAR0,\n system=system)\n part.update()\n assert all(abs(part.pos-numpy.array((0.975, 0.5, 0))) < 1.0e-8)\n assert all(part.vel == numpy.array((-0.5, 0, 0)))\n assert 
part.time == 0.1\n\n assert len(part.collisions) == 1\n assert all(part.collisions[0].pos == numpy.array((1., 0.5, 0.)))\n assert part.collisions[0].time == 0.05\n assert all(part.collisions[0].vel == numpy.array((1., 0., 0.)))\n assert part.collisions[0].angle == numpy.pi/2.0\n","repo_name":"jrper/ParticleModule","sub_path":"particle_model/tests/test_particles.py","file_name":"test_particles.py","file_ext":"py","file_size_in_byte":12063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"40339157971","text":"from django import template\nimport ast\n\nregister = template.Library()\n\n# Formatting for profession list\n\n@register.filter\ndef prof_fix(string):\n l = ast.literal_eval(string)\n l = [i.strip() for i in l]\n return (', '.join(l))\n\n","repo_name":"twhyte/lipad","sub_path":"dilipadsite/members/templatetags/members_extras.py","file_name":"members_extras.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"}
+{"seq_id":"9559669560","text":"from typing import List\n\n\ndef find_path(grid, row=0, col=0, path: List = []):\n rows = len(grid)\n cols = len(grid[0])\n\n print(f\" Path {row}, {col}\")\n\n # Boundary check\n if row >= rows or col >= cols:\n print(f\" boundary check\")\n return []\n\n # If cell is off limits\n if not grid[row][col]:\n print(f\" offlimits\")\n return []\n\n # Reached the end\n if row == rows - 1 and col == cols - 1:\n print(f\" reached end\")\n return [(row, col)]\n\n # Try path to the right\n found = find_path(grid, row + 1, col, path)\n if found:\n return [(row, col)] + found\n\n # Try path bellow\n found = find_path(grid, row, col + 1, path)\n if found:\n return [(row, col)] + found\n\n print(f\" deadend\")\n\n # Dead end\n return []\n\n\ngrid = [\n [True, False],\n [True, True],\n]\n\nprint(\"Starting\")\nassert find_path(grid) == [(0, 0), (1, 0), (1, 1)]\n\ngrid = [\n [True, True, True],\n [True, False, True],\n [True, False, True],\n]\n\nprint(\"Starting\")\nassert find_path(grid) == [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]\n\ngrid = [\n [True, True, False],\n [True, False, True],\n [True, True, True],\n]\n\nprint(\"Starting\")\nassert find_path(grid) == [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2)]\n\ngrid = [\n [True, True, False, True],\n [False, True, False, True],\n [True, True, True, False],\n [True, False, True, True],\n]\n\nprint(\"Starting\")\nassert find_path(grid) == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2), (3, 2), (3, 3)]\n","repo_name":"eduardovra/CrackingTheCodingInterview","sub_path":"chapter_08/q02_robot_in_a_grid.py","file_name":"q02_robot_in_a_grid.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"8193894686","text":"from QNetbots.bots_ai.arabicnlp_bot import ArabicBot\nfrom QNetbots.bots_access.livestream_bot import LiveStreamBot\nfrom QNetbots.bots_pim.manager_bot import ManagerBot\n\nimport configparser\n\nif __name__ == \"__main__\":\n config = configparser.ConfigParser()\n config.read('config.ini')\n \n arabbot = ArabicBot('arabibot','dadashmeisam','https://quranic.network')\n arabbot.run()\n\n streambot = LiveStreamBot('streambot','DaDa$hMeiS@m','https://quranic.network')\n streambot.run()\n\n managerbot = ManagerBot('managerbot','mtnxoO6C','https://quranic.network')\n managerbot.run()\n # Infinitely read stdin to stall main thread while the bot runs in other threads\n while True:\n input()\n","repo_name":"meahmadi/QNet-bots","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"2850887634","text":"from zope.formlib import form\nfrom zope import schema, component\nfrom zope.interface import implements\nfrom zope.component import getMultiAdapter\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\nfrom zope.schema.interfaces import IVocabularyFactory\n\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.CMFCore.utils import getToolByName\n\nfrom plone.app.portlets.portlets import base\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.app.portlets.cache import render_cachekey\n\nfrom plone.memoize.instance import memoize\nfrom plone.memoize.compress import xhtml_compress\n\nfrom sc.mailchimp.newsletter import MessageFactory as _\nfrom sc.mailchimp.newsletter import interfaces\n\n\ndef availableLists(context):\n conn = interfaces.IAccountConnector(context,context)\n lists = conn.getLists()\n if hasattr(context,'data'):\n context.data.availableLists = [lst for lst in context.data.availableLists if lst in [i['id'] for i in lists['data']]]\n return SimpleVocabulary([SimpleTerm(value=li['id'], title=li['name']) for li in lists['data']])\n\nclass IMChimpPortlet(IPortletDataProvider):\n \"\"\"\n \"\"\"\n portletname = schema.TextLine(\n title=_(u'Title'),\n description=_(u'Title of the portlet')\n )\n \n availableLists = schema.List(\n title=_(u'Available lists'),\n description=_(u'Select available lists to subscribe to.'),\n required=True,\n min_length=1,\n value_type=schema.Choice(source='sc.mailchimp.newsletter.availableLists')\n )\n\nclass Assignment(base.Assignment):\n \"\"\"\n \"\"\"\n implements(IMChimpPortlet, interfaces.IProperties)\n\n _all_lists = {}\n \n def __init__(self, portletname=u'', availableLists=[]):\n self.portletname = portletname\n self.availableLists = availableLists\n\n @property\n def title(self):\n return _(u\"NewsLetter\")\n \n def getAvailableList(self):\n return self.availableLists\n\nclass Renderer(base.Renderer):\n \"\"\"\n \"\"\"\n _template = ViewPageTemplateFile('templates/mailchimp.pt')\n \n @property\n def name(self):\n return self.data.name or _(u\"Subscribe to newsletter\")\n\n def render(self):\n return xhtml_compress(self._template())\n\nclass AddForm(base.AddForm):\n \"\"\"Portlet add form\"\"\"\n form_fields = form.Fields(IMChimpPortlet)\n \n def update(self):\n super(AddForm, self).update()\n \n def create(self, data):\n return Assignment(**data)\n\nclass EditForm(base.EditForm):\n \"\"\"Portlet edit form\"\"\"\n def __call__(self):\n return super(EditForm, self).__call__()\n form_fields = form.Fields(IMChimpPortlet)\n","repo_name":"simplesconsultoria/sc.mailchimp.newsletter","sub_path":"sc/mailchimp/newsletter/portlets/portlet.py","file_name":"portlet.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"8116847768","text":"import itertools\n\nN = int(input())\nK = int(input())\ncard = []\nfor i in range(N):\n card.append(input())\n\nans = []\nfor arr in itertools.permutations(card, K):\n s = ''.join(arr)\n ans.append(s)\n\nprint(len(set(ans)))","repo_name":"masaya722/msya3","sub_path":"ProgramingContest/JOI/yosen/card_line_up.py","file_name":"card_line_up.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"39883942715","text":"import torch\r\nimport re\r\ndef get_device():\r\n\r\n dev = 'cpu'\r\n\r\n if torch.cuda.is_available():\r\n dev = 'cuda'\r\n\r\n print(f'Device = {dev}')\r\n\r\n return dev\r\n\r\n# Function Source: Book - Machinelearning with Pytorch and Sklearn - Sbastian .. Pg:514\r\ndef string_tokenizer(text : str):\r\n\r\n text = re.sub('<[^>]*>', '', text)\r\n\r\n emoticons = re.findall(\r\n '(?::|;|=)(?:-)?(?:\\)|\\(|D|P)',\r\n text.lower()\r\n )\r\n\r\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')\r\n\r\n tokenized = text.split()\r\n\r\n return tokenized\r\n\r\ndef get_sentences_from_text(text : str):\r\n pat = re.compile(r'([A-Z][^\\.!?]*[\\.!?])', re.M)\r\n\r\n return pat.findall(text.lower())\r\n\r\n","repo_name":"gurusarath1/Next_word_prediction_using_lstm","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"32273074739","text":"from math import ceil\n\n########\n# PART 1\n\ndef get_buses(fn):\n with open(\"event2020/day13/\" + fn) as file:\n return int(file.readline()), [(int(x), t) for t, x in enumerate(file.readline().split(',')) if x != 'x']\n\n\ndef get_earliest(timestamp, buses):\n return min([(bus * ceil(timestamp / bus) - timestamp, bus) for bus, _ in buses])\n\n\nmin_wait = get_earliest(*get_buses(\"example1.txt\"))\nassert min_wait[0] * min_wait[1] == 295\n\n\nearliest, buses = get_buses(\"input.txt\")\nmin_wait = get_earliest(earliest, buses)\nanswer = min_wait[0] * min_wait[1]\nprint(\"Part 1 =\", answer)\nassert answer == 2545 # check with accepted answer\n\n########\n# PART 2\n\ndef get_first_in_sequence(buses):\n period = 1\n min_for_all = 0\n for bus, ts in buses:\n while ((min_for_all + ts) % bus) != 0:\n min_for_all += period\n period *= bus\n\n return min_for_all\n\n\nassert get_first_in_sequence(get_buses(\"example1.txt\")[1]) == 1068781\n\n\nanswer = get_first_in_sequence(get_buses(\"input.txt\")[1])\nprint(\"Part 2 =\", answer)\nassert answer == 266204454441577 # check with accepted answer\n","repo_name":"rjbatista/AoC","sub_path":"aoc/event2020/day13/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"14423383371","text":"\"\"\"\n Alex Dong\n 1413809\n\n List any resources you used below (eg. urls, name of the algorithm from our code archive).\n Remember, you are permitted to get help with general concepts about algorithms\n and problem solving, but you are not permitted to hunt down solutions to\n these particular problems!\n\n (KMP algorithm string matching) - https://www.geeksforgeeks.org/kmp-algorithm-for-pattern-searching/\n\n List any classmate you discussed the problem with. Remember, you can only\n have high-level verbal discussions. No code should be shared, developed,\n or even looked at in these chats. No formulas or pseudocode should be\n written or shared in these chats.\n\n textLength/A\n\n By submitting this code, you are agreeing that you have solved in accordance\n with the collaboration policy in CMPUT 403.\n\n\"\"\"\n\n\ndef computeLPSArray(pat, patternLength, lps):\n longest = 0 # length of the previous longest prefix suffix\n\n lps[0] # lps[0] is always 0\n i = 1\n\n # the loop calculates lps[i] for i = 1 to patternLength-1\n while i < patternLength:\n if pat[i] == pat[longest]:\n longest += 1\n lps[i] = longest\n i += 1\n else:\n # if we found a prefix prior - that will be the last known pattern that we know exists\n if longest != 0:\n longest = lps[longest-1]\n\n # Also, note that we do not increment i here\n else:\n lps[i] = 0\n i += 1\n\n\ndef KMPSearch(pat, txt, lps):\n patternLength = len(pat)\n textLength = len(txt)\n solution = []\n j = 0 # index for pat[]\n i = 0 # index for txt[]\n while i < textLength:\n # found a matching value - increment\n if pat[j] == txt[i]:\n i += 1\n j += 1\n\n # completion - did we find the matched string\n if j == patternLength:\n # print(\"Found pattern at index \" + str(i-j), \"for string\", pat, \"in\", txt, i,j) # why i-j : j is whole pattern, and i is the last spot where pattern is found i-j gives the extra index from 0 -> (i-j)\n solution.append(str(i-j))\n j = lps[j-1]\n\n # mismatch after j matches\n elif i < textLength and pat[j] != txt[i]:\n # Do not match lps[0..lps[j-1]] characters,\n # they will match anyway\n if j != 0:\n j = lps[j-1]\n else:\n i += 1\n if len(solution) == 1 and solution[0] == 0:\n return []\n return solution\n\nsolution = []\n\ntry:\n while True:\n pattern = input()\n patternLength = len(pattern)\n\n # precompute the prefix for the pattern\n lps = [0 for _ in range(patternLength)]\n computeLPSArray(pattern, patternLength, lps)\n string = input()\n\n # input the string and see if a pattern is found\n solution.append(KMPSearch(pattern, string, lps))\n # print(string)\nexcept EOFError:\n # print(' '.join(solution))\n for line in solution:\n print(' '.join(line))\n\n","repo_name":"dong-alex/kattis","sub_path":"stringmatching/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"13915683404","text":"from copy import deepcopy\r\nfrom os.path import join\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom tensorboardX import SummaryWriter\r\nfrom Q_network import Q_network\r\n\r\n#with warnings.catch_warnings():\r\n# warnings.filterwarnings(\"ignore\", category=UserWarning)\r\n\r\n\r\nclass DDQN_agent:\r\n\r\n def __init__(self, env, rew_thre, buffer, learning_rate=0.001, initial_epsilon=0.5, batch_size= 64):\r\n\r\n self.env = env\r\n\r\n\r\n self.network = Q_network(env, learning_rate)\r\n self.target_network = deepcopy(self.network)\r\n self.buffer = buffer\r\n self.epsilon = initial_epsilon\r\n self.batch_size = batch_size\r\n self.window = 50\r\n self.reward_threshold = rew_thre\r\n self.initialize()\r\n self.step_count = 0\r\n self.episode = 0\r\n\r\n\r\n def take_step(self, mode='exploit'):\r\n # choose action with epsilon greedy\r\n if mode == 'explore':\r\n action = self.env.action_space.sample()\r\n else:\r\n action = self.network.greedy_action(torch.FloatTensor(self.s_0))\r\n\r\n #simulate action\r\n s_1, r, done, info = self.env.step(action)\r\n\r\n #put experience in the buffer\r\n self.buffer.append(self.s_0, action, r, done, s_1)\r\n\r\n self.rewards += r\r\n\r\n self.s_0 = s_1.copy()\r\n\r\n self.step_count += 1\r\n if done:\r\n\r\n self.s_0 = self.env.reset()\r\n return done\r\n\r\n # Implement DQN training algorithm\r\n def train(self, gamma=0.99, max_episodes=15000,\r\n network_update_frequency=10,\r\n network_sync_frequency=200):\r\n self.gamma = gamma\r\n self.loss_function = nn.MSELoss()\r\n self.s_0 = self.env.reset()\r\n self.writer = SummaryWriter(\"tensorboard\")\r\n self.load_models()\r\n\r\n # Populate replay buffer\r\n while self.buffer.burn_in_capacity() < 1:\r\n self.take_step(mode='explore')\r\n ep = 8000\r\n training = True\r\n self.populate = False\r\n best_reward = 0\r\n while training:\r\n self.s_0 = self.env.reset()\r\n\r\n self.rewards = 0\r\n done = False\r\n while not done:\r\n self.env.render()\r\n if ((ep % 500) == 0):\r\n torch.save(self.network.state_dict(), join(\"checkpoints\", f'{ep}_SuperMarioBros-1-1-v0.dat'))\r\n\r\n p = np.random.random()\r\n if p < self.epsilon:\r\n done = self.take_step(mode='explore')\r\n else:\r\n done = self.take_step(mode='exploit')\r\n # Update network\r\n if self.step_count % network_update_frequency == 0:\r\n self.update(ep)\r\n # Sync networks\r\n if self.step_count % network_sync_frequency == 0:\r\n self.target_network.load_state_dict(\r\n self.network.state_dict())\r\n self.sync_eps.append(ep)\r\n\r\n if done:\r\n if self.epsilon >= 0.05:\r\n self.epsilon = self.epsilon * 0.7\r\n ep += 1\r\n self.training_rewards.append(self.rewards)\r\n if len(self.update_loss) == 0:\r\n self.training_loss.append(0)\r\n else:\r\n self.training_loss.append(np.mean(self.update_loss))\r\n self.update_loss = []\r\n mean_rewards = np.mean(self.training_rewards[-self.window:])\r\n mean_loss = np.mean(self.training_loss[-self.window:])\r\n self.mean_training_rewards.append(mean_rewards)\r\n if self.rewards>best_reward:\r\n best_reward=self.rewards\r\n torch.save(self.network.state_dict(), 'best_models/SuperMarioBros-1-1-v0.dat')\r\n print(\r\n \"\\rEpisode {:d} Best Reward {:.2f} Mean Rewards {:.2f} Episode reward = {:.2f} mean loss = {:.2f}\\t\\t\".format(\r\n ep, best_reward,mean_rewards, self.rewards, mean_loss))\r\n self.writer.add_scalar(\"Train_{}/Reward\".format(id), self.rewards, ep)\r\n if ep >= max_episodes:\r\n training = False\r\n print('\\nEpisode limit 
reached.')\r\n break\r\n if mean_rewards >= self.reward_threshold:\r\n training = False\r\n print('\\nEnvironment solved in {} episodes!'.format(ep))\r\n #break\r\n # save models\r\n self.save_models()\r\n\r\n def save_models(self):\r\n torch.save(self.network.state_dict(), 'best_models/SuperMarioBros-1-1-v0.dat')\r\n\r\n def load_models(self,eval=False):\r\n self.network.load_state_dict(torch.load('best_models/SuperMarioBros-1-1-v0.dat'))\r\n #self.network = torch.load('best_models/SuperMarioBros-1-1-v0.dat')\r\n if eval:\r\n self.network.eval()\r\n\r\n def calculate_loss(self, batch,episode):\r\n #extract info from batch\r\n states, actions, rewards, dones, next_states = list(batch)\r\n rewards = torch.FloatTensor(rewards).reshape(-1, 1)\r\n actions = torch.LongTensor(np.array(actions)).reshape(-1, 1)\r\n dones = torch.IntTensor(dones).reshape(-1, 1)\r\n states = from_tuple_to_tensor(states)\r\n next_states = from_tuple_to_tensor(next_states)\r\n\r\n ###############\r\n # DDQN Update #\r\n ###############\r\n # Q(s,a) = ??\r\n qvals = self.network.get_qvals(states)\r\n qvals = torch.gather(qvals, 1, actions)\r\n\r\n next_qvals= self.target_network.get_qvals(next_states)\r\n next_qvals_max = torch.max(next_qvals, dim=-1)[0].reshape(-1, 1)\r\n target_qvals = rewards + (1 - dones)*self.gamma*next_qvals_max\r\n\r\n # loss = self.loss_function( Q(s,a) , target_Q(s,a))\r\n loss = self.loss_function(qvals, target_qvals)\r\n self.writer.add_scalar(\"Train_{}/Loss\".format(id), loss, episode)\r\n\r\n return loss\r\n\r\n\r\n def update(self,episode):\r\n self.network.optimizer.zero_grad()\r\n batch = self.buffer.sample_batch(batch_size=self.batch_size)\r\n loss = self.calculate_loss(batch,episode)\r\n\r\n loss.backward()\r\n self.network.optimizer.step()\r\n\r\n self.update_loss.append(loss.item())\r\n\r\n def initialize(self):\r\n self.training_rewards = []\r\n self.training_loss = []\r\n self.update_loss = []\r\n self.mean_training_rewards = []\r\n self.sync_eps = []\r\n self.rewards = 0\r\n self.step_count = 0\r\n\r\n def evaluate(self, eval_env):\r\n done = False\r\n s= eval_env.reset()\r\n rew = 0\r\n while not done:\r\n eval_env.render()\r\n action = self.network.greedy_action(torch.FloatTensor(s))\r\n s, r, done, info =eval_env.step(action)\r\n rew += r\r\n\r\n print(\"Evaluation cumulative reward: \", rew)\r\n\r\n\r\ndef from_tuple_to_tensor(tuple_of_np):\r\n tensor = torch.zeros((len(tuple_of_np), tuple_of_np[0].shape[1],tuple_of_np[0].shape[2],tuple_of_np[0].shape[3]))\r\n for i, x in enumerate(tuple_of_np):\r\n tensor[i] = torch.FloatTensor(x)\r\n return tensor","repo_name":"MrBadonzi/-RL-Project-A3C-Mario","sub_path":"DDQN/DDQN.py","file_name":"DDQN.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"8992396976","text":"from collections import deque, defaultdict\nfrom bisect import bisect_left\n\ndic = defaultdict(int)\nstack = deque()\n\n\ndef insert_q(b):\n if not stack:\n stack.append(b)\n else:\n temp = bisect_left(stack, b)\n stack.insert(temp, b)\nN=int(input())\n\nfor j in range(N):\n M=int(input())\n for i in range(M):\n a, b = input().split()\n b = int(b)\n if a == 'I':\n if dic[b] == 0:\n insert_q(b)\n dic[b] += 1\n else:\n dic[b] += 1\n else:\n if b == 1 and stack:\n if dic[stack[-1]] == 1:\n dic[stack[-1]] = 0\n stack.pop()\n else:\n dic[stack[-1]] -= 1\n\n if b == -1 and stack:\n if dic[stack[0]] == 1:\n dic[stack[0]] = 0\n stack.popleft()\n else:\n dic[stack[0]] -= 1\n if stack:\n print(stack[-1],stack[0])\n else:\n print(\"EMPTY\")\n","repo_name":"denhur62/Python-Algorithm","sub_path":"baekjoon/구현/이중우선순위큐.py","file_name":"이중우선순위큐.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"1640406461","text":"#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author: wangye(Wayne) \n@license: Apache Licence \n@file: Maximum AND Sum of Array.py \n@time: 2022/02/13\n@contact: wang121ye@hotmail.com\n@site: \n@software: PyCharm \n\n# code is far away from bugs.\n\"\"\"\n\nfrom typing import *\nimport collections\n\n\nclass Solution:\n def maximumANDSum(self, nums: List[int], numSlots: int) -> int:\n res = collections.defaultdict(list)\n for i in range(1, 16):\n res1 = []\n for j in range(1, numSlots + 1):\n res2 = i & j\n res1.append((res2, j))\n res1.sort(key=lambda x: (-x[0], x[1]))\n res[i] = res1.copy()\n # print(res)\n ret = 0\n import random\n k1 = 0\n while k1 < 300:\n k1 += 1\n ret1 = 0\n res3 = collections.defaultdict(int)\n random.shuffle(nums)\n for n in nums:\n for k, v in res[n]:\n if res3[v] == 2:\n continue\n else:\n ret1 += k\n res3[v] += 1\n break\n ret = max(ret, ret1)\n return ret\n\n\nso = Solution()\nprint(so.maximumANDSum(nums=[1, 2, 3, 4, 5, 6], numSlots=3))\nprint(so.maximumANDSum([14, 7, 9, 8, 2, 4, 11, 1, 9], 8))\n","repo_name":"wangyendt/LeetCode","sub_path":"Contests/201-300/week 280/2172. Maximum AND Sum of Array/Maximum AND Sum of Array.py","file_name":"Maximum AND Sum of Array.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"}
+{"seq_id":"7051806828","text":"\"\"\"this is the main script\"\"\"\nimport argparse\nimport random\nimport sys\n\n\nclass DOORs:\n status = True #to define if a door is chose-able or not\n chosen = False\n def __init__(self, car_goat:bool = False):\n \"\"\"\n define if there is a car or goat behind the door\n :param car_goat:\n \"\"\"\n self.car_goat = car_goat\n\n def open_goat_door(self):\n \"\"\"this function are used for open the goat door after individual choose a door\"\"\"\n if self.chosen == False and self.car_goat == False:\n self.status = False\n else:\n self.status = True\n\n def pick(self):\n \"\"\" this method are used for pick a door\"\"\"\n if self.status == True:\n self.chosen = True\n\n\ndef get_para():\n \"\"\"to get parameters from command line\"\"\"\n parser = argparse.ArgumentParser(description=\"set up the parameter\")\n parser.add_argument('-r', '--round', dest=\"ROUND\", help=\"set up round times\", required=False,\\\n type = int, default=1000)\n parser.add_argument('-d', '--doors', dest=\"DOORs\", help=\"the number of doors are created\",\\\n required=False, type = int, default=3)\n parser.add_argument('-c', '--chose', dest=\"Door_c\", help=\"the index of doors that was chosen\",\\\n required=False, type=int, default=1)\n return parser.parse_args()\n\n\ndef modify(r, doors, chosen):\n \"\"\"\n this function is used for checking the legibility of the those parameters\n :param r: times the function will repeat\n :param doors: the number of doors that will be created\n :param chosen: the index of door that are chosen\n :return: Nont\n \"\"\"\n if r < 100:\n print(\"\\nthe round that you input is too small to get a reliable result, you may want to input a larger round number\\n\")\n if doors < 3:\n sys.exit('\\nthe doors number are needed to be more than three, please try another doors amount\\n' )\n if chosen > doors:\n sys.exit('\\nthe chosen number need to be less than the doors you choose\\n')\n\n\n\ndef generate_doors(doors:int):\n \"\"\"\n this function are used for generate a certain number of doors, and place a car behind a door\n :param doors: int\n :return: list, n(the index of door that has car)\n \"\"\"\n door_list = []\n n = random.randint(0, doors-1)\n for i in range(doors):\n if i != n:\n door = DOORs(False)\n door_list.append(door)\n else:\n door = DOORs(True)\n door_list.append(door)\n return door_list, n\n\n\ndef close_doors(door_list:list[DOORs]):\n \"\"\"\n this function should close all of doors that are not chosen and don't have cars.\n :param door_list:\n :return:\n \"\"\"\n for i in door_list:\n i.open_goat_door()\n\ndef main():\n args = get_para()\n r, doors, chosen = args.ROUND, args.DOORs, args.Door_c-1\n modify(r, doors, chosen)\n\n change_mind_win_count = 0\n not_change_mind_win_count = 0\n\n for _ in range(r):\n door_list, n = generate_doors(doors)\n door_list[chosen].pick()\n close_doors(door_list)\n #donot change my mind\n if door_list[chosen].car_goat == True and door_list[chosen].chosen == True:\n not_change_mind_win_count += 1\n change_mind_win_count = r - not_change_mind_win_count\n\n\n\n #output result\n print(f\"\\n\\tchange\\t\\tnot_change\")\n print(f\"win\\t{change_mind_win_count}\\t\\t{not_change_mind_win_count}\")\n print(f\"loss\\t{r-change_mind_win_count}\\t\\t{r-not_change_mind_win_count}\")\n print(f\"\\nThe winning possibility if you change your mind is: {change_mind_win_count/r:.2%}\\n\")\n\nif __name__ == '__main__':\n 
main()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"chenfengMeng2021/Monty_Hall_problem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"71843177457","text":"from pylibdmtx.pylibdmtx import decode\r\nimport cv2\r\nimport time\r\n\r\nSEUIL_THRESH = 225\r\n\r\nstart_time = time.time()\r\n\r\nimg = cv2.imread('B00501_001.jpg')\r\n\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\nret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\r\n\r\ncv2.imwrite(\"thresh.jpg\", thresh1)\r\n\r\nrect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) #analyse de char (1, 5)\r\n\r\ndilatation = cv2.dilate(thresh1, rect_kernel, iterations=1)\r\n\r\ncv2.imwrite(\"dilatation.jpg\", dilatation)\r\n\r\ncontours, hierarchy = cv2.findContours(dilatation, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\r\n\r\ncontoursRetained = [c for c in contours\r\n if(65 < cv2.boundingRect(c)[2] < 110 and 65 < cv2.boundingRect(c)[3] < 110)]\r\n\r\n\r\ndec = []\r\nimg_copy = img.copy()\r\nfor c in contoursRetained:\r\n # coordonées abscisse point haut/gauche, ordonnée point haut/gauche, largeur, hauteur\r\n x, y, w, h = cv2.boundingRect(c)\r\n\r\n rect = cv2.rectangle(img_copy, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n \r\n crop_img = img[y-5:y+h+5, x-5:x+w+5]\r\n datamatrix = decode(crop_img)\r\n if(datamatrix!=[]):\r\n dec.append(str(datamatrix[0][0], \"utf-8\"))\r\n\r\ncv2.imwrite(\"detected.jpg\", img_copy)\r\n\r\nend_time = time.time()\r\n\r\nprint(dec)\r\nprint(f\"{end_time - start_time} s.\")\r\n","repo_name":"LHuitre/Projet-entomologie","sub_path":"Decoupage_lamelles/decode_datamtx.py","file_name":"decode_datamtx.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"1283352072","text":"original = [[2,3], [4,5]]\ncopiaSuperficial = original.copy()\ncopiaSuperficial[1][0] = 2000\n\nprint(original)\n\nimport copy # para cópia profunda precisa desse import\noriginal = [[2,3], [4,5]]\ncopiaProfunda = copy.deepcopy(original)\ncopiaProfunda[1][0] = 2000\n\nprint(original)","repo_name":"PETComputacaoUFPR/curso_basico_python","sub_path":"9-problemasListas.py","file_name":"9-problemasListas.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"39612782778","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# 과적합(Overfitting)\n# 과적합이란 학습 데이터를 사용해서 합습했을시 결과가 잘 맞지만, \n# 학습 데이터에만 너무 맞춰져있어서 그 외의 데이터에는 잘 맞지 않는 현상을 뜻한다.\n# 쉽게말해 학습 데이터들에 대해서만 예측을 잘 하고, 정작 실제 데이터는 예측을 못하는 것을 말한다.\n\n# 드롭아웃(Dropout)이란?\n# 과적합 현상을 해결하기 위한 방법론이다.\n# 방법은 상당히 단순한 편으로 학습할때 전체 신경망중에서 일부만을 사용하도록 하는 것이다.\n# 또한 학습 회차마다 신경망을 다르게 설정하도록 한다.\n\n\n# In[2]:\n\n\n# 앞의 6.1에서 작업하였던 코드에 적용하여 보자.\n# 신경망을 만들기 전까지는 이전 코드와 같다.\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data as mnist_input_data\n\n# 이 코드에서 mnist 정보를 다운로드 받고 레이블 데이터를 원-핫 인코딩 방식으로 읽어들인다.\nmnist = mnist_input_data.read_data_sets(\"./mnist/data/\", one_hot=True)\n\n# MNIST 의 손글씨 이미지는 28*28 픽셀(784)로 이루어져 있다.\n# 그리고 레이블은 0부터 9까지의 숫자이므로 10개의 분류로 나눌 수 있다.\n# 그러므로 입력과 출력 플레이스 홀더는 아래와 같이 구성할 수 있다.\n\nX = tf.placeholder(tf.float32, [None, 784]) # 784 픽셀\nY = tf.placeholder(tf.float32, [None, 10]) # 10종류의 숫자\n\n\n# In[3]:\n\n\n# 784 (특징 갯수) => 256 (첫번째 은닉층 뉴런 갯수) => 256 (두번째 은닉층 뉴런 갯수) => 10 (결과값 분류 갯수)\n# 이제 여기서 중요한 점이 dropout 이라는 함수를 추가로 사용하였다는 점이다.\n\ndropoutRate = tf.placeholder(tf.bool)\n\nW1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))\nb1 = tf.Variable(tf.zeros([256]))\nL1 = tf.nn.relu(tf.add(tf.matmul(X, W1), b1))\nL1 = tf.nn.dropout(L1, dropoutRate)\n\nW2 = tf.Variable(tf.random_normal([256, 256], stddev=0.01))\nb2 = tf.Variable(tf.zeros([256]))\nL2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), b2))\nL2 = tf.nn.dropout(L2, dropoutRate)\n\nW3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))\nb3 = tf.Variable(tf.zeros([10]))\nmodel = tf.add(tf.matmul(L2, W3), b3)\n\n# tf.nn.dropout(L1, dropoutRate) 에서 dropoutRate은 사용할 뉴런의 비율을 뜻한다. \n# 만약 dropoutRate가 0.8 이었다면 80%의 뉴런을 사용하는 것이다.\n\n# 여기서 dropoutRate라는 플레이스 홀더를 사용한 이유는\n# 학습이 끝나고 값 예측을 할때에는 신경망 전체를 사용해야 하기 때문이다.\n# 그런고로 학습할때에는 0.8 값을 넣고, 예측을 할때는 1을 넣도록 한다.\n\n\n# In[4]:\n\n\ncost = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=model,\n labels=Y))\noptimizer = tf.train.AdamOptimizer(0.001).minimize(cost)\n\n\n# In[10]:\n\n\n# 세션 시작\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nbatch_size = 100\ntotal_batch = int(mnist.train.num_examples / batch_size)\n\n# 여기에서 드롭아웃 기법을 적용한 뒤 학습을 진행하면 학습이 느리게 진행된다.\n# 그렇기 때문에 에포크를 2배인 30으로 늘려서 더 많이 학습해보도록 하자.\n\nfor epoch in range(30):\n total_cost = 0\n \n for i in range(total_batch):\n # 반복문 안에서 배치 사이즈 만큼의 배치를 가져온다.\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n \n _, cost_val = sess.run(\n [optimizer, cost], \n feed_dict={X: batch_xs, Y: batch_ys, dropoutRate: 0.8})\n \n total_cost += cost_val\n \n print('Epoch:', '%04d' % (epoch + 1), 'Avg. cost =', '{:.3f}'.format(total_cost / total_batch))\n \nprint('최적화 완료!')\n\n\n# In[11]:\n\n\n# 이제 학습결과��� 잘 나오는지 확인해볼 시간이다.\n\nis_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))\naccurary = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('정확도:', sess.run(\n accurary, \n feed_dict={\n X: mnist.test.images, \n Y: mnist.test.labels,\n dropoutRate: 1}))\n\nsess.close()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kelowood/StudyTensorFlow","sub_path":"GolbinTensorFlow/Sources/Kelo6_2_드롭아웃.py","file_name":"Kelo6_2_드롭아웃.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"35954842706","text":"#Crie um programa que leia cinco nomes e exiba a quantidade de nomes que começam com vogal\n\nnomes = [] #criando uma lista vazia\nfor _ in range(5): #iteração com números\n nome = input('Nome: ')\n nomes.append(nome) #append - adiciona um item no fim da lista\n\nqtd = 0\nfor nome in nomes: #iteração com itens da lista\n if(nome[0]=='A' or nome[0]=='E' or nome[0]=='I' or nome[0]=='O' or nome[0]=='U' ):\n qtd += 1\n \nprint(f'{qtd} dos nomes começam com vogal')","repo_name":"ViniciusYoda/python","sub_path":"python/facul/lista/lista18.py","file_name":"lista18.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"29790086384","text":"import os\nimport time\nfrom multiprocessing import Process, Queue\n\nfrom image_spider.baidu_spider.image_baidu_spider import ImageBaiduSpider\nfrom image_db.img2mongodb.image2mgodb import image_to_mongodb\n\nimagegspider = ImageBaiduSpider()\n\n\nclass ImageToDb(object):\n def __init__(self):\n self.queue = Queue()\n\n def producer(self):\n\n img_list = imagegspider.run_image_baidu_spider()\n if img_list:\n for i in img_list:\n # print(len(i))\n for img in i:\n print(img)\n self.queue.put(img)\n print(\"{}成功放入队列\".format(img.md5))\n\n def consumer(self):\n while True:\n if self.queue.empty():\n print('队列为空')\n time.sleep(10)\n if self.queue.empty():\n break\n\n img = self.queue.get()\n print(\"{}成功从队列取出\".format(img.md5))\n if img:\n image_to_mongodb(img)\n\n def run(self):\n pro_task = Process(target=self.producer)\n con_task = Process(target=self.consumer)\n pro_task.start()\n\n con_task.start()\n\n\nif __name__ == '__main__':\n\n # 添加环境路径,后续写入配置文件\n env_path = os.environ['PATH']\n mydir = os.getcwd()\n os.environ['PATH'] = mydir + ';' + env_path\n\n ImageToDb().run()\n\n\n\n","repo_name":"zhangsir104/image","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"26136821524","text":"#-------------------------------#\n# Taken from lecture 1 slide 21 #\n#-------------------------------#\n\nfrom random import randint\nfrom random import shuffle\n\n\ndef select(A, t):\n \"\"\"\n: func finds recursivly the\n: t^th value of A if A was ordered\n:\n: complex Good: pivot in the middle\n: every iteration: T(n) = {T(n/2) + O(n)}\n: ==> O(n)\n:\n: complex Bad: pivot in the corner\n: every iteration: T(n) = {T(n-1) + O(n)}\n: ==> O(n^2)\n:\n: param A: unsorted array\n: type A: array of integers\n: param t: index to find if A was ordered\n: type t: integer (start from 0)\n \"\"\"\n # get a randomal pivot k\n k = A[randint(0, len(A) - 1)]\n \n S1 = [x for x in A if x < k]\n S2 = [x for x in A if x > k]\n \n if len(S1) == t:\n return k\n \n elif len(S1) > t:\n return select(S1, t)\n \n else:\n return select(S2, t - len(S1) - 1)\n\n\ndef main():\n # create an array\n arr = [i for i in range(1,100 + 1)]\n \n # shuffle it\n shuffle(arr)\n \n print(arr)\n\n # find the median.\n m = select(arr, len(arr)//2)\n print(\"Median is: \" + str(m))\n\n\nif __name__ == \"__main__\":\n main() \n ","repo_name":"SimchaTeich/Algorithms1","sub_path":"Lectures/lesson_01/select1.py","file_name":"select1.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"6840187222","text":"import pickle\nfrom flask import Flask, request, jsonify\nimport numpy as np\n\nwith open('model.bin', 'rb') as f_in:\n dv, lr = pickle.load(f_in)\n\ndef prepare_features(ride):\n features ={}\n features['PU_DO']='{}_{}'.format(ride['PULocationID'],ride['DOLocationID'])\n features['trip_distance']=ride['trip_distance']\n return features\n\ndef predict_mean(features):\n X_val = dv.transform(features)\n y_pred = lr.predict(X_val)\n return np.mean(y_pred)\n\n\napp = Flask('duration-prediction')\n\n@app.route('/predict', methods=['POST'])\ndef predict_endpoint():\n ride = request.get_json()\n features = prepare_features(ride)\n\n pred = predict_mean(features)\n\n result = {'duration': pred}\n return jsonify(result)\n\nif __name__==\"__main__\":\n app.run(debug=True, host='0.0.0.0', port=9696)\n ","repo_name":"irombie/mlops_zoomcamp_homeworks","sub_path":"hw4/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"73793507696","text":"\nimport pandas as pd\nimport numpy as np \nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport datetime as dt \n#import matplotlib.pyplot as plt \nfrom datetime import datetime \nimport streamlit as st\ntry: \n json_normalize = pd.json_normalize\nexcept:\n from pandas.io.json import json_normalize\n\nprogress_bar = st.sidebar.progress(0)\nstatus_text = st.sidebar.empty()\n\nfrom tqdm.auto import tqdm\n\nclass tqdm:\n def __init__(self, iterable, title=None):\n if title:\n st.write(title)\n self.prog_bar = st.progress(0)\n self.iterable = iterable\n self.length = len(iterable)\n self.i = 0\n\n def __iter__(self):\n for obj in self.iterable:\n yield obj\n self.i += 1\n current_prog = self.i / self.length\n self.prog_bar.progress(current_prog)\n\n#@st.cache(allow_output_mutation=True) \ndef process_fitbit_sleep_data(fileList):\n full_sleep_df = None\n cnt = 0\n #tqdm(follow_links,title='Scrape in Progress. Please Wait.')\n for input_file in tqdm(fileList,title='Loading in short interval of sleep fitbit data'):\n input_df = pd.read_json(input_file)\n detail_df = json_normalize(input_df['levels'])\n sleep_df = pd.concat([input_df, detail_df], axis =1)\n full_sleep_df = pd.concat([full_sleep_df, sleep_df], sort=True)\n \n progress_bar.progress(cnt/len(fileList))\n status_text.text(\"Data Reading %i%% Complete\" % float(cnt/len(fileList))) \n cnt+=1\n\n full_sleep_df['dateOfSleep']= pd.to_datetime(full_sleep_df['dateOfSleep'])\n full_sleep_df['dayOfWeek'] = full_sleep_df['dateOfSleep'].dt.day_name()\n full_sleep_df = full_sleep_df.set_index('dateOfSleep')\n full_sleep_df.sort_index(inplace=True)\n\n full_sleep_df['duration'] = full_sleep_df['duration']/(1000*60) # convert duration to minutes\n\n for col in ['rem','deep','wake','light']:\n full_sleep_df[col + '.%'] = 100*full_sleep_df['summary.' 
+ col + '.minutes']/full_sleep_df['duration']\n\n full_sleep_df['startMin'] = pd.to_datetime(full_sleep_df['startTime']).dt.minute + 60 * pd.to_datetime(full_sleep_df['startTime']).dt.hour\n\n full_sleep_df['startMin'] = np.where(full_sleep_df['startMin'] < 240, full_sleep_df['startMin'] + 1440, full_sleep_df['startMin']) # handle v late nights\n\n full_sleep_df['endMin'] = pd.to_datetime(full_sleep_df['endTime']).dt.minute + 60 * pd.to_datetime(full_sleep_df['endTime']).dt.hour\n\n #remove rows which are not mainSleep == True (these are naps not sleeps)\n full_sleep_df = full_sleep_df[full_sleep_df.mainSleep != False]\n\n #remove column which are not needed/useful\n full_sleep_df.drop(['logId', 'data', 'shortData', 'infoCode', 'levels'], axis=1, inplace=True)\n\n return full_sleep_df\n'''\nimport dask\ndef dask_map_function(eval_,invalid_ind):\n results = []\n for x in invalid_ind:\n y = dask.delayed(eval_)(x)\n results.append(y)\n fitnesses = dask.compute(*results)\n return fitnesses\n'''\ndef visit_files(fileList):\n '''\n filter files into useful sub categories\n '''\n resting_heart_rate = []\n moderately_active = []\n very_active_minutes = []\n for input_file in tqdm(fileList,title='Loading in massive everything data set'):\n if \"resting_heart_rate\" in input_file:\n resting_heart_rate.append(input_file)\n if \"moderately_active_minutes\" in input_file:\n moderately_active.append(input_file)\n if \"very_active_minutes\" in input_file:\n very_active_minutes.append(input_file)\n return (very_active_minutes,moderately_active,resting_heart_rate)\n\ndef process_fitbit_other_data(list_of_lists):\n '''\n visit files from subcategories build large frames via concatonation.\n '''\n dict_of_frames = {}\n list_of_frames = []\n for list_of_files in list_of_lists:\n for input_file in list_of_files:\n input_df = pd.read_json(input_file)\n dict_of_frames[input_file] = input_df\n return dict_of_frames\n\n #if cnt>0:\n # df = pd.concat([df, input_df], axis =1)\n #else:\n # df = input_df\n #cnt+=1\n #st.write(reduced_df.describe())\n #progress_bar.progress(cnt/len(list_of_lists)*)\n #status_text.text(\"Data Reading %i%% Complete\" % float(cnt/len(list_of_lists))) \n \n","repo_name":"AnthonyNicholas/innerGalileo","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"28935303911","text":"import os\nimport numpy as np\nimport pybullet as p\nimport cv2\n\nfrom PIL import Image\n\n\ndef make_video_linear(\n name: str,\n duration: float,\n camera_init_pos,\n camera_final_pos,\n target_init_pos,\n target_final_pos,\n fov_init,\n fov_final,\n simulation_step=None,\n movement=None,\n width=1280,\n height=720,\n frame_rate=24,\n release=False,\n):\n if release:\n video = []\n n_frame = np.round(frame_rate * duration)\n camera_step = (camera_final_pos - camera_init_pos) / n_frame\n target_step = (target_final_pos - target_init_pos) / n_frame\n fov_step = (fov_final - fov_init) / n_frame\n if simulation_step is None:\n simulation_step = n_frame\n assert simulation_step >= n_frame\n step_per_frame = simulation_step // n_frame\n\n i_frame = 0\n for i_step in range(1, simulation_step + 1):\n if movement is not None:\n movement()\n if i_step % step_per_frame == 0:\n projectionMatrix = p.computeProjectionMatrixFOV(\n fov=fov_init + i_frame * fov_step,\n aspect=width / height,\n nearVal=0.1,\n farVal=50\n )\n viewMatrix = p.computeViewMatrix(\n cameraEyePosition=list(i_frame * camera_step + camera_init_pos),\n cameraTargetPosition=list(i_frame * target_step + target_init_pos),\n cameraUpVector=[0, 0, 1]\n )\n width, height, rgbImg, depthImg, segImg = p.getCameraImage(\n width=width,\n height=height,\n viewMatrix=viewMatrix,\n projectionMatrix=projectionMatrix,\n renderer=p.ER_BULLET_HARDWARE_OPENGL\n )\n video.append(rgbImg)\n i_frame += 1\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter(f'{name}.mp4', fourcc, frame_rate, (width, height))\n if not os.path.exists(f'video/{name}'):\n os.mkdir(f'video/{name}')\n for i_img, img in enumerate(video):\n img_pil = Image.fromarray(img, 'RGBA')\n img_rgb = img_pil.convert('RGB')\n img_rgb.save(f'video/{name}/{i_img}.png')\n img_new = cv2.imread(f'video/{name}/{i_img}.png')\n out.write(img_new)\n out.release()\n else:\n if movement is not None:\n for i_step in range(1, simulation_step + 1):\n movement()\n","repo_name":"syzhang092218-source/robot-golf","sub_path":"robot_golf/video_maker.py","file_name":"video_maker.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"41612216671","text":"import pandas as pd\nimport numpy as np\nimport requests\nimport re\nimport os\nfrom bs4 import BeautifulSoup\nimport datetime\n\n# You should use this source!\n# https://en.wikipedia.org/wiki/2020_coronavirus_pandemic_in_Germany\n\nRKI_url = 'https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html'\nrequestRKI = requests.get(RKI_url)\nsoup = BeautifulSoup(requestRKI.content, features=\"html.parser\")\ntable = soup.find(\"table\")\nallP = list(soup.find_all(\"p\"))\ndateP = [p for p in allP if str(p).__contains__(\"online aktualisiert um\")]\n# old way to find date\n# date = soup.find_all(\"div\", class_=\"dateOfIssue\")\ndate = re.findall('\\\\d+', str(dateP))\ndate = [int(part) for part in date]\ndate = datetime.date(date[2], date[1], date[0])\ndate = date.strftime(\"%d-%m-%Y\")\n\noutput_rows = []\nfor table_row in table.findAll('tr'):\n columns = table_row.findAll('td')\n output_row = []\n for column in columns:\n output_row.append(column.text)\n output_rows.append(output_row)\n\noutput_rows = list(np.delete(output_rows, [0, 1, len(output_rows) - 1]))\n\nheaders = ['Bundesland', 'Anzahl', 'Differenz zum Vortag', 'Fälle/100.000 Einw.', 'Todesfälle']\noutputTable = pd.DataFrame(output_rows, columns=headers)\noutputTable.insert(0, 'Date', date)\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\noutputLoc = \"./raw/\"\nfileName = outputLoc + 'RKI_Covid19_' + date + '.csv'\noutputTable.to_csv(fileName, sep=',', encoding='utf-8', index=False)\n","repo_name":"Phrytes/COVID19_RKI_Germany","sub_path":"download_RKI-Covid19-data.py","file_name":"download_RKI-Covid19-data.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"}
+{"seq_id":"6216583294","text":"# 输入一个正整数 target ,输出所有和为 target 的连续正整数序列(至少含有两个数)。\n#\n# 序列内的数字由小到大排列,不同序列按照首个数字从小到大排列。\n#\n#\n#\n# 示例 1:\n#\n# 输入:target = 9\n# 输出:[[2,3,4],[4,5]]\n#\n#\n# 示例 2:\n#\n# 输入:target = 15\n# 输出:[[1,2,3,4,5],[4,5,6],[7,8]]\n#\n#\n#\n#\n# 限制:\n#\n#\n# 1 <= target <= 10^5\n#\n#\n#\n# 👍 148 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution:\n def findContinuousSequence(self, target: int) -> List[List[int]]:\n i, j = 1, 1 # ∵正整数序列,∴滑动窗口的左右边界均从1开始;\n sum, rst = 0, [] # cnt:滑动窗口内数字和\n while i <= target // 2: # [i, j]: 递增序列,故\n if sum < target: # 右边界右移\n sum += j\n j += 1\n elif sum > target: # 左边界右移\n sum -= i\n i += 1\n else: # cnt == target\n sub_rst = list(range(i, j))\n rst.append(sub_rst)\n sum -= i # 左边界右移\n i += 1\n return rst\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"ljyljy/LeetCode-2020","sub_path":"Two_Pointers/Sliding_Window/qo_57_ii-Sum-of-Continuous-Sequence.py","file_name":"qo_57_ii-Sum-of-Continuous-Sequence.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"19577199972","text":"from setuptools import setup, find_packages\n\nwith open(\"requirements.txt\") as f:\n\tinstall_requires = f.read().strip().split(\"\\n\")\n\n# get version from __version__ variable in taysir/__init__.py\nfrom taysir import __version__ as version\n\nsetup(\n\tname=\"taysir\",\n\tversion=version,\n\tdescription=\"A coran school management system\",\n\tauthor=\"S-Amine\",\n\tauthor_email=\"saidi.amine.p@gmail.com\",\n\tpackages=find_packages(),\n\tzip_safe=False,\n\tinclude_package_data=True,\n\tinstall_requires=install_requires\n)\n","repo_name":"S-Amine/Taysir","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"8593607301","text":"from astropy.table import Table\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys, pdb\nfrom PIL import Image\n\n\n\ndef calculate_cropping_box(crop_size, img_size):\n\t\"\"\"\n\tcrop_size will be a percentage of the original image size\n\n\tfrom that percentage, calculate the x,y coordinates of the resulting box\n\t\"\"\"\n\n\tx, y = img_size\n\n\tx_0, y_0 = img_size[0]/2, img_size[1]/2.\n\n\tx1, y1 = int(x/2*(1-crop_size)), int(y/2*(1-crop_size))\n\tx2, y2 = int(x-x/2*(1-crop_size)), int(y-y/2*(1-crop_size))\n\n\treturn (x1, y1, x2, y2)\n\n\n\n\ndef main():\n\n\tclumpy = Table.read('GZ2_no-s82_low-z_low-smooth_clumpzoo.fits')\n\n\t# Create a log file to catch any problematic jpgs\n\tlog = open('clumpy_resize.log', 'w')\n\n\t'''\n\t# Create an HTML file to make quick comparison of resizing/cropping\n\tf = open('clumpy_resize_compare.html','w')\n\tf.write(\"\"\"\n\n\n Clumpy Galaxies: Crop Only or Resize? \n \n \n\n\n\n\n\nOriginal \nCrop 50% \nCrop 70% \nResize 50% \nResize 70% \n\n
\"\"\")\n\n\t'''\n \n\tfor gal in clumpy:\n\t\tbasename = '{}.jpg'.format(gal['id'])\n\n\t\t# The original jpg will be found in this directory..\n\t\tinfile = '/data/extragal/willett/gz2/jpg/'+basename\n\n\t\ttry:\n\t\t\timg = Image.open(infile)\n\t\texcept:\n\t\t\tlog.write(basename+'\\n')\n\n\t\t# Save the image in MY directory\n\t\t#img.save('jpg/'+basename)\n\t\t#f.write(' \\n'.format('jpg/'+basename, s=img.size))\n\n\t\tif img:\n\t\t\torig_size = img.size\n\t\t\tcrop_size = 0.5\n\n\t\t\t#for i, crop_size in enumerate(np.arange(0.5, 1.0, 0.1)):\n\t\t\tcrop_box = calculate_cropping_box(crop_size, orig_size)\n\n\t\t\t# Crop original image\n\t\t\timg2 = img.crop(crop_box)\n\t\t\t\"\"\"\n\t\t\t#outfile = 'jpg/resized/{0}_crop{1}.jpg'.format(gal['id'], i)\n\t\t\t# Save just the cropped image\n\t\t\t#try: \n\t\t\t#\timg2.save(outfile, \"JPEG\")\n\t\t\t#\tif i==0 or i==3:\n\t\t\t#\t\tf.write(' \\n'.format(outfile, s=img2.size))\n\t\t\t#except:\n\t\t\t#\tlog.write(outfile+'\\n')\n\t\t\t\"\"\"\n\n\t\t\t# Resize the cropped image\n\t\t\timg3 = img2.resize(orig_size, Image.ANTIALIAS)\n\t\t\toutfile = 'jpg/resized/{0}_resized.jpg'.format(gal['id'])\n\n\t\t\t# Save the resized image -- \n\t\t\t# I think these will be too blurry!\n\t\t\ttry: \n\t\t\t\timg3.save(outfile, \"JPEG\")\n\t\t\t\t#if (i==0) or (i==3):\n\t\t\t\t#f.write(' \\n'.format(outfile, s=img3.size))\n\t\t\texcept:\n\t\t\t\tlog.write(outfile+'\\n')\n\n\tlog.close()\n\tf.write(\"\"\"\n\"\"\")\n\tf.close()\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"melaniebeck/ClumpyGals","sub_path":"clump_scout/img_dl_manipulation/resize_jpgs.py","file_name":"resize_jpgs.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"1332796827","text":"#Увести з клавіатури дійсні числа х і у, не рівні одне одному.\n#Менше з цих двох чисел замінити половиною їх суми, а більше - їх подвоєним добутком.\nimport re\ndef float_input(text):\n pattern = \"^-?\\d+(\\.\\d+)?$\"\n user_input = input(text)\n while not re.match(pattern, user_input):\n user_input = input(\"Введене значення некоректне , Введіть число:\")\n return float(user_input)\nx= float_input(\"Введите х\")\ny= float_input(\"Введите у\")\nif x==y:\n print(\"Значения равны\")\nelif xy:\n print(\"x=\", x * y * 2)\n print(\"y=\", (x + y) / 2)","repo_name":"BogdanVynnychuk/python-laboratory","sub_path":"Laboratory - 1/Laba 1. Task 2.py","file_name":"Laba 1. Task 2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"30971004318","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom . import models\n\n# Register your models here.\n\n\n@admin.register(models.User)\nclass CustomUserAdmin(UserAdmin):\n \"\"\"Custom User Admin\"\"\"\n\n # list_display = (\"username\", \"email\", ...)\n # list_filter = (\"language\", \"currency\", ...)\n # 장고는 user을 위한 admin패널이 만들어져 있기 때문에 UserAdmin을 상속 받아 사용했다.\n\n fieldsets = UserAdmin.fieldsets + ( # 내부 패널 추가\n (\n \"Custom Profile\", # 파란거\n {\n \"fields\": ( # 그안에 필드들이 뭐가 들어갈지\n \"avatar\",\n \"gender\",\n \"bio\",\n \"birthdate\",\n \"language\",\n \"currency\",\n \"superhost\",\n )\n },\n ),\n )\n\n list_display = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"is_active\",\n \"language\",\n \"currency\",\n \"is_staff\",\n \"is_superuser\",\n \"date_joined\",\n )\n\n list_filter = UserAdmin.list_filter + (\"superhost\",)\n","repo_name":"minchoul2/airbnb-clone","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"41661381735","text":"'''\nA Tree has such structure: the root is always the minimum of its two left and right node.\n\n\n\nIndicate; root is the minimum\nASK: find the second miminum\n'''\n\n# better than O(n) since it does not travese to un-needed subtrees\n# time: O(N), space O(h)\nclass Solution:\n def findSecondMinimumValue(self, root: Optional[TreeNode]) -> int:\n \n '''\n More formally, the property root.val = min(root.left.val, root.right.val) always holds.\n '''\n self.res = math.inf\n treeMin = root.val\n \n def dfs(node):\n if not node:\n return\n if root.val < node.val < self.res:\n self.res = node.val\n if node.val == treeMin:\n dfs(node.left)\n dfs(node.right)\n \n dfs(root)\n return self.res if self.res != math.inf else -1","repo_name":"derrickweiruluo/optimizedLeetcode","sub_path":"Companies/Linkedin/树/671 Minimum node in a special binar ytree.py","file_name":"671 Minimum node in a special binar ytree.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"17985933162","text":"\r\n\r\nfrom pathlib import Path\r\nfrom selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\ndriver=r'C:\\Users\\HP\\AppData\\Local\\Temp\\Rar$EXa18176.42998\\chromedriver.exe'\r\ndef get_html(url):\r\n browser=webdriver.Chrome(executable_path=driver)\r\n browser.get(url)\r\n return browser.page_source\r\n \r\ndef main():\r\n url='https://in.godaddy.com/domainsearch/find?checkAvail=1&domainToCheck=bjmtuc.club'\r\n html=get_html(url)\r\n\r\n \r\n \r\n soup=BeautifulSoup(html,'lxml')\r\n cards=soup.find_all('div',class_='d-flex d-flex-row')\r\n for card in cards:\r\n \r\n \r\n price=card.find('span',class_='text-nowrap d-inline-block').text\r\n print(price)\r\n\r\nif __name__ =='__main__':\r\n main()\r\n","repo_name":"AAA07cr7/Python-projects","sub_path":"webscr.py","file_name":"webscr.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"27270193914","text":"import serial, time\nimport numpy as np\nfrom data import Data\nimport pickle\nfrom swipeGestureFunctions import *\nfrom utils import *\nfrom pose_classifier import PoseClassifier\nfrom getGestures import *\nfrom poses_fsm import *\nimport actions\n\n\nOPEN = 0\nFIST = 1\nONE = 2\nTWO = 3\nTHREE = 4\nFOUR = 5\nMIDDLE = 6\nOK = 7\nROCK =8\nNEUTRAL = 9\nCALI = 10\nTHUMB = 11\nGUN = 12\n\n\n\ndef main():\n ser1 = serial.Serial('/dev/ttyACM0', 9600)\n ser2 = serial.Serial('/dev/ttyACM1', 9600)\n key_ser = serial.Serial('/dev/ttyTHS1', 9600)\n\n clf = PoseClassifier()\n\n chrome_fsm = OpenChromeFSM()\n closeWin_fsm = CloseWindowFSM()\n minim_fsm = MinimizeFSM()\n ft_fsm = FacetimeFSM()\n it_fsm = ITunesFSM()\n ent_fsm = EnterFSM()\n showAllWin_fsm = ShowWindowsFSM()\n\n chrome_fsm1 = OpenChromeFSM()\n closeWin_fsm1 = CloseWindowFSM()\n minim_fsm1 = MinimizeFSM()\n ft_fsm1 = FacetimeFSM()\n it_fsm1 = ITunesFSM()\n ent_fsm1 = EnterFSM()\n showAllWin_fsm1 = ShowWindowsFSM()\n\n time.sleep(1)\n count = 0\n tempcount = -100\n\n previous_r_action = None\n previous_l_action = None\n mode = 2\n\n while True:\n if ser1.inWaiting() > 0 and ser2.inWaiting() > 0:\n try:\n raw_data = ser1.readline().decode('utf-8')\n raw_data2 = ser2.readline().decode('utf-8')\n line = raw_data.replace(\"\\r\\n\", \"\")\n line2 = raw_data2.replace(\"\\r\\n\", \"\")\n # print(line.split(\" \"))\n data = str_to_list(line) + [mode]\n data2 = str_to_list(line2) + [mode]\n #print(data)\n\n except:\n print(\"failed serial, ignoring\")\n continue\n\n\n # data = np.fromstring(line, dtype=int, sep=\" \")\n\n if len(data) < 15 or len(data2) < 15:\n print(\"incorrect data, ignoring\")\n continue\n\n data = Data(data)\n data2 = Data(data2)\n flex_data = data.flex_data()\n flex_data2 = data2.flex_data()\n\n data.mode = mode\n data2.mode = mode\n left_action = actions.call_left_action(data)\n rght_action = actions.call_right_action(data2)\n #print(left_action, rght_action)\n action = left_action if rght_action == None else rght_action\n # checks mode switch\n if action == 100:\n mode = data2.mode\n print(\"New Mode: \", mode)\n data.mode = mode\n continue\n elif mode == 0:\n pass\n elif mode == 1:\n if action != None:\n key_ser.write(chr(action).encode())\n continue\n elif mode == 2:\n\n count+=1\n\n pose = clf.classify_pose(data, right = False)\n pose2 = clf.classify_pose(data2, right= True)\n\n left_swipeDir, tempcount = getSwipeInfo(pose, data, count, tempcount, ser1, clf, False)\n rght_swipeDir, tempcount = getSwipeInfo(pose2, data2, count, tempcount, ser1, clf, True)\n\n if pose== OPEN:\n print(\"left hand open\")\n elif pose== NEUTRAL:\n print(\"left hand in neutral position\")\n elif pose== FIST:\n print(\"left hand fist\")\n elif pose== ONE:\n print(\"left hand one\")\n elif pose== TWO:\n print(\"left hand two\")\n elif pose== THREE:\n print(\"left hand three\")\n elif pose== FOUR:\n print(\"left hand four\")\n elif pose== MIDDLE:\n print(\"left hand middle finger\")\n elif pose== ROCK:\n print(\"left hand rock and roll\")\n elif pose== CALI:\n print(\"left hand surfs up\")\n elif pose== THUMB:\n print(\"left hand thumbs up\")\n elif pose== GUN:\n print(\"left hand pew pew\")\n\n if pose2 == OPEN:\n print(\"right hand open\")\n elif pose2 == NEUTRAL:\n print(\"right hand in neutral position\")\n elif pose2 == FIST:\n print(\"right hand fist\")\n elif pose2 == ONE:\n print(\"right hand one\")\n elif pose2 == TWO:\n print(\"right hand two\")\n elif pose2 == THREE:\n print(\"right hand three\")\n 
elif pose2 == FOUR:\n                    print(\"right hand four\")\n                elif pose2 == MIDDLE:\n                    print(\"right hand middle finger\")\n                elif pose2 == ROCK:\n                    print(\"right hand rock and roll\")\n                elif pose2 == CALI:\n                    print(\"right hand surfs up\")\n                elif pose2 == THUMB:\n                    print(\"right hand thumbs up\")\n                elif pose2 == GUN:\n                    print(\"right hand pew pew\")\n\n                #print(left_swipeDir, rght_swipeDir)\n                if rght_swipeDir != None:\n                    #print(\"right ges\")\n                    #serial send right hand gesture\n                    if rght_swipeDir == \"SWIPE UP\":\n                        key_ser.write(chr(3).encode())\n                    elif rght_swipeDir == \"SWIPE DOWN\":\n                        key_ser.write(chr(4).encode())\n                    elif rght_swipeDir == \"SWIPE RIGHT\":\n                        key_ser.write(chr(0).encode())\n                    elif rght_swipeDir == \"SWIPE LEFT\":\n                        key_ser.write(chr(1).encode())\n                    elif rght_swipeDir == \"volume up\":\n                        key_ser.write(chr(133).encode())\n                    elif rght_swipeDir == \"volume down\":\n                        key_ser.write(chr(134).encode())\n                elif left_swipeDir != None:\n                    #print(\"left ges\")\n                    #serial send left hand gesture\n                    if left_swipeDir == \"SWIPE UP\":\n                        key_ser.write(chr(3).encode())\n                    elif left_swipeDir == \"SWIPE DOWN\":\n                        key_ser.write(chr(4).encode())\n                    elif left_swipeDir == \"SWIPE RIGHT\":\n                        key_ser.write(chr(0).encode())\n                    elif left_swipeDir == \"SWIPE LEFT\":\n                        key_ser.write(chr(1).encode())\n                    elif left_swipeDir == \"volume up\":\n                        key_ser.write(chr(133).encode())\n                    elif left_swipeDir == \"volume down\":\n                        key_ser.write(chr(134).encode())\n                # time.sleep(0.25)\n                left_pose = pose\n                right_pose = pose2\n                print(pose, pose2)\n                if chrome_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(6).encode())\n                elif closeWin_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(7).encode())\n                elif minim_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(2).encode())\n                elif ft_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(130).encode())\n                elif it_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(131).encode())\n                elif ent_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(132).encode())\n                elif showAllWin_fsm.update(data, left_pose, clf):\n                    key_ser.write(chr(129).encode())\n\n                # the second FSM bank tracks the right hand, so it gets the right-hand pose\n                if chrome_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(6).encode())\n                elif closeWin_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(7).encode())\n                elif minim_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(2).encode())\n                elif ft_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(130).encode())\n                elif it_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(131).encode())\n                elif ent_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(132).encode())\n                elif showAllWin_fsm1.update(data2, right_pose, clf):\n                    key_ser.write(chr(129).encode())\n\nif __name__ == '__main__':\n    main()\n","repo_name":"EdwardLu2018/power-glove","sub_path":"testMain.py","file_name":"testMain.py","file_ext":"py","file_size_in_byte":8531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"37656345653","text":"import pscan\n\ntargets_ip = input('Enter the target to scan: ')\nport_num = int(input('Amount of ports to scan: '))\nvul_file = input(\"Enter path to the file with vunerable software: \")\nprint('\\n')\n\ntarget = pscan.PortScan(targets_ip,port_num)\ntarget.scan()\n\nwith open(vul_file, 'r') as file:\n count = 0\n for banner in target.banners:\n file.seek(0)\n for line in file.readlines():\n if line.strip() in banner:\n print('[!!] Vunerable banner: \"' + banner + '\" On port: \"' + str(target.open_ports) + 'found.')\n count += 1\n\n","repo_name":"raina-sarthak/CEH","sub_path":"vulnscanner.py","file_name":"vulnscanner.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"35411615966","text":"def convertTxtToList(text: str):\n \"\"\"\n Receives a text in the format \\\"abc def ghi\\\";\n Returns a list: [\\\"abc\\\", \\\"def\\\", \\\"ghi\\\"]\n \"\"\"\n try:\n lista = text.split(\" \")\n for i in range(lista.count(\"\")):\n lista.remove(\"\")\n return lista\n except:\n print(\"Incorrect format\")\n return None\n\n\ndef clampPositivemeter(i: int):\n if i > 1:\n i = 1\n if i < -1:\n i = -1\n\n return i\n","repo_name":"Oscann/WordParser","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"41758746286","text":"def level(mylist):\n if isinstance(mylist,list):\n if len(mylist)==0:\n return 1\n a=[]\n for i in mylist:\n a.append(level(i))\n print(a)\n max_level=max(a)\n return max_level+1\n else:\n return 0\norigin=eval(input())\nprint(level(origin))","repo_name":"troyxxf/pythonAssistant","sub_path":"王若其嵌套列表.py","file_name":"王若其嵌套列表.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"38106385341","text":"from os import join, abspath, curdir\r\n\r\ncode_file = open(join(abspath(curdir), 'auth_code.txt'), 'r')\r\n\r\nspotify_token = code_file.read()\r\n\r\nspotify_user_id = \"sinistersandwich\"\r\n\r\n# Recent Song Documentation: https://developer.spotify.com/documentation/web-api/reference/player/get-recently-played/\r\n\r\n","repo_name":"cliffchenn/Automatic-Spotify-Database-Compiler","sub_path":"Automatic Spotify Database Compiler/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"1065679702","text":"from django.shortcuts import render\nfrom .models import MainMenu\nfrom django.db import connection\n\n\ndef get_category_nav(request, categories=None, menu_name=None):\n if categories is None:\n categories = MainMenu.objects.filter(main_menu=None,\n menu__name=menu_name)\n categories[0].active = True\n else:\n yield 'in'\n\n for category in categories:\n yield category\n subcats = MainMenu.objects.select_related().filter(main_menu=category)\n if len(subcats):\n category.leaf = False\n for x in get_category_nav(request, subcats):\n yield x\n else:\n category.leaf = True\n yield 'out'\n\n\ndef index(request):\n mmenu = MainMenu.objects.filter(menu__name='Main')\n cq = connection.queries\n path = request.path\n path_raw = path.replace('/', '')\n context = {\n 'mmenu': mmenu,\n 'cq': cq,\n 'path': path,\n 'path_raw': path_raw,\n }\n return render(request, 'index.html', context)\n\n\ndef menu_link(request, slug):\n post = MainMenu.objects.get(slug=slug)\n path = request.path\n path_raw = path.replace('/', '')\n context = {\n 'post': post,\n 'path_raw': path_raw,\n }\n return render(request, 'menu_link.html', context)\n","repo_name":"d3f11/tech_zadanie","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"11544561177","text":"#!/usr/bin/env python3\n# Author: Jonas, ChainLayer\n\nimport json, subprocess, platform, os, socket, getpass\nfrom contextlib import closing\nfrom sys import exit\nfrom progress.bar import ChargingBar\n\n\n# Get user input\ndef get_user_data():\n global keystore_password\n keystore_password = getpass.getpass(prompt='[+] Please enter your keystore password: ')\n\n global consensus_node\n consensus_node = input('[+] Please enter IP address of a consensus node (testnet/mainnet): ')\n\n global consensus_node_port\n consensus_node_port = input('[+] Please enter RPC port of the consensus node: ')\n \n# Determine which ethdo binary to use (Linux / Darwin)\ndef determine_os():\n global ethdo_binary\n global ethdo_base_dir\n\n try:\n operating_system = platform.system()\n print(\"\\u001b[32m[+] Found operating system:\\t %s\" % (operating_system))\n\n if operating_system.lower() == \"darwin\":\n ethdo_binary = \"./ethdo/darwin/ethdo\"\n print(\"\\u001b[32m[+] Using the ethdo binary for:\\t Darwin\")\n\n elif operating_system.lower() == \"linux\":\n ethdo_binary = \"./ethdo/linux/ethdo\"\n print(\"\\u001b[32m[+] Using the ethdo binary for:\\t Linux\")\n \n ethdo_base_dir = \"--base-dir=wallets\"\n \n except Exception as e:\n print(e)\n\ndef check_consensus_connection():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.settimeout(5)\n if sock.connect_ex((str(consensus_node), int(consensus_node_port))) == 0:\n print(\"\\u001b[32m[+] Connected to consensus node %s on port %s\" % (consensus_node, consensus_node_port))\n else:\n print(\"\\u001b[33m[+] Could not connect to consensus node %s on port %s. Exiting...\" % (consensus_node, consensus_node_port))\n exit()\n\n# Generate offline preparation data from consensus node\ndef offline_preparation():\n print(\"\\u001b[33m[+] Starting to prepare offline data from consensus node. 
This might take a while...\")\n    try:\n        cmd = \"%s --connection http://%s:%s validator exit --prepare-offline --allow-insecure-connections --timeout 10m\" % (ethdo_binary, consensus_node, consensus_node_port)\n        print(\"\\u001b[33m[+] Running command: \" + cmd)\n        subprocess.run(cmd, shell=True, capture_output=True)\n        print(\"\\u001b[32m[+] Network state fetched\")\n\n    except Exception as e:\n        print(e)\n\ndef count_loaded_keystores():\n    dir_path = \"./data/input/\"\n    total_files = 0\n    for path in os.listdir(dir_path):\n        if os.path.isfile(os.path.join(dir_path, path)):\n            if path.startswith(\"keystore\"):\n                total_files += 1\n    print(\"\\u001b[32m[+] Loaded %s keystores for processing\" % total_files)\n    return total_files\n\n# Create temporary wallet that is located in the wallets folder.\ndef create_wallet():\n    cmd = \"%s --base-dir=wallets wallet create --wallet=wallet\" % (ethdo_binary)\n    subprocess.run(cmd, shell=True, capture_output=True)\n\n# Add key/account to wallet from keystore file.\ndef add_key_from_keystore(file):\n    cmd = \"%s %s account import --account=wallet/account --keystore=./data/input/%s --keystore-passphrase='%s' --passphrase=pass --allow-weak-passphrases --timeout 10m\" % (ethdo_binary, ethdo_base_dir, file, keystore_password)\n    subprocess.run(cmd, shell=True)\n\n# Generate and sign exit messages to use in the ejector.\ndef generate_and_sign_exit_messages(pubKey): \n    cmd = \"%s %s validator exit --account=wallet/account --passphrase=pass --json --verbose --offline --allow-weak-passphrases --timeout 10m\" % (ethdo_binary, ethdo_base_dir)\n    result = subprocess.run(cmd, shell=True, capture_output=True)\n    with open(\"./data/output/\" + pubKey + '.json', 'w') as f:\n        f.write(result.stdout.decode())\n\n# Clean up wallets\ndef cleanup_wallets():\n    try:\n        cmd = \"%s %s wallet delete --wallet=wallet\" % (ethdo_binary, ethdo_base_dir)\n        subprocess.run(cmd, shell=True)\n\n    except Exception as e:\n        print(e)\n\ndef cleanup_offline_preparation_data():\n    try:\n        cmd = \"rm offline-preparation.json\"\n        subprocess.run(cmd, shell=True)\n    except Exception as e:\n        print(e)\n\ndef main():\n    total_loaded_keys = count_loaded_keystores() \n    get_user_data()\n    check_consensus_connection()\n    determine_os()\n    offline_preparation()\n    print(\"\\u001b[33m[+] Starting to create wallets, adding keys from keystores and generating pre-signed messages\")\n    keystores_count = 0\n    # Walk the input folder once; each keystore is processed exactly one time.\n    with ChargingBar('[+] Processing keystores', max=total_loaded_keys) as bar:\n        for file in os.listdir(\"./data/input\"):\n            if file.startswith(\"keystore\"):\n                create_wallet()\n                with open(\"./data/input/\" + file, 'r') as f:\n                    result = json.load(f)\n                    pubKey = \"0x\" + result[\"pubkey\"]\n                    add_key_from_keystore(file)\n                    generate_and_sign_exit_messages(pubKey)\n                    cleanup_wallets()\n                    keystores_count += 1\n                bar.next()\n        bar.finish()\n    \n    print(\"\\u001b[32m[+] Successfully created pre-signed messages for %d keystores, check your output folder\" % (keystores_count))\n    print(\"\\u001b[31m[+] Remember to clean your input/output folders.\")\n    cleanup_offline_preparation_data()\n\nif __name__ == \"__main__\":\n    try:\n        main()\n    except KeyboardInterrupt:\n        print('[+] User aborted.')\n","repo_name":"chainlayer/validator-exit-pre-signer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"25019389804","text":"import logging\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import entropy\n\nMODULE_DIR = os.path.dirname(os.path.realpath(__file__))\nparent_dir = os.path.dirname(MODULE_DIR)\nsys.path.append(parent_dir)\n\nfrom common.configs_read import ConfigsData\n\nfrom utilities.common_utilities import is_num_present, is_uppercase_present\nfrom utilities.file_utilities import read_pickle_file\n\nlogger = logging.getLogger(\"xgg_logger\")\n\n\ndef ml_prediction_process(\n model_name, training_data, detection_data, git_env=\"enterprise\"\n):\n \"\"\"\n for the given training data and detection data\n Format the detections snf training data as model needed\n Predict the detection using model\n Return the Dataframe of actual detections\n params: training_data - dataframe\n params: detection_data - dataframe - Detection Data\n returns: post_prediction_data - Dataframe - Actual detections\n \"\"\"\n logger.debug(\"<<<< 'Current Executing Function' >>>>\")\n pre_prediction_data = detection_data.copy()\n if git_env == \"public\":\n detection_data = detection_data.drop(\n [\n \"Source\",\n \"Primary_Key\",\n \"Commit_Details\",\n \"URL\",\n \"Owner\",\n \"Repo_Name\",\n \"Detected_Timestamp\",\n \"Year\",\n \"Month\",\n \"Day\",\n ],\n axis=1,\n )\n else:\n detection_data = detection_data.drop(\n [\n \"Source\",\n \"Commit_Details\",\n \"URL\",\n \"Owner\",\n \"Repo_Name\",\n \"Detected_Timestamp\",\n \"Year\",\n \"Month\",\n \"Day\",\n ],\n axis=1,\n )\n try:\n detection_data[\"Len_Key\"] = detection_data.apply(\n lambda x: len(x[\"Secret\"]), axis=1\n )\n detection_data[\"Len_Code\"] = detection_data.apply(\n lambda x: len(x[\"Code\"]), axis=1\n )\n detection_data[\"Has_Digit\"] = detection_data.apply(\n lambda x: is_num_present(x[\"Secret\"]), axis=1\n )\n detection_data[\"Has_Cap\"] = detection_data.apply(\n lambda x: is_uppercase_present(x[\"Secret\"]), axis=1\n )\n\n detection_data = detection_data.drop([\"Secret\", \"Code\"], axis=1)\n train_dummies = pd.get_dummies(training_data)\n detection_dummies = pd.get_dummies(detection_data)\n train_dummies, detection_dummies = train_dummies.align(\n detection_dummies, join=\"left\", axis=1\n )\n detection_dummies = detection_dummies.fillna(0)\n\n config_dir = os.path.abspath(\n os.path.join(os.path.dirname(MODULE_DIR), \".\", \"output\")\n )\n model_file = os.path.join(config_dir, model_name)\n # Read pre trained Model object\n rf = read_pickle_file(model_file)\n # Predict the current detection\n predictions = rf.predict(detection_dummies)\n indexes = [i for i, e in enumerate(predictions) if e != 0]\n post_prediction_data = pre_prediction_data.iloc[indexes, :]\n return post_prediction_data\n except Exception as e:\n print(f\"Error in predicting through model: {e}\")\n post_prediction_data = pd.DataFrame()\n return post_prediction_data\n\n\ndef entropy_calc(labels, base=None):\n \"\"\"\n Calculates Shannon Entropy for given labels\n params: labels - list\n params: base - Optional\n returns: entropy values - list\n \"\"\"\n # logger.debug(\"<<<< 'Current Executing Function' >>>>\")\n _, counts = np.unique(labels, return_counts=True)\n return entropy(counts, base=base)\n","repo_name":"Comcast/xGitGuard","sub_path":"xgitguard/common/ml_process.py","file_name":"ml_process.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"47"}
+{"seq_id":"34140343286","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as scio\n# PCA\ntrain_data = scio.loadmat(\"./ex7data1.mat\")\nX = train_data['X']\nprint(X.shape)\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1)\nax1.set_title('Data distribution')\nplt.xlabel('')\nplt.ylabel('')\nplt.scatter(X[:,0], X[:,1], color='green', marker='o')\nplt.show()\n\n\ndef normalize(X):\n return X - np.mean(X, axis=0)\n\n\ndef pca(X, k):\n Sigma = X.T.dot(X) / len(X)# Covarriance matrix\n U, S, V = np.linalg.svd(Sigma)\n Z = X.dot(U[:,:k])\n X_approx = Z.dot(U[:,:k].T)\n return X_approx\n\n# Normalize first in order to compute the covariance matrix\nX = normalize(X)\nX_approx = pca(X, 1)\nprint(X_approx[0])\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(1, 1, 1)\nax1.set_title('Data distribution')\nplt.xlabel('')\nplt.ylabel('')\n# Original points\nplt.scatter(X[:, 0], X[:, 1], color='green', marker='o')\n# Plot the low dimention points\nplt.scatter(X_approx[:,0], X_approx[:, 1], color='red', marker='x')\n# Plot the projection direction\nplt.plot([X[:,0], X_approx[:,0]], [X[:,1], X_approx[:,1]], linestyle='--', color='blue')\nplt.show()\n\nfrom PIL import Image\ntrain_data = scio.loadmat(\"./ex7faces.mat\")\nX = train_data['X']\nprint(X.shape)\nfig = plt.figure(figsize=(10, 10))\nX = X[:100, :]\nSIZE = 10\nfig, ax_array = plt.subplots(SIZE, SIZE, sharey=True, sharex=True, figsize=(SIZE, SIZE))\nfor i in range(0, SIZE):\n for j in range(0, SIZE):\n arr = X[i * SIZE + j].reshape((32, 32), order='F')# order 'F' is Fortan-Style!\n ax_array[i, j].matshow(arr, cmap=plt.cm.gray)\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))\nplt.show()\nX = normalize(X)\nX_approx = pca(X, 100)\nprint(X_approx.shape)\n\n# Plot the data using low dimentation images\nfig = plt.figure(figsize=(10, 10))\nSIZE = 10\nfig, ax_array = plt.subplots(SIZE, SIZE, sharey=True, sharex=True, figsize=(SIZE, SIZE))\nfor i in range(0, SIZE):\n for j in range(0, SIZE):\n arr = X_approx[i * SIZE + j].reshape((32, 32), order='F')\n ax_array[i, j].matshow(arr, cmap=plt.cm.gray)\n plt.xticks(np.array([]))\n plt.yticks(np.array([]))\nplt.show()\n","repo_name":"j-yi-11/CS229-Self-Learning","sub_path":"PrincipleComponentAnalysis/PrincipleComponentAnalysis.py","file_name":"PrincipleComponentAnalysis.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"35698455841","text":"from scrapy import Spider\nfrom itertools import chain\n\nfrom thespy.items import ThespyItem\n\n\ndef extract_synopsis(current_movie, paragraph_num):\n \"\"\"\n Check paragraph for synopsis\n :param current_movie: The current movie\n :param paragraph_num: The current paragraph\n :return: The movie synopsis if present or None\n \"\"\"\n raw_synopsis_list = current_movie.xpath(\n 'p[' + str(paragraph_num) + ']/text()'\n ).extract()\n\n # Format synopsis if synopsis is present else return None\n return format_synopsis(raw_synopsis_list) if raw_synopsis_list else None\n\n\ndef format_synopsis(synopsis_list):\n \"\"\"\n Makes sure synopsis is actual movie synopsis and not movie Show Times\n :param synopsis_list: Supposed synopsis. Content found within the paragraph\n :return: Formatted synopsis, striped of non-characters or break out of the function\n \"\"\"\n raw_synopsis = ''.join(synopsis_list)\n days = ['Mon:', 'Tue:', 'Wed:', 'Thur:', 'Fri:', 'Sat:', 'Sun:',]\n\n if any(day in raw_synopsis for day in days):\n\n # Paragraph contains movie Show Time... wrong paragraph! Exit function\n return\n\n # Remove unicode characters from movie synopsis...\n new_synopsis = ''.join(\n [elem if ord(elem) < 128 else '' for elem in raw_synopsis]\n )\n\n # Remove whitespace characters (try import string; translate(None, string.whitespace))\n return ' '.join(\n str(new_synopsis).translate(None, '\\t\\n').split()\n )\n\n\nclass OzSpy(Spider):\n name = 'ozonecinemas'\n allowed_domains = ['ozonecinemas.com',]\n start_urls = ['http://ozonecinemas.com/now_showing.htm',]\n\n def parse(self, response):\n raw_data = response.xpath(\"//td[@id='content-area']/table//table//td\")\n\n # Extract and assign relevant data to the movies list\n movies_tuples = zip(raw_data[2:-2:3], raw_data[3:-2:3])\n movies = list(chain(*movies_tuples))\n base_url = 'http://ozonecinemas.com/'\n\n for movie in movies:\n item = ThespyItem()\n\n # Movie Image\n rel = movie.xpath('img/@src').extract()\n if rel:\n image_url = [base_url + ' '.join(str(rel[0]).split())]\n item['image_urls'] = image_url\n\n # Movie Title\n raw_title = movie.xpath('p/strong/text()').extract()\n if raw_title:\n m_title = ' '.join(str(raw_title[0]).split())\n item['title'] = m_title\n\n # Movie Starring\n l_starring = movie.xpath('p/strong/text()').extract()\n r_starring = movie.xpath('p/strong[2]/following-sibling::text()[1]').extract()\n\n if l_starring and r_starring:\n l = [x.strip() for x in r_starring[0].split(',')]\n\n actors = []\n for j in l:\n j = ''.join([k if ord(k) < 128 else '' for k in j])\n actors.append(' '.join(str(j).split()))\n\n actors = ', '.join(actors)\n item['starring'] = actors\n\n # Movie Show times\n show_times = movie.xpath(\"p[text()][contains(., 'Mon:') or \"\n \"contains(., 'Tue:') or\"\n \"contains(., 'Wed:') or \"\n \"contains(., 'Thur:') or \"\n \"contains(., 'Fri:') or \"\n \"contains(., 'Sat:') or \"\n \"contains(., 'Sun:')]\").extract()\n\n if show_times:\n lst_times = []\n for i in show_times:\n lst_times.append(\n ' '.join(str(i).replace('&', '&').strip('
').translate(\n None, '\\t\\n'\n ).split())\n )\n\n item['show_times'] = ''.join(lst_times)\n\n # Movie Synopsis\n movie_synopsis = []\n\n # Loop through paragraphs 6-10 searching for movie synopsis\n for i in range(6, 10):\n formatted_synopsis = extract_synopsis(movie, i)\n if formatted_synopsis:\n movie_synopsis.append(formatted_synopsis)\n break\n\n if movie_synopsis:\n item['synopsis'] = ''.join(movie_synopsis)\n\n yield item\n","repo_name":"iamlordaubrey/thespy","sub_path":"thespy/spiders/thewizard.py","file_name":"thewizard.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"10722749965","text":"import pickle\nimport os\nimport sys\nfrom calc_distance_by_matching_vertices import calc_espectre_distance\n\n\n# ARGS\nmatching_folder = sys.argv[1]\n# a = float(sys.argv[2])\n# b = float(sys.argv[3])\n# c = float(sys.argv[4])\n# d = float(sys.argv[5])\nthreshold_factors = []#[a, b, c, d]\n\n# MAIN\nmatching_results_list = os.listdir(matching_folder)\n\ntempl_query_aux_list = [] #tuples\n\ndef get_minor_value(comp_dict):\n minor_val = float('Inf')\n minor_templ_name = \"\"\n minor_pts = None\n\n for (templ_name, (dist, n_pts)) in comp_dict.items():\n if dist < minor_val and n_pts >= 15:\n minor_val = dist\n minor_templ_name = templ_name\n minor_pts = n_pts\n \n return (minor_templ_name, minor_val, minor_pts)\n\n\nfor matching_filename in matching_results_list:\n\n #print(matching_filename.split(\"-\"))\n [template_name, query_name, _] = matching_filename.split(\"-\")\n #template_name = template_name[3:]\n #query_name = query_name[3:]\n full_file_name = matching_folder + \"/\" + matching_filename\n dist, n_pts = calc_espectre_distance(full_file_name, threshold_factors)\n\n templ_query_aux_list.append((template_name, query_name, dist, n_pts))\n\n# Assembly dictionary\nmatrix_distance_dict = {}\n\nfor (template_name, query_name, dist, n_pts) in templ_query_aux_list:\n\n if not(query_name in matrix_distance_dict):\n matrix_distance_dict[query_name] = {}\n \n matrix_distance_dict[query_name][template_name] = dist, n_pts\n\n#print(\"dict: \", matrix_distance_dict)\nacertos = 0\n\nfor (query_name, comp_dict) in matrix_distance_dict.items():\n (minor_templ_name, minor_val, n_pts) = get_minor_value(comp_dict)\n matching_status = \"ERROR\"\n \n if minor_templ_name == query_name.split('_')[0]:\n matching_status = \"OK!\"\n acertos = acertos + 1\n else:\n matching_status = matching_status + \" (\" + query_name.split('_')[0] + \", \" + str(comp_dict[query_name.split('_')[0]]) + \")\"\n \n print(query_name + \" x \" + minor_templ_name + \": \" + str(minor_val) + \" \" + matching_status + \" pts = \" + str(n_pts))\n\nn_items = len(matrix_distance_dict.items())\n\nacuracia = float(acertos)/n_items\n\nprint(\"Acertos = \", acertos)\nprint(\"Accuracy = \", acuracia)","repo_name":"Charamba/Cross-Ratio-Arrays-Shape-Descriptor","sub_path":"src/test_script_distance_matrix_generation.py","file_name":"test_script_distance_matrix_generation.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"18634249936","text":"import random\nimport math\n\ndef len_string(A):\n\ti=0\n\tfor j in (A):\n\t\ti += 1\n\treturn i\n\ndef sumar(A):\n\tresult=0\n\tfor j in (A):\n\t\tresult += j\n\treturn result\n\ndef multiplicar(A):\n\tif len(A) == 0:\n\t\treturn 0\n\tresult=1\t\n\tfor j in (A):\n\t\tresult *= j\n\t\t#print(result)\n\treturn result\n\ndef inversa_string(A):\n\tj=0\n\tinv_str = \"\"\n\tfor i in range(len(A)-1,-1,-1):\n\t\t#print(A[i])\n\t\tinv_str += A[i]\n\treturn inv_str\t\n\ndef histograma(A):\n\tfor i in (A):\n\t\tprint(\"*\"*i)\n\ndef nMayusculas (S):\n\tn=0\n\tfor l in S:\n\t\tif l.isalpha() and l.isupper():\n\t\t\tn += 1\n\treturn n\n\ndef nEdades(A, x):\n\tn=0\n\tfor i in (A):\n\t\tif i > x:\n\t\t\tn += 1\n\t\t\t#print(\"i=\",i,\" n=\",n)\n\treturn n\n\ndef es_bisiesto(anio):\n\tes_bis = False\n\tif (anio % 4 == 0 and anio % 100 != 0) or (anio % 400 == 0):\n\t\tes_bis = True\n\treturn es_bis\n\ndef adivinar_string(A, N):\n\tif A==N:\n\t\tprint(\"Adivinaste el numero !!!\", A)\n\t\treturn True\n\ti=0\n\tnum=0\n\tfor d in (A):\n\t\tif d == N[i]:\n\t\t\tnum += 1\n\t\ti+=1\n\tprint (\"Haz adivinado: \",num, \" numeros\")\n\treturn False\n\ndef Future_Value(C,i,n):\n\treturn C*(1+i/100)**n\n\ndef distancia (array):\n\tdistancia = 0\n\tfor i in range(len(array)):\n\t\tdistancia += (array[i][0] - array[i][1])**2\n\tprint(\"Distancias al cuadrado: \", distancia)\n\tdistancia = math.sqrt(distancia)\n\treturn distancia\n\ndef IsCasiPalindromo(word):\n\tlgw = len(word)-1\n\tmitad = int(len(word)//2)\n\tcasi = 0\n\tfor i in range(mitad):\n\t\tif word[i]!=word[lgw]:\n\t\t\tcasi+=1\n\t\tlgw -= 1\n\tif casi <= 1:\n\t\treturn(True)\n\telse:\n\t\treturn(False)\n\ndef numMasPopular(array):\n\t#Returna el numero de cantidad repetidas mayor, pero si el numero se repite, retorna el mayor de los dos \n\tnum = {}\n\tfor i in array:\n\t\tif i in num:\n\t\t\tnum[i] += 1\n\t\telse:\n\t\t\tnum[i] = 1\n\n\tmayor = 0\n\tnumero = None\n\tfor i in num:\n\t\tprint(i, num[i])\n\t\tif num[i] > mayor:\n\t\t\tmayor = num[i]\n\t\t\tnumero = i\n\t\telif num[i] == mayor and i > numero:\n\t\t\tnumero = i\n\t\t\t\n\treturn numero\n\ndef fizzBuzz(n):\n\t#if not (0\",nEdades((1,21,20,15,25),20))\n#ano = int(input(\"Ingrese año:\"))\n#print (\"Es bisiesto el año: \",ano,\" result= \", es_bisiesto(ano))\n\n\"\"\" comentareado desde aqui: Llamado al numero\nlong=int(input(\"Digite longitud: \"))\n\naleatorio=\"\"\nfor i in range(long):\n\ta = random.randint(0, 9)\n\taleatorio += str(a)\n\nprint(\"Numero aleatorio: \",aleatorio)\nintentos=0\nwhile True:\n\tnum=input(\"Digite numero de {} cifras: \".format(long))\n\tadivinar=adivinar_string(aleatorio, num)\n\tintentos += 1\n\tif intentos >= 3 or adivinar:\n\t\tbreak\n\n hasta aqui..\"\"\"\n\n\n\n\n#*** Funciones ***\n#s.find(\"world\")\n#s.startswith(\"Hola\")\n#s.endswith(\"mundo\")\n#\"1234\".isnumeric()\n#\"abc123\".isalnum()\n#\"abcdef\".islower()\n#\"hola mundo\".capitalize()\n#s = \" Hola mundo! \"; s.strip()\n#s = \"Hola mundo\"; s.replace(\"mundo\", \"world\")\n#\"Hola mundo!\\nHello world!\".split()\n\n","repo_name":"jeimararias/erptestjav","sub_path":"src/prueba_funciones.py","file_name":"prueba_funciones.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26883993049","text":"# count_True.py\n# By THC. Shows how a while-loop can count using a boolean variable.\n\nfrom time import sleep\n\nx = True\nprint(x)\nsleep(3) # so we can see the value of x before all heck breaks loose\n\nn = 1\n\nwhile x:\n print(n)\n n = n + 1\n","repo_name":"electronsandbits/python-learning","sub_path":"CS1/Fall_2019/week_2/count_True.py","file_name":"count_True.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43729943195","text":"from xml.dom import minidom\n\nfrom Products.Five.browser import BrowserView\n\nfrom Products.CMFCore.utils import getToolByName\nimport transaction\n\nfrom pcommerce.core.interfaces import IPaymentProcessor\nfrom pcommerce.core.interfaces import IOrderRegistry\nfrom time import time\nfrom urllib import urlencode\nfrom urllib2 import urlopen, Request\n\nimport logging\nlogger = logging.getLogger(\"Plone\")\n\nclass ProcessPaypal(BrowserView):\n \"\"\"process Paypal payments\n \"\"\"\n\n def __call__(self):\n lang = self.request.get('QUERY_STRING', None)\n if not lang:\n lang = None\n data = self.request.form\n # If there is no txn_id in the received arguments don't proceed\n if not \"txn_id\" in data:\n return \"No Parameters\"\n \n # Verify the data received with Paypal\n if not self.verify_ipn(data):\n logger.info(\"pcommerce.payment.paypal: Error with paypal verify\")\n return \"Error with paypal\" \n else:\n processor = IPaymentProcessor(self.context)\n return processor.processOrder(data['item_number1'], 'pcommerce.payment.paypal', lang)\n\n def verify_ipn(self,data):\n # prepares provided data set to inform PayPal we wish to validate the response\n data[\"cmd\"] = \"_notify-validate\"\n params = urlencode(data)\n\n props = getToolByName(self.context, 'portal_properties').paypal_properties\n # sends the data and request to the PayPal Sandbox\n paypalurl = self.getPayPalURL()\n req = Request(paypalurl, params)\n req.add_header(\"Content-type\", \"application/x-www-form-urlencoded\")\n # reads the response back from PayPal\n response = urlopen(req)\n status = response.read()\n # If not verified\n if not status == \"VERIFIED\":\n return False\n \n # if not the correct receiver ID\n if not data[\"receiver_id\"] == props.receiver_id:\n return False\n \n # if not the correct currency\n if not data[\"mc_currency\"] == \"EUR\":\n return False\n # already processed?\n order_registry = IOrderRegistry(self.context)\n order = order_registry.getOrder(int(data['item_number1']))\n try:\n if order.txn_id == data['txn_id']:\n logger.info(\"pcommerce.payment.paypal: Transaction already processed\")\n return False\n except:\n order.txn_id = data['txn_id'] \n transaction.commit() \n # otherwise...\n return True\n\n def getPayPalURL(self):\n \"\"\"\"\"\"\n context = self.context \n props = getToolByName(context, 'portal_properties').paypal_properties\n if props.test:\n return \"\"\"https://www.sandbox.paypal.com/cgi-bin/webscr\"\"\"\n return \"\"\"https://www.paypal.com/cgi-bin/webscr\"\"\"\n","repo_name":"Gomez/pcommerce.payment.paypal","sub_path":"pcommerce/payment/paypal/browser/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10494802058","text":"# Сервер для відповіді на повідомлення\nimport socket\n\nm1 = \"Hello!\"\nm2 = \"How are you?\"\nm3 = \"What is your name?\"\na1 = \"H!\"\na2 = \"Thanks,I am fine!\"\na3 = \"Intel.\"\na4 = \"Sorry, I do not understand you.\"\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind(('localhost', 55000))\nsock.listen(10)\nprint('Server is running, please, press ctrl+c to stop')\nwhile True:\n conn, addr = sock.accept()\n print('connected:', addr)\n data = conn.recv(1024)\n data_str = (data.decode())\n print(data_str)\n if data_str == m1:\n conn.send(bytes(a1, encoding='UTF-8'))\n elif data_str == m2:\n conn.send(bytes(a2, encoding='UTF-8'))\n elif data_str == m3:\n conn.send(bytes(a3, encoding='UTF-8'))\n else:\n conn.send(bytes(a4, encoding='UTF-8'))\nconn.close()","repo_name":"5Slava5/Viacheslav_Shapoval","sub_path":"dz_11/dz_11_2_server.py","file_name":"dz_11_2_server.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"18693416484","text":"### CLASSIC_EBM\n### Jake Aylmer\n###\n### Plotting sub-routines.\n### ---------------------------------------------------------------------------\n\nfrom __future__ import division\nimport parameters as pm, analytics as an, math_methods as math\nimport numpy as np\nimport matplotlib as mpl, matplotlib.pyplot as plt\n\n\ndef StabilityPlot(xi, norm_Q_arrays, relative_D, cols=['k']):\n \"\"\"Generate a plot of x_i against Q given multiple data sets for different\n values of D. Plots are added with labels so that a legend may be added to \n the returned MatPlotLib axis object if desired. (Returns (fig, ax)).\n \n --Args--\n xi : NumPy array, coordinates of sine of ice-edge latitude.\n norm_Q_arrays : NumPy array of shape (len(relative_D), len(xi)). Contains\n a set of Q(x_i) data for each value of D = relative_D*D0.\n relative_D : NumPy array containing the values of D relative to (i.e. in\n units of) the standard value of D (D_0), in the order\n corresponding to the data sets in norm_Q_arrays.\n (cols) : array containing MatPlotLib color identifiers which are\n selected sequentially as the plots are added, cycling back\n to the beginning if len(cols) < len(relative_D).\n \"\"\"\n fig, ax = plt.subplots()\n ax.axhline(0.0, color=[.5,.5,.5], linewidth=0.8)\n ax.axhline(1.0, color=[.5,.5,.5], linewidth=0.8)\n \n for k in xrange(len(norm_Q_arrays)):\n \n xi_split, Q_split = math.SplitByGradient(xi, norm_Q_arrays[k])\n \n for j in xrange(len(Q_split)):\n lnst = '--' if Q_split[j][1]1)) )\n fig.tight_layout()\n return fig, ax\n\n\ndef PlotTemperature(x, T, xi):\n \"\"\"\"\"\"\n fig, ax1 = plt.subplots()\n ax1.axvline(xi, linestyle='--', color='b')\n ax1.plot(x, T, color='k')\n \n ax2 = ax1.twiny()\n ax2.set_xticks( np.sin((np.pi/180)*np.arange(0, 90.1, 10)) )\n ax2.set_xticklabels( np.arange(0, 90, 10) )\n \n ax1.set_xlabel(r'$x=\\sin \\phi$')\n ax1.set_ylabel(r'Surface temperature, $T$ ($^\\circ$C)')\n ax2.set_xlabel(r'Latitude, $\\phi$ (deg)', y=2)\n ax2.tick_params(axis='both', which='major', labelsize=17, pad=0)\n ax2.grid(False, which='both')\n ax2.minorticks_off()\n fig.tight_layout()\n return fig, ax1\n\n\ndef PlotHeatTransport(x, HT, xi, latitude_axis=False):\n \"\"\"Plot the zonally integrated heat transport in PW over the hemisphere for\n a given solution to the EBM (note that heat transports are input to this\n function in W, which are then converted to PW automatically). Returns the\n MatPlotLib figure and axis objects (fig, ax).\n \n --Args--\n x : (NumPy) array, containing x-coordinates between 0 and 1.\n HT : (NumPy) array, containing Heat transport [W] at each x\n coordinate.\n xi : float, sine of ice-edge latitude.\n (latitude_axis) : bool, whether to convert to latitude (deg).\n \"\"\"\n fig, ax = plt.subplots()\n if latitude_axis:\n x = np.degrees(np.arcsin(x))\n xi = np.degrees(np.arcsin(xi))\n ax.set_xlim([0,90])\n ax.set_xlabel(r'Latitude, $\\phi$ ($^\\circ$)')\n else:\n ax.set_xlim([0,1])\n ax.set_xlabel(r'$x=\\sin \\phi$')\n ax.axvline(xi, linestyle='--', label=r'Ice edge')\n ax.plot(x, HT/(1E15), color='k')\n ax.set_ylabel(r'Poleward Heat Transport (PW)')\n fig.canvas.set_window_title('HeatTransport')\n fig.tight_layout()\n return fig, ax\n\n\ndef PlotHeatFluxConvergence(x, HFC, xi, latitude_axis=False):\n \"\"\"Plot the heat flux convergence (HFC) [W m^-2] over the hemisphere for a\n given solution to the EBM. 
Returns the MatPlotLib figure and axis objects\n (fig, ax).\n \n --Args--\n x : (NumPy) array, containing x-coordinates between 0 and 1.\n HFC : (NumPy) array, containing heat flux convergences [W m^-2]\n at each x.\n xi : float, sine of ice-edge latitude.\n (latitude_axis) : bool, whether to convert to latitude (deg).\n \"\"\"\n fig, ax = plt.subplots()\n ax.axhline(0, color=[.2,.2,.2], linewidth=0.8)\n if latitude_axis:\n x = np.degrees(np.arcsin(x))\n xi = np.degrees(np.arcsin(xi))\n ax.set_xlim([0,90])\n ax.set_xlabel(r'Latitude, $\\phi$ ($^\\circ$)')\n else:\n ax.set_xlim([0,1])\n ax.set_xlabel(r'$x=\\sin \\phi$')\n ax.axvline(xi, linestyle='--', label=r'Ice edge')\n ax.plot(x, HFC, color='k')\n ax.set_ylabel(r'Heat flux convergence (W m$^{-2}$)')\n fig.canvas.set_window_title('HeatFluxConvergence')\n fig.tight_layout()\n return fig, ax\n\n\ndef PlotHFCIceEdge(relative_D=np.array([0.75,1.0,1.25]), smooth_coalbedo=False,\n add_linear_fit=False):\n \"\"\"Plot the heat flux convergence (HFC) at the ice edge as the ice edge\n varies (i.e. HFC(x=xi) vs xi) for each value of D = relative_D * D0 where\n D0 is standard value (set in the parameters file). Calculations are done\n here and the MatPlotLib figure and axis objects (fig, ax) are returned.\n \n --Args--\n (relative_D) : (NumPy) array of values of D to be used in units of D0.\n (smooth_coalbedo) : bool, whether to use the smoothed coalbedo function.\n (add_linear_fit) : bool, if True, adds a linear fit to the first data set.\n \"\"\"\n \n xi = np.arange(0.0, 1.001, 0.01)\n HFC = np.zeros( (len(relative_D), len(xi)) )\n \n for j in xrange(len(relative_D)):\n for k in xrange(len(xi)):\n Q = an.Q(xi[k], pm.D*relative_D[j], smooth_coalbedo)\n HFC[j][k] = an.HeatFluxConvergence(xi[k], xi[k], Q,\n pm.D*relative_D[j], smooth_coalbedo)\n \n fig, ax = plt.subplots()\n ax.axhline(0, color=[.2,.2,.2], linewidth=0.8)\n firstplot = ax.plot(xi, HFC[0], label=r'$D/D_0=%.2f$' % relative_D[0])\n \n if add_linear_fit:\n a = np.argmin(abs(xi-pm.xi_HFC_lim1)) # index of lowest xi to fit to\n b = np.argmin(abs(xi-pm.xi_HFC_lim2)) # index of upper xi to fit to\n fit = np.polyfit(xi[a:b], HFC[0][a:b], 1)\n ax.plot(xi[a:b], fit[1]+fit[0]*xi[a:b], linestyle='--',\n color=firstplot[0].get_color() )\n \n for j in xrange(1, len(relative_D)):\n ax.plot(xi, HFC[j], label=r'$D/D_0=%.2f$' % relative_D[j])\n ax.set_xlim([0,1])\n ax.set_xlabel(r'Ice edge position, $x_\\mathrm{i}=\\sin\\phi_\\mathrm{i}$')\n ax.set_ylabel(r'Heat flux convergence (W m$^{-2}$)')\n ax.legend(loc='upper left')\n fig.canvas.set_window_title(\n 'HeatFluxConvergenceIceEdge' + '_Multiple'*(len(relative_D)>1))\n fig.tight_layout()\n return fig, ax\n\n\n###############################################################################\n\n\ndef SetRCParams():\n \"\"\"Set default MatPlotLib formatting styles (rcParams) which will be set\n automatically for any plotting method.\n \"\"\"\n # FONTS (NOTE: SOME OF THESE ARE SET-ORDER DEPENDENT):\n mpl.rcParams['font.sans-serif'] = 'Calibri' #Set font for sans-serif style\n mpl.rcParams['font.family'] = 'sans-serif' #Choose sans-serif font style\n mpl.rcParams['mathtext.fontset'] = 'custom' #Allow customising maths fonts\n mpl.rcParams['mathtext.rm'] = 'sans' #Maths roman font in sans-serif format\n mpl.rcParams['mathtext.it'] = 'sans:italic' #Maths italic font\n mpl.rcParams['mathtext.default'] = 'it' #Maths in italic by default\n \n # PLOT ELEMENT PROPERTIES:\n mpl.rcParams['lines.linewidth'] = 1.5 #Default plot linewidth (thickness)\n 
mpl.rcParams['lines.markersize'] = 4 #Default marker size (pts)\n mpl.rcParams['lines.markeredgewidth'] = 0 #Default marker edge width (pts)\n \n # LABEL PROPERTIES:\n mpl.rcParams['axes.titlesize'] = 20 #Title font size (pts)\n mpl.rcParams['axes.labelsize'] = 19 #Axis label font sizes (pts)\n mpl.rcParams['xtick.labelsize'] = 18 #X-tick label font size (pts)\n mpl.rcParams['ytick.labelsize'] = 18 #Y-tick label font size (pts)\n \n # GRID PROPERTIES:\n mpl.rcParams['axes.grid'] = True #Major grid on by default\n mpl.rcParams['grid.color'] = 'bfbfbf' #Grid line color\n mpl.rcParams['xtick.minor.visible'] = True #X-minor ticks on by default\n mpl.rcParams['ytick.minor.visible'] = True #Y-minor ticks on by default\n mpl.rcParams['xtick.major.pad'] = 8 #X-major tick padding\n mpl.rcParams['ytick.major.pad'] = 8 #Y-major tick padding\n mpl.rcParams['axes.axisbelow'] = True\n \n # LEGEND PROPERTIES:\n mpl.rcParams['legend.fancybox'] = False #Whether to use a rounded box\n mpl.rcParams['legend.fontsize'] = 16 #Legend label font size (pts)\n mpl.rcParams['legend.framealpha'] = 1 #Legend alpha (transparency)\n mpl.rcParams['legend.edgecolor'] = '#000000' #\n \n # GENERAL FIGURE PROPERTIES\n mpl.rcParams['figure.figsize'] = 8, 6 #Figure window size (inches)\n mpl.rcParams['savefig.format'] = 'pdf' #Default format to save to\n \n pass\n","repo_name":"nholschuh/CLASSIC_EBM","sub_path":"src/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":9787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"18026627352","text":"\n# Google Search Engine\n\n# imported necessary libraries\nfrom tkinter import * \nimport tkinter as tk\nimport webbrowser\nfrom PIL import ImageTk, Image\nfrom googlesearch import search\n\n\n# created main window\nroot = tk.Tk()\nroot.title(\"Google Search Engine\")\nroot.geometry(\"1000x700\")\nroot.iconbitmap('Images/google_icon.ico')\n\n# created a callback function to open linked web browser\ndef callback(url):\n webbrowser.open_new_tab(url)\n\n# function defined to search any things in this google search engine\ndef search_query():\n query = text.get(\"1.0\",\"end-1c\")\n s = search(query, tld=\"co.in\", num=10, stop=1, pause=2)\n for j in s:\n print(webbrowser.open(j))\n\n\n\n# label to create top blask strip\nl1 = Label(root,bg=\"black\",width=500,height=2)\nl1.grid(sticky=\"w\")\n\n# app logo added\napps_logo = ImageTk.PhotoImage(Image.open('Images/apps.jpg'))\napps_logo_lbl = Label(root, image = apps_logo,borderwidth=0)\napps_logo_lbl.place(x=13,y=11)\napps_lbl = Label(root,text=\"Apps\",bg=\"black\",fg=\"white\",cursor=\"hand2\")\napps_lbl.place(x=30,y=10)\napps_lbl.bind(\"\",lambda e: callback(\"https://about.google/intl/en/products/\"))\n\n\n# google drive drive logo added\ndrive_logo = ImageTk.PhotoImage(Image.open('Images/Google drive.png'))\ndrive_logo_lbl = Label(root, image = drive_logo,borderwidth=0)\ndrive_logo_lbl.place(x=85,y=8)\ndrive_lbl = Label(root,text=\"Google Drive\",bg=\"black\",fg=\"white\",cursor=\"hand2\")\ndrive_lbl.place(x=110,y=10)\ndrive_lbl.bind(\"\",lambda e: callback(\"https://drive.google.com/drive/u/0/my-drive\"))\n\n\n#youtube logo added\nyt_logo = ImageTk.PhotoImage(Image.open('Images/youtube.png'))\nyt_logo_lbl = Label(root, image = yt_logo,borderwidth=0)\nyt_logo_lbl.place(x=210,y=8)\nyt_lbl = Label(root,text=\"YouTube\",bg=\"black\",fg=\"white\",cursor=\"hand2\")\nyt_lbl.place(x=240,y=10)\nyt_lbl.bind(\"\",lambda e: callback(\"https://www.youtube.com/\"))\n\n\n# gmail logo added\ngmail_logo = ImageTk.PhotoImage(Image.open('Images/gmail.jpg'))\ngmail_logo_lbl = Label(root, image = gmail_logo,borderwidth=0)\ngmail_logo_lbl.place(x=315,y=8)\ngmail_lbl = Label(root,text=\"Gmail\",bg=\"black\",fg=\"white\",cursor=\"hand2\")\ngmail_lbl.place(x=340,y=10)\ngmail_lbl.bind(\"\",lambda e: callback(\"https://mail.google.com/mail/\"))\n\n\n# right gmail link\ng_word = Label(root,text=\"Gmail\",cursor=\"hand2\")\ng_word.place(x=810,y=55)\ng_word.bind(\"\",lambda e: callback(\"https://mail.google.com/mail/\"))\n\n# right images link\ni_word = Label(root,text=\"Images\",cursor=\"hand2\")\ni_word.place(x=850,y=55)\ni_word.bind(\"\",lambda e: callback(\"https://www.google.co.in/imghp?hl=en&tab=wi&ogbl\"))\n\n# created signin button\nsigninb = Button(root,text=\"sign in\",font=('roboto',10,'bold'),bg=\"#4583EC\",fg=\"white\",cursor=\"hand2\")\nsigninb.place(x=920,y=50)\nsigninb.bind(\"\",lambda e: callback(\"https://accounts.google.com/signin/v2/identifier?hl=en&continue=https%3A%2F%2Fwww.google.com%2F&ec=GAlAmgQ&flowName=GlifWebSignIn&flowEntry=AddSession\"))\n\n\n# google big logo added\ng_logo = ImageTk.PhotoImage(Image.open('Images/google logo.png'))\nl2 = Label(root, image = g_logo)\nl2.place(x=350,y=190)\n\n\n# search entry box added\ntext = Text(root,width=90,height=2,relief=RIDGE,font=('roboto',10,'bold'),borderwidth=2)\ntext.place(x=170,y=300)\n\n# search button added\nsearch1 = Button(root, text=\"Google 
Search\",relief=RIDGE,font=('arial',10),bg=\"#F3F3F3\",fg=\"#222222\",cursor=\"hand2\",command=search_query)\nsearch1.place(x=350,y=360)\n\n\n# Lucky Button added\nlucky = Button(root, text=\"i' m Felling Lucky\",relief=RIDGE,font=('arial',10),bg=\"#F3F3F3\",fg=\"#222222\",cursor=\"hand2\")\nlucky.place(x=500,y=360)\nlucky.bind(\"\",lambda e: callback(\"https://www.google.com/doodles\"))\n\n\n# different language offered label\noffered = Label(root,text=\"Google offered in:\")\noffered.place(x=240,y=410)\nlang = Label(root,text=\"हिन्दी বাংলা తెలుగు मराठी தமிழ் ગુજરાતી ಕನ್ನಡ മലയാളം ਪੰਜਾਬੀ\",fg=\"blue\")\nlang.place(x=350,y=410)\n\n# About label\nabout_lbl = Label(root,text=\"About\",cursor=\"hand2\")\nabout_lbl.place(x=50,y=650)\nabout_lbl.bind(\"\",lambda e: callback(\"https://about.google/?utm_source=google-IN&utm_medium=referral&utm_campaign=hp-footer&fg=1\"))\n\n# advertising label\nad_lbl = Label(root,text=\"Advertising\",cursor=\"hand2\")\nad_lbl.place(x=100,y=650)\nad_lbl.bind(\"\",lambda e: callback(\"https://ads.google.com/intl/en_in/home/?subid=ww-ww-et-g-awa-a-g_hpafoot1_1!o2&utm_source=google.com&utm_medium=referral&utm_campaign=google_hpafooter&fg=1\"))\n\n# business label\nbusiness_lbl = Label(root,text=\"Business\",cursor=\"hand2\")\nbusiness_lbl.place(x=180,y=650)\nbusiness_lbl.bind(\"\",lambda e: callback(\"https://www.google.com/intl/en_in/business/\"))\n\n# how search works label\nsearch_work_lbl = Label(root,text=\"How Search works\",cursor=\"hand2\")\nsearch_work_lbl.place(x=250,y=650)\nsearch_work_lbl.bind(\"\",lambda e: callback(\"https://www.google.com/search/howsearchworks/?fg=1\"))\n\n# privacy label\nprivacy_lbl = Label(root,text=\"Privacy\",cursor=\"hand2\")\nprivacy_lbl.place(x=850,y=650)\nprivacy_lbl.bind(\"\",lambda e: callback(\"https://policies.google.com/privacy?hl=en-IN&fg=1\"))\n\n# terms label\nterms_lbl = Label(root,text=\"Terms\",cursor=\"hand2\")\nterms_lbl.place(x=900,y=650)\nterms_lbl.bind(\"\",lambda e: callback(\"https://policies.google.com/terms?hl=en-IN&fg=1\"))\n\nroot.mainloop()","repo_name":"prathimacode-hub/Awesome_Python_Scripts","sub_path":"GUIScripts/Google Search Engine/google_search_engine.py","file_name":"google_search_engine.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":317,"dataset":"github-code","pt":"47"}
+{"seq_id":"2547918040","text":"import copy\n\nimport torch\nfrom sklearn.decomposition import TruncatedSVD as PCA\n\nfrom utils.utils import getFloatSubModules\n\n\ndef getPCA(X, n_components):\n out = getPCA_torch_over(X, n_components)\n return out\n\n\ndef getPCA_sklearn(X, n_components):\n n_components = min(n_components, X.shape[0] - 1)\n pca = PCA(n_components=n_components)\n pca.fit(X.permute(1, 0))\n projection = pca.transform(X.permute(1, 0))\n out = torch.Tensor(projection.T)\n return out\n\n\ndef getPCA_torch_under(X, n_components):\n '''\n X is m by n. m is the number of features, n is the number of samples.\n When m (dict):\n '''\n apply function `f` to each submodules of `Delta`\n '''\n param_float = getFloatSubModules(Delta)\n\n result = dict(((k, f(Delta[k])) for k in param_float))\n out = copy.deepcopy(Delta)\n out.update(result)\n\n return out\n\n\ndef net2vec(net) -> (torch.Tensor):\n '''\n convert state dict to a 1 dimension Tensor\n \n Delta : torch module state dict\n \n return\n vec : torch vector with shape([d]), d is the number of Float elements in `Delta`\n '''\n param_float = getFloatSubModules(net)\n\n components = []\n for param in param_float:\n components.append(net[param])\n vec = torch.cat([component for component in components])\n return vec\n\n\ndef _convertWithPCA(data):\n proj = applyToEachSubmodule(data, lambda x: getPCA(x.cpu(), 10))\n proj_vec = net2vec(proj)\n return proj_vec\n\n\ndef convertWithPCA(path_to_data):\n data = torch.load(path_to_data)\n proj_vec = _convertWithPCA(data)\n # save path defaulted to 'xxxxx/pca_FedAvg_i.pt'\n sub = path_to_data.split(\"/\")\n sub[-1] = \"pca_\" + sub[-1]\n savepath = \"/\".join(sub)\n\n torch.save(proj_vec, savepath)\n print(f\"Done, saved to \\n\\t{savepath}\")\n\n\nif __name__ == \"__main__\":\n import glob\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--path_to_data_folder\", type=str, required=True,\n help=\"path to the data to be converted, e.g. \\'./AggData/train_noiid_cifar/backdoor_2/\\'\")\n args = parser.parse_args()\n\n print(\"#\" * 64)\n for i in vars(args):\n print(f\"#{i:>20}: {str(getattr(args, i)):<20}#\")\n print(\"#\" * 64)\n\n path_to_data_folder = args.path_to_data_folder\n\n paths_to_data = glob.glob(f\"{path_to_data_folder}/FedAvg_*.pt\")\n paths_to_data = sorted(paths_to_data)\n\n for (i, path_to_data) in enumerate(paths_to_data):\n print(f\"{i}/{len(paths_to_data)}:{path_to_data}\")\n convertWithPCA(path_to_data)\n","repo_name":"cpwan/Attack-Adaptive-Aggregation-in-Federated-Learning","sub_path":"utils/convert_pca.py","file_name":"convert_pca.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"47"}
+{"seq_id":"2052029930","text":"# llia.synths.mixer.mixer_data\n\nfrom __future__ import print_function\n\nfrom llia.program import Program\nfrom llia.bank import ProgramBank\nfrom llia.performance_edit import performance\nfrom llia.util.lmath import clip, db_to_amp, amp_to_db\n\nprototype = {\n \"gainA\" : 1.0,\n \"muteA\" : 0, # 0 -> enable, 1 -> mute\n \"modA\" : 0.0, # 0 -> no mod, 1 -> 100% modulation\n \"panA\" : 0.0,\n \"gainB\" : 1.0,\n \"muteB\" : 0,\n \"modB\" : 0.0,\n \"panB\" : 0.0,\n \"gainC\" : 1.0,\n \"muteC\" : 0,\n \"modC\" : 0.0,\n \"panC\" : 0.0,\n \"gainD\" : 1.0,\n \"muteD\" : 0,\n \"modD\" : 0.0,\n \"panD\" : 0.0,\n \"gain1\" : 1,\n \"gain2\" : 1}\n \n\n\nclass Mixer(Program):\n\n def __init__(self, name):\n super(Mixer, self).__init__(name, \"Mixer\", prototype)\n self.performance = performance()\n\nprogram_bank = ProgramBank(Mixer(\"Init\"))\n\n\ndef mixer(slot, name,\n chanA = [-99, 0.0, 0.0, 0], # [gain(db), mod-depth, pan, mute]\n chanB = [-99, 0.0, 0.0, 0],\n chanC = [-99, 0.0, 0.0, 0],\n chanD = [-99, 0.0, 0.0, 0],\n main = [0,0]):\n program=Mixer(name)\n def fill_channel_list(lst):\n acc = []\n for i,dflt in enumerate([-99.0, 0.0, 0.0, 0]):\n try:\n acc.append(float(lst[i]))\n except (IndexError,ValueError,TypeError):\n acc.append(dflt)\n return acc\n def set_channel_params(prefix,chanlist):\n program[\"gain%s\" % prefix] = db_to_amp(chanlist[0])\n program[\"mod%s\" % prefix] = chanlist[1]\n program[\"pan%s\" % prefix] = chanlist[2]\n program[\"mute%s\" % prefix] = chanlist[3]\n set_channel_params(\"A\", fill_channel_list(chanA))\n set_channel_params(\"B\", fill_channel_list(chanB))\n set_channel_params(\"C\", fill_channel_list(chanC))\n set_channel_params(\"D\", fill_channel_list(chanD))\n program[\"gain1\"]=float(db_to_amp(main[0]))\n program[\"gain2\"]=float(db_to_amp(main[1]))\n program_bank[slot] = program\n return program\n\n\ndef pp(program, slot=127):\n return \"\"\n \n\nmixer(0, \"Unity\",\n chanA = [0,0,0,0],\n chanB = [0,0,0,0],\n chanC = [0,0,0,0],\n chanD = [0,0,0,0],\n main = [0,0])\n\nmixer(1, \"MuteAll\",\n chanA = [-99,0,0,1],\n chanB = [-99,0,0,1],\n chanC = [-99,0,0,1],\n chanD = [-99,0,0,1],\n main = [-99,-99])\n \n","repo_name":"plewto/Llia","sub_path":"llia/synths/mixer/mixer_data.py","file_name":"mixer_data.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"47"}
+{"seq_id":"73111894862","text":"import time\nimport threading\nimport pyduke.common.core_util as util\n\ndef tinfo():\n t = threading.current_thread()\n print('[{0}] [{1}] Hello'.format(util.now(), t.name))\n time.sleep(1)\n\ndef main():\n for i in range(3):\n threading.Thread(target=tinfo).start()\n\nif __name__ == '__main__':\n main()\n","repo_name":"cafeduke/learn","sub_path":"Python/Advanced/maga/concurrency/thread_basic.py","file_name":"thread_basic.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20709331990","text":"#!/usr/bin/env python\n\n\"\"\"\n@package mi.core.time\n@file mi/core/time.py\n@author Bill French\n@brief Common time functions for drivers\n\"\"\"\n# Needed because we import the time module below. With out this '.' is search first\n# and we import ourselves.\nfrom __future__ import absolute_import\n\n__author__ = 'Bill French'\n__license__ = 'Apache 2.0'\n\nfrom mi.core.log import get_logger ; log = get_logger()\n\nimport datetime\nimport ntplib\nimport time\nimport re\nfrom dateutil import parser\n\nDATE_PATTERN = r'^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(\\.\\d+)?Z?$'\nDATE_MATCHER = re.compile(DATE_PATTERN)\n\ndef get_timestamp_delayed(format):\n '''\n Return a formatted date string of the current utc time,\n but the string return is delayed until the next second\n transition.\n\n Formatting:\n http://docs.python.org/library/time.html#time.strftime\n\n @param format: strftime() format string\n @return: formatted date string\n @raise ValueError if format is None\n '''\n if(not format):\n raise ValueError\n\n result = None\n now = datetime.datetime.utcnow()\n\n # If we are too close to a second transition then sleep for a bit.\n if(now.microsecond < 100000):\n time.sleep(0.2)\n now = datetime.datetime.utcnow()\n\n current = datetime.datetime.utcnow()\n while(current.microsecond > now.microsecond):\n current = datetime.datetime.utcnow()\n\n return time.strftime(format, time.gmtime())\n\n\ndef get_timestamp(format):\n '''\n Return a formatted date string of the current utc time.\n\n Formatting:\n http://docs.python.org/library/time.html#time.strftime\n\n @param format: strftime() format string\n @return: formatted date string\n @raise ValueError if format is None\n '''\n if(not format):\n raise ValueError\n\n return time.strftime(format, time.gmtime())\n\ndef string_to_ntp_date_time(datestr):\n \"\"\"\n Extract an ntp date from a ISO8601 formatted date string.\n @param str an ISO8601 formatted string containing date information\n @retval an ntp date number (seconds since jan 1 1900)\n @throws InstrumentParameterException if datestr cannot be formatted to\n a date.\n \"\"\"\n if not isinstance(datestr, str):\n raise IOError('Value %s is not a string.' % str(datestr))\n if not DATE_MATCHER.match(datestr):\n raise ValueError(\"date string not in ISO8601 format YYYY-MM-DDTHH:MM:SS.SSSSZ\")\n\n try:\n # This assumes input date string are in UTC (=GMT)\n if datestr[-1:] != 'Z':\n datestr += 'Z'\n\n # the parsed date time represents a GMT time, but strftime\n # does not take timezone into account, so these are seconds from the\n # local start of 1970\n local_sec = float(parser.parse(datestr).strftime(\"%s.%f\"))\n # remove the local time zone to convert to gmt (seconds since gmt jan 1 1970)\n gmt_sec = local_sec - time.timezone\n # convert to ntp (seconds since gmt jan 1 1900)\n timestamp = ntplib.system_to_ntp_time(gmt_sec)\n\n except ValueError as e:\n raise ValueError('Value %s could not be formatted to a date. %s' % (str(datestr), e))\n\n log.debug(\"converting time string '%s', unix_ts: %s ntp: %s\", datestr, gmt_sec, timestamp)\n\n return timestamp\n\ndef time_to_ntp_date_time(unix_time=None):\n \"\"\"\n return an NTP timestamp. 
Currently this is a float, but should be a 64bit fixed point block.\n TODO: Fix return value\n @param unit_time: Unix time as returned from time.time()\n \"\"\"\n if unix_time is None:\n unix_time = time.time()\n\n timestamp = ntplib.system_to_ntp_time(unix_time)\n return float(timestamp)\n","repo_name":"ooici/marine-integrations","sub_path":"mi/core/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"47"}
+{"seq_id":"15264005342","text":"from booksdatasource import BooksDataSource\r\nimport argparse\r\nimport csv\r\n\r\n#CS257 BOOOOOKS project\r\n#written by Aldo Polanco and Michael Xia\r\n\r\ndef get_parsed_arguments():\r\n parser = argparse.ArgumentParser(description='find books!')\r\n parser.add_argument('function', metavar='function', help='sepcify the function you want this program to perform')\r\n parser.add_argument('--searchWord', metavar='searchWord', help='search the title that contains this word')\r\n parser.add_argument('--searchWord2', metavar='searchWord2', help='only useful when searching by year')\r\n parser.add_argument('--title', '-t', action='store_const', const=True, help='sort by title?')\r\n parser.add_argument('--year', '-y', action='store_const', const=True, help='sort by year?')\r\n parser.add_argument('--Help', '-H', action='store_const', const=True, help='want help?')\r\n parsed_arguments = parser.parse_args()\r\n return parsed_arguments\r\n\r\ndef get_list(arguments, books1_data):\r\n if (arguments.function).lower() == 'title':\r\n if (arguments.year and arguments.title):\r\n return False\r\n # added elif for when -t and -y are input as arguments\r\n elif arguments.year:\r\n return books1_data.books(arguments.searchWord, 'year')\r\n else:\r\n return books1_data.books(arguments.searchWord, 'title')\r\n elif (arguments.function).lower() == 'author':\r\n return books1_data.authors(arguments.searchWord)\r\n elif (arguments.function).lower() == 'year':\r\n return books1_data.books_between_years(arguments.searchWord, arguments.searchWord2)\r\n else:\r\n return False\r\n\r\n\r\ndef main():\r\n arguments = get_parsed_arguments()\r\n books1_data = BooksDataSource('books1.csv')\r\n if (arguments.Help or get_list(arguments, books1_data)==False): #added or, to direct user to Help.\r\n with open(\"usage.txt\") as helptext:\r\n help = helptext.read()\r\n print(help)\r\n else:\r\n for i in get_list(arguments, books1_data):\r\n print(i)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"amp05/cs257","sub_path":"books/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"9196968516","text":"import requests\nimport json\nimport time\nimport urllib\nimport hashlib\nimport datetime\nimport pymysql\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nheader = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) Appl\\\neWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36'}\n\npayTimeList = []\n\ndef connect_database():\n mysql_conn = {\n 'host': '127.0.0.1',\n 'port': 3306,\n 'user': 'root',\n 'password': password,\n 'db': dbname,\n 'charset': 'utf8'\n }\n db = pymysql.connect(**mysql_conn)\n cursor = db.cursor()\n return db\n\ndef getSign(ret):\n tuple = sorted(ret.items(), key=lambda e: e[0], reverse=False)\n md5_string = urllib.parse.urlencode(tuple).encode(encoding='utf_8', errors='strict')\n md5_string += b'&p=das41aq6'\n sign = hashlib.md5(md5_string).hexdigest()[5: 21]\n return sign\n\ndef querry_database():\n db = connect_database()\n cursor = db.cursor()\n cursor.execute(\"SELECT SUM(backer_money) FROM strawberry\")\n results = cursor.fetchall()\n for row in results:\n \tdbMoney = row[0]\n \treturn dbMoney\n\ndef querry_payTime():\n db = connect_database()\n cursor = db.cursor()\n cursor.execute(\"SELECT pay_time FROM strawberry WHERE DATE_FORMAT(pay_time,'%m-%d') = DATE_FORMAT(now(),'%m-%d')\")\n results = cursor.fetchall()\n for row in results:\n \tpayTime = str(row[0])\n \tpayTimeList.append(payTime)\n\ndef getDetail():\n url = 'https://wds.modian.com/api/project/detail'\n form = {\n 'pro_id': pro_id\n }\n sign = getSign(form)\n form['sign'] = sign\n response = requests.post(url, form, headers=header).json()\n already_raised = response['data'][0]['already_raised']\n return already_raised\n\ndef getOrders():\n\tpage = 1\n\turl = 'https://wds.modian.com/api/project/orders'\n\ta = True\n\twhile a:\n\t\tform = {\n\t 'page': page,\n\t 'pro_id': pro_id\n\t\t}\n\t\tsign = getSign(form)\n\t\tform['sign'] = sign\n\t\tresponse = requests.post(url, form, headers=header).json()\n\t\tpage +=1\n\t\tdatas = response['data']\n\t\tdate = datetime.datetime.now().strftime('%Y-%m-%d')\n\t\tfor data in datas:\n\t\t\tif date in data['pay_time']:\n\t\t\t\tif data['pay_time'] not in payTimeList:\n\t\t\t\t\tuser_id = data['user_id']\n\t\t\t\t\tnickname = data['nickname']\n\t\t\t\t\tbacker_money = data['backer_money']\n\t\t\t\t\tpay_time = data['pay_time']\n\t\t\t\t\tdb = connect_database()\n\t\t\t\t\tcursor = db.cursor()\n\t\t\t\t\tcursor.execute(\"INSERT INTO strawberry VALUES (%s,%s,%s,%s,%s)\", (pro_id,user_id,nickname,backer_money,pay_time))\n\t\t\t\t\tdb.commit()\n\t\t\t\t\tmsg = str(time.strftime(\"%a %b %d %H:%M:%S\", time.localtime())) + ' '+ \\\n\t\t\t\t\t'[ERROR] 发现遗漏订单,数据补偿机制启动 ' + \\\n\t\t\t\t\tstr(user_id)+' '+nickname+' '+str(backer_money)+' '+str(pay_time)+'\\n'\n\t\t\t\t\tprint(msg)\n\t\t\t\telse:\n\t\t\t\t\ta = False\n\t\t\telse:\n\t\t\t\tmsg = str(time.strftime(\"%a %b %d %H:%M:%S\", time.localtime())) + ' '+ \\\n\t\t\t\t\t'[WARNING] 数据异常,请及时处理\\n'\n\t\t\t\ta = False\n\t\twith open('/var/log/FCC/dataCompensation.log', 'a+') as f:\n\t\t\tf.write(msg)\n\t\tprint(msg)\n\ndef main():\n\tquerry_payTime()\n\tif getDetail() != querry_database():\n\t\tgetOrders()\n\telse:\n\t\tmsg = str(time.strftime(\"%a %b %d %H:%M:%S\", time.localtime())) + ' '+ \\\n\t\t\t\t\t'[INFO] 例行巡检完成,本地数据正常\\n'\n\t\twith open('/var/log/FCC/dataCompensation.log', 'a+') as f:\n\t\t\tf.write(msg)\nif __name__ == '__main__':\n pro_id = 12767\n 
main()\n\n\n","repo_name":"ultraxia/DataV-For-Nemo","sub_path":"dataCompensation.py","file_name":"dataCompensation.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"47"}
+{"seq_id":"71102902863","text":"from flask import Flask, Response, request, jsonify, Blueprint\nfrom marshmallow import Schema, fields, validates, ValidationError\nfrom marshmallow.validate import Length, Range\nfrom api.db import get_db\nfrom api.scans.scan_dir import run\nimport os, threading, api\n\nscans_bp = Blueprint(\"scans\", __name__)\n\nscan_thread = None\nstop_thread = False\n\nclass ScanSchema(Schema):\n dir = fields.Str(required=True, validate=Length(max=255))\n\n @validates(\"dir\")\n def validate_error(self, value):\n if not os.path.isdir(value):\n raise ValidationError(\"Scan directory not found\")\n\nclass PagingSchema(Schema):\n page = fields.Int(required=True, validate=[Range(min=0, error=\"Pages start at 1\")])\n pageSize = fields.Int(required=True, validate=[Range(min=10, max=100, error=\"Page size can be between 10 and 100\")])\n\n@scans_bp.route(\"/\", methods=[\"GET\"])\ndef get_scans():\n pageSize = int(request.args.get(\"pageSize\", 20));\n page = int(request.args.get(\"page\", 1)) - 1; # subtract 1 so pages start at 0\n PagingSchema().load({\"page\": page, \"pageSize\": pageSize})\n\n cur = get_db().cursor()\n\n cur.execute(\"select count(sid) as count from scans;\")\n count = dict(cur.fetchall()[0])[\"count\"]\n\n cur.execute(\"select * from scans limit ? offset ?;\", (pageSize, int(page*pageSize)))\n scans = [dict(row) for row in cur.fetchall()]\n return jsonify({\"err\": None, \"data\": scans, \"paging\": {\"currentPage\": page+1, \"totalResults\": count, \"pageSize\": pageSize}})\n\n@scans_bp.route(\"/\", methods=[\"GET\"])\ndef get_scan(sid):\n cur = get_db().cursor()\n cur.execute(\"select * from scans where sid= ?;\", (sid,))\n scan = [dict(row) for row in cur.fetchall()]\n if len(scan):\n return jsonify({\"err\": None, \"data\": scan[0]})\n return jsonify({\"err\": \"Invalid sid {}\".format(sid), \"data\": None})\n\n@scans_bp.route(\"/running\", methods=[\"GET\"])\ndef get_running():\n cur = get_db().cursor()\n cur.execute(\"select * from scans where stop is null;\")\n scan = [dict(row) for row in cur.fetchall()]\n if len(scan):\n return jsonify({\"err\": None, \"data\": scan[0]})\n return jsonify({\"err\": \"No scan is running!\", \"data\": None})\n\ndef broadcast_scan(scan):\n api.socketio.emit(\"scan\", scan, broadcast=True, json=True, namespace=\"/data\")\n\ndef broadcast_scan_status(scanning):\n api.socketio.emit(\"scan\", {\"scanning\": scanning}, broadcast=True, json=True, namespace=\"/status\")\n\n@api.socketio.on(\"connect\", namespace=\"/status\")\ndef handle_scan_status():\n api.socketio.emit(\"scan\", {\"scanning\": bool(scan_thread and scan_thread.isAlive())}, namespace=\"/status\")\n\n@api.socketio.on(\"scan\", namespace=\"/status\")\ndef handle_status_request():\n api.socketio.emit(\"scan\", {\"scanning\": bool(scan_thread and scan_thread.isAlive())}, namespace=\"/status\")\n\n@scans_bp.route(\"/start\", methods=[\"POST\"])\ndef start_scan():\n scan = ScanSchema().load(request.json)\n\n cur = get_db().cursor()\n cur.execute(\"select * from scans where stop is null;\")\n if len(cur.fetchall()):\n return jsonify({\"err\": \"A scan is already running!\", \"data\": None}), 403\n\n cur.execute(\"insert into scans (dir) values (?);\", (scan[\"dir\"],))\n\n cur.execute(\"select * from scans where sid = ?;\", (cur.lastrowid,))\n scan = [dict(row) for row in cur.fetchall()][0]\n\n global scan_thread\n global stop_thread\n scan_thread = threading.Thread(target=run, daemon=True, args=(lambda : stop_thread, scan[\"sid\"], scan[\"dir\"]))\n 
scan_thread.start()\n broadcast_scan_status(True)\n return jsonify({\"err\": None, \"data\": scan}), 201\n\n\n@scans_bp.route(\"/stop/\", methods=[\"POST\"])\ndef stop_scan(sid):\n cur = get_db().cursor()\n cur.execute(\"select * from scans where sid = ? and stop is not null;\", (sid,))\n if not scan_thread or len(cur.fetchall()):\n return jsonify({\"err\": \"Scan has already stopped!\"}), 403\n\n global stop_thread\n stop_thread = True\n scan_thread.join()\n stop_thread = False\n return Response()\n","repo_name":"Discolai/encodeHub","sub_path":"distributor/api/scans/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43784716897","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# Xiang Wang @ 2019-05-09 14:31:05\n\n\nfrom PIL import ImageFont, ImageDraw, Image, ImageColor\n\n\nimage = Image.new(\n \"RGBA\", (480, 320),\n ImageColor.getrgb(\"#f00\")\n)\ndraw = ImageDraw.Draw(image)\n\n# font = ImageFont.truetype(\"VeraMono.ttf\", 15)\n# font = ImageFont.truetype(\"VeraMono\", 15)\n# font = ImageFont.load_default()\nfont = ImageFont.truetype(\"/usr/share/fonts/TTF/DroidSansFallbackFull.ttf\", 32)\n\ndraw.text((10, 10), \"您好\", fill=ImageColor.getrgb(\"#fff\"), font=font)\nsize1 = draw.textsize(\"您好\", font=font)\nsize2 = font.getsize(\"您好\")\n\n\nimage.save(\"test.png\")\n","repo_name":"ramwin/python-reference","sub_path":"pillow_example/imagefont.py","file_name":"imagefont.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"7964190089","text":"import string\nwith open('input.txt', 'r') as fh:\n orig = list(fh.read().strip())\n\n# orig = list(\"dabAcCaCBAcCcaDA\")\n# print(orig)\n\ndef react(polymer):\n n = 0\n while n < (len(polymer)-1):\n # print(n, polymer[n], polymer[n+1])\n if abs(ord(polymer[n]) - ord(polymer[n+1])) == 32:\n #found match, skip ahead\n polymer.pop(n+1)\n polymer.pop(n)\n if n > 0:\n n -= 1\n else:\n n += 1\n # print(''.join(polymer))\n return len(polymer)\n\nlens = []\nfor c in string.ascii_lowercase:\n lens.append(react([o for o in orig if o not in (c, c.upper())]))\nlens.sort()\nprint(lens[0])\n","repo_name":"kimvanwyk/advent_of_code","sub_path":"2018/05/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"36636362088","text":"import numpy as np\nimport os, sys\nif os.path.exists('../../Python-Lib/'):\n\tsys.path.insert(1, '../../Python-Lib')\nimport tensorflow as tf\nimport math, time\nfrom Model import *\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport cv2, json, socket, glob, time\n\ndef setAvaiGPUs(num_gpus = 1):\n\timport subprocess as sp\n\tACC_AVAI_MEM = 10240\n\tCOMMAND = 'nvidia-smi --query-gpu=memory.free --format=csv'\n\t# try:\n\t_output_to_list = lambda x: x.decode('ascii').split('\\n')[:-1]\n\tmemory_free_info = _output_to_list(sp.check_output(COMMAND.split()))[1:]\n\tmemory_free_values = [int(x.split()[0]) for i, x in enumerate(memory_free_info)]\n\tavai_gpus = [i for i, x in enumerate(memory_free_values) if x > ACC_AVAI_MEM]\n\tif len(avai_gpus) < num_gpus:\n\t\traise ValueError('Found only %d usable GPUs in the system.' % len(avai_gpus))\n\tprint(avai_gpus[:num_gpus])\n\tos.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, avai_gpus[:num_gpus]))\n\t# except Exception as e:\n\t\t# print('\\\"nvidia-smi\\\" is probably not installed. GPUs are not masked.', e)\n\treturn\n\nif socket.gethostname() == 'ait-server-03':\n\tsetAvaiGPUs()\n\ndef find_peaks_with_val(heatmap, th, gaussian = False):\n\thm10000 = np.array(heatmap * 10000, np.uint32)\n\tif gaussian:\n\t\tfrom scipy.ndimage.filters import gaussian_filter\n\t\theatmap = gaussian_filter(heatmap, sigma = 3)\n\theatmap[heatmap < th] = 0\n\tmap_l = np.zeros(heatmap.shape)\n\tmap_l[1:, :] = heatmap[:-1, :]\n\tmap_r = np.zeros(heatmap.shape)\n\tmap_r[:-1, :] = heatmap[1:, :]\n\tmap_u = np.zeros(heatmap.shape)\n\tmap_u[:, 1:] = heatmap[:, :-1]\n\tmap_d = np.zeros(heatmap.shape)\n\tmap_d[:, :-1] = heatmap[:, 1:]\n\n\tpeaks_binary = np.logical_and.reduce(\n\t\t(heatmap >= map_l, heatmap >= map_r, heatmap >= map_u, heatmap >= map_d, heatmap >= th)\n\t)\n\tpeaks = [item for item in zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])]\n\tpeaks_vals = [(x, y, int(hm10000[y, x])) for x, y in peaks]\n\treturn peaks_vals\n\nimg_t = tf.placeholder(tf.float32, [None, None, None, 3])\npred_t = Predict(img_t)\n\nd = {'kernel:0': '_0', 'bias:0': '_1'}\nassign_op = []\nfor v in tf.global_variables():\n\tparts = v.name.split('/')\n\tif len(parts) == 3:\n\t\tname = parts[1] + '_' + parts[0] + d[parts[2]] + '.npy'\n\telse:\n\t\tname = parts[0] + d[parts[1]] + '.npy'\n\tweights = np.load('weights/' + name)\n\tif len(weights.shape) > 1:\n\t\tassign_op.append(v.assign(weights.transpose([2, 3, 1, 0])))\n\telse:\n\t\tassign_op.append(v.assign(weights))\n\nclass NumpyEncoder(json.JSONEncoder):\n\tdef default(self, obj):\n\t\tif isinstance(obj, np.integer):\n\t\t\treturn int(obj)\n\t\telif isinstance(obj, np.floating):\n\t\t\treturn float(obj)\n\t\telif isinstance(obj, np.ndarray):\n\t\t\treturn obj.tolist()\n\t\telse:\n\t\t\treturn super(NumpyEncoder, self).default(obj)\n\nchoose = sys.argv[1]\nos.popen('mkdir heatmap_%s' % choose)\nf = open('out.out', 'w')\nf.close()\n\nwith tf.Session() as sess:\n\tfor item in assign_op:\n\t\titem.op.run()\n\n\tbox_size = 368\n\tshapes = np.array([0.5, 1, 1.5, 2])\n\n\tdef predict_heatmap(img):\n\t\tsize = max(img.shape[0], img.shape[1])\n\t\tmultipliers = (box_size * shapes) / size\n\t\tres = np.zeros(img.shape[:2] + (19, ))\n\t\tfor m in multipliers:\n\t\t\tinput_img = cv2.resize(img, (0, 0), fx = m, fy = m, interpolation = cv2.INTER_CUBIC)[np.newaxis, ...]\n\t\t\tl1, l2 = sess.run(pred_t, feed_dict = {img_t: input_img / 255.0 - 
0.5})\n\t\t\tres += cv2.resize(l2[0, ...], (img.shape[1], img.shape[0]), interpolation = cv2.INTER_CUBIC)\n\t\tres /= len(multipliers)\n\t\treturn res\n\n\tresult = {}\n\tfiles = glob.glob('/disks/data4/zyli/coco2017data/%s/*' % choose) # ['data/000000000000.jpg'] # \n\tfiles.sort()\n\tfor seq, file in enumerate(files):\n\t\timg_id = file.split('/')[-1].replace('.jpg', '')\n\t\tif img_id in result:\n\t\t\tcontinue\n\t\timg = np.array(Image.open(file).convert('RGB'), np.float32)\n\t\tt = time.time()\n\t\tres = predict_heatmap(img)\n\t\tres_single = []\n\t\tfor i in range(18):\n\t\t\tres_single.append(find_peaks_with_val(res[..., i], res[..., i].max() * 0.4))\n\t\tresult[img_id] = res_single\n\t\thm = np.array(np.maximum(np.minimum(1 - res[..., -1], 1), 0) * 255.0, np.uint8)\n\t\tImage.fromarray(hm).save('heatmap_%s/%s.png' % (choose, img_id))\n\t\tt = time.time() - t\n\t\twith open('out.out', 'a') as f:\n\t\t\tf.write('%d, %s, %dx%d, %.3lf\\n' % (seq, img_id, img.shape[1], img.shape[0], t))\n\t\t\tf.flush()\n\t\tif seq % 1000 == 0:\n\t\t\twith open('heatmap_%s.json' % choose, 'w') as fp:\n\t\t\t\tfp.write(json.dumps(result, cls = NumpyEncoder))\n\t\t\t\tfp.close()\n\twith open('heatmap_%s.json' % choose, 'w') as fp:\n\t\tfp.write(json.dumps(result, cls = NumpyEncoder))\n\t\tfp.close()\n\n\n","repo_name":"lizuoyue/Realtime-MPPE","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"7935019648","text":"import pandas as pd\nfrom scipy.stats import norm\nimport math\n\nZ = norm.ppf\n\n\ndef SDT(hits, misses, fas, crs):\n \"\"\" returns a dict with d-prime measures given hits, misses, false alarms, and correct rejections\"\"\"\n # Floors an ceilings are replaced by half hits and half FA's\n half_hit = 0.5 / (hits + misses)\n half_fa = 0.5 / (fas + crs)\n\n # Calculate hit_rate and avoid d' infinity\n hit_rate = hits / (hits + misses)\n if hit_rate == 1:\n hit_rate = 1 - half_hit\n if hit_rate == 0:\n hit_rate = half_hit\n\n # Calculate false alarm rate and avoid d' infinity\n fa_rate = fas / (fas + crs)\n if fa_rate == 1:\n fa_rate = 1 - half_fa\n if fa_rate == 0:\n fa_rate = half_fa\n\n # Calculate hit_rate and avoid d' infinity\n hit_rate = hits / (hits + misses)\n if hit_rate == 1:\n hit_rate = 1 - half_hit\n if hit_rate == 0:\n hit_rate = half_hit\n\n # Calculate false alarm rate and avoid d' infinity\n fa_rate = fas / (fas + crs)\n if fa_rate == 1:\n fa_rate = 1 - half_fa\n if fa_rate == 0:\n fa_rate = half_fa\n\n d_prime = Z(hit_rate) - Z(fa_rate)\n beta = math.exp((Z(fa_rate) ** 2 - Z(hit_rate) ** 2) / 2)\n c = -(Z(hit_rate) + Z(fa_rate)) / 2\n ad = norm.cdf(d_prime / math.sqrt(2))\n\n return d_prime, beta, c, ad\n\n\nif __name__ == '__main__':\n to_excel = False\n is_main_exp = True\n if is_main_exp:\n # removed 4 rows where hit+miss == 0 or FA+CR == 0\n # e.g. in non RM trails, participant A, large stimuli, there was no upright face trails\n totalData = pd.read_csv(\"../../data/rm_face_to_cal_SDT.csv\")\n else:\n totalData = pd.read_csv(\"../../data/rm_face_disc_to_cal_SDT.csv\")\n\n # remove condition when hit + miss == 0 or FA + cor == 0\n totalData = totalData[(totalData[\"miss\"] + totalData[\"hit\"] != 0)]\n totalData = totalData[(totalData[\"CR\"] + totalData[\"FA\"] != 0)]\n\n totalData[\"d_prime\"] = totalData.apply(lambda x: SDT(x[\"hit\"], x[\"miss\"], x[\"FA\"], x[\"CR\"])[0], axis = 1)\n totalData[\"beta\"] = totalData.apply(lambda x: SDT(x[\"hit\"], x[\"miss\"], x[\"FA\"], x[\"CR\"])[1], axis = 1)\n totalData[\"c\"] = totalData.apply(lambda x: SDT(x[\"hit\"], x[\"miss\"], x[\"FA\"], x[\"CR\"])[2], axis = 1)\n totalData[\"ad\"] = totalData.apply(lambda x: SDT(x[\"hit\"], x[\"miss\"], x[\"FA\"], x[\"CR\"])[3], axis = 1)\n\n if to_excel:\n if is_main_exp:\n totalData.to_excel(\"rm_face_SDT.xlsx\", index = False)\n else:\n totalData.to_excel(\"rm_face_disc_SDT.xlsx\", index = False)\n","repo_name":"miaoli-psy/RM_face","sub_path":"src/preprocess/rm_face_exp2_cal_SDT.py","file_name":"rm_face_exp2_cal_SDT.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"22527532515","text":"import sys\nfrom scapy.all import *\n\npacket = Ether(src=RandMAC(\"*:*:*:*:*:*\"),\n dst=RandMAC(\"*:*:*:*:*:*\")) / \\\n IP(src=RandIP(\"*.*.*.*\"),\n dst=RandIP(\"*.*.*.*\")) / \\\n ICMP()\n\nif len(sys.argv) < 2:\n dev = \"wlp4s0\"\n\nelse:\n dev = sys.argv[1]\n\nprint(\"Flooding net with random packet on dev \" + dev)\nsendp(packet, iface=dev, loop=1)\n","repo_name":"MaxMeiY/network_hack","sub_path":"MAC_flooder.py","file_name":"MAC_flooder.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"4593170713","text":"import os\n\nDEFAULT_LOG_LEVEL = \"info\"\nDEFAULT_VENDOR_ID = 33\nSTUBS_FOLDER = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), \"stubs\")\n\n\n# Stubs\nSTUB_PTA_H = \"pta_attestation.h\"\nSTUB_SPONGENT_H = \"spongent.h\"\nSTUB_AE_H = \"authentic_execution.h\"\nSTUB_SPONGENT = \"spongent.c\"\nSTUB_Makefile = \"Makefile\"\nSTUB_USER_TA_HEADER_DEFINES = \"user_ta_header_defines.h\"\nSTUB_SUB_MK = \"sub.mk\"\nSTUB_IMPORT = \"import.c\"\nSTUB_TA_H = \"ta.h\"\nSTUB_AUTH_EXEC = \"authentic_execution.c\"\nSTUB_CRYPTO = \"crypto.c\"\nSTUB_CRYPTO_H = \"crypto.h\"\nSTUB_CONNECTION = \"connection.c\"\nSTUB_CONNECTION_H = \"connection.h\"\n\n# Starting entrypoint index\n# 0 is set_key, 1 is attest, 2 is disable, 3 is handle_input\nSTART_ENTRY_INDEX = 4\n# Starting indexes of inputs, outputs\n# They need to have different indexes, because the `index` field in Connection does\n# not distinguish between them. If the same index is used for different types, bad\n# things can happen. Moreover, having these \"ranges\" allow us do identify what is\n# an index: e.g., 25848 is an output, 44 is an input, etc.\n# We believe 16384 values for each type is more than enough for a single module.\nSTART_INPUT_INDEX = 0\nSTART_OUTPUT_INDEX = 16384\n\nOUTPUT_PATTERN = '^[ \\t]*SM_OUTPUT[ \\t]*\\([ \\t]*([a-zA-Z_][a-zA-Z_0-9]*)[ \\t]*\\)[ \\t]*;[ \\t]*$'\nINPUT_PATTERN = ('^[ \\t]*SM_INPUT[ \\t]*\\([ \\t]*([a-zA-Z_][a-zA-Z_0-9]*)[ \\t]*,'\n '[ \\t]*[a-zA-Z_][a-zA-Z_0-9]*[ \\t]*,[ \\t]*[a-zA-Z_][a-zA-Z_0-9]*[ \\t]*\\)')\nENTRY_PATTERN = ('^[ \\t]*SM_ENTRY[ \\t]*\\([ \\t]*([a-zA-Z_][a-zA-Z_0-9]*)[ \\t]*,'\n '[ \\t]*[a-zA-Z_][a-zA-Z_0-9]*[ \\t]*,[ \\t]*[a-zA-Z_][a-zA-Z_0-9]*[ \\t]*\\)')\nOUTPUT_REPL = \"SM_OUTPUT_AUX(\\\\1, {});\"\nIO_SIGNATURE = (\"void {}(unsigned char *data, uint32_t len);\\n\")\n","repo_name":"AuthenticExecution/TZ-Code-Generator","sub_path":"tzcodegen/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"41498846357","text":"from urllib.parse import urljoin\n\nfrom scrapy import Request\n\nfrom product_spider.items import RawData\nfrom product_spider.utils.spider_mixin import BaseSpider\n\n\nclass ECOSpider(BaseSpider):\n name = \"eco\"\n start_urls = [\"http://eco-canada.com/search/\", ]\n base_url = \"http://eco-canada.com/\"\n\n def parse(self, response):\n values = tuple(set(response.xpath('//div[@class=\"pardrug\"]//select/option[position()>1]/@value').extract()))\n for value in values:\n url = f\"http://eco-canada.com/search/?ptag={value}\"\n yield Request(url, meta={\"parent\": value}, callback=self.list_parse)\n\n def list_parse(self, response):\n urls = response.xpath('//div[contains(@class, \"pro_list\")]/div[@class=\"pro_title\"]/a/@href').extract()\n for url in urls:\n yield Request(urljoin(self.base_url, url), meta=response.meta, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tmp = '//span[contains(text(),\"{}\")]/following-sibling::font/text()'\n d = {\n \"brand\": \"eco\",\n \"parent\": response.meta.get(\"parent\"),\n \"cat_no\": response.xpath(tmp.format(\"Catalogue number\")).get(),\n \"cas\": response.xpath(tmp.format(\"CAS Number\")).get(),\n \"en_name\": response.xpath('//div[@class=\"p_vtitle\"]/text()').get(),\n \"img_url\": urljoin(self.base_url,\n response.xpath('//div[@class=\"p_viewimg pcshow\"]//img/@src').get()),\n \"mf\": response.xpath(tmp.format(\"Molecular Formula\")).get(),\n \"mw\": response.xpath(tmp.format(\"Molecular Weight\")).get(),\n \"prd_url\": response.url,\n }\n yield RawData(**d)\n\n","repo_name":"Pandaaaa906/product_spider","sub_path":"product_spider/spiders/eco_spider.py","file_name":"eco_spider.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5366898244","text":"import os\nimport datetime \nimport time\nfrom linebot import LineBotApi, WebhookParser\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage, TemplateSendMessage\n\n\nchannel_access_token = os.getenv(\"LINE_CHANNEL_ACCESS_TOKEN\", None)\n\n\ndef send_text_message(reply_token, text):\n line_bot_api = LineBotApi(channel_access_token)\n line_bot_api.reply_message(reply_token, TextSendMessage(text=text))\n return \"OK\"\n\n\ndef send_template_message(reply_token, template):\n line_bot_api = LineBotApi(channel_access_token)\n line_bot_api.reply_message(reply_token, template)\n\n return \"OK\"\n\ndef active_send_text_msg(uid,msg,timer,number):\n print(\"start to wait for \"+str(timer))\n waitRate=(number['target']-number['now'])\n line_bot_api = LineBotApi(channel_access_token)\n time.sleep(timer.seconds*waitRate)\n line_bot_api.push_message(uid, TextSendMessage(text=msg))\n\ndef active_send_clock_msg(uid,msg,timer,number,index,fun,get_name):\n print(\"start to wait for \"+str(timer))\n waitRate=(number['target']-number['now'])\n start=datetime.datetime.now()\n setTime=datetime.timedelta(hours=start.hour,minutes = start.minute, seconds = start.second)\n line_bot_api = LineBotApi(channel_access_token)\n time.sleep(timer.seconds*waitRate*3/4)\n name=get_name(index)\n if(name!=None):\n line_bot_api.push_message(uid, TextSendMessage(text=name+msg+\" (\"+str(setTime)+\"設定的號碼牌)\"))\n else:\n line_bot_api.push_message(uid, TextSendMessage(text=msg+\" (\"+str(setTime)+\"設定的號碼牌)\"))\n fun(index)\n\n\"\"\"\ndef send_image_url(id, img_url):\n pass\n\ndef send_button_message(id, text, buttons):\n pass\n\"\"\"\n","repo_name":"JiaAnTW/TOC-Final-Project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10068195025","text":"# Don't know my licence. ask.\nfrom __future__ import print_function\nimport sys\nimport os\nimport pprint\nfrom point import Point3\nfrom stiffeners import Stiffener\nfrom cladding import Cladding\nfrom roofing import Roofing, RoofExpansionDefs\nfrom windowframer import WindowFramer, windowDef, HoleDef\nimport itertools #import izip\nimport json\nimport math\nfrom helpers import *\n\n\ncornerwoodcolor = 41\n#chimney_x = 3110\nporch_depth = 3000\n#chimney_y = 3080 + porch_depth\n\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n\ndef centroid(points):\n x = [p.x for p in points]\n y = [p.y for p in points]\n z = [p.z for p in points]\n centroid = Point3(sum(x) / len(points), sum(y) / len(points), sum(z) / len(points))\n return centroid\n\ndef toDistances(distanceList):\n sum = 0.0\n absolutes = []\n for dd in distanceList:\n absolutes.append(dd + sum)\n sum += dd\n return absolutes\n\n\ndef generate_loop(grid_x, grid_y, grid_z, pairs):\n master_polygon = []\n xx = toDistances(grid_x)\n yy = toDistances(grid_y)\n if not grid_z: #hack\n grid_z = [0.0]\n zz = toDistances(grid_z)\n #for x_ind,y_ind,z_ind in pairs:\n for item in pairs:\n z_ind = None\n try:\n x_ind,y_ind,z_ind = item\n except ValueError:\n x_ind,y_ind = item\n if not z_ind:\n z_ind = 0\n # zero level z\n edge = Point3(xx[x_ind], yy[y_ind], zz[z_ind])\n master_polygon.append(edge)\n return master_polygon\n\ndef is_closed_loop(grid):\n closed = grid[0].Compare(grid[-1])\n #trace(\"closed: \", closed)\n return closed\n\ndef is_short_side(p1, p2):\n # TODO: assumes purulaatikko always oriented same\n return abs(p2.y-p1.y) > 5000 \n\ndef clad_def(polygon, windows, elevations, usefits=False):\n return {'poly': polygon, 'windows': windows, \"elevations\": elevations, 'usefits': usefits}\n\ndef get_wall_section_by(section, attribute, getprop, wallname):\n #wallname = attribute #sect['name']\n for x in section: #specification['holedefs']:\n if x[attribute] == wallname:\n #trace(\"i found it!\")\n prop = x[getprop]\n break\n else:\n trace(\"did not find it: \", wallname)\n prop = None\n return prop\n\ndef write_out(specification):\n # main grids\n grid_x = specification['grid_x']\n grid_y = specification['grid_y']\n grid_z = specification['grid_z']\n pelevs_z = specification['elev_z']\n roof_angle = specification['roofangle']\n porchroofangle = roofangle\n chimney_x = specification['chimney_x']\n chimney_y = specification['chimney_y']\n chimney_profile = specification['chimney_profile']\n footingProfile = specification['foundations'][0][\"profile1\"]\n sockleProfile = specification['foundations'][0][\"profile2\"]\n #sockle = \"800*200\"\n #footing = \"200*500\"\n\n\n # define line, or grid intersect\n pairs = specification['foundations'][0][\"edges\"]\n master_polygon = generate_loop(grid_x, grid_y, None, pairs)\n #trace(pprint.pformat(master_polygon))\n\n # porch grid elevations (keep separate)\n porch = specification['foundations'][1][\"edges\"]\n porch_polygon = generate_loop(grid_x, grid_y, None, porch)\n # get_plane_data\n\n high_pairs1 = [(0,1),\n (3,1)]\n high_pairs2 = [(0,3),\n (3,3)]\n high_polygon1 = generate_loop(grid_x, grid_y, None, high_pairs1)\n high_polygon2 = generate_loop(grid_x, grid_y, None, high_pairs2)\n\n # create window aabb's, possibly to be used all over the place\n #level1 = 1160\n #level2 = 4500 # random ass, todo: measure it\n holedefs = []\n holedefs_byname = {}\n for holedef in 
specification['holedefs']:\n wall_name = holedef['wall_line']\n wall_def = get_wall_section_by(specification['wall_sections'], \"name\", \"line\", wall_name)\n wall_line = generate_loop(grid_x, grid_y, grid_z, wall_def)\n window_defs = [windowDef([wd['offset'], wd['level']], wd[\"hole\"], wd[\"splitters\"]) for wd in holedef['holes']]\n holedefs.append((wall_line, window_defs,))\n holedefs_byname[wall_name] = window_defs\n window_cuts, window_woods = create_window_boxes(holedefs)\n trace(\"Holes for: \", len(window_cuts), \" windows.\", window_cuts)\n\n # chimney pipe\n section_cut = generate_loop(grid_x, grid_y, grid_z, [(0,1,0), (0,3,0), (0,3,3)])\n chimney_parts, pipe_cut = create_chimneypipe(section_cut, x=chimney_x, y=chimney_y, profile=chimney_profile, roofangle=roof_angle)\n\n # todo: move somplace more appropriate?\n fieldsaw = Cladding(\"cladding\")\n board_areas = {}\n for sect in specification['wall_sections']:\n wallname = sect['name']\n elevations_key = sect['elevations']\n holes = holedefs_byname.get(wallname, None)\n segment_def = [tuple(pt) for pt in sect['line']]\n #trace(\"segdef: \", segment_def)\n board_areas[wallname] = clad_def(segment_def, holes, elevations_key, sect['usefits'])\n\n #porch_facades = {\n # \"kuisti_vas\": clad_def([(0,1,1),(0,0,1),(0,0,3),(0,1,3)], None),\n # \"kuisti_etu\": clad_def([(0,0,1),(2,0,1),(2,0,3),(1,0,4),(0,0,3)], None, usefits=True),\n # \"kuisti_oik\": clad_def([(2,0,1),(2,1,1),(2,1,3),(2,0,3)], None)\n # }\n\n # corner boards\n corner_boards = create_corners_woods(grid_x, grid_y, specification, cornerwoodcolor)\n\n mass_center = centroid(master_polygon)\n # TODO: make the layers bottom up and increase z offset\n z_offset = parse_height(footingProfile)\n\t# todo: use previous profile for xy-plane offset\n\t#xy_offset = parse_width(footingProfile)\n footing = generate_footing(master_polygon, footingProfile, sockleProfile)\n sockle = generate_sockle(master_polygon, sockleProfile, z_offset)\n lower_reach = generate_lower_reach(master_polygon, 1000.0)\n higher_reach = generate_lower_reach(high_polygon1, 4750.0, mass_center)\n higher_reach += generate_lower_reach(high_polygon2, 4750.0, mass_center)\n wall_studs = generate_wall_studs(master_polygon, 1000.0, 3650, roof_angle)\n\n # bit different\n roof_woody = generate_main_roof(grid_x, grid_y, specification, pipe_cut)\n\n # porch\n #porch_decline = porch_depth*math.tan(math.radians(roof_angle))\n footing += generate_footing(porch_polygon, footingProfile, sockleProfile)\n sockle += generate_sockle(porch_polygon, sockleProfile, z_offset)\n offset_porch_woods_outwards(porch_polygon, mass_center)\n lower_reach += generate_lower_reach(porch_polygon, toDistances(pelevs_z)[1])\n higher_reach += generate_lower_reach(porch_polygon, toDistances(pelevs_z)[2])\n wall_studs += generate_wall_studs(porch_polygon, toDistances(pelevs_z)[1], pelevs_z[2]-100)\n porch_roofer = create_porch_roof(grid_x, grid_y, pelevs_z, roof_woody)\n\n # inner walls\n inside_walls = create_inside(chimney_x, chimney_y)\n #inside_walls = []\n #trace(\"iw: \", inside_walls)\n\n ## main json\n combined_data = [named_section(\"footing\", footing),\n named_section(\"sockle\", sockle),\n named_section(\"chimney\", chimney_parts),\n named_section(\"lower_reach\", lower_reach, 4),\n named_section(\"higher_reach\", higher_reach, 3),\n named_section(\"wall_studs\", wall_studs, 3),\n #named_section(\"inside_walls\", inside_walls[0], 3),\n named_section(\"window_edges\", window_woods),\n named_section(\"corner_boards\", corner_boards)]\n\n # 
inside walls\n for idx in range(len(inside_walls)):\n wall, aabb = inside_walls[idx]\n cutsolids = None\n if aabb is not None:\n cutsolids = [aabb]\n combined_data.append(named_section(\"inside_wall_{}\".format(idx), wall, 3, solids=cutsolids))\n\n\n # stiffener experiment\n stiffeners = stiffen_wall(\"mainwall\", master_polygon, 1000.0, 3850, roof_angle, mass_center)\n # todo: \n porch_height = pelevs_z[-2] + 100\n porch_stiffeners = stiffen_wall(\"porch\", porch_polygon, 1000.0, porch_height, roof_angle, mass_center)\n \n\n for stf in stiffeners + porch_stiffeners:\n cuts = stf.get_cut_planes()\n fits = stf.get_fit_planes()\n #combined_data.append(named_section(stf.name, stf.get_stiffener_data(), planes=cuts, fits=fits, solids=window_cuts))\n\n # cladding boards\n append_cladding_data(board_areas, combined_data, specification, fieldsaw, window_cuts)\n #append_cladding_data(porch_facades, combined_data, grid_x, grid_y, pelevs_z, fieldsaw, [])\n\n #for key, value in board_areas.items():\n # segment_name = \"cladding_\" + key\n # trace(\"Creating cladding for: \", segment_name)\n # segment_polygon = value['poly']\n # segment_windows = value['windows']\n # segment_isfitted = value['usefits']\n # cladding_loop = generate_loop(grid_x, grid_y, grid_z, segment_polygon)\n # wall_parts, fittings = fieldsaw.create_cladding(cladding_loop, \"22*125\", 33, segment_windows, fittings=segment_isfitted)\n # combined_data.append(named_section(segment_name, wall_parts, 44, solids=window_cuts, planes=fittings, fits=fittings))\n\n #trace(\"roof_woody: \", roof_woody.get_roofs_faces(), porch_roofer.get_roofs_faces())\n for roof_face in roof_woody.get_roofs_faces() + porch_roofer.get_roofs_faces():\n trace(\"add roof: \", roof_face.get_name())\n #TDD 553 3\n # woods\n part_data, coord_sys, cut_aabbs, fit_planes = roof_face.get_woods_data()\n name = roof_face.get_name()\n combined_data.append(named_section(\"roof_woods_\"+name, part_data, ts_class=12, csys=coord_sys,\n solids=cut_aabbs, fits=fit_planes))\n # steels\n geom_data, coord_sys, cut_aabbs, cut_planes, cub_objs = roof_face.get_steel_data()\n combined_data.append(named_section(\"roof_steels_\"+name, geom_data, ts_class=3, csys=coord_sys, \n solids=cut_aabbs, planes=cut_planes, contours=cub_objs))\n # side steels\n sides_data, coord_sys, fit_planes = roof_face.get_sides_data()\n combined_data.append(named_section(\"roof_sides_\"+name, sides_data, ts_class=3, csys=coord_sys,\n fits=fit_planes))\n\n with open('data.json', 'w') as jsonfile:\n json.dump(combined_data, jsonfile, cls=MyEncoder, indent=2)\n #jsonfile.write(pprint.pformat(combined_data))\n print(\"wrote:\\n\\b\", os.getcwd() + os.path.sep + \"data.json\")\n\ndef append_cladding_data(clad_defs, append_to, specification, fieldsaw, window_cuts):\n gridx = specification['grid_x']\n gridy = specification['grid_y']\n for key, value in clad_defs.items():\n segment_name = \"cladding_\" + key\n trace(\"Creating cladding for: \", segment_name)\n segment_polygon = value['poly']\n segment_windows = value['windows']\n segment_isfitted = value['usefits']\n segment_elevations_name = value['elevations']\n trace(\"s.e.n: \", segment_elevations_name)\n gridz = specification[segment_elevations_name]\n cladding_loop = generate_loop(gridx, gridy, gridz, segment_polygon)\n wall_parts, fittings = fieldsaw.create_cladding(cladding_loop, \"22*125\", 33, segment_windows, fittings=segment_isfitted)\n append_to.append(named_section(segment_name, wall_parts, 44, solids=window_cuts, planes=fittings, fits=fittings))\n\ndef 
named_section(name, part_list, ts_class=None, planes=None, csys=None, solids=None, fits=None, contours=None):\n # todo: can add assembly meta, classes etc.\n if ts_class is not None:\n for part in part_list:\n part[\"klass\"] = ts_class\n return { \n \"section\": name, \n \"parts\": part_list, \n \"planes\": remove_none_elements_from_list(planes), \n \"coordinate_system\": csys,\n \"cutobjects\": remove_none_elements_from_list(solids),\n \"fitplanes\": remove_none_elements_from_list(fits),\n \"cutcontours\": remove_none_elements_from_list(contours)\n }\n\ndef generate_lower_reach(polygon, z_offset, mass_center=None):\n return generate_offsetted_beams(polygon, \"100*100\", 50.0, z_offset + 50.0, \"Timber_Undefined\", mass_center)\n\ndef offset_porch_woods_outwards(porch_polygon, mass_center):\n porch_mass = centroid(porch_polygon)\n between = porch_mass.GetVectorTo(mass_center)\n if abs(between.x) > abs(between.y):\n # extrude x-axis\n outwards_for_stiffeners = Point3(-22, 0, 0)\n else:\n outwards_for_stiffeners = Point3(0, -22, 0)\n porch_polygon[0].Translate(outwards_for_stiffeners)\n porch_polygon[-1].Translate(outwards_for_stiffeners)\n\ndef create_window_boxes(windows):\n # in: wall plane in 3d\n # + vector wall-normal(?)\n # + distance from start\n # + window size\n aabbs = []\n #trace(\"glb: \", cornerwoodcolor)\n windower = WindowFramer(cornerwoodcolor)\n for wall_line in windows:\n line, defs = wall_line\n transform, rotation = create_vertical_stdplane(line)\n wall_local = transform.convertToLocal(line)\n # create aabb's\n for win_def in defs:\n low, high = win_def.minmax_points()\n in_world = transform.convertToGlobal([low, high])\n aabbs.append(create_cut_aabb(in_world))\n # window wood cutter\n windower.add_window(transform, low, high, rotation, win_def.multiframe())\n return aabbs, windower.get_framing_woods()\n\ndef create_corners_woods(grid_x, grid_y, spec, cornerwoodcolor):\n corner_woods = []\n for corner_spec in spec[\"corner_woods\"]:\n segment_elevations_name = corner_spec[\"elevations\"]\n grid_z = gridz = spec[segment_elevations_name]\n z_level = corner_spec[\"z_level\"]\n loop = generate_loop(grid_x, grid_y, grid_z, corner_spec[\"points\"])\n corner_woods += create_corner_boards([loop], cornerwoodcolor, z_level)\n return corner_woods\n\ndef create_corner_boards(corner_loops, cornerwoodcolor, z_level):\n corner_woods = []\n for corner_tri in corner_loops:\n #trace(\"corner tri: \", corner_tri)\n # create 2d coord sys\n height = corner_tri[1].z - corner_tri[0].z\n X = corner_tri[0].GetVectorTo(corner_tri[-1])\n Y = corner_tri[0].GetVectorTo(corner_tri[1])\n rotation = direction_to_rotation(Point3.Cross(X, Y))\n rotation2 = direction_to_rotation(Point3.Reversed(X))\n #trace(\"corner xy: \", corner_tri[0], X, Y)\n coordinate_system = TransformationPlane(corner_tri[0], X, Y)\n transform = Transformer(coordinate_system)\n corner_local = transform.convertToLocal(corner_tri)\n # offsetting\n profile1 = \"22*125\" # forwards\n half1 = parse_width(profile1)/2\n thick1 = parse_height(profile1)\n profile2 = \"22*100\" # behind\n half2 = parse_width(profile2)/2\n thick2 = parse_height(profile2)\n # forwards\n offset_x1 = -(z_level+thick2) + half1\n offset_z1 = z_level + thick1/2\n locallow = corner_local[0].CopyLinear(offset_x1, 0, offset_z1)\n localhigh = locallow.CopyLinear(0,height,0)\n in_world = transform.convertToGlobal([locallow, localhigh])\n corner_woods.append(create_wood_at(in_world[0], in_world[1], profile1, rotation, cornerwoodcolor))\n # behind\n offset_x2 = 
-(z_level+thick2/2)\n offset_z2 = z_level - half2\n locallow = corner_local[0].CopyLinear(offset_x2, 0, offset_z2)\n localhigh = locallow.CopyLinear(0,height,0)\n in_world = transform.convertToGlobal([locallow, localhigh])\n corner_woods.append(create_wood_at(in_world[0], in_world[1], profile2, rotation2, cornerwoodcolor))\n return corner_woods\n\ndef create_chimneypipe(section_cut, x, y, profile, roofangle):\n pro_x = parse_height(profile)\n pro_y = parse_width(profile)\n delta_x = pro_x/2\n delta_y = pro_y/2\n startPoint = Point3(x+delta_x, y+delta_y, 0)\n # section cut at least 3 points\n # tilt cut section plane to yz plane at placement of chimney\n projection = projection_matrix(startPoint.ToArr(), [1,0,0], direction=[1,0,0])\n section_cut_at_chimney = Transformer.convert_by_matrix(section_cut, projection)\n profiler = create_cut_aabb(section_cut_at_chimney)\n height = profiler['max_point'].z\n wall_lenght = profiler['max_point'].y - profiler['min_point'].y\n ceiling = get_ceiling(startPoint, section_cut_at_chimney[0], height, wall_lenght, roofangle)\n main_elevation = ceiling + 800\n endPoint = startPoint.CopyLinear(0, 0, main_elevation) # regs. 800 mm over\n cutting_aabb = create_cut_aabb([Point3(x,y,0), Point3(x+pro_x, y+pro_y, main_elevation)])\n concrete_parts = []\n concrete_parts.append(get_part_data(profile, None, [startPoint, endPoint], \"Concrete_Undefined\"))\n # hifistely\n joined = endPoint.Clone()\n top_elev = joined.CopyLinear(0, 0, 85) # module\n end_profile = \"{}*{}\".format(parse_height(profile)+130, parse_width(profile)+130)\n concrete_parts.append(get_part_data(end_profile, None, [joined, top_elev], \"Concrete_Undefined\"))\n return concrete_parts, cutting_aabb\n\ndef generate_main_roof(grid_x, grid_y, spec, chimney_pipe):\n testroof0 = spec[\"roof_sections\"][0]\n testgridz = spec[\"grid_z\"]\n # xy plane\n #roof_tuples_1 = [(0,2,3), # with porch roof extension\n # (0,1,3),\n # #(1,1,4),\n # #(1,0,4),\n # #(2,0,4),\n # #(2,1,4),\n # (3,1,3),\n # (3,2,3)]\n roof_polygon_1 = generate_loop(grid_x, grid_y, testgridz, testroof0[\"faces\"][0][\"edges\"])\n #trace(\"roof poly 1: \", roof_polygon_1)\n #roof_tuples_2 = [(3,2,3),\n # (3,3,3),\n # (0,3,3),\n # (0,2,3)]\n roof_polygon_2 = generate_loop(grid_x, grid_y, testgridz, testroof0[\"faces\"][1][\"edges\"])\n # centerline at highest elevation\n centerline = generate_loop(grid_x, grid_y, testgridz, testroof0[\"centerline\"])\n #trace(\"roof centerline: \", centerline)\n roofer = Roofing(\"roof_studs\", chimney_pipe)\n #direction1 = roof_polygon1[0].GetVectorTo(\n roofer.do_one_roof_face(\"lape_1\", roof_polygon_1, centerline[0])\n roofer.do_one_roof_face(\"lape_2\", roof_polygon_2, centerline[1])\n # todo: hack, return face1 plane outside\n\n return roofer\n\ndef create_porch_roof(grid_x, grid_y, pgrid_z, main_roofer):\n # isect porch roof to main roof: p0, p1, origo, normal\n pgrid_x = grid_x[:3]\n pgrid_y = grid_y[:2]\n centerline = generate_loop(pgrid_x, pgrid_y, pgrid_z, [(1,1,-1),(1,0,-1)])\n pco, pno = main_roofer.roof_decs[0].get_plane_data()\n p1 = centerline[0].ToArr()\n p2 = centerline[1].ToArr()\n pl = generate_loop(pgrid_x, pgrid_y, pgrid_z, [(0,1,2)])\n pr = generate_loop(pgrid_x, pgrid_y, pgrid_z, [(2,1,2)])\n #trace(\"wtf: \", p1,p2,pco,pno)\n rooftip = Point3.FromArr(isect_line_plane_v3(p1,p2,pco,pno))\n lape1l = Point3.FromArr(isect_line_plane_v3(p1,pl[0].ToArr(),pco,pno))\n lape2r = Point3.FromArr(isect_line_plane_v3(p1,pr[0].ToArr(),pco,pno))\n\n # xy plane\n roof_tuples_1 = [(1,-1,-2),\n 
(0,-1,-2),\n (0,0,-2),\n (1,0,-2)]\n #trace(\"pw3: \", pgrid_x, pgrid_y, pgrid_z)\n roof_polygon_1 = generate_loop(pgrid_x, pgrid_y, pgrid_z, roof_tuples_1)\n roof_tuples_2 = [(1,0,-2),\n (2,0,-2),\n (2,-1,-2),\n (1,-1,-2)]\n #trace(\"pw3: \", pgrid_x, pgrid_y, pgrid_z)\n roof_polygon_2 = generate_loop(pgrid_x, pgrid_y, pgrid_z, roof_tuples_2)\n\n cut_object = None\n if rooftip is not None and lape1l is not None and lape2r is not None:\n #dist_ytop = rooftip.distFrom(centerline[0]\n xyplane_elevation = roof_polygon_1[0].z\n #highpoint = centerline[0].\n unadjusted_y = centerline[0].y\n #unadjusted_yr = centerline[1].y\n #centerline[0] = rooftip.Clone()\n # lape 1\n cut_tip = rooftip.Clone()\n rooftip.z = xyplane_elevation\n #lape1p2 = Point3(lape1l.x, rooftip.y, xyplane_elevation)\n lape1p3 = Point3(lape1l.x, unadjusted_y, xyplane_elevation)\n # lape 2\n lape2p_3 = Point3(lape2r.x, unadjusted_y, xyplane_elevation)\n lape2p_2 = Point3(lape2r.x, rooftip.y, xyplane_elevation)\n #roof_polygon_1 = [rooftip, lape1p2, lape1p3] + roof_polygon_1[1:]\n roof_polygon_1 = [rooftip] + roof_polygon_1[1:]\n roof_polygon_2 = roof_polygon_2[:-1] + [rooftip]\n trace(\"roof_polygon_1: \", roof_polygon_1)\n trace(\"centerline: \", centerline)\n # cut main roof with the extension\n cut_vector1 = cut_tip.GetVectorTo(lape1l)\n cut_vector2 = cut_tip.GetVectorTo(lape2r)\n cut_vector1 = cut_vector1.Normalize(1.2*(1000 + cut_vector1.magnitude()))\n cut_vector2 = cut_vector2.Normalize(1.2*(1000 + cut_vector2.magnitude()))\n cut_polygon = [cut_tip, cut_tip.CopyLinear(cut_vector1), cut_tip.CopyLinear(cut_vector2)]\n #cut_object = get_part_data(\"PL200\", Rotation.FRONT, cut_polygon, \"ANTIMATERIAL\")\n # TODO: hardcoder roofdeck order, 0 is the porch side\n cut_world = main_roofer.roof_decs[0].add_ext_cut_part(cut_polygon)\n\n roofer = Roofing(\"porch_rafters\", None)\n\n expansion1 = RoofExpansionDefs(right=Point3(600, 0, 0))\n expansion2 = RoofExpansionDefs(left=Point3(-600, 0, 0))\n start1 = centerline[0].Clone()\n start1.z = xyplane_elevation\n #direction1 = start1.GetVectorTo(roof_polygon_1[1])\n #trace(\"direction1\", direction1)\n roofer.do_one_roof_face(\"porch_lape_1\", roof_polygon_1, centerline[0], start1, main_expansion=expansion1)\n #start2 = centerline[1].Clone()\n #start2.z = xyplane_elevation\n #direction2 = start2.GetVectorTo(roof_polygon_2[1])\n roofer.do_one_roof_face(\"porch_lape_2\", roof_polygon_2, centerline[1], main_expansion=expansion2)\n # todo: hack, return face1 plane outside\n #if cut_polygon is not None:\n\n return roofer #, cut_polygon\n\n\ndef create_one_side_trusses(begin, mainwall, mainwall_length, count, last, holppa, half_width, roofelevation, halflife2, roofdeclination):\n direction = mainwall.Clone()\n pt_array = point_grid(begin, direction, count, holppa, 900)\n # stupid way to add last roof truss\n towards = Point3.Normalize(direction, last)\n last_ninja = pt_array[-1].Clone()\n last_ninja.Translate(towards)\n pt_array.append(last_ninja)\n roofparts = []\n for ii in range(len(pt_array)):\n lowpoint = pt_array[ii]\n highpoint = lowpoint.CopyLinear(0, half_width, roofelevation)\n lowpoint.Translate(0, halflife2, roofdeclination)\n # roof truss 5x2's\n roofparts.append(create_wood_at(lowpoint, highpoint, \"50*125\", Rotation.FRONT))\n return roofparts\n\ndef stiffen_wall(prefix, stiff_poly, z_offset, height, roof_angle, mass_center):\n centerlines = generate_offsetted_lines(stiff_poly, -11.0, z_offset, None, mass_center)\n # todo stiff it up\n stiffs = []\n counter = 1\n for aa,bb in 
centerlines:\n use_angle = None\n is_short = False\n if is_short_side(aa,bb):\n use_angle = roof_angle\n is_short = True\n wallline = aa.GetVectorTo(bb)\n #rotation = direction_to_rotation(wallline)\n eps = Stiffener(prefix + \"_\" + str(counter), mass_center)\n #trace(aa, bb, height, use_angle)\n eps.stiffener_one_plane(aa, bb, height, use_angle)\n stiffs.append(eps)\n #for ss,tt in eps:\n # stiffs.append(create_wood_at(ss,tt, \"22*100\", Rotation.FRONT))\n return stiffs\n\n\n\ndef generate_wall_studs(polygon, z_offset, height, roofangle=None):\n # todo: purulaatikko constant\n xy_offset = 50.0\n centerlines = generate_offsetted_lines(polygon, xy_offset, 1100.0)\n # run points around each lower wood k600\n studpoints = []\n first_item = True\n for start,end in centerlines:\n wall_begin = start.Clone()\n # do one wall line\n direction = start.GetVectorTo(end)\n rotation = direction_to_rotation(direction)\n #trace(\"beam rot type: \", type(rotation))\n length = start.distFrom(end)\n count = int((length - 200.0) / 600.0)\n # stud grid along one edge\n first_offset = -50\n if first_item and not is_closed_loop(polygon):\n first_offset = 0\n wood_grid = point_grid(start, direction, count, first_offset, 600)\n use_ceiling = is_short_side(start,end) and roofangle # todo something smarter\n for ii in range(len(wood_grid)):\n lowpoint = wood_grid[ii].Clone()\n current_height = height\n # normal 4x2's\n profile = \"50*100\"\n if ii == 0:\n # corners have 4x4\n profile = \"100*100\"\n elif use_ceiling:\n current_height = get_ceiling(lowpoint, wall_begin, height, length, roofangle) + 50\n highpoint = lowpoint.CopyLinear(0,0,current_height)\n studpoints.append(create_wood_at(lowpoint, highpoint, profile, rotation))\n first_item = False\n return studpoints\n\ndef point_grid(startpoint, dir_vector, count, first_offset, kdist):\n grid = []\n direction = dir_vector.Clone()\n # normal 4x2's\n current = startpoint.Clone()\n if abs(first_offset) > 0.5:\n towards = Point3.Normalize(direction, first_offset)\n current.Translate(towards)\n grid.append(current.Clone())\n towards = Point3.Normalize(direction, kdist)\n #trace(\"start: {0}, end:{1}, direction: {2}\".format(start, end, direction))\n for i in range(count):\n #trace(\"counting: \", i)\n current.Translate(towards)\n grid.append(current.Clone())\n return grid\n\n\ndef generate_sockle(foundationPolygon, sockleProfile, z_offset):\n sockleCenter = []\n\n for node in foundationPolygon:\n # kinda cloning\n clonepoint = node.Clone()\n clonepoint.Translate(0, 0, z_offset)\n sockleCenter.append(clonepoint)\n # todo: closedloop or not..\n return [get_part_data(sockleProfile, None, sockleCenter, \"Concrete_Undefined\", 1)]\n\ndef generate_footing(foundationPolygon, footingProfile, sockleProfile):\n\t# footing is not centerline, but polybeam concrete panel is outer limits\n # move sockle to footing center, sockle polygon is not centerline but\n\t# outer limits to get the polybeam fully casted in closed loop corner.\n sockleWidth = parse_width(sockleProfile)\n xy_offset = sockleWidth / 2\n footingCenterZ = parse_height(footingProfile) / 2\n return generate_offsetted_beams(foundationPolygon, footingProfile, xy_offset, footingCenterZ, \"Concrete_Undefined\")\n \ndef generate_offsetted_beams(polygon, profile, xy_offset, z_offset, material, mass_center = None):\n #xy_offset = parse_width(profile)/2\n centerlines = generate_offsetted_lines(polygon, xy_offset, z_offset, profile, mass_center)\n beams = []\n first_item = True\n for aa,bb in centerlines:\n first_offset = 
-50\n if is_closed_loop(polygon) or not first_item:\n first_offset = 0\n if abs(first_offset) > 0.5:\n direction = aa.GetVectorTo(bb)\n towards = Point3.Normalize(direction, first_offset)\n aa.Translate(towards)\n beams.append(get_part_data(profile, None, [aa, bb], material))\n first_item = False\n return beams\n \ndef extend_or_subtract(polygon, xy_offset, z_offset, mass_center):\n if not mass_center:\n mass_center = centroid(polygon)\n footingCenter = []\n\t#if instanceof(profile, float):\n\t#\th_offset = profile\n\t#else\n\t#\th_offset = parse_width(profile)/2\n for node in polygon:\n # todo: parse from profile\n #endpoint = node\n #if len(polygon) > 2:\n endpoint = node.moveCloserTo(mass_center, xy_offset)\n endpoint.Translate(0, 0, z_offset)\n footingCenter.append(endpoint)\n return footingCenter\n\ndef generate_offsetted_lines(master_polygon, xy_offset, z_offset, adjustByProfile=None, mass_center=None):\n # move points i.e. 50mm closer to structure center (to 100*100 beam centerline)\n footingCenter = extend_or_subtract(master_polygon, xy_offset, z_offset, mass_center)\n footingLines = pairwise(footingCenter)\n polygonMidpoints = []\n adjustEndPoints = xy_offset\n if adjustByProfile is not None:\n # footing pads have different widht than xy_offset\n adjustEndPoints = parse_width(adjustByProfile) / 2\n #undisclosed_ending =\n first_item = True\n for start,end in footingLines:\n vector = start.GetVectorTo(end)\n #trace(start, )\n corners = Point3.Normalize(vector, adjustEndPoints)\n #trace(\"start: {0}, end:{1}, direction: {2} translate vector:\n #{3}\".format(start, end, vector, corners,))\n #start.Translate(corners)\n #end.Translate(corners)\n aa = start.Clone()\n bb = end.Clone()\n if not first_item or is_closed_loop(master_polygon):\n aa.Translate(corners)\n #else:\n # trace(\"------------ skip 1\")\n #if not is_last_item(master_polygon, bb) and\n #is_closed_loop(master_polygon):\n bb.Translate(corners)\n #else:\n # trace(\"------------ skip -1\")\n polygonMidpoints.append((aa,bb,))\n #footings.append({\n # \"profile\": profile,\n # \"points\": [aa, bb],\n # \"material\": material\n #})\n first_item = False\n return polygonMidpoints\n\ndef looper(gx, gy, gz, xy1, xy2, zlow=0, zup=1):\n grid_points = []\n for x,y in [xy1, xy2]:\n grid_points.append((x,y,zlow))\n for x,y in [xy2, xy1]:\n grid_points.append((x,y,zup))\n return generate_loop(gx, gy, gz, grid_points)\n\ndef create_inside(chimney_x, chimney_y):\n # create grids\n boards = 22 #+ 13\n outer_wall = 100 + boards\n inside_wall = 170 #100 + 2*boards\n porch_off = porch_depth + outer_wall\n igrid_x = [outer_wall, chimney_x - outer_wall, 620-inside_wall/2, inside_wall/2, 5280-boards]\n igrid_y = [porch_off, (chimney_y-porch_off), inside_wall/2, 900-inside_wall, inside_wall/2, 3520-boards]\n igrid_z = [1200.00, 2500.0]\n # first inside wall\n wall1, aabb1 = create_inside_wall(looper(igrid_x, igrid_y, igrid_z, (0,2), (1,2)), holedef=HoleDef(1250, \"9*19\"))\n wall2, aabb2 = create_inside_wall(looper(igrid_x, igrid_y, igrid_z, (3,3), (4,3)), holedef=HoleDef(1600, \"9*19\"))\n wall3, aabb3 = create_inside_wall(looper(igrid_x, igrid_y, igrid_z, (2,1), (2,0)), holedef=HoleDef(1100, \"9*19\"))\n wall4, aabb4 = create_inside_wall(looper(igrid_x, igrid_y, igrid_z, (2,4), (2,5)), holedef=HoleDef(1000, \"9*19\"))\n return [(wall1, aabb1), (wall2, aabb2), (wall3, aabb3), (wall4, aabb4)]\n\ndef create_inside_wall(wall_loop, holedef=None):\n transform, rotation = create_vertical_stdplane(wall_loop[:2])\n wall_local = 
transform.convertToLocal(wall_loop)\n    #length = wall_local[1].x\n    profile = \"50*100\"\n    halfpro = 25\n    # bottom plate (alajuoksu); not really how it should go, but..\n    lower = [wall_local[0].CopyLinear(0, halfpro, 0), wall_local[1].CopyLinear(0, halfpro, 0)]\n    #upper = [wall_local[-1].CopyLinear(0, -halfpro, 0), wall_local[-2].CopyLinear(0, -halfpro, 0)]\n    # studs\n    for point in wall_local[:2]:\n        # up by 50 mm\n        point.Translate(0, halfpro*2, 0)\n    #for point in wall_local[2:]:\n    #    # down by 50 mm\n    #    point.Translate(0, -halfpro*2, 0)\n    on_face_point_pairs = create_hatch(wall_local, 600.0, halfpro, halfpro)\n    # to some csys?\n    stud_data = []\n    for pp in [lower]:\n        to_world = transform.convertToGlobal(pp)\n        stud_data.append(create_wood_at(to_world[0], to_world[1], profile, Rotation.TOP))\n    legal_face_point_pairs = []\n    aabb = None\n    if holedef is None:\n        legal_face_point_pairs = on_face_point_pairs\n    else:\n        x0, y0, dx, dy = holedef.minmax_coords()\n        low, high = holedef.minmax_points()\n        ## aabb\n\n        height = holedef.height()-halfpro*2\n        left = Point3(low.x-halfpro, 2*halfpro, 0)\n        right = Point3(high.x+halfpro, 2*halfpro, 0)\n        stud_found = False\n        for pp in on_face_point_pairs:\n            #if pp[0].x < x0-halfpro and pp[0].distFrom(left) < 100:\n            #    pp[0].x = left.x\n            #    pp[1].x = left.x\n            if pp[0].x > x0-3*halfpro and pp[0].x < x0+dx+3*halfpro:\n                stud_found = True\n            else:\n                legal_face_point_pairs.append(pp)\n        if stud_found:\n            legal_face_point_pairs.append([left, left.CopyLinear(0,height,0)])\n            legal_face_point_pairs.append([right, right.CopyLinear(0,height,0)])\n            # upper wood\n            over = [left.CopyLinear(-halfpro,height+halfpro,0), right.CopyLinear(halfpro,height+halfpro,0)]\n            to_world = transform.convertToGlobal(over)\n            stud_data.append(create_wood_at(to_world[0], to_world[1], profile, Rotation.TOP))\n\n    for pp in legal_face_point_pairs:\n        to_world = transform.convertToGlobal(pp)\n        stud_data.append(create_wood_at(to_world[0], to_world[1], profile, rotation-1))\n    # hatch..\n    pps_local = create_hatch(wall_local, 100, first_offset=0, horizontal=True, holes=None)\n    for pp in pps_local:\n        # both sides\n        l, r = pp[0], pp[1]\n        board_strength = 22/2\n        l1 = l.CopyLinear(0,0,50+board_strength)\n        r1 = r.CopyLinear(0,0,50+board_strength)\n        l2 = l.CopyLinear(0,0,-(50+board_strength))\n        r2 = r.CopyLinear(0,0,-(50+board_strength))\n        for p1,p2 in [(l1,r1),(l2,r2)]:\n            to_world = transform.convertToGlobal([p1,p2])\n            stud_data.append(create_wood_at(to_world[0], to_world[1], \"22*100\", Rotation.FRONT))\n    # aabb's\n    aabb = None\n    if holedef is not None:\n        low, high = holedef.minmax_points()\n        to_world = transform.convertToGlobal([low, high])\n        aabb = create_cut_aabb(to_world)\n\n    return stud_data, aabb\n\n\nfrom json import JSONEncoder\nclass MyEncoder(JSONEncoder):\n    def default(self, o):\n        return o.__dict__\n\nif __name__ == \"__main__\":\n    \"\"\"\n    This script generates parts for purulaatikko\n\n    Usage:\n    $ python geometry.py <specification.json>\n\n    After completion, implement geometry generator in Tekla..\n\n    ..to view what has changed.\n\n    TODO list\n    - floor runners (lattiajuoksut)\n    - intermediate-floor joists (valipohjavasat)\n    - attic truss (ullakko ristikko)\n    - (holpat)\n    - loop-object with continues(), corners adjust\n    \"\"\"\n    filename = sys.argv[1]\n    specification = json.load(open(filename))\n\n    roofangle = specification['roofangle']\n    #porch_decline = porch_depth*math.tan(math.radians(roofangle))\n\n    # todo: add centerline to master grid later..\n    grid_z = specification['grid_z']\n    trace(\"roof tip error: ~\", int(round(abs(3800*math.tan(math.radians(roofangle))-grid_z[-1]))), \"mm.\")\n    # ridge (harja)\n    #grid_z.append(grid_z[-1] + math.tan(math.radians(roofangle)))\n    #centerline = [Point3(0, porch_depth + 7600.0 / 2, 0), Point3(sum(grid_x), porch_depth + 7600.0 / 2, 0)]\n\n    #trace(\"CONVERTED:\\n\" + pprint.pformat(attr_dict))\n    write_out(specification)\n\n","repo_name":"kakkaroolari/purulaatikkogeneraattori","sub_path":"python-proto/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":34922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
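A note on one helper in the geometry record above: toDistances turns successive span lengths into cumulative coordinates, which is what generate_loop indexes into with its (x, y, z) index tuples. For example (made-up spans):

    toDistances([1000, 2000, 500])   # -> [1000.0, 3000.0, 3500.0]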
+{"seq_id":"34195579740","text":"# **************************************************************************\n# *\n# * Authors: Grigory Sharov (gsharov@mrc-lmb.cam.ac.uk)\n# *\n# * MRC Laboratory of Molecular Biology, MRC-LMB\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\n\nfrom pyworkflow.protocol.params import StringParam\nfrom pyworkflow.em.protocol import ProtProcessParticles\nfrom pyworkflow.em import ALIGN_PROJ\nfrom convert import (writeSetOfParticles, getVersion, V1_3,\n V1_4, readSetOfParticles, setRelionAttributes)\nimport pyworkflow.em.metadata as md\n\n \nclass ProtRelionExpandSymmetry(ProtProcessParticles):\n \"\"\"\n Given an input set of particles with angular assignment,\n expand the set by applying a pseudo-symmetry.\n\n Be aware that input symmetry values follow Xmipp conventions as described in:\n http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/Symmetry\n \"\"\"\n _label = 'expand symmetry'\n\n @classmethod\n def isDisabled(cls):\n return getVersion() in [V1_3, V1_4]\n\n #--------------------------- DEFINE param functions -------------------------------------\n def _defineProcessParams(self, form):\n form.addParam('symmetryGroup', StringParam, default=\"c1\",\n label='Symmetry group',\n help=\"See http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/Symmetry\"\n \" for a description of the symmetry groups format in Xmipp.\\n\")\n form.addParallelSection(threads=0, mpi=0)\n\n #--------------------------- INSERT steps functions -------------------------------------\n def _insertAllSteps(self):\n imgsFn = self._getPath('input_particles.star')\n self._insertFunctionStep('convertInputStep', imgsFn)\n self._insertFunctionStep('expandSymmetryStep', imgsFn)\n self._insertFunctionStep('createOutputStep')\n\n #--------------------------- STEPS functions --------------------------------------------\n\n def convertInputStep(self, outputFn):\n \"\"\" Create a metadata with the images and geometrical information. 
\"\"\"\n writeSetOfParticles(self.inputParticles.get(), outputFn, self._getPath())\n\n def expandSymmetryStep(self, imgsFn):\n outImagesMd = self._getExtraPath('expanded_particles.star')\n args = \" --i %s --sym %s --o %s\" % (imgsFn, self.symmetryGroup.get(),\n outImagesMd)\n self.runJob(\"relion_particle_symmetry_expand\", args)\n\n def createOutputStep(self):\n imgSet = self.inputParticles.get()\n partSet = self._createSetOfParticles()\n partSet.copyInfo(imgSet)\n outImagesMd = self._getExtraPath('expanded_particles.star')\n\n mdOut = md.MetaData(outImagesMd)\n mdOut.removeLabel(md.RLN_IMAGE_ID) # remove repeating rlnImageId in mdOut\n mdOut.write(outImagesMd, md.MD_OVERWRITE)\n\n readSetOfParticles(outImagesMd, partSet, alignType=ALIGN_PROJ,\n postprocessImageRow=self._postprocessImageRow)\n\n self._defineOutputs(outputParticles=partSet)\n self._defineSourceRelation(imgSet, partSet)\n\n #--------------------------- INFO functions ---------------------------------------------\n def _summary(self):\n summary = []\n if not hasattr(self, 'outputParticles'):\n summary.append(\"Output particles not ready yet.\")\n else:\n summary.append(\"Symmetry: %s\" % self.symmetryGroup.get())\n return summary\n \n def _validate(self):\n errors = []\n self.validatePackageVersion('RELION_HOME', errors)\n\n if not self.inputParticles.get().hasAlignmentProj():\n errors.append('Input particles must have angular assignment.')\n\n return errors\n \n def _citations(self):\n return []\n \n def _methods(self):\n methods = []\n\n return methods\n\n #--------------------------- Utils functions --------------------------------------------\n def _postprocessImageRow(self, img, imgRow):\n setRelionAttributes(img, imgRow, md.RLN_MLMODEL_GROUP_NAME)\n\n","repo_name":"joshua4289/scipion_1_2_headless","sub_path":"pyworkflow/em/packages/relion/protocol_expand_symmetry.py","file_name":"protocol_expand_symmetry.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28419961380","text":"class Node:\n def __init__(self,val,left=None,right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef serialize(root):\n if root != None:\n data = root.val\n if root.left != None:\n data += \"-\" + serialize(root.left)\n if root.right != None:\n data += \"-\" + serialize(root.right)\n return data\n\ndef deserialize(treestring):\n tree_data = treestring.split(\"-\")\n return decode(tree_data)\n\ndef decode(data):\n node = Node(None)\n if len(data) > 0:\n val = data.pop(0)\n node.val = val\n node.left = decode(data)\n node.right = decode(data)\n return node\n\nnode = Node('root', Node('left', Node('left.left'), Node('right.right')), Node('right'))\nassert deserialize(serialize(node)).left.left.val == 'left.left'","repo_name":"Alferdize/Data-Structure-and-Algorithms","sub_path":"problem3/four.py","file_name":"four.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1623940783","text":"from datetime import datetime\n\nfrom content.component.expand.view import ExpandView\nfrom content.component.include_page.view import IncludePageView\nfrom core.git.branch import Branch\nfrom core.specs.service.spec import ServiceType\nfrom data.confluence.model.title import NetworkBranchDiagramPageTitle, NetworkBranchLinksPageTitle\nfrom data.specs.service_spec_ext import ServiceSpecExt\nfrom data.template.templates_storage import HtmlTemplatesStorage, HtmlPageTemplateName\n\n\nclass NetworkBranchView:\n __html_templates_storage: HtmlTemplatesStorage\n\n __include_page_view: IncludePageView\n\n __expand_view: ExpandView\n\n def __init__(self, html_templates_storage: HtmlTemplatesStorage):\n self.__html_templates_storage = html_templates_storage\n self.__include_page_view = IncludePageView(html_templates_storage)\n self.__expand_view = ExpandView(html_templates_storage)\n\n def __template(self, spec: ServiceSpecExt):\n if spec.type == str(ServiceType.kafka.value):\n template_name = HtmlPageTemplateName.network_branch_kafka\n else:\n template_name = HtmlPageTemplateName.network_branch\n return self.__html_templates_storage.get_page(template_name)\n\n async def render(self, spec: ServiceSpecExt, branch: Branch) -> str:\n render_params = {\n \"include_page_network_diagram\": await self.__include_page_view.render(\n NetworkBranchDiagramPageTitle.title(spec, branch)),\n \"include_page_network_links\": await self.__include_page_view.render(\n NetworkBranchLinksPageTitle.title(spec, branch)),\n \"current_time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n html_s = await self.__template(spec).render_async(render_params)\n return html_s\n","repo_name":"rshafeev/arch-specs","sub_path":"src/confluence_publisher/src/content/page/handbook/network/branch/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"34477195924","text":"#!/usr/bin/env python\n\n\"\"\"\nSpringAlgProject.py by Ryan Becker and Ginnie White\n\nBased on schedulability.py - suite of schedulability tests by Tanya Amert\n\nNOTE: To run this, you must have taskset.py in the same folder!\n\"\"\"\n\nfrom taskset import TaskSetJsonKeys, Task\n\nimport matplotlib.pyplot as plt\nimport random\n\ndef getUniformValue(a, b):\n \"\"\"\n Returns a value uniformly selected from the range [a,b].\n \"\"\"\n return random.uniform(a,b)\n\n# Per-task density functions\ndensityFunc = lambda : getUniformValue(0.1, 0.5)\n#Function included in case we want to explore different ways of generating numbers\ndensityFunc2 = lambda: random.randint(0.1, 0.5)\n\n# deadlines are in milliseconds\ndeadlineFunc = lambda : getUniformValue(1, 10)\n#Function included in case we want to explore different ways of generating numbers\ndensityFunc2 = lambda: random.randint(1,10)\n\n## TODO: update to reflect density, potentially ditch the periodFunc\ndef generateRandomTaskSet(targetDensity, densityFunc, deadlineFunc):\n \"\"\"\n Generates a random task set with total density targetDensity.\n\n Just returns the task set as a list of Task objects, rather than\n the proper TaskSet type.\n \"\"\"\n densitySum = 0\n\n # Generate tasks until the utilization target is met\n taskSet = []\n i = 0\n while densitySum < targetDensity:\n taskId = i+1\n # Choose the utilization for the task based on the utilization function\n task_density = densityFunc()\n # If the task's utilization would push it over the target, instead choose\n # its utilization to be the remaining utilization to reach the target sum.\n if task_density+ densitySum > targetDensity:\n task_density = targetDensity - densitySum\n densitySum += task_density\n # Choose task parameters:\n # * offset\n offset = random.randint(0, 10)\n # * period\n relativeDeadline = deadlineFunc()\n # * WCET <-- change this to be task density * min(di, ti)\n wcet = task_density * relativeDeadline\n\n period = 0\n i += 1\n\n # Build the dictionary for the task parameters\n taskDict = {}\n taskDict[TaskSetJsonKeys.KEY_TASK_ID] = taskId\n taskDict[TaskSetJsonKeys.KEY_TASK_PERIOD] = period\n taskDict[TaskSetJsonKeys.KEY_TASK_WCET] = wcet\n taskDict[TaskSetJsonKeys.KEY_TASK_DEADLINE] = relativeDeadline\n taskDict[TaskSetJsonKeys.KEY_TASK_OFFSET] = offset\n\n task = Task(taskDict)\n taskSet.append(task)\n\n return taskSet\n\n'''\nCompute the schedule that the heuristic would use on the task set, then sees if\nthat schedule is actually feasible. Return True if it is, False otherwise.\nscore_calculator changes based on the heuristic used to order tasks the way we\nwant. 
Computed dynamically, so the computer only makes choices about task sets\nafter they have been releeased\n'''\ndef testFunc(taskSet, score_calculator):\n #actually come up with the order\n time = 0\n tree= []\n\n topJob = 0\n topJobScore = 10000\n #if we have not gone through all the tasks, keep going\n while (len(tree) != 0 or len(taskSet) != 0):\n jobsToRemove = []\n nextList = []\n for task in taskSet:\n #get all jobs that released at or before time\n if task.offset <= time:\n tree.append(task)\n jobsToRemove.append(task)\n #remove all the released jobs from the taskSet\n for task in taskSet:\n if task not in jobsToRemove:\n nextList.append(task)\n taskSet = nextList\n\n if (len(tree) !=0):\n topJob = tree[0]\n topJobScore = score_calculator(topJob)\n #pick job via Heuristic\n for task in tree:\n if score_calculator(task) < topJobScore:\n topJob = task\n topJobScore = score_calculator(task)\n\n time += topJob.wcet\n if (topJob.offset + topJob.relativeDeadline < time):\n return False\n tree.remove(topJob)\n\n #If nothing released at the time, then increment time by one and keep going\n else:\n time += 1\n #if we made it through all the jobs, the schedule is feasible\n return True\n\n#These calculate the score each heuristic\n#Heuristic 1: Order by the shortest relative deadline\ndef h1Score(task):\n return task.relativeDeadline\n#Heuristic 2: Di + W*Ci. Paper was unclear about what weight to use so we chose 3\ndef h2Score(task):\n return (task.relativeDeadline + (3 * task.wcet))\n#Heuristic #: Di - (Ri + Ci), laxity\ndef h3Score(task):\n return ((task.relativeDeadline - (task.offset + task.wcet)))\n#Heuristic 4: Order by lowest task density\ndef h4Score(task):\n return (task.wcet/ task.relativeDeadline)\n#Heuristic 5: Order by lowest task density (with denominator squared)\ndef h5Score(task):\n return (task.wcet/ (task.relativeDeadline**2))\n\n\ndef checkSchedulability(numTaskSets, targetUtilization, densityFunc, deadlineFunc, scoreFunc):\n \"\"\"\n Generates numTaskSets task sets using a given density-generation function\n and a given deadline-generation function, such that the task sets have a given\n target system density. 
Uses the given schedulability test along with a function\n that makes choices based on a heuristic to determine what fraction of the\n task sets are schedulable.\n\n Returns: the fraction of task sets that pass the schedulability test.\n \"\"\"\n count = 0\n for i in range(numTaskSets):\n taskSet = generateRandomTaskSet(targetUtilization, densityFunc, deadlineFunc)\n\n if testFunc(taskSet, scoreFunc):\n count += 1\n\n return count / numTaskSets\n\ndef performTests(numTests):\n densityVals = []\n for i in range(10):\n val = 0.1 + i * 0.1\n densityVals.append(val)\n\n\n results = {}\n results[\"Di\"] = []\n results[\"Di + W*Ci\"] = []\n results[\"Laxity\"] = []\n results[\"Density\"] = []\n results[\"Modified Density\"] = []\n\n for density in densityVals:\n h1Result = checkSchedulability(numTests, density, densityFunc, deadlineFunc, h1Score)\n h2Result = checkSchedulability(numTests, density, densityFunc, deadlineFunc, h2Score)\n h3Result = checkSchedulability(numTests, density, densityFunc, deadlineFunc, h3Score)\n h4Result = checkSchedulability(numTests, density, densityFunc, deadlineFunc, h4Score)\n h5Result = checkSchedulability(numTests, density, densityFunc, deadlineFunc, h5Score)\n\n results[\"Di\"].append(h1Result)\n results[\"Di + W*Ci\"].append(h2Result)\n results[\"Laxity\"].append(h3Result)\n results[\"Density\"].append(h4Result)\n results[\"Modified Density\"].append(h5Result)\n\n return densityVals, results\n\ndef plotResults(densityVals, results):\n plt.figure()\n\n LINE_STYLE = ['b:+', 'g-^', 'r-s', 'b', 'g']\n\n\n for (styleId, label) in enumerate(results):\n yvals = results[label]\n\n plt.plot(densityVals, yvals, LINE_STYLE[styleId], label=label)\n\n plt.legend(loc=\"best\")\n\n plt.xlabel(\"System Density\")\n plt.ylabel(\"Schedulability\")\n plt.title(\"Schedulability for Different Heuristics\")\n\n plt.show()\n\ndef testSchedulability():\n random.seed(None) # seed the random library\n\n # Perform the schedulability tests\n densityVals, results = performTests(1000)\n\n # Plot the results\n plotResults(densityVals, results)\n\nif __name__ == \"__main__\":\n testSchedulability()\n","repo_name":"Ginnie313/Past-CS-Work","sub_path":"RT Project/SpringAlgProject.py","file_name":"SpringAlgProject.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
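To make the five scoring functions in the record above concrete, here is what each returns for one sample task when run in the same module (the namedtuple stands in for taskset.Task, assuming it exposes the same offset/wcet/relativeDeadline attributes):

    from collections import namedtuple
    T = namedtuple('T', 'offset wcet relativeDeadline')
    t = T(offset=0, wcet=2.0, relativeDeadline=8.0)
    h1Score(t)  # 8.0      deadline only (EDF-like ordering)
    h2Score(t)  # 14.0     8 + 3*2
    h3Score(t)  # 6.0      laxity: 8 - (0 + 2)
    h4Score(t)  # 0.25     density: 2/8
    h5Score(t)  # 0.03125  2 / 8**2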
+{"seq_id":"17274067489","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom chandelier_web.apps.admin_page_locations.models import Location, OpeningHour\nfrom chandelier_web.apps.admin_page_states.models import State\nfrom chandelier_web.apps.admin_page_themes.models import Theme\nfrom chandelier_web.apps.home_page.models import Message\nfrom chandelier_web.apps.home_page.form import MessageForm\n\n# Create your views here.\nclass indexHome(View):\n def get(self, request, *args, **kwargs):\n locations = Location.objects.all()\n estados = State.objects.all()\n return render(request, 'index.html', {\n 'locations': locations, \n 'estados': estados,\n })\n \nclass fastQuoteHome(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'fastQuote.html')\n \nclass quoteHome(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'quote.html')\n \nclass locationHome(View):\n def get(self, request ,reference, id):\n temas = Theme.objects.all()\n dato = None\n places = None\n if reference != \"all\":\n if reference == \"estado\":\n dato = State.objects.get(id=id)\n places = Location.objects.filter(state=id)\n else:\n dato = Theme.objects.get(id=id)\n places = Location.objects.filter(theme=id)\n else:\n places = Location.objects.all()\n new_reference = reference\n return render(request, 'location.html',{\n 'temas':temas,\n 'new_reference':new_reference,\n 'dato':dato,\n 'locations':places,\n })\n\nclass locationInfoHome(View):\n def get(self, request, id, **kwargs):\n location = Location.objects.get(id=id)\n opening_hours = OpeningHour.objects.filter(location=location)\n \n return render(request, 'locationInfo.html', {\n 'location': location,\n 'opening_hours': opening_hours,\n })\n \nclass aboutUsHome(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'aboutUs.html')\n \nclass contactHome(View):\n def get(self, request, *args, **kwargs):\n form = MessageForm()\n \n return render(request, 'contactUs.html', {\n 'form': form,\n })\n \n def post(self, request, *args, **kwargs):\n form = MessageForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n return render(request, 'contactUs.html', {\n 'form': form,\n })\n return redirect('contactHome')\n","repo_name":"AlexOT03/Chandelier_Web2.0","sub_path":"chandelier_web/apps/home_page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"20425399743","text":"#time:O(logn)\n#space:O(1)\n#LeetCode: Accepted\n#Problem Faced:applying binary search in the region where the array is rotated\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n l=0\n r=len(nums)-1\n while l <= r:\n mid=l+(r-l)//2\n \n #if the element at mid is target then return mid\n if nums[mid]==target:\n return mid\n #check in which half is the target element\n elif nums[mid] >= nums[l]:\n #checking weather the target is rotated half or not\n if target >= nums[l] and nums[mid] > target:\n r = mid - 1\n else:\n l = mid + 1\n else:\n if target <= nums[r] and target > nums[mid]:\n l=mid+1\n else:\n r=mid-1\n #not found the target so returning -1\n return -1","repo_name":"08vishal/Binary-Search-1","sub_path":"Search_in_a_Rotated_Sorted_Array.py","file_name":"Search_in_a_Rotated_Sorted_Array.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"}
+{"seq_id":"28319371587","text":"from twarc import Twarc2, expansions\nfrom pathlib import Path \nimport json\nimport config\nimport os\nimport config\n\nappConfig = config.Config\nclient = Twarc2(bearer_token=appConfig.bearer_token)\nfile_path = Path(f\"{appConfig.file_path}{appConfig.file_name}\")\n\ndef main():\n # result params \n try:\n search_results = client.search_all(\n query=appConfig.query,\n start_time = appConfig.start_date,\n end_time = appConfig.end_date,\n max_results = appConfig.max_results\n )\n except Exception as e:\n print(f\"An exception ocurred: {e}\")\n\n # write results\n try:\n for page in search_results:\n result = expansions.flatten(page)\n # print(json.dumps(tweet))\n with open(file_path, \"a+\") as fh:\n for tweet in result:\n fh.write(\"%s\\n\" % json.dumps(tweet))\n except IOError as e:\n print(f\"Error: Issue writing to path {file_path}: {e}\")\n else:\n print(f\"Write to {file_path} successful.\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"kecleveland/mhdn_app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"22881571487","text":"import os\nimport cProfile\nimport pstats\n\ndef do_profile(prof_file, sorted_by=\"tottime\"):\n '''\n 性能分析装饰器:\n args: \n prof_file: prof文件名\n sorted_by: 按照指定指标排序\n '''\n def wrapper(func):\n def profiled_func(*args, **kwargs):\n # 获取环境变量,判断是否进行性能分析\n DO_PROF = os.getenv('PROFILING')\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n # 排序\n ps = pstats.Stats(profile).sort_stats(sorted_by)\n ps.dump_stats(prof_file)\n else:\n result = func(*args, **kwargs)\n return result\n return profiled_func\n return wrapper","repo_name":"TJMATH/DesignMode","sub_path":"tools/kprofile.py","file_name":"kprofile.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"6208047792","text":"import numpy as np\nimport pandas as pd\nimport re, string, demoji\nfrom sklearn.metrics import classification_report\nfrom sklearn.preprocessing import LabelEncoder\nfrom npc_gzip.compressors.base import BaseCompressor\nfrom npc_gzip.compressors.gzip_compressor import GZipCompressor\nfrom npc_gzip.knn_classifier import KnnClassifier\n\n# Deep Cleaning methods from https://www.kaggle.com/code/ludovicocuoghi/twitter-sentiment-analysis-with-bert-vs-roberta\ndef strip_emoji(text):\n return demoji.replace(text, '') #remove emoji\n\n#Remove punctuations, links, mentions and \\r\\n new line characters\ndef strip_all_entities(text): \n text = text.replace('\\r', '').replace('\\n', ' ').replace('\\n', ' ').lower() #remove \\n and \\r and lowercase\n text = re.sub(r\"(?:\\@|https?\\://)\\S+\", \"\", text) #remove links and mentions\n text = re.sub(r'[^\\x00-\\x7f]',r'', text) #remove non utf8/ascii characters such as '\\x9a\\x91\\x97\\x9a\\x97'\n banned_list= string.punctuation + 'Ã'+'±'+'ã'+'¼'+'â'+'»'+'§'\n table = str.maketrans('', '', banned_list)\n text = text.translate(table)\n return text\n\n#clean hashtags at the end of the sentence, and keep those in the middle of the sentence by removing just the # symbol\ndef clean_hashtags(tweet):\n new_tweet = \" \".join(word.strip() for word in re.split('#(?!(?:hashtag)\\b)[\\w-]+(?=(?:\\s+#[\\w-]+)*\\s*$)', tweet)) #remove last hashtags\n new_tweet2 = \" \".join(word.strip() for word in re.split('#|_', new_tweet)) #remove hashtags symbol from words in the middle of the sentence\n return new_tweet2\n\n#Filter special characters such as & and $ present in some words\ndef filter_chars(a):\n sent = []\n for word in a.split(' '):\n if ('$' in word) | ('&' in word):\n sent.append('')\n else:\n sent.append(word)\n return ' '.join(sent)\n\ndef remove_mult_spaces(text): # remove multiple spaces\n return re.sub(\"\\s\\s+\" , \" \", text)\n\ndef pass_data():\n\n train_df = pd.read_csv('Corona_NLP_train.csv', encoding='latin-1')\n\n train_text = []\n for t in train_df.OriginalTweet:\n train_text.append(remove_mult_spaces(filter_chars(clean_hashtags(strip_all_entities(strip_emoji(t))))))\n\n train_df['text'] = train_text\n train_df['label'] = train_df['Sentiment']\n \n test_df = pd.read_csv('Corona_NLP_test.csv', encoding='latin-1')\n\n test_text = []\n for t in test_df.OriginalTweet:\n test_text.append(remove_mult_spaces(filter_chars(clean_hashtags(strip_all_entities(strip_emoji(t))))))\n \n test_df['text'] = test_text\n test_df['label'] = test_df['Sentiment']\n\n return test_df, train_df\n\ndef get_data():\n train_df, test_df = pass_data()\n\n # Splitting the dataset\n X_train, X_test, y_train, y_test = train_df['text'], test_df['text'], train_df['label'], test_df['label'] \n\n # Convert to numpy arrays\n label_encoder = LabelEncoder()\n\n train_text = X_train.to_numpy()\n train_labels = label_encoder.fit_transform(y_train.to_numpy())\n test_text = X_test.to_numpy()\n test_labels = label_encoder.fit_transform(y_test.to_numpy())\n\n train = (train_text, train_labels)\n test = (test_text, test_labels)\n\n return (train, test)\n\n\ndef fit_model(\n train_text: np.ndarray, train_labels: np.ndarray, distance_metric: str = \"ncd\"\n) -> KnnClassifier:\n \"\"\"\n Fits a Knn-GZip compressor on the train\n data and returns it.\n\n Arguments:\n train_text (np.ndarray): Training dataset as a numpy array.\n train_labels (np.ndarray): Training labels as a numpy array.\n\n Returns:\n KnnClassifier: Trained Knn-Compressor model ready to make 
predictions.\n \"\"\"\n\n compressor: BaseCompressor = GZipCompressor()\n model: KnnClassifier = KnnClassifier(\n compressor=compressor,\n training_inputs=train_text,\n training_labels=train_labels,\n distance_metric=distance_metric,\n )\n\n return model\n\n\ndef main() -> None:\n print(\"Fetching data...\")\n ((train_text, train_labels), (test_text, test_labels)) = get_data()\n\n print(\"Fitting model...\")\n model = fit_model(train_text, train_labels)\n\n # Randomly sampling from the test set.\n # The IMDb test data comes in with all of the\n # `1` labels first, then all of the `2` labels\n # last, so we're shuffling so that our model\n # has something to predict other than `1`.\n\n random_indicies = np.random.choice(test_text.shape[0], 1000, replace=False)\n\n sample_test_text = test_text[random_indicies]\n sample_test_labels = test_labels[random_indicies]\n\n print(\"Generating predictions...\")\n top_k = 5\n\n # Here we use the `sampling_percentage` to save time\n # at the expense of worse predictions. This\n # `sampling_percentage` selects a random % of training\n # data to compare `sample_test_text` against rather\n # than comparing it against the entire training dataset.\n (distances, labels, similar_samples) = model.predict(\n sample_test_text, top_k, sampling_percentage=0.1\n )\n\n print(classification_report(sample_test_labels, labels.reshape(-1)))\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Kiyotaka86/compression_text_classification","sub_path":"part2/test_codes_part2/cc_test_corona.py","file_name":"cc_test_corona.py","file_ext":"py","file_size_in_byte":5084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
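The "ncd" metric the classifier above is configured with is the normalized compression distance. The npc_gzip package computes it internally; the formula is small enough to restate as an illustrative re-implementation (how the library concatenates x and y may differ in detail):

    import gzip

    def ncd(x: str, y: str) -> float:
        # NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
        cx = len(gzip.compress(x.encode()))
        cy = len(gzip.compress(y.encode()))
        cxy = len(gzip.compress((x + ' ' + y).encode()))
        return (cxy - min(cx, cy)) / max(cx, cy)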
+{"seq_id":"33028544762","text":"import sys\nfrom collections import deque\nfrom itertools import combinations\nsys.setrecursionlimit(10**5)\n\ninput = sys.stdin.readline\n\nN , M = map(int,input().split())\nchicken = []\nhouse = []\narr = [[]for i in range(N)]\nresult = 1e9\nfor i in range(N):\n arr[i] = list(map(int,input().split()))\n for j in range(N):\n if arr[i][j] == 1:\n house.append([i,j])\n elif arr[i][j] == 2:\n chicken.append([i,j])\nfor item in list(combinations(chicken, M)):\n cur = 0\n \n for i in range(len(house)):\n # 전에것들 중에 가장 작은값 보다 클 경우 중지\n if cur >=result:\n break\n cur_c = 1e9\n x = house[i][0]\n y = house[i][1]\n \n # 가장 가까운 치킨집 거리를 찾음\n for x1,y1 in item:\n cur_c = min(cur_c,abs(x-x1)+abs(y-y1))\n cur+=cur_c\n result = min(result,cur)\n # print(result)\nprint(result)","repo_name":"slbin-park/Algorithm","sub_path":"백준/Gold/15686. 치킨 배달/치킨 배달.py","file_name":"치킨 배달.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"28623285028","text":"# Django imports\nfrom django.conf.urls import url, include\n\n# Local imports\nfrom .views import (\n AssignmentDetailAPIView,\n CourseAssignmentDetailAPIView,\n AssignmentSolutionCreateAPIView,\n AssignmentSolutionDetailAPIView,\n AssignmentSolutionFileUploadAPIView,\n AssignmentSolutionDetailWithFileAPIView,\n )\n\nurlpatterns = [\n url(\n r'^assignment/(?P[0-9a-z-]+)/$',\n AssignmentDetailAPIView.as_view(),\n name='user_assignment_detail',\n ),\n url(\n r'^course-assignments/(?P[0-9a-z-]+)/$',\n CourseAssignmentDetailAPIView.as_view(),\n name='course_assignments_detail',\n ),\n url(\n r'^assignment-solution/$',\n AssignmentSolutionCreateAPIView.as_view(),\n name='assignment_solution_create',\n ),\n url(\n r'^assignment-solution-detail/(?P[0-9a-z-]+)/$',\n AssignmentSolutionDetailAPIView.as_view(),\n name='assignment_solution_detail',\n ),\n url(\n r'^assignment-solution-all-details/(?P[0-9a-z-]+)/$',\n AssignmentSolutionDetailWithFileAPIView.as_view(),\n name='assignment_solution_detail_with_file',\n ),\n url(\n r'^assignment-solution-file/$',\n AssignmentSolutionFileUploadAPIView.as_view(),\n name='assignment_solution_file_upload',\n ),\n]","repo_name":"manisharmagarg/oddnary","sub_path":"assignments/api/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"5185836892","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# ==============================================================================\n# \\file main.py\n# \\author chenghuige \n# \\date 2022-02-10 07:02:54.233162\n# \\Description \n# ==============================================================================\n\n \nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom gezi.common import *\nsys.path.append('..')\nsys.path.append('../../../../utils')\nsys.path.append('../../../../third')\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nos.environ[\"NCCL_DEBUG\"] = 'WARNING'\n\n\nfrom tensorflow import keras\nfrom torch.utils.data import DataLoader\n\nfrom transformers import (\n AutoModelForMaskedLM,\n DataCollatorForLanguageModeling,\n)\nimport datasets\n\nimport gezi\nfrom gezi import tqdm\nlogging = gezi.logging\nimport melt as mt\nimport lele\n\nimport src\nimport src.eval as ev\nfrom src import config\nfrom src.config import *\nfrom src.preprocess import *\nfrom src import util\n\nimport tensorflow as tf\nfrom absl import app, flags\nFLAGS = flags.FLAGS\n\ndef main(_):\n timer = gezi.Timer()\n fit = mt.fit \n \n rank = gezi.get('RANK', 0)\n if rank != 0:\n ic.disable()\n \n FLAGS.caching = True\n FLAGS.hug_inputs = True\n FLAGS.continue_pretrain = False\n FLAGS.online = True\n FLAGS.ep = FLAGS.ep or 8\n FLAGS.bs = FLAGS.bs or 32\n config.init()\n FLAGS.gpus = -1\n ic(FLAGS.gpus, FLAGS.bs)\n backbone = FLAGS.backbone.split('/')[-1]\n # notice for lr, all other converge using 5e-5 except deberta-xlarge which also use bs 8 instead of 16 due to OOM on 4 A100 GPUs, so deberta-xlarge using 2.5e-5\n FLAGS.lr_decay_power = 1.\n \n FLAGS.mn = backbone\n FLAGS.model_dir = f'{FLAGS.root}/pretrain/base'\n FLAGS.sie = 1\n FLAGS.awp_train = False\n FLAGS.ema_train = False\n \n mt.init()\n \n ic(FLAGS.model_dir, FLAGS.mn, FLAGS.backbone, backbone)\n tokenizer = get_tokenizer(FLAGS.backbone)\n ic(tokenizer)\n \n text_column_name = 'text'\n \n dfs = []\n # ifile = f'{FLAGS.root}/pppm-abstract/pppm_abstract.csv'\n ifile = f'{FLAGS.root}/sampled-patent-titles/sampled-patent-titles.csv'\n df = pd.read_csv(ifile)\n # df[text_column_name] = df['abstract']\n df[text_column_name] = df['title'] + '[SEP]' + df['abstract']\n df['text_len'] = df[text_column_name].apply(lambda x: len(x) if isinstance(x, str) else 0)\n df = df[df.text_len > 5]\n df = df[[text_column_name]]\n dfs.append(df)\n \n df = pd.concat(dfs)\n ds = datasets.Dataset.from_pandas(df)\n \n ic(ds, ds[-1])\n num_proc = 32 if FLAGS.pymp else 1\n gezi.try_mkdir(f'{FLAGS.root}/cache')\n \n def preprocess(text, method=None):\n return text\n \n ds = ds.map(lambda example: {text_column_name: preprocess(example[text_column_name])}, \n remove_columns=ds.column_names,\n batched=False, \n num_proc=num_proc, \n )\n ic(ds, ds[-1])\n\n def tokenize_function(examples):\n # Remove empty lines\n examples[text_column_name] = [\n line for line in examples[text_column_name] if line and len(line) > 0 and not line.isspace()\n ]\n return tokenizer(\n examples[text_column_name],\n padding=False,\n truncation=True,\n max_length=256,\n return_special_tokens_mask=True,\n )\n\n ds = ds.map(\n tokenize_function,\n batched=True,\n num_proc=num_proc,\n remove_columns=[text_column_name],\n desc=\"Running tokenizer on dataset line_by_line\",\n )\n ic(ds)\n \n collate_fn = DataCollatorForLanguageModeling(\n tokenizer=tokenizer,\n mlm_probability=0.15,\n pad_to_multiple_of=None,\n 
)\n\n  # collate_fn=gezi.DictPadCollate()\n  sampler = lele.get_sampler(ds, shuffle=True)\n  kwargs = {'num_workers': 8, 'pin_memory': True, 'persistent_workers': True, 'collate_fn': collate_fn} \n  # use the DataLoader imported from torch.utils.data (the bare name `torch` is never imported here)\n  dl = DataLoader(ds, batch_size=gezi.batch_size(), sampler=sampler, **kwargs)\n  \n  model = AutoModelForMaskedLM.from_pretrained(FLAGS.backbone)\n  model.resize_token_embeddings(len(tokenizer)) \n  if FLAGS.opt_fused:\n    lele.replace_with_fused_layernorm(model)\n  \n  fit(model, \n      dataset=dl,\n      opt_params=lele.get_opt_params(model, weight_decay=FLAGS.weight_decay),\n      ) \n\n  if rank == 0: \n    tokenizer.save_pretrained(FLAGS.model_dir)\n    model.save_pretrained(FLAGS.model_dir)\n  \nif __name__ == '__main__':\n  app.run(main) \n","repo_name":"chenghuige/U.S.-Patent-Phrase-to-Phrase-Matching","sub_path":"projects/kaggle/usp/src/mlm-main.py","file_name":"mlm-main.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"47"}
+{"seq_id":"71494862542","text":"import argparse\nimport logging\nimport pandas as pd\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom apache_beam.io.gcp import gcsio\n\n# Define the schema dictionaries for each table\nschemas = {\n \"clients\": (\"client_id,client_name,industry,country,state,city,contact_name,contact_email,contact_phone_number\", \"clients_schema\"),\n \"projects\": (\"project_id,client_id,project_name,project_type,start_date,end_date,project_status\", \"projects_schema\"),\n \"employees\": (\"employee_id,first_name,last_name,email,job_title,department,hire_date,country,state,city\", \"employees_schema\"),\n \"financial_transactions\": (\"transaction_id,project_id,employee_id,client_id,transaction_date,transaction_type,amount,currency,description\", \"financial_transactions_schema\"),\n \"customer_behavior\": (\"customer_id,age,gender,country,state,city,total_spending,preferred_communication_channel\", \"customer_behavior_schema\"),\n \"sales\": (\"sales_id,product_service_id,customer_id,employee_id,sale_date,quantity,unit_price,discount,total_amount\", \"sales_schema\"),\n \"marketing_campaigns\": (\"campaign_id,campaign_name,start_date,end_date,channel,target_audience,budget,leads_generated,conversions\",\"marketing_campaigns_schema\"),\n}\n\nclass ReadCSVFiles(beam.PTransform):\n def __init__(self, project_id, bucket_name):\n self.project_id = project_id\n self.bucket_name = bucket_name\n\n def list_csv_files(self):\n from google.cloud import storage\n gcs = storage.Client(project=self.project_id)\n bucket = gcs.get_bucket(self.bucket_name)\n return [f\"gs://{self.bucket_name}/{blob.name}\" for blob in bucket.list_blobs(prefix='raw/') if blob.name.endswith(\".csv\")]\n\n def expand(self, pcoll):\n return (\n pcoll\n | \"List CSV Files\" >> beam.Create(self.list_csv_files())\n | \"Read CSV Files\" >> beam.ParDo(self.read_csv_file())\n )\n\n class read_csv_file(beam.DoFn):\n def process(self, file_name):\n gcs = gcsio.GcsIO()\n with gcs.open(file_name) as f:\n table_name = file_name.split(\"/\")[-1].split(\".\")[0]\n schema, table_schema = schemas[table_name]\n yield (table_name, pd.read_csv(f, header=None, names=schema.split(\",\"), na_values=\"\"))\n\nclass WriteParquetFiles(beam.PTransform):\n def __init__(self, gcp_project, bucket_name):\n self.gcp_project = gcp_project\n self.bucket_name = bucket_name\n\n def expand(self, pcoll):\n return (pcoll\n | \"Drop NA Rows\" >> beam.MapTuple(lambda table_name, df: (table_name, df.dropna()))\n | \"Write Parquet Files\" >> beam.ParDo(self.write_parquet_file(self.bucket_name)))\n\n class write_parquet_file(beam.DoFn):\n def __init__(self, bucket_name):\n self.bucket_name = bucket_name\n\n def process(self, table_name_df):\n table_name, df = table_name_df\n output_file = f\"gs://{self.bucket_name}/process/{table_name}.parquet\"\n gcs = gcsio.GcsIO()\n with gcs.open(output_file, mode=\"wb\") as f:\n df.to_parquet(f, index=False)\n\ndef run(argv=None,save_main_session=True):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--project\", default=\"wu5projectgcp\",help=\"The Google Cloud Project ID.\")\n parser.add_argument(\"--input_bucket\",default=\"wu5raw\",help=\"The name of the input bucket.\")\n parser.add_argument(\"--output_bucket\",default=\"wu5process\",help=\"The name of the output bucket.\")\n known_args, pipeline_args = parser.parse_known_args(argv)\n \n pipeline_options = PipelineOptions(\n 
flags=argv,\n runner='DataflowRunner',\n project='wu5projectgcp',\n job_name='wu5job',\n temp_location='gs://wu5raw/temp',\n region='us-central1')\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n\n with beam.Pipeline(options=pipeline_options) as p:\n (p\n | \"Read CSV Files\" >> ReadCSVFiles(known_args.project, known_args.input_bucket)\n | \"Write Parquet Files\" >> WriteParquetFiles(known_args.project, known_args.output_bucket))\n\nif __name__ == \"__main__\":\n logging.getLogger().setLevel(logging.INFO)\n run()","repo_name":"gomes0499/Deloitte-GCP-Data-Engineering","sub_path":"scripts/process-data.py","file_name":"process-data.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"29861537904","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pandas import read_csv\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nurl = \"vh_data15.csv\"\nnames = ['Trust_Science_Community', 'Fear_Needles', 'Trust_National', 'Vaccine_Trust_Index', 'Household_Income', 'Vaccine_Hesitant']\ndataset = read_csv(url, usecols=names)\ndataset = dataset.dropna(axis=0)\narray = dataset.values\n\nX = array[:,0:4]\ny = array[:,5]\n\nX_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)\n\nmodel = LogisticRegression()\nmodel.fit(X_train, Y_train)\npredictions = model.predict(X_validation)\n\n\nfor i in range(len(predictions)):\n print(predictions[i], '-||-', Y_validation[i])\n\nscore = accuracy_score(predictions, Y_validation)\nprint(score*100,'%')\n","repo_name":"miheerp33/Predicting-vaccine-hesitancy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1195770646","text":"#python3.5\n\nfrom ctypes import cdll\n\napi=cdll.LoadLibrary(\"./test.so\")\n#api=CDLL(\"./test.so\")\napi.add.argtypes[c_int, c_int]\napi.add.restype=c_int\napi.sub.argtypes[c_int, c_int]\napi.sub.restype=c_int\n\n\na=api.add(3,2)\nb=api.sub(3,2)\nprint (a)\nprint (b)\n\n","repo_name":"CoreImagine/Depth_Sensor_Rpi","sub_path":"so_test/call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"25559286408","text":"from nose.tools import eq_\n\nfrom trading_platform.strategy.strategy_execution import StrategyExecution\nfrom trading_platform.core.test.util_methods import eq_ignore_certain_fields\nfrom trading_platform.exchanges.data.financial_data import FinancialData\nfrom trading_platform.storage.daos.strategy_execution_dao import StrategyExecutionDao\nfrom trading_platform.storage.test.daos.test_dao import TestDao\n\n\nclass TestStrategyExecutionDao(TestDao):\n __test__ = True # important, makes sure tests are not run on base class\n dao_class = StrategyExecutionDao\n popo_class = StrategyExecution\n\n def setUp(self):\n self.dto1 = StrategyExecution(**{\n 'strategy_id': 'ema_50_and_200_crossover',\n 'state': {\n 'base': 'BTC',\n 'quote': 'XMR',\n 'buy_order_id': '89y12hidsa781',\n 'sell_price_stop': FinancialData(100),\n 'sell_price_limit': FinancialData(70)\n }\n })\n self.dto2 = StrategyExecution(**{\n 'strategy_id': 'bollinger_band',\n 'state': {\n 'base': 'BTC',\n 'quote': 'NEO',\n 'upper_band': FinancialData(150),\n 'middle_band': FinancialData(140),\n 'lower_band': FinancialData(130)\n }\n })\n\n def test_fetch_by_strategy_id(self):\n self.dao.bulk_save(session=self.session, commit=True, popos=[self.dto1, self.dto2])\n fetched = self.dao.fetch_by_column(session=self.session, column_name='strategy_id',\n column_value=self.dto1.strategy_id)\n eq_(len(fetched), 1)\n eq_ignore_certain_fields(fetched[0], self.dto1, ['db_id', 'db_create_timestamp'])\n\n def test_fetch_by_strategy_execution_id(self):\n self.dao.bulk_save(session=self.session, commit=True, popos=[self.dto1, self.dto2])\n fetched = self.dao.fetch_by_column(session=self.session, column_name='strategy_execution_id',\n column_value=self.dto1.strategy_execution_id)\n eq_(len(fetched), 1)\n eq_ignore_certain_fields(fetched[0], self.dto1, ['db_id', 'db_create_timestamp'])\n","repo_name":"skeller88/trading_platform","sub_path":"trading_platform/storage/test/daos/test_strategy_execution_dao.py","file_name":"test_strategy_execution_dao.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"74478530381","text":"import uuid\nimport time \n\nfrom Backend.wallet.wallet import Wallet\nfrom Backend.config import MINING_REWARD, MINING_REWARD_INPUT\n\nclass Transaction():\n \"\"\"\n Documents exchange of currency from sender to one or more recipients\n \"\"\"\n\n def __init__(self, \n sender_wallet=None, \n recipient=None, \n amount=None,\n id=None,\n input=None,\n output=None\n #None represents a variable that hasn't been given a default value\n ):\n self.id = id or str(uuid.uuid4())[0:8]\n self.output = output or self.create_output(\n sender_wallet,\n recipient,\n amount\n )\n self.input = input or self.create_input(sender_wallet, self.output)\n\n def create_output(self, sender_wallet, recipient, amount):\n \"\"\"\n Structure the output data for transaction.\n \"\"\"\n if amount > sender_wallet.balance:\n raise Exception(\"Amount exceeds sender wallet's balance !\")\n \n output = {}\n output[recipient] = amount\n output[sender_wallet.address] = sender_wallet.balance - amount\n\n return output\n\n def create_input(self, sender_wallet, output):\n \"\"\"\n Structure the input data for transaction. \n Sign the transaction and include sender's public key and address.\n (Others can verify the sign using public key)\n \"\"\"\n\n return {\n 'timestamp' : time.time_ns(),\n 'amount' : sender_wallet.balance,\n 'address' : sender_wallet.address,\n 'public_key' : sender_wallet.public_key,\n 'signature' : sender_wallet.sign(output)\n }\n\n def update(self, sender_wallet, recipient, amount):\n \"\"\"\n Update the transaction with an existing or new recipient. \n \"\"\"\n if amount > sender_wallet.balance:\n raise Exception (\"Amount exceeds sender wallet's balance !\")\n\n if recipient in self.output:\n self.output[recipient] == self.output[recipient] + amount\n else:\n self.output[recipient] = amount \n\n self.output[sender_wallet.address] = \\\n self.output[sender_wallet.address] - amount\n\n self.input = self.create_input(sender_wallet, self.output) \n\n def to_json(self):\n \"\"\"\n Serialize the data into it's dictionary representation\n \"\"\"\n return self.__dict__\n\n @staticmethod\n def from_json(transaction_json):\n \"\"\"\n Deserialize the transaction data in json form into transaction instance.\n \"\"\"\n return Transaction(\n **transaction_json\n )\n \n @staticmethod\n def validate_transaction(transaction):\n \"\"\"\n Validate the transaction.\n Raise an exception for invalid transaction.\n \"\"\"\n if transaction.input == MINING_REWARD_INPUT:\n if list(transaction.output.values()) != [MINING_REWARD]:\n raise Exception('Invalid Mining Reward')\n return\n\n output_total = sum(transaction.output.values())\n \n if output_total != transaction.input['amount']:\n raise Exception (\"Invalid transaction output values\")\n\n if not Wallet.verify_sign(\n transaction.input['public_key'],\n transaction.output, \n transaction.input['signature']\n ):\n raise Exception(\"Invalid transaction signature\")\n\n @staticmethod\n def reward_transaction(miner_wallet):\n \"\"\"\n Generate a reward transaction that awards the miner.\n \"\"\"\n output = {}\n output[miner_wallet.address] = MINING_REWARD\n\n return Transaction(input = MINING_REWARD_INPUT, output = output)\n\n \ndef main():\n sender_wallet = Wallet()\n\n transaction = Transaction(sender_wallet, 'recipient', 12)\n \n\n transaction_json = transaction.to_json()\n restored_json = transaction.from_json(transaction_json)\n\n print(f' transaction_json : {transaction_json}')\n print(f' restored_json.__dict__ : 
{restored_json.__dict__}')\n\nif __name__ == \"__main__\":\n main()","repo_name":"Aysha-Hussaini/cryptocurrency-using-python","sub_path":"Backend/wallet/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"10035941923","text":"#! /usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = [\"Rachel P. B. Moraes\", \"Igor Montagner\", \"Fabio Miranda\"]\n\n\nimport rospy\nimport numpy as np\nimport tf\nimport math\nimport cv2\nimport time\nfrom geometry_msgs.msg import Twist, Vector3, Pose\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import Image, CompressedImage\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cormodule\nfrom sensor_msgs.msg import LaserScan\n\nbridge = CvBridge()\n\nmode = \"Searching\"\ncv_image = None\ndist = None\nmedia = []\ncentro = []\natraso = 1.5E9 # 1 segundo e meio. Em nanossegundos\n\narea = 0.0 # Variavel com a area do maior contorno\n\n# Só usar se os relógios ROS da Raspberry e do Linux desktop estiverem sincronizados. \n# Descarta imagens que chegam atrasadas demais\ncheck_delay = False \n\ndef scaneou(dado):\n global dist #Definindo distância como uma variável global\n dist = np.array(dado.ranges).round(decimals=2)[0]\n\n# A função a seguir é chamada sempre que chega um novo frame\ndef roda_todo_frame(imagem):\n\n\tglobal cv_image\n\tglobal media\n\tglobal dist\n\tglobal centro\n\tglobal mode\n\tglobal area\n\n\tnow = rospy.get_rostime()\n\timgtime = imagem.header.stamp\n\tlag = now-imgtime # calcula o lag\n\tdelay = lag.nsecs\n\n\tif delay > atraso and check_delay==True:\n\t\tprint(\"Descartando por causa do delay do frame:\", delay)\n\t\treturn \n\ttry:\n\t\tantes = time.clock()\n\t\tcv_image = bridge.compressed_imgmsg_to_cv2(imagem, \"bgr8\")\n\t\t# cv_image = cv2.flip(cv_image, -1)\n\t\tmedia, centro, maior_area = cormodule.identifica_cor(cv_image, dist, mode)\n\t\tarea = maior_area\n\t\tdepois = time.clock()\n\t\tcv2.imshow(\"Camera\", cv_image)\n\texcept CvBridgeError as e:\n\t\tprint('ex', e)\n\t\nif __name__==\"__main__\":\n\trospy.init_node(\"cor\")\n\n\t# topico_imagem = \"/kamera\"\n\ttopico_imagem = \"/camera/rgb/image_raw/compressed\"\n\t\n\n\n\n\trecebedor = rospy.Subscriber(topico_imagem, CompressedImage, roda_todo_frame,queue_size=4, buff_size = 2**24)\n\trecebe_scan = rospy.Subscriber(\"/scan\", LaserScan, scaneou)\n\tvelocidade_saida = rospy.Publisher(\"/cmd_vel\", Twist, queue_size = 1)\n\n\ttry:\n\t\twhile not rospy.is_shutdown():\n\t\t\t\n\t\t\tif notturn == True:\n\t\t\t\tcurrent_angle = 0\n\t\t\tvel = Twist(Vector3(0,0,0), Vector3(0,0,0))\n\t\t\tt0 = rospy.Time.now().to_sec()\n\t\t\tangular_speed = (math.pi/10)\n\t\t\tobjects = []\n\t\t\twhile current_angle < (2*math.pi):\n\t\t\t\tvel = Twist(Vector3(0,0,0), Vector3(0,0,angular_speed))\n\t\t\t\tvelocidade_saida.publish(vel)\n\t\t\t\trospy.sleep(0.2)\n\t\t\t\tprint(area)\n\t\t\t\t#objects.append(area)\n\t\t\t\tt1 = rospy.Time.now().to_sec()\n\t\t\t\tcurrent_angle = angular_speed*(t1-t0)\n\t\t\t\tprint(current_angle)\n\t\t\tvel = Twist(Vector3(0,0,0), Vector3(0,0,0))\n\t\t\tnotturn = False\n\t\t\tvelocidade_saida.publish(vel)\n\t\t\trospy.sleep(0.1)\n\n\texcept rospy.ROSInterruptException:\n\t print(\"Ocorreu uma exceção com o rospy\")\n\n","repo_name":"marcosvds/A4_RoboticaComputacional_2020.1","sub_path":"ros/python_aula4/scripts/cor_A4_test.py","file_name":"cor_A4_test.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"32180468399","text":"from pathlib import Path\nfrom typing import List\n\nimport numpy as np\n\nfrom igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer\nfrom image_utils import cvt_img_float_to_uint8\nfrom json_utils import NeRFFrameMetadata\n\n\ndef get_rgb_image_for_camera_params(renderer: MeshRenderer, camera_position: List[float], camera_target: List[float],\n camera_up: List[float], hidden_instances: List = []) -> np.ndarray:\n renderer.set_camera(camera_position, camera_target, camera_up, cache=False)\n frames = renderer.render(modes=(\"rgb\",), hidden=hidden_instances)\n assert len(frames) == 1\n img = frames[0]\n assert len(img.shape) == 3 and img.shape[-1] == 4\n print(img.min(), img.max(), img.shape, img.dtype)\n rgb = img[:, :, :3]\n rgb = cvt_img_float_to_uint8(rgb)\n return rgb\n\n\ndef get_rgb_images_w_metadata_for_list_of_camera_params(renderer: MeshRenderer, camera_params_list: List,\n hidden_instances: List = []) -> List:\n \"\"\"\n camera_params_list should be a list of lists/tuples, each of the format: [camera_position, camera_target, camera_up]\n \"\"\"\n frames_with_metadata = []\n for camera_params in camera_params_list:\n camera_position, camera_target, camera_up = camera_params\n rgb_img_arr = get_rgb_image_for_camera_params(renderer=renderer, camera_position=camera_position,\n camera_target=camera_target, camera_up=camera_up,\n hidden_instances=hidden_instances)\n c2w_transform = np.linalg.inv(renderer.V)\n frame_metadata = NeRFFrameMetadata.from_params(file_path=Path(\"\"), transform_matrix=c2w_transform)\n frames_with_metadata.append([rgb_img_arr, frame_metadata])\n return frames_with_metadata\n","repo_name":"jacob-zietek/coral-lab-dev","sub_path":"utils_syed/igibson_image_utils.py","file_name":"igibson_image_utils.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"35769821626","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\n\nfrom .models import Channel, Program, Episode\n\nimport time\nimport urllib.request, json\nfrom sent_frontend.models import Channel, Program, Episode\nimport requests\nfrom time import mktime\nfrom datetime import datetime, timedelta\nimport pytz\nfrom django.utils.timezone import make_aware\n\ndef the_time(days=0):\n return datetime.now() - timedelta(days=days)\n\ndef create_time(a_time):\n return str(a_time.year) + \\\n \"{:02d}\".format(a_time.month) + \\\n \"{:02d}\".format(a_time.day) + \\\n \"{:02d}\".format(a_time.hour) + \\\n \"{:02d}\".format(a_time.minute)\n \ndef index(request):\n return render(request, 'sent_frontend/index.html')\n\ndef channels(request):\n channel_list = Channel.objects.all()\n \n context = {'channel_list': channel_list}\n return render(request, 'sent_frontend/channels.html', context)\n\ndef programs(request, cid):\n \n c = get_object_or_404(Channel, pk=cid)\n\n\n starttime = create_time(the_time(1))\n endtime = create_time(the_time())\n \n url = \"https://timesofindia.indiatimes.com/tvschedulejson.cms?\" \\\n \"userid=0\" \\\n \"&channellist=\" + c.name.replace(' ', '%20') + \\\n \"&fromdatetime=\" + starttime + \\\n \"&todatetime=\" + endtime + \\\n \"%20&deviceview=other&channellogo=1\"\n \n try:\n data = requests.get(url).json()\n \n for episode in data['ScheduleGrid']['channel'][0]['programme']:\n if not Program.objects.filter(program_id = episode['programmeid']).count():\n p = Program(channel = Channel.objects.get(channel_id=episode['channelid']),\n program_id = episode['programmeid'],\n name = episode['title'],\n image = episode['programmeurl'],\n genre = episode['subgenre'],\n duration = episode['duration'])\n p.save()\n except Exception as e:\n print(\"THERE WAS AN ERROR DURING SCRAPE\\n\" + str(e))\n \n c_programs = Program.objects.filter(channel=c)\n\n context = {'channel' : c, 'programs': c_programs}\n return render(request, 'sent_frontend/programs.html', context)\n\ndef episodes(request, cid, sid):\n\n s = get_object_or_404(Program, pk=sid)\n c = get_object_or_404(Channel, pk=cid)\n\n starttime = create_time(the_time(1))\n endtime = create_time(the_time())\n \n print(starttime)\n print(endtime)\n url = \"https://timesofindia.indiatimes.com/tvschedulejson.cms?\" \\\n \"userid=0\" \\\n \"&channellist=\" + c.name.replace(' ', '%20') + \\\n \"&fromdatetime=\" + starttime + \\\n \"&todatetime=\" + endtime + \\\n \"%20&deviceview=other&channellogo=1\"\n \n print(url)\n \n try:\n data = requests.get(url).json()\n \n for episode in data['ScheduleGrid']['channel'][0]['programme']:\n\n startdate = time.strptime(episode['date'],'%Y%m%d')\n starttime = time.strptime(episode['start'],'%Y%m%d%H%M')\n endtime = time.strptime(episode['stop'],'%Y%m%d%H%M')\n \n startdate = datetime.fromtimestamp(mktime(startdate))\n starttime = datetime.fromtimestamp(mktime(starttime))\n endtime = datetime.fromtimestamp(mktime(endtime))\n\n coimbatore = pytz.timezone('Asia/Kolkata')\n \n startdate = make_aware(startdate, timezone=coimbatore)\n starttime = make_aware(starttime, timezone=coimbatore)\n endtime = make_aware(endtime, timezone=coimbatore)\n\n print(Episode.objects.filter(program = Program.objects.filter(program_id = episode['programmeid']).first(), program__channel = Channel.objects.filter(channel_id=episode['channelid']).first(), airtime=starttime).count())\n \n print(episode['title'])\n\n if not 
Episode.objects.filter(program = Program.objects.filter(program_id = episode['programmeid']).first(), program__channel = Channel.objects.filter(channel_id=episode['channelid']).first(), airtime=starttime).count():\n print('one episode was not there')\n e = Episode(program=Program.objects.get(program_id=episode['programmeid']),\n airdate = startdate,\n airtime = starttime,\n endtime = endtime)\n e.save()\n except Exception as e:\n print(\"THERE WAS AN ERROR DURING SCRAPE\\n\" + str(e))\n\n s_episodes = Episode.objects.filter(program=s)\n\n context = {'show': s, 'episodes': s_episodes, 'channel': c}\n return render(request, 'sent_frontend/episodes.html', context)\n\ndef episode_sentiment(request, cid, sid, eid):\n c = get_object_or_404(Channel, pk=cid)\n s = get_object_or_404(Program, pk=sid)\n e = get_object_or_404(Episode, pk=eid)\n\n context = {'episode': e, 'show': s, 'channel': c}\n return render(request, 'sent_frontend/episode_sentiment.html', context)","repo_name":"clasick/Intellisent","sub_path":"intellisent/sent_frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31126034189","text":"#!/usr/bin/env python3\nimport os\nimport argparse\nimport numpy as np\n\nfrom table_tools import load_table\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description='Mask the data objects to a right ascension / declination '\n 'bound.')\n parser.add_argument(\n '-i', '--input', required=True, help='file path of input data table')\n parser.add_argument(\n '--i-format', default='fits',\n help='astropy.table format specifier of the input tables ')\n parser.add_argument(\n '-b', '--bounds', nargs=4, type=float, required=True,\n help='bounds of polygon in degrees: RA_min RA_max DEC_min DEC_max')\n parser.add_argument(\n '--ra', required=True, help='fits column name of RA')\n parser.add_argument(\n '--dec', required=True, help='fits column name of DEC')\n parser.add_argument(\n '-o', '--output', required=True, help='file path of output table')\n parser.add_argument(\n '--o-format', default='fits',\n help='astropy.table format specifier of the output table '\n '(default: %(default)s)')\n args = parser.parse_args()\n\n RAmin, RAmax, DECmin, DECmax = args.bounds\n # check input bounds\n if not all(-90.0 <= dec <= 90.0 for dec in (DECmin, DECmax)):\n parser.error(\"DEC_min and DEC_max must be between -90 and 90 degrees\")\n if not all(0.0 <= ra <= 360.0 for ra in (RAmin, RAmax)):\n parser.error(\"RA_min and RA_max must be between 0 and 360 degrees\")\n if DECmax <= DECmin:\n parser.error(\"DEC_min must be lower than DEC_max\")\n table = load_table(args.input, args.i_format, [args.ra, args.dec])\n\n # apply filter rule\n print(\n (\"mask data to bounds with RA: %011.7f-%011.7f \" % (RAmin, RAmax)) +\n (\"and DEC: %0+11.7f-%0+11.7f \" % (DECmin, DECmax)))\n # collect RA/DEC from the table\n ra_data = table[args.ra].data\n dec_data = table[args.dec].data\n if RAmax >= RAmin:\n mask = ( # mask data to bounds\n (ra_data >= RAmin) & (ra_data < RAmax) &\n (dec_data >= DECmin) & (dec_data < DECmax))\n else:\n mask = ( # mask data to bounds\n (ra_data >= RAmin) | (ra_data < RAmax) &\n (dec_data >= DECmin) & (dec_data < DECmax))\n if np.count_nonzero(mask) == 0:\n sys.exit(\"ERROR: no data found within RA/DEC limits\")\n masked_table = table[mask]\n # write to specified output path\n print(\n \"removed %d / %d rows\" % (len(table) - len(masked_table), len(table)))\n print(\"write table to: %s\" % args.output)\n masked_table.write(args.output, format=args.o_format, overwrite=True)\n","repo_name":"yanzastro/DC2_mock","sub_path":"pipeline/data_hdf5_mask.py","file_name":"data_hdf5_mask.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"18656541882","text":"import json, jinja2, time, os, json\nfrom data import startgen\nfrom flask import Flask, redirect, render_template, request, url_for, session\n\napp = Flask(__name__)\n\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/quiz\",methods=[\"GET\",\"POST\"])\ndef quiz():\n if request.method == \"POST\":\n loc = request.form['location']\n pop = request.form['pop']\n spec = request.form['spec']\n return render_template(\"postquiz.html\",loc=loc,pop=pop,spec=spec)\n else:\n return render_template(\"quiz.html\")\n\n@app.route(\"/generatedlist///\",methods=[\"GET\",\"POST\"])\ndef retgen(loc,pop,spec):\n gen = startgen(loc,pop,spec)\n retgen = json.dumps(gen)\n #print retgen\n return retgen\n\nif __name__ == '__main__':\n #retgen('rural','large','med')\n app.debug = True\n app.run(host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 8080)))\n","repo_name":"pacqu/STARTSurvey","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"72423055824","text":"# The sandbox API key passed in 'authorization' is public.\n# Don't submit any personally identifiable information in any requests made with this key.\n# Sign in to developer.dojo.tech to create your own private sandbox key and use that instead\n# for secure testing.\n\nimport http.client\n\nconn = http.client.HTTPSConnection(\"api.dojo.tech\")\n\npayload = \"{\\\"emails\\\":[\\\"gabriel@dojo.com\\\",\\\"mari@dojo.com\\\"]}\"\n\nheaders = {\n 'Content-Type': \"application/json\",\n 'Version': \"SOME_STRING_VALUE\",\n 'Authorization': \"Basic REPLACE_BASIC_AUTH\"\n }\n\nconn.request(\"POST\", \"/payment-intents/%7BpaymentIntentId%7D/receipt\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\nprint(data.decode(\"utf-8\"))\n\nconn.close()","repo_name":"dojo-engineering/dojo-samples","sub_path":"manage-payments/python/send-receipt.py","file_name":"send-receipt.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"32982814701","text":"# import streamlit as st\nimport matplotlib.pyplot as plt\nfrom pymongo import MongoClient\nimport urllib\nfrom Levenshtein import jaro\nimport json\nimport time\nimport pprint\nfrom datetime import datetime, timedelta\n\n# dict_kw = {}\n# dict_kw_topic = {}\nall_topic = [\"Thể thao\", \"Giải trí\", \"Công nghệ\", \"Khác\", \"Xã hội\", \"Kinh doanh\", \"Chính trị\", \"Chiến tranh\"]\ndef get_database():\n username = urllib.parse.quote_plus('sontung2310')\n password = urllib.parse.quote_plus(\"Vmagcut99!\")\n # Provide the mongodb atlas url to connect python to mongodb using pymongo\n # CONNECTION_STRING = \"mongodb+srv://{}:{}@cluster0.gyiy6tr.mongodb.net/?retryWrites=true&w=majority\".format(username,password)\n CONNECTION_STRING = \"mongodb://127.0.0.1:27017/\"\n client = MongoClient(CONNECTION_STRING)\n return client['social_listening_storage']\n\ndef compare_text(tag, lst_keyword):\n if len(lst_keyword) == 0: return False, \"\"\n for KW in lst_keyword:\n # print(\"{} compare to {}, with score {}\".format(tag,KW,jaro(tag,KW)))\n if jaro(tag,KW)>0.8:\n # print(\"Find similarity: \",tag,KW)\n return True, KW\n else:\n continue\n return False, \"\"\n\ndef draw_piechart(database,weight_comment=0.5):\n collection = database['youtube']\n pipeline_comment = [\n {\"$match\":{\"check_flag\": True}},\n ]\n\n cur_list = list(collection.aggregate([\n *pipeline_comment,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline_comment } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline_comment } },\n {\"$group\": {\"_id\": \"$topic\", \"count\": { \"$sum\": { \"$sum\": [ {\"$multiply\":[\"$num_comments_get\",weight_comment]}, 1 ] } }}},\n ]))\n \n return cur_list\n\ndef total_count(database, task):\n if task == 'news_data':\n collection = database[\"news_data\"]\n elif task == 'youtube':\n collection = database[\"youtube\"]\n elif task == 'facebook':\n collection = database[\"facebook\"]\n elif task == 'forum':\n collection = database[\"forum\"]\n \n # count_comment = 0\n \n count_analysis = collection.aggregate( [\n {\"$match\": {\"check_flag\": True}},\n { \"$count\": \"count\" },\n ] )\n \n count_comment = collection.aggregate( [\n {\"$unwind\": \"$comments_infor\"},\n {\"$match\": {\"comments_infor.check_flag\": True}},\n { \"$count\": \"count\" },\n ] )\n return list(count_analysis)[0]['count'], list(count_comment)[0]['count']\n\ndef append_url_task(url_array,item,task):\n url_array.append([item,task])\n \ndef list_KW(database, topic=\"\", time_delta=7):\n list_task = ['youtube', 'news_data', 'facebook', 'forum']\n dict_kw = {}\n for task in list_task:\n if task == 'news_data':\n collection = database[\"news_data\"]\n elif task == 'youtube':\n collection = database[\"youtube\"]\n elif task == 'facebook':\n collection = database[\"facebook\"]\n elif task == 'forum':\n collection = database[\"forum\"]\n \n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n \n if topic in all_topic:\n item_details = collection.find({\"check_flag\": True,\"topic\": topic,\"time_upload\":{\"$gte\":margin_time}}, batch_size=100)\n else:\n item_details = collection.find({\"check_flag\": True,\"time_upload\":{\"$gte\":margin_time}}, batch_size=100)\n for item in item_details:\n for artical_tag_dict in item[\"tags\"]:\n artical_tag = artical_tag_dict['KW']\n if artical_tag.lower()=='none':\n continue\n # Preprocess tag\n # artical_tag = preprocess_tag(artical_tag)\n # if artical_tag not in dict_kw:\n flag_artcile, KW_article = 
compare_text(artical_tag,list(dict_kw.keys()))\n if not flag_artcile:\n dict_kw[artical_tag] = {\"num_call\":1,\"url\":[],\"sentiment\":{\"Positive\":{\"num\":0, \"text\":[]},\"Negative\":{\"num\":0, \"text\":[]},\"Neural\":{\"num\":0, \"text\":[]}}}\n append_url_task(dict_kw[artical_tag]['url'], item[\"_id\"], task=task)\n else:\n dict_kw[KW_article][\"num_call\"] += 1\n if task==\"youtube\" or task==\"forum\": #Title\n dict_kw[KW_article][\"sentiment\"][item[\"title\"][\"sentiment\"]][\"num\"] += 1\n dict_kw[KW_article][\"sentiment\"][item[\"title\"][\"sentiment\"]][\"text\"].append(\n item[\"title\"][\"text\"].strip())\n elif task==\"news_data\" or task==\"facebook\": #Content\n dict_kw[KW_article][\"sentiment\"][item[\"content\"][\"sentiment\"]][\"num\"] += 1\n dict_kw[KW_article][\"sentiment\"][item[\"content\"][\"sentiment\"]][\"text\"].append(\n item[\"content\"][\"text\"].strip())\n append_url_task(dict_kw[KW_article]['url'], item[\"_id\"], task=task)\n for comment in item[\"comments_infor\"]:\n # print(comment)\n try:\n for cmt_tag_dict in comment[\"key_infor\"]:\n cmt_tag = cmt_tag_dict['KW']\n # Preprocess tag\n # cmt_tag = preprocess_tag(cmt_tag)\n\n # If Keywords is NONE -> Only update to KW of article\n if (len(comment[\"key_infor\"])==1) and cmt_tag.lower()==\"none\":\n for artical_tag_dict in item[\"tags\"]:\n artical_tag = artical_tag_dict['KW']\n # artical_tag = preprocess_tag(artical_tag)\n dict_kw[artical_tag][\"num_call\"] += 1\n dict_kw[artical_tag][\"sentiment\"][comment[\"sentiment\"]][\"num\"] += 1\n dict_kw[artical_tag][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n append_url_task(dict_kw[artical_tag]['url'], item[\"_id\"], task=task)\n continue\n if cmt_tag.lower()=='none':\n continue\n # if cmt_tag not in dict_kw:\n flag_cmt, KW_cmt = compare_text(cmt_tag, list(dict_kw.keys()))\n if not flag_cmt:\n dict_kw[cmt_tag] = {\"num_call\":1,\"type\":task,\"sentiment\":{\"Positive\":{\"num\":0, \"text\":[]},\"Negative\":{\"num\":0, \"text\":[]},\"Neural\":{\"num\":0, \"text\":[]}}}\n dict_kw[cmt_tag][\"sentiment\"][comment[\"sentiment\"]][\"num\"] +=1\n dict_kw[cmt_tag][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n append_url_task(dict_kw[cmt_tag]['url'], item[\"_id\"], task=task)\n else:\n dict_kw[KW_cmt][\"num_call\"] += 1\n dict_kw[KW_cmt][\"sentiment\"][comment[\"sentiment\"]][\"num\"] +=1\n dict_kw[KW_cmt][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n append_url_task(dict_kw[KW_cmt]['url'], item[\"_id\"], task=task)\n except:\n continue\n new_dict = dict(sorted(dict_kw.items(), key=lambda item: item[1][\"num_call\"], reverse=True))\n return new_dict\n\ndef list_KW_url(database):\n list_task = ['facebook','youtube', 'news_data', 'forum']\n dict_kw = {}\n for task in list_task:\n if task == 'news_data':\n collection = database[\"news_data\"]\n elif task == 'youtube':\n collection = database[\"youtube\"]\n elif task == 'facebook':\n collection = database[\"facebook\"]\n elif task == 'forum':\n collection = database[\"forum\"]\n \n item_details = collection.find({\"check_flag\": True}, batch_size=100)\n \n for item in item_details:\n for artical_tag_dict in item[\"tags\"]:\n artical_tag = artical_tag_dict['KW']\n if artical_tag.lower()=='none':\n continue\n # Preprocess tag\n # artical_tag = preprocess_tag(artical_tag)\n # if artical_tag not in dict_kw:\n flag_artcile, KW_article = compare_text(artical_tag,list(dict_kw.keys()))\n if not 
flag_artcile:\n # print(item[\"title\"][\"sentiment\"])\n dict_kw[artical_tag] = {task:{\"url\":[],\"sentiment\":{\"Positive\":0,\"Negative\":0,\"Neural\":0}}}\n # print(\"alooo: \",dict_kw)\n # print(\"artical_tag: \",artical_tag)\n print(\"task: \",task)\n dict_kw[artical_tag][task]['url'].append(item['_id'])\n if task==\"youtube\" or task==\"forum\": #Title\n dict_kw[artical_tag][task][\"sentiment\"][item[\"title\"][\"sentiment\"]] += 1\n\n elif task==\"news_data\" or task==\"facebook\": #Content\n dict_kw[artical_tag][task][\"sentiment\"][item[\"content\"][\"sentiment\"]] += 1\n\n else:\n dict_kw[KW_article][task]['url'].append(item['_id'])\n if task==\"youtube\" or task==\"forum\": #Title\n dict_kw[KW_article][task][\"sentiment\"][item[\"title\"][\"sentiment\"]] += 1\n\n elif task==\"news_data\" or task==\"facebook\": #Content\n dict_kw[KW_article][task][\"sentiment\"][item[\"content\"][\"sentiment\"]] += 1\n\n for comment in item[\"comments_infor\"]:\n # print(comment)\n try:\n for cmt_tag_dict in comment[\"key_infor\"]:\n cmt_tag = cmt_tag_dict['KW']\n # Preprocess tag\n # cmt_tag = preprocess_tag(cmt_tag)\n\n # If Keywords is NONE -> Only update to KW of article\n # if (len(comment[\"key_infor\"])==1) and cmt_tag.lower()==\"none\":\n # for artical_tag_dict in item[\"tags\"]:\n # artical_tag = artical_tag_dict['KW']\n # # artical_tag = preprocess_tag(artical_tag)\n # dict_kw[artical_tag][\"num_call\"] += 1\n # dict_kw[artical_tag][\"sentiment\"][comment[\"sentiment\"]][\"num\"] += 1\n # dict_kw[artical_tag][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n # continue\n if cmt_tag.lower()=='none':\n continue\n # if cmt_tag not in dict_kw:\n flag_cmt, KW_cmt = compare_text(cmt_tag, list(dict_kw.keys()))\n if not flag_cmt:\n dict_kw[cmt_tag] = {task:{\"url\":[],\"sentiment\":{\"Positive\":0,\"Negative\":0,\"Neural\":0}}}\n dict_kw[cmt_tag][task]['url'].append(item['_id'])\n dict_kw[cmt_tag][task][\"sentiment\"][comment[\"sentiment\"]] += 1\n # dict_kw[cmt_tag] = {\"num_call\":1,\"type\":task,\"sentiment\":{\"Positive\":{\"num\":0, \"text\":[]},\"Negative\":{\"num\":0, \"text\":[]},\"Neural\":{\"num\":0, \"text\":[]}}}\n # dict_kw[cmt_tag][\"sentiment\"][comment[\"sentiment\"]][\"num\"] +=1\n # dict_kw[cmt_tag][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n else:\n dict_kw[KW_cmt][task]['url'].append(item['_id'])\n dict_kw[KW_cmt][task][\"sentiment\"][comment[\"sentiment\"]] += 1\n # dict_kw[KW_cmt][\"num_call\"] += 1\n # dict_kw[KW_cmt][\"sentiment\"][comment[\"sentiment\"]][\"num\"] +=1\n # dict_kw[KW_cmt][\"sentiment\"][comment[\"sentiment\"]][\"text\"].append(comment[\"comment\"].strip())\n except:\n continue\n # new_dict = dict(sorted(dict_kw.items(), key=lambda item: item[1][\"num_call\"], reverse=True))\n return dict_kw\n\ndef query_channel(database, channel_url):\n collection = database['youtube']\n pipeline = [\n \n {\"$match\": {\"check_flag\": True, \"channel_url\":channel_url}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n {\"$match\": {\"$comments_infor.check_flag\": True}},\n # {\"$unwind\": \"$comments_infor.key_infor\"},\n # {\"$unwind\": \"$tags\"},\n\n ]\n return list(collection.aggregate([\n *pipeline,\n # {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } }, \n # {\"$unionWith\": { \"coll\": \"forum\", \"pipeline\": pipeline } }, \n {\n 
\"$group\": { \n \"_id\" : { \n \"sentiment\": \"$comments_infor.sentiment\",\n \"comment\": \"$comments_infor.comment\",\n },\n # \"total\": { \"$sum\" : 1 },\n }\n },\n #2\n {\n \"$group\" : { \n \"_id\" : {\n \"sentiment\":\"$_id.sentiment\",\n },\n \"count\": {\n \"$sum\": 1\n },\n \"comment\": { \n \"$push\": { \n \"comment\": \"$_id.comment\",\n }\n },\n \n }\n },\n #3\n {\n \"$group\" : { \n \"_id\" : \"$_id.sentiment\",\n \"list_comments\": { \n \"$push\": { \n \"comments\": \"$comment\",\n \"count\":\"$count\"\n }\n }\n }\n },\n {\"$sort\": {\"list_comments.count\": 1}},\n ] \n ))\n\ndef query_keywords(database, topic):\n collection = database['youtube']\n pipeline = [\n \n {\"$match\": {\"check_flag\": True,\"topic\":topic}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n {\"$unwind\": \"$comments_infor.key_infor\"},\n {\"$unwind\": \"$tags\"},\n \n ]\n pprint.pprint(list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } }, \n {\"$unionWith\": { \"coll\": \"forum\", \"pipeline\": pipeline } }, \n {\n \"$group\": { \n \"_id\" : { \n \"KW\": \"$comments_infor.key_infor.KW\",\n \"sentiment_cmt\": \"$comments_infor.sentiment\",\n \"comment\": \"$comments_infor.comment\",\n },\n }\n },\n #2\n {\n \"$group\" : { \n \"_id\" : {\n \"KW\":\"$_id.KW\",\n \"sentiment_cmt\": \"$_id.sentiment_cmt\",\n },\n \"count\": {\n \"$sum\": 1\n },\n \"comment\": { \n \"$push\": { \n \"comment\": \"$_id.comment\",\n }\n }\n }\n },\n #3\n {\n \"$group\" : { \n \"_id\" : \"$_id.KW\",\n \"sentiments\": { \n \"$push\": { \n \"sentiment_cmt\": \"$_id.sentiment_cmt\",\n \"comments\": \"$comment\",\n \"count\":\"$count\"\n }\n }\n }\n },\n\n {\"$sort\": {\"sentiments.count\": 1}},\n ] \n )))\n \ndef search_by_KW(database, keyword):\n collection = database['youtube']\n pipeline = [\n \n {\"$match\": {\"check_flag\": True}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n {\"$unwind\": \"$comments_infor.key_infor\"},\n {\"$unwind\": \"$tags\"},\n \n ]\n return collection.aggregate([\n *pipeline,\n # {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n # {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } }, \n {\"$match\": \n {\n \"comments_infor.key_infor.KW\": keyword\n # \"comments_infor\":\n # { \"$elemMatch\":\n # {\n # \"key_infor.KW\":\"None\",\n # }\n # }\n }\n },\n #1\n {\n \"$group\": { \n \"_id\" : { \n \"KW\": \"$comments_infor.key_infor.KW\",\n \"sentiment_cmt\": \"$comments_infor.sentiment\",\n \"comment\": \"$comments_infor.comment\",\n },\n }\n },\n #2\n {\n \"$group\" : { \n \"_id\" : {\n \"KW\":\"$_id.KW\",\n \"sentiment_cmt\": \"$_id.sentiment_cmt\",\n },\n \"count\": {\n \"$sum\": 1\n },\n \"comment\": { \n \"$push\": { \n \"comment\": \"$_id.comment\",\n }\n }\n }\n },\n #3\n {\n \"$group\" : { \n \"_id\" : \"$_id.KW\",\n \"sentiments\": { \n \"$push\": { \n \"sentiment_cmt\": \"$_id.sentiment_cmt\",\n \"comments\": \"$comment\",\n \"count\":\"$count\"\n }\n }\n }\n },\n\n {\"$sort\": {\"sentiments.count\": 1}},\n ] \n )\n\ndef keyword_report(collection, keyword, time_delta=60):\n \n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n \n pipeline = [\n {\"$match\": {\"check_flag\": True,\"time_upload\":{\"$gte\":margin_time}}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n 
{\"$unwind\": \"$comments_infor\"},\n # {\"$unwind\": \"$comments_infor.key_infor\"},\n # {\"$unwind\": \"$tags\"},\n \n ]\n \n list_ids = list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"forum\", \"pipeline\": pipeline } }, \n {\"$match\": \n {\n \"comments_infor.check_flag\":True,\n \"$or\":[{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$tags\"]}},{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$comments_infor.key_infor\"]}}],\n }\n },\n {\"$group\":\n {\n \"_id\" : \"$_id\",\n }},\n ]))\n pprint.pprint(list_ids)\n # print(len(list_ids))\n \n search_ids = list(collection.aggregate([\n \n {\"$unionWith\": { \"coll\": \"news_data\" } },\n {\"$unionWith\": { \"coll\": \"facebook\" } },\n {\"$unionWith\": { \"coll\": \"forum\"} }, \n # {\"list_ids\" : list(list_ids[i]['_id'] for i in range(len(list_ids)))},\n {\"$match\": {\"check_flag\": True}},\n {\"$match\": \n {\n # \"comments_infor.check_flag\":True,\n \"_id\": {\"$in\": list(list_ids[i]['_id'] for i in range(len(list_ids)))}\n # \"$or\":[{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$tags\"]}},{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$comments_infor.key_infor\"]}}],\n }\n },\n ]))\n # pprint.pprint(search_ids)\n \n \ndef keyword_tree(collection, keyword, time_delta=7):\n # all_items = collection.find({\"check_flag\":True,\"tags\": {\"$elemMatch\":{\"KW\":keyword}}})\n # for item in all_items:\n # print(item)\n \n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n\n pipeline = [\n {\"$match\": {\"check_flag\": True,\"time_upload\":{\"$gte\":margin_time}}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n # {\"$unwind\": \"$comments_infor.key_infor\"},\n # {\"$unwind\": \"$tags\"},\n \n ]\n pprint.pprint(list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"forum\", \"pipeline\": pipeline } }, \n {\"$match\": \n {\n \"comments_infor.check_flag\":True,\n \"$or\":[{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$tags\"]}},{\"$expr\": {\"$in\": [{\"KW\":keyword}, \"$comments_infor.key_infor\"]}}],\n }\n },\n { \"$project\": { \"total_KW\": {\"$concatArrays\": [ \"$tags\", \"$comments_infor.key_infor\" ] } } },\n {\"$unwind\": \"$total_KW\"},\n #1\n {\n \"$group\": { \n \"_id\" : {\n \"query_keyword\": keyword,\n \"KW\": \"$total_KW.KW\",\n },\n \"total\": { \"$sum\" : 1 } \n }, \n },\n {\"$sort\": {\"total\": 1}},\n ])))\ndef group_kw(collection, keyword, time_delta=7):\n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n # collection = database['youtube']\n pipeline = [\n {\"$match\": {\"check_flag\": True, \"time_upload\":{\"$gte\":margin_time}}},\n # {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n {\"$unwind\": \"$comments_infor.key_infor\"},\n {\"$unwind\": \"$tags\"},\n \n ]\n pprint.pprint(list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"forum\", \"pipeline\": pipeline } }, \n {\"$match\": \n {\n \"tags.KW\": keyword\n }\n },\n #1\n {\n \"$group\": { \n \"_id\" : { \n \"KW_article\": \"$tags.KW\",\n 
\"KW_cmt\": \"$comments_infor.key_infor.KW\",\n # \"comment\": \"$comments_infor.comment\",\n },\n \"total\": { \"$sum\" : 1 } \n },\n \n },\n \n #2\n { \"$group\" : { \n \"_id\" : \"$_id.KW_article\", \n \"related_kw\": { \n \"$push\": { \n \"KW\": \"$_id.KW_cmt\",\n \"total\":\"$total\"\n }\n }\n }\n },\n {\"$unwind\": \"$related_kw\"},\n {\"$sort\": {\"related_kw.total\": 1}},\n ])))\n \ndef trend_line(database, time_delta=7):\n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n pipeline = [\n {\"$match\": {\"check_flag\": True, \"time_upload\":{\"$gte\":margin_time}}},\n {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$comments_infor\"},\n {\"$unwind\": \"$comments_infor.key_infor\"},\n ]\n \n collection = database['youtube']\n \n\n list_trend = list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } },\n \n { \n \"$group\": { \n \"_id\" : { \n \"KW\": \"$comments_infor.key_infor.KW\",\n \"day\": { \"$floor\": { \"$divide\": [ \"$time_upload\", 60*60*24 ] } },\n },\n \"total\": { \"$sum\" : 1 } \n }\n },\n { \"$group\" : { \n \"_id\" : \"$_id.KW\", \n \"dates\": { \n \"$push\": { \n \"day\": \"$_id.day\",\n \"total\":\"$total\"\n }\n }\n }\n },\n {\"$sort\": {\"dates.total\": 1}},\n ] \n ))\n return list_trend\n\ndef trend_line_tag(database, time_delta=7):\n margin_time = (datetime.now() - timedelta(days=time_delta)).timestamp()\n \n pipeline = [\n {\"$match\": {\"check_flag\": True, \"time_upload\":{\"$gte\":margin_time}}},\n {\"$match\": {\"time_upload\" : { \"$type\" : \"int\" }}},\n {\"$unwind\": \"$tags\"},\n # {\"$unwind\": \"$comments_infor\"},\n # {\"$unwind\": \"$comments_infor.key_infor\"},\n ]\n \n collection = database['youtube']\n \n\n list_trend_tag = list(collection.aggregate([\n *pipeline,\n {\"$unionWith\": { \"coll\": \"news_data\", \"pipeline\": pipeline } },\n {\"$unionWith\": { \"coll\": \"facebook\", \"pipeline\": pipeline } },\n \n { \n \"$group\": { \n \"_id\" : { \n \"KW\": \"$tags.KW\",\n \"day\": { \"$floor\": { \"$divide\": [ \"$time_upload\", 60*60*24 ] } },\n },\n # \"total\": { \"$sum\" : \"$num_comments_get\" } \n \"total\": { \"$sum\" : 1 } \n }\n },\n { \"$group\" : { \n \"_id\" : \"$_id.KW\", \n \"dates\": { \n \"$push\": { \n \"day\": \"$_id.day\",\n \"total\":\"$total\"\n }\n }\n }\n },\n {\"$sort\": {\"dates.total\": 1}},\n ] \n ))\n return list_trend_tag\n \ndef group_keywords(collection_temp):\n pipeline = [\n {\"$unwind\": \"$dates\"},\n {\n \"$group\" : { \n \"_id\" : {\n \"kw\":\"$kw\",\n \"dates\": \"$dates.day\",\n },\n \"total\": {\n \"$sum\": \"$dates.total\"\n },\n }\n },\n {\n \"$group\" : { \n \"_id\" : \"$_id.kw\",\n \"dates\": {\n \"$push\": { \n \"day\": \"$_id.dates\",\n \"total\":\"$total\"\n }\n }\n }\n }, \n ]\n return list(collection_temp.aggregate(pipeline))\n\ndef get_trend_kw(candidate_kw, temp_db, list_trend_cmt, list_trend_tag):\n temp_db.remove({}) #REMOVE\n \n list_trend = list_trend_cmt + list_trend_tag\n for dict_list_trend in list_trend:\n try:\n if jaro(candidate_kw.lower(),dict_list_trend['_id'].lower())>0.8:\n print(dict_list_trend)\n new_dict = {\"kw\":candidate_kw,\"dates\":dict_list_trend['dates']}\n temp_db.insert_one(new_dict)\n except:\n continue\n temp_kw = group_keywords(temp_db)\n return temp_kw\n # for dict_kw in temp_kw:\n # if dict_kw['_id']==candidate_kw:\n # return dict_kw\n\nif __name__ == \"__main__\":\n social_db = 
get_database()\n list_collection = ['youtube']\n collection_ytb = social_db[\"youtube\"]\n \n # lstcmt = query_channel(social_db,channel_url='hhsb.vn')\n # pprint.pprint(list(lstcmt))\n # group_kw(collection_ytb,keyword='phim')\n # result = search_by_KW(social_db,keyword='vinfast')\n # pprint.pprint(list(result))\n # keyword_tree(collection_ytb,keyword='jav', time_delta=30)\n keyword_report(collection_ytb,'arsenal')\n # pprint.pprint(list(trend_line(social_db))) \n \n # count_analysis, count_cmt = total_count(social_db,task='facebook')\n # print(count_analysis, count_cmt)\n # kw_url = list_KW_url(social_db)\n # with open('result_kw_url.json', 'w', encoding=\"utf8\") as fp:\n # json.dump(kw_url, fp, ensure_ascii=False)\n # temp_db = social_db[\"temp\"]\n # list_trend = trend_line(social_db)\n # list_trend_tag = trend_line_tag(social_db)\n # pprint.pprint(list_trend_tag)\n # # vehinh_time = time.time()\n # search_by_KW(social_db, keyword='Potter')\n # final = get_trend_kw(social_db=social_db, candidate_kw='Việt Nam', temp_db=temp_db)\n # print(final)\n # print(time.time()-vehinh_time)\n # new_dict = {'KW':candidate_kw,''}\n\n \n # print(new_dict)\n # temp_db.insert_one(new_dict)\n # temp_db.insert_one({'kw': 'Tùng Bùi', 'dates': [{'day': 19422.0, 'total': 7}]})\n # temp_db.insert_one({'kw': 'Tùng Bùi', 'dates': [{'day': 19411.0, 'total': 8}]})\n \n \n \n \n # topic_dict = draw_piechart(social_db)\n # # print(topic_dict)\n # # # print(\"Collection: {} Dict: {}\".format(clts,topic_dict))\n # fig1, ax1 = plt.subplots()\n # # print(list([topic_dict[i]['_id'] for i in range(len(topic_dict))]))\n # # print(list([topic_dict[i]['count'] for i in range(len(topic_dict))]))\n # ax1.pie(list([topic_dict[i]['count'] for i in range(len(topic_dict))]), labels=list([topic_dict[i]['_id'] for i in range(len(topic_dict))]), autopct='%1.1f%%',\n # shadow=True, startangle=90)\n # ax1.axis('equal')\n # # plt.legend()\n # # plt.show()\n # plt.savefig('piechart.png')\n # #Keyword\n # print(\"Ve hinh time: \",time.time()-vehinh_time)\n # start_time = time.time()\n # list_KW(database=social_db)\n # # # print(dict_kw)\n \n # # print(\"Establish table: \",time.time()-start_time)\n # # # print(new_dict)\n # with open('result_kw_new.json', 'w', encoding=\"utf8\") as fp:\n # json.dump(new_dict, fp, ensure_ascii=False)\n ","repo_name":"sontung2310/Social-Listening","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":27846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37167478290","text":"import io, sys\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')\nnum=int(input())\narr=[]\nfor i in range(num):\n arr.append(input())\nfor i in range(num):\n print(arr[i])\n if i!=num-1:\n print(\"AMOLED\")","repo_name":"beOk91/code_up","sub_path":"code_up1630.py","file_name":"code_up1630.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"73562142222","text":"from base64 import encode\nfrom json import load\nimport os, string, random\nfrom config import load_config\nimport requests\n\ncfg = load_config()\nfrom PyMultiDictionary import MultiDictionary, DICT_WORDNET\n\ndictionary = MultiDictionary()\nBASE = string.digits + string.ascii_lowercase\n\n\ndef generate_seed() -> int:\n return random.randint(0, 999_999)\n\n\ndef encode_num(num: int) -> str:\n base = BASE\n b_len = len(base)\n digits = []\n while num > 0:\n digits.append(base[num % b_len])\n num //= b_len\n\n return ''.join(reversed(digits))\n\n\ndef decode_num(num: str) -> int:\n base = BASE\n b_len = len(base)\n digits = 0\n for char in num:\n digits *= b_len\n digits += base.index(char)\n\n return digits\n\n\nif __name__ == '__main__':\n word = 'hello'\n req = requests.get(f\"https://api.dictionaryapi.dev/api/v2/entries/en/{word}\")\n print(req.json())\n # synonym = dictionary.synonym(cfg.params.language, word)\n # print(synonym)\n # meaning = dictionary.meaning('en', word, dictionary=DICT_WORDNET)\n # print(type(meaning))\n # for key, value in meaning.items():\n # for entry in value:\n # print(key, entry)\n","repo_name":"jd907/Custom-wordle-game","sub_path":"data/stratch.py","file_name":"stratch.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37251255736","text":"\"\"\"\nWrite a function which accepts a dictionary and an integer as input and returns an ascending sorted list of all the keys whose values \ncontain the input value. Note that the keys of this dictionary are strings while the values of this dictionary are 1 Dimensional lists \nof integers. For example if the input dictionary is:\n\nsample = {\"rabbit\" : [1, 2, 3],\n \"kitten\" : [2, 2, 6],\n \"lioness\": [6, 8, 9]}\n \nand the input integer is 2 then your function should return:\n[ \"kitten\", \"rabbit\",]\n\nIf the input integer is not found then your function should return an empty list.\n\n\"\"\"\n\n# Function Dec.\ndef value_containing_key(dict, num):\n new_list = []\n # Iterating through the keys\n for key in dict:\n # Checking: if the num is in the dict\n if num in dict[key]:\n new_list.append(key)\n new_list.sort()\n return new_list\n","repo_name":"jabhij/UTAx-CSE1309x-PYTHON","sub_path":"Week7/Dictionaries/Ex4.py","file_name":"Ex4.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"11398335545","text":"import mydbconn\nimport mysql.connector\n\ntry:\n name = input(\"Enter database name : \")\n sql = f'create database {name}'\n mydbconn.mycursor.execute(sql)\nexcept mysql.connector.errors.DatabaseError:\n print(f\"DBEXISTERROR : '{name}' database is already exists\")\nelse:\n print(f\"Database created - '{name}'\")","repo_name":"SachinPatil990969/SoftwareEngineering","sub_path":"Python/Database/blogs/createdb.py","file_name":"createdb.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"15401601535","text":"\"\"\"\nFind Common Characters\n\nGiven an array A of strings made only from lowercase letters, return a list of all characters that show up in all strings within the list (including duplicates). For example, if a character occurs 3 times in all strings but not 4 times, you need to include that character three times in the final answer.\n\nYou may return the answer in any order.\n\n \n\nExample 1:\n\nInput: [\"bella\",\"label\",\"roller\"]\nOutput: [\"e\",\"l\",\"l\"]\nExample 2:\n\nInput: [\"cool\",\"lock\",\"cook\"]\nOutput: [\"c\",\"o\"]\n\"\"\"\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n check = set(A[0])\n result = [[l] * min([a.count(l) for a in A]) for l in check]\n return sorted([i for e in result for i in e])\n \nfrom collections import Counter\n\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n my_list, res = [], []\n for v in A:\n my_list.append(Counter(v))\n for key in my_list[0]:\n times = my_list[0][key]\n for e in my_list[1:]:\n times = min(times, e[key])\n for _ in range(times):\n res.append(key)\n return res\n \nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n output = []\n for letter in set(min(A)):\n count = 100\n for word in A:\n count = min(count, word.count(letter))\n while count > 0:\n count -= 1\n output.append(letter)\n return output\n\n\nclass Solution:\n def commonChars(self, A: List[str]) -> List[str]:\n if not A:\n return []\n\n letters = set()\n for word in A:\n for letter in word:\n letters.add(letter)\n repeats = []\n for l in letters:\n occurance = min([word.count(l) for word in A])\n repeats.extend([l]*occurance)\n return repeats\n","repo_name":"Bennyhwanggggg/Algorithm-and-Data-Structures-and-Coding-Challenges","sub_path":"Challenges/findCommonCharacters.py","file_name":"findCommonCharacters.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"15274749202","text":"import json\nfrom tabnanny import check\nfrom tkinter import messagebox\nimport danemunicipio, manejosedes\nfrom msilib.schema import ComboBox\nfrom tkinter import *\nfrom tkinter import ttk\nimport main, actualizarTabla\n\nclass frmMain(Frame):\n def __init__(self, master, *args):\n super().__init__(master, *args)\n self.dbSedes = manejosedes.actualizarSedes()\n self.widgets()\n \n def sedes(self):\n self.informacionsedes = Frame(self.master, bg='blue')\n self.informacionsedes.place(relx=0.915, rely=0.005, relwidth=0.08, relheight=0.99)\n \n self.tablasedes = Frame(self.master, bg='orange')\n self.tablasedes.place(relx=0.005, rely=0.005, relwidth=0.91, relheight=0.99)\n \n self.controlTabla = Frame(self.tablasedes, bg='yellow')\n self.controlTabla.place(relx=0.005, rely=0.005, relwidth=0.99, relheight=0.05)\n \n self.btnIngresarSede = Button(self.informacionsedes, text='Ingresar sede', width=10, height=2, command=self.nuevasede)\n self.btnIngresarSede.pack(side='top', padx=5, pady=15)\n \n self.btnActualizarSede = Button(self.informacionsedes, text='Actualizar Sede', width=10, height=2, command=self.actualizarsede)\n self.btnActualizarSede.pack(side='top', padx=5, pady=15)\n \n #Se Agrega contenido en el FRAME controlTabla, aqui se colocara un boton de buscar (ya sea por municipio o nombre de la sede o codigo dane).\n self.btnClearTreeView = Button(self.controlTabla, text=\"Clear Tree View\", command=self.borrarTabla)\n self.btnClearTreeView.pack()\n self.btnClearTreeView = Button(self.controlTabla, text=\"Clear Tree View\", command=actualizarTabla.mostrarTabla)\n self.btnClearTreeView.pack()\n \n #Se crea un estilo que se aplicara al treeview\n self.styletreeview = ttk.Style()\n self.styletreeview.configure(\"mystyle.treeview\",highlightthickness=0, bd=0, font=('Calibri', 11)) # Modify the font of the body)\n self.styletreeview.configure(\"mystyle.treeview.Heading\",font=('Calibri', 13,'bold')) # Modify the font of the headings\n self.styletreeview.layout(\"mystyle.treeview\", [('self.styletreeview.self.treeviewSedesarea', {'sticky': 'nswe'})]) # Remove the borders\n \n #Se crea el treeview que mostrara los datos de la base de datos de las sedes\n self.treeviewSedes = ttk.Treeview(self.tablasedes,style=\"mystyle.treeview\",columns=(\"col1\",\"col2\",\"col3\",\"col4\",\"col5\",\"col6\",\"col7\",\"col8\",\"col9\",\"col10\"))\n #Se crean las columnas que contendran los datos\n self.treeviewSedes.column(\"#0\",width=5)\n self.treeviewSedes.column(\"col1\",width=60)\n self.treeviewSedes.column(\"col2\",width=140)\n self.treeviewSedes.column(\"col3\",width=100)\n self.treeviewSedes.column(\"col4\",width=140)\n self.treeviewSedes.column(\"col5\",width=80)\n self.treeviewSedes.column(\"col6\",width=80)\n self.treeviewSedes.column(\"col7\",width=100)\n self.treeviewSedes.column(\"col8\",width=25,anchor=CENTER)\n self.treeviewSedes.column(\"col9\",width=25,anchor=CENTER)\n self.treeviewSedes.column(\"col10\",width=25,anchor=CENTER)\n #Se colocan nombre a las columnas\n self.treeviewSedes.heading(\"#0\", text=\" \")\n self.treeviewSedes.heading(\"col1\", text=\"Dane Sede\")\n self.treeviewSedes.heading(\"col2\", text=\"Nombre Sede\")\n self.treeviewSedes.heading(\"col3\", text=\"Dane Institucion\")\n self.treeviewSedes.heading(\"col4\", text=\"Nombre Institucion\")\n self.treeviewSedes.heading(\"col5\", text=\"Departamento\")\n self.treeviewSedes.heading(\"col6\", text=\"Subregion\")\n self.treeviewSedes.heading(\"col7\", text=\"Municipio\")\n 
self.treeviewSedes.heading(\"col8\", text=\"Dane Municipio\")\n self.treeviewSedes.heading(\"col9\", text=\"Matricula\")\n self.treeviewSedes.heading(\"col10\", text=\"Activo\")\n #Configuramdo colores de acuerdo si es par o impar\n \"\"\"self.treeviewSedes.tag_configure('odd', background='Gray91')\n self.treeviewSedes.tag_configure('even', background='gray')\"\"\"\n #Se inserta el treeview al frame.\n self.treeviewSedes.place(relx=0.005, rely=0.06, relwidth=0.99, relheight=0.93)\n #Se realiza un iterante para mosntrar la base de datos.\n i = 0\n for sede in self.dbSedes:\n i += 1\n num = str(i)\n my_iid = num\n \"\"\"if check(i):\n color = 'even'\n else:\n color = 'odd'\"\"\"\n self.treeviewSedes.insert(\"\",END,text=my_iid,values=sede,iid=my_iid)\n scrollBarTreeView = ttk.Scrollbar(self.treeviewSedes, orient='vertical', command=self.treeviewSedes.yview)\n scrollBarTreeView.pack(side='right', fill='y')\n self.treeviewSedes.configure(yscrollcommand=scrollBarTreeView.set)\n self.treeviewSedes.bind(\"\", self.OnDoubleClick)\n \n def borrarTabla(self):\n self.treeviewSedes.delete(*self.treeviewSedes.get_children())\n \n def OnDoubleClick(self, event):\n item = self.treeviewSedes.selection()[0]\n messagebox.showinfo(\"Información\", self.treeviewSedes.item(item, \"text\"))\n \n def listmunicipios(self,event):\n if self.subregion.get()==\"BAJO CAUCA\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Caceres\", \"Caucasia\", \"El Bagre\", \"Nechi\", \"Taraza\", \"Zaragoza\"])\n if self.subregion.get()==\"MAGDALENA MEDIO\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Caracoli\", \"Maceo\", \"Puerto Berrio\", \"Puerto Nare\", \"Puerto Triunfo\", \"Yondo\"])\n if self.subregion.get()==\"NORDESTE\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Amalfi\", \"Anori\", \"Cisneros\", \"Remedios\", \"San Roque\", \"Santo Domingo\", \"Segovia\",\n \"Vegachi\", \"Yali\", \"Yolombo\"])\n if self.subregion.get()==\"NORTE\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Angostura\", \"Belmira\", \"Briceno\", \"Campamento\", \"Carolina\", \"Don Matias\", \"Entrerrios\",\n \"Gomez Plata\", \"Guadalupe\", \"Ituango\", \"San Andres\", \"San Jose De La Montana\", \"San Pedro\",\n \"Santa Rosa De Osos\", \"Toledo\", \"Valdivia\", \"Yarumal\"])\n if self.subregion.get()==\"OCCIDENTE\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Abriaqui\", \"Anza\", \"Armenia\", \"Buritica\", \"Caicedo\", \"Canasgordas\", \"Dabeiba\", \"Ebejico\", \"Frontino\",\n \"Giraldo\", \"Heliconia\", \"Liborina\", \"Olaya\", \"Peque\", \"Sabanalarga\", \"San Jeronimo\",\n \"Santafe De Antioquia\", \"Sopetran\", \"Uramita\"])\n if self.subregion.get()==\"ORIENTE\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Abejorral\", \"Alejandria\", \"Argelia\", \" Carmen De Viboral\", \"Cocorna\", \"Concepcion\",\n \"Granada\", \"Guarne\", \"Guatape\", \"La Ceja\", \"La Union\", \"Marinilla\",\n \"Narino\", \"Penol\", \"Retiro\", \"Rionegro\", \"San Carlos\", \"San Francisco\",\n \"San Luis\", \"San Rafael\", \"San Vicente\", \"Santuario\", \"Sonson\"])\n if self.subregion.get()==\"SUROESTE\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Amaga\", \"Andes\", \"Angelopolis\", \"Betania\", \"Betulia\", \"Caramanta\",\n \"Ciudad Bolivar\", \"Concordia\", \"Fredonia\", \"Hispania\", \"Jardin\", \"Jerico\",\n \"La Pintada\", \"Montebello\", \"Pueblorrico\", \"Salgar\", \"Santa Barbara\", 
\"Tamesis\",\n \"Tarso\", \"Titiribi\", \"Urrao\", \"Valparaiso\", \"Venecia\"])\n if self.subregion.get()==\"URABA\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Apartado\", \"Arboletes\", \"Carepa\", \"Chigorodo\", \"Murindo\", \"Mutata\",\n \"Necocli\", \"San Juan De Uraba\", \"San Pedro De Uraba\", \"Turbo\", \"Vigia Del Fuerte\"])\n if self.subregion.get()==\"VALLE DEL ABURRA\":\n self.cbxMunicipios.set(\" \")\n self.cbxMunicipios.config(values=[\"Barbosa\", \"Bello\", \"Caldas\", \"Copacabana\", \"Envigado\", \"Girardota\",\n \"Itagui\", \"La Estrella\", \"Medellin\", \"Sabaneta\"])\n if self.subregion.get()==\" \":\n self.cbxMunicipios.set(\"\")\n self.cbxMunicipios.config(values=\" \")\n \n def codigoDaneMunicipio(self,event):\n global numMunicipio\n municipio = self.municipio.get()\n numMunicipio = danemunicipio.denesmunicipio(municipio)\n self.daneMunicipio.set(numMunicipio)\n \n def nuevasede(self):\n self.btnIngresarSede['state'] = DISABLED\n self.btnActualizarSede['state'] = DISABLED\n self.frmSedes = Toplevel()\n self.frmSedes.geometry(\"{}x{}+{}+{}\".format(700, 300, 400, 200))\n self.frmSedes.transient(self.master)\n self.frmSedes.title('Nueva Sede')\n #self.frmSedes.overrideredirect(1)\n self.frmSedes.resizable(0,0)\n self.frmSedes.focus()\n self.frmnuevasede = Frame(self.frmSedes)\n self.frmnuevasede.pack(fill=BOTH, expand=True)\n \n self.codigoDaneSede = StringVar()\n Label(self.frmnuevasede, text=\"Codigo Dane Sede:\", font=self.letraTipo).place(relx=0.025, rely=0.05)\n Entry(self.frmnuevasede, textvariable=self.codigoDaneSede, justify='left', width=15, font= self.letraTipo).place(relx=0.34, rely=0.05)\n \n self.nombreSede = StringVar()\n Label(self.frmnuevasede, text=\"Nombre Sede:\", font=self.letraTipo).place(relx=0.025, rely=0.15)\n Entry(self.frmnuevasede, textvariable=self.nombreSede, justify='left', width=50, font= self.letraTipo).place(relx=0.34, rely=0.15)\n \n self.codigoDaneInstitucion = StringVar()\n Label(self.frmnuevasede, text=\"Codigo Dane Institucion:\", font=self.letraTipo).place(relx=0.025, rely=0.25)\n Entry(self.frmnuevasede, textvariable=self.codigoDaneInstitucion, justify='left', width=15, font= self.letraTipo).place(relx=0.34, rely=0.25)\n \n self.nombreInstitucion = StringVar()\n Label(self.frmnuevasede, text=\"Nombre Institucion:\", font=self.letraTipo).place(relx=0.025, rely=0.35)\n Entry(self.frmnuevasede, textvariable=self.nombreInstitucion, justify='left', width=50, font= self.letraTipo).place(relx=0.34, rely=0.35)\n \n self.departamento = StringVar()\n Label(self.frmnuevasede, text=\"Departamento:\", font=self.letraTipo).place(relx=0.025, rely=0.45)\n ttk.Combobox(self.frmnuevasede, textvariable=self.departamento, value=\"Antioquia\", justify='left', width=25, font= self.letraTipo).place(relx=0.34, rely=0.45)\n\n self.subregion = StringVar()\n Label(self.frmnuevasede, text=\"Subregion:\", font=self.letraTipo).place(relx=0.025, rely=0.55)\n self.cbxSubregion = ttk.Combobox(self.frmnuevasede, textvariable=self.subregion, value=[\"BAJO CAUCA\", \"MAGDALENA MEDIO\", \"NORDESTE\", \"NORTE\", \"OCCIDENTE\", \"ORIENTE\", \"SUROESTE\", \"URABA\", \"VALLE DEL ABURRA\"], justify='left', width=25, font= self.letraTipo)\n self.cbxSubregion.place(relx=0.34, rely=0.55)\n self.cbxSubregion.bind(\"<>\", self.listmunicipios)\n \n self.municipio = StringVar()\n Label(self.frmnuevasede, text=\"Municipio:\", font=self.letraTipo).place(relx=0.025, rely=0.65)\n self.cbxMunicipios = ttk.Combobox(self.frmnuevasede, 
self.cbxMunicipios = ttk.Combobox(self.frmnuevasede, textvariable=self.municipio, value=self.listmunicipios, justify='left', width=25, font= self.letraTipo)\n self.cbxMunicipios.place(relx=0.34, rely=0.65)\n self.cbxMunicipios.bind(\"<<ComboboxSelected>>\", self.codigoDaneMunicipio)\n \n self.daneMunicipio = StringVar()\n self.daneMunicipio.set(\" \")\n Label(self.frmnuevasede, textvariable=self.daneMunicipio, font=self.letraTipo, width=15).place(relx=0.7, rely=0.65)\n \n self.matricula = StringVar()\n Label(self.frmnuevasede, text=\"Cantidad de Estudiantes:\", font=self.letraTipo).place(relx=0.025, rely=0.75)\n Entry(self.frmnuevasede, textvariable=self.matricula, justify='left', width=10, font= self.letraTipo).place(relx=0.34, rely=0.75)\n \n self.estadoSede = StringVar()\n Label(self.frmnuevasede, text=\"Estado:\", font=self.letraTipo).place(relx=0.50, rely=0.75)\n self.cbxestado = ttk.Combobox(self.frmnuevasede, textvariable=self.estadoSede, value=[\"Activo\",\"Inactivo\"], justify='left', width=10, font= self.letraTipo)\n self.cbxestado.place(relx=0.60, rely=0.75)\n \n self.btnGuardar = Button(self.frmnuevasede, text='Guardar', command=self.guardarnuevasede, font=self.letraTipo)\n self.btnGuardar.place(relx=0.5, rely=0.85)\n \n self.btnSalir = Button(self.frmnuevasede, text='Cerrar', command=self.salirnuevasede, font=self.letraTipo)\n self.btnSalir.place(relx=0.75, rely=0.85)\n \n def actualizarsede(self):\n self.btnIngresarSede['state'] = DISABLED\n self.btnActualizarSede['state'] = DISABLED\n self.frmActualizarSedes = Toplevel()\n self.frmActualizarSedes.geometry(\"{}x{}+{}+{}\".format(700, 300, 400, 200))\n self.frmActualizarSedes.transient(self.master)\n self.frmActualizarSedes.title('Actualizar Sede')\n self.frmActualizarSedes.resizable(0,0)\n self.frmbuscarsede = Frame(self.frmActualizarSedes, bg='blue')\n self.frmbuscarsede.place(relx=0.005, rely=0.005, relwidth=0.99, relheight=0.125)\n self.frmactualizarsede = Frame(self.frmActualizarSedes, bg='orange')\n self.frmactualizarsede.place(relx=0.005, rely=0.15, relwidth=0.99, relheight=0.825)\n \n self.buscarDaneSede = StringVar()\n Label(self.frmbuscarsede, text=\"Codigo Dane Sede:\", font=self.letraTipo).place(relx=0.025, rely=0.05)\n Entry(self.frmbuscarsede, textvariable=self.buscarDaneSede, justify='left', width=15, font= self.letraTipo).place(relx=0.25, rely=0.05)\n \n self.btnBuscar = Button(self.frmbuscarsede, text='Buscar', command=self.buscarsede, font=self.letraTipo)\n self.btnBuscar.place(relx=0.5, rely=0.05)\n \n self.nombreSede = StringVar()\n Label(self.frmactualizarsede, text=\"Nombre Sede:\", font=self.letraTipo).place(relx=0.025, rely=0.05)\n #Entry(self.frmnuevasede, textvariable=self.nombreSede, justify='left', width=50, font= self.letraTipo).place(relx=0.34, rely=0.15)\n \n self.codigoDaneInstitucion = StringVar()\n Label(self.frmactualizarsede, text=\"Codigo Dane Institucion:\", font=self.letraTipo).place(relx=0.025, rely=0.15)\n #Entry(self.frmnuevasede, textvariable=self.codigoDaneInstitucion, justify='left', width=15, font= self.letraTipo).place(relx=0.34, rely=0.25)\n \n self.nombreInstitucion = StringVar()\n Label(self.frmactualizarsede, text=\"Nombre Institucion:\", font=self.letraTipo).place(relx=0.025, rely=0.25)\n #Entry(self.frmnuevasede, textvariable=self.nombreInstitucion, justify='left', width=50, font= self.letraTipo).place(relx=0.34, rely=0.35)\n \n self.departamento = StringVar()\n Label(self.frmactualizarsede, text=\"Departamento:\", font=self.letraTipo).place(relx=0.025, rely=0.35)\n #ttk.Combobox(self.frmnuevasede, textvariable=self.departamento, 
value=\"Antioquia\", justify='left', width=25, font= self.letraTipo).place(relx=0.34, rely=0.45)\n\n self.subregion = StringVar()\n Label(self.frmactualizarsede, text=\"Subregion:\", font=self.letraTipo).place(relx=0.025, rely=0.45)\n #self.cbxSubregion = ttk.Combobox(self.frmnuevasede, textvariable=self.subregion, value=[\"BAJO CAUCA\", \"MAGDALENA MEDIO\", \"NORDESTE\", \"NORTE\", \"OCCIDENTE\", \"ORIENTE\", \"SUROESTE\", \"URABA\", \"VALLE DEL ABURRA\"], justify='left', width=25, font= self.letraTipo)\n #self.cbxSubregion.place(relx=0.34, rely=0.55)\n #self.cbxSubregion.bind(\"<>\", self.listmunicipios)\n \n self.municipio = StringVar()\n Label(self.frmactualizarsede, text=\"Municipio:\", font=self.letraTipo).place(relx=0.025, rely=0.5525)\n #self.cbxMunicipios = ttk.Combobox(self.frmnuevasede, textvariable=self.municipio, value=self.listmunicipios, justify='left', width=25, font= self.letraTipo)\n #self.cbxMunicipios.place(relx=0.34, rely=0.65)\n #self.cbxMunicipios.bind(\"<>\", self.codigoDaneMunicipio)\n \n self.daneMunicipio = StringVar()\n self.daneMunicipio.set(\" \")\n Label(self.frmactualizarsede, textvariable=self.daneMunicipio, font=self.letraTipo, width=15).place(relx=0.7, rely=0.5525)\n \n self.matricula = StringVar()\n Label(self.frmactualizarsede, text=\"Cantidad de Estudiantes:\", font=self.letraTipo).place(relx=0.025, rely=0.6525)\n #Entry(self.frmnuevasede, textvariable=self.matricula, justify='left', width=10, font= self.letraTipo).place(relx=0.34, rely=0.75)\n \n self.estadoSede = StringVar()\n Label(self.frmactualizarsede, text=\"Estado:\", font=self.letraTipo).place(relx=0.50, rely=0.6525)\n #self.cbxestado = ttk.Combobox(self.frmnuevasede, textvariable=self.estadoSede, value=[\"Activo\",\"Inactivo\"], justify='left', width=10, font= self.letraTipo)\n #self.cbxestado.place(relx=0.60, rely=0.75)\n \n self.btnSalir = Button(self.frmactualizarsede, text='Cerrar', command=self.saliractualizarsede, font=self.letraTipo)\n self.btnSalir.place(relx=0.75, rely=0.85)\n\n def guardarnuevasede(self):\n global mtzsede\n self.btnGuardar['state'] = DISABLED\n mtzsede = [self.codigoDaneSede.get(),self.nombreSede.get(),self.codigoDaneInstitucion.get(),\n self.nombreInstitucion.get(),self.departamento.get(),self.subregion.get(),\n self.municipio.get(),numMunicipio,self.matricula.get(),self.estadoSede.get()]\n existe = manejosedes.validarSede(mtzsede)\n if existe==False:\n ingresado = manejosedes.guardarSede(mtzsede)\n if ingresado==True:\n self.frmSedes.destroy()\n messagebox.showinfo(\"Información\",\"Sede Agregada con Exito\")\n self.dbSedes = manejosedes.actualizarSedes()\n self.sedes()\n else:\n messagebox.showinfo(\"Información\",\"No se completo la actualizacion\")\n self.sedeexiste()\n else:\n self.frmSedes.destroy()\n self.btnIngresarSede['state'] = NORMAL\n self.btnActualizarSede['state'] = NORMAL\n messagebox.showinfo(\"Informacion\",\"Sede ya se encuentra registrada\")\n \n def buscarsede(self):\n global buscarDaneSede\n buscarDaneSede = self.buscarDaneSede.get()\n danesede = manejosedes.buscarSede(buscarDaneSede)\n \n def actualizarSede(self):\n pass\n \n def serviciosedes(self):\n self.frmServicio = Toplevel()\n self.frmServicio.geometry('600x100')\n \n def interventores(self):\n pass\n \n def salir(self):\n self.master.destroy()\n self.master.quit()\n \n def salirnuevasede(self):\n self.frmSedes.destroy()\n self.btnIngresarSede['state'] = NORMAL\n self.btnActualizarSede['state'] = NORMAL\n \n def saliractualizarsede(self):\n self.frmActualizarSedes.destroy()\n 
self.btnIngresarSede['state'] = NORMAL\n self.btnActualizarSede['state'] = NORMAL\n \n def ordenVentanas(self):\n if self.estadoVentana == 1:\n if self.nextVentana==2:\n self.frmInformacionBasica.destroy()\n self.estadoVentana = 2\n self.sedes()\n if self.nextVentana == 3:\n #The frames that must be destroyed before mounting the next window go here.\n self.estadoVentana = 3\n self.serviciosedes()\n \n def menusedes(self):\n self.nextVentana = 2\n self.ordenVentanas()\n \n def menuservicio(self):\n self.nextVentana = 3\n self.ordenVentanas()\n \n def widgets(self):\n self.estadoVentana = 1\n self.letraTipo = ('Arial', 12)\n (numSedes,mtzSubRegion) = manejosedes.cantidadSedes()\n self.cantidad = StringVar()\n self.cantidad.set(numSedes)\n \n #Create the frame for basic information\n self.frmInformacionBasica = Frame(self.master, bg='grey')\n self.frmInformacionBasica.place(relx=0, rely=0, relwidth=1, relheight=1)\n self.frmInformacionSubRegion = Frame(self.frmInformacionBasica, bg='grey')\n self.frmInformacionSubRegion.place(relx=0.005, rely=0.15)\n #Create the menu bar for the different options\n self.menubar = Menu(self.master)\n self.master.config(menu=self.menubar)\n self.sedesmenu = Menu(self.menubar, tearoff=0)\n self.sedesmenu.add_command(label=\"Sedes\", command=self.menusedes, font=self.letraTipo)\n self.sedesmenu.add_separator()\n self.sedesmenu.add_command(label=\"Salir\", command=self.salir, font=self.letraTipo)\n self.serviciosmenu = Menu(self.menubar, tearoff=0)\n self.serviciosmenu.add_command(label=\"Servicios\", font=self.letraTipo, command=self.menuservicio)\n self.visitasmenu = Menu(self.menubar, tearoff=0)\n self.visitasmenu.add_command(label=\"Interventores\", font=self.letraTipo, command=self.interventores)\n self.menubar.add_cascade(label='Sedes', menu=self.sedesmenu, font=self.letraTipo)\n self.menubar.add_cascade(label='Servicios', menu=self.serviciosmenu, font=self.letraTipo)\n self.menubar.add_cascade(label='Visitas', menu=self.visitasmenu, font=self.letraTipo)\n #Information shown on the initial screen\n Label(self.frmInformacionBasica, text='Cantidad de Sedes', font=('Castellar', 18), bg='grey').place(relx=0.005, rely=0.005)\n Label(self.frmInformacionBasica, textvariable=self.cantidad, font=('Castellar', 24), bg='grey').place(relx=0.09, rely=0.05)\n Label(self.frmInformacionSubRegion, text=\"Cantidad de Sedes Por Subregion\", font=('Castellar', 14), bg='grey').grid(column=0, row=0, columnspan=2)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[0][0], font=self.letraTipo, bg='grey').grid(column=0, row=1, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[0][1], font=self.letraTipo, bg='grey').grid(column=1, row=1)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[1][0], font=self.letraTipo, bg='grey').grid(column=0, row=2, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[1][1], font=self.letraTipo, bg='grey').grid(column=1, row=2)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[2][0], font=self.letraTipo, bg='grey').grid(column=0, row=3, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[2][1], font=self.letraTipo, bg='grey').grid(column=1, row=3)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[3][0], font=self.letraTipo, bg='grey').grid(column=0, row=4, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[3][1], font=self.letraTipo, bg='grey').grid(column=1, row=4)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[4][0], 
font=self.letraTipo, bg='grey').grid(column=0, row=5, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[4][1], font=self.letraTipo, bg='grey').grid(column=1, row=5)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[5][0], font=self.letraTipo, bg='grey').grid(column=0, row=6, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[5][1], font=self.letraTipo, bg='grey').grid(column=1, row=6)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[6][0], font=self.letraTipo, bg='grey').grid(column=0, row=7, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[6][1], font=self.letraTipo, bg='grey').grid(column=1, row=7)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[7][0], font=self.letraTipo, bg='grey').grid(column=0, row=8, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[7][1], font=self.letraTipo, bg='grey').grid(column=1, row=8)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[8][0], font=self.letraTipo, bg='grey').grid(column=0, row=9, sticky=W)\n Label(self.frmInformacionSubRegion, text=mtzSubRegion[8][1], font=self.letraTipo, bg='grey').grid(column=1, row=9)","repo_name":"amp1210/GestiondeProyectos","sub_path":"frmPrincipal.py","file_name":"frmPrincipal.py","file_ext":"py","file_size_in_byte":24791,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37649335242","text":"edge = input('输入平行四边形\\n边: ')\r\nheight = input('高: ')\r\n\r\nedge = int(edge)\r\nheight = int(height)\r\n\r\nfor i in range(0, height):\r\n for j in range(height - 1 - i, 0, -1):\r\n print(' ', end='')\r\n\r\n for q in range(0, edge + height - 1):\r\n print('* ', end='')\r\n print('')\r\n","repo_name":"apreight/PythonNotes-Demo","sub_path":"平行四边形.py","file_name":"平行四边形.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"9403466367","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport click\nimport spacy\nfrom pathlib import Path\nfrom wasabi import Printer\nfrom io import open\nfrom .dframcy import DframCy\nfrom .utils import get_default_columns\nfrom .trainer import DframeTrainer, DframeEvaluator, DframeTrainClassifier\n\nmessenger = Printer()\nDEFAULT_COLUMNS = \",\".join(get_default_columns())\n\n\n@click.group()\ndef main():\n pass\n\n\n@main.command()\n@click.option(\n \"--input_file\", \"-i\", required=True, type=Path, help=\"Input text file path.\"\n)\n@click.option(\n \"--output_file\", \"-o\", required=True, type=Path, help=\"Output file path/name\"\n)\n@click.option(\n \"--convert_type\",\n \"-t\",\n default=\"csv\",\n show_default=True,\n type=str,\n help=\"Output file format (json/csv)\",\n)\n@click.option(\n \"--language_model\",\n \"-l\",\n default=\"en_core_web_sm\",\n show_default=True,\n type=str,\n help=\"Language model \" \"to be used.\",\n)\n@click.option(\n \"--columns\",\n \"-c\",\n default=DEFAULT_COLUMNS,\n show_default=True,\n type=str,\n help=\"Annotations to be \" \"included in dataframe.\",\n)\n@click.option(\n \"--separate_entity_frame\",\n \"-s\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Save separate \" \"entity dataframe.\",\n)\ndef convert(\n input_file,\n output_file,\n convert_type,\n language_model,\n columns,\n separate_entity_frame,\n):\n if output_file.is_dir():\n output_file = output_file.joinpath(input_file.stem + \".\" + str(convert_type))\n if input_file.exists():\n with open(input_file, \"r\") as infile:\n text = infile.read().strip(\"\\n\").strip()\n nlp = spacy.load(language_model)\n dframcy = DframCy(nlp)\n doc = dframcy.nlp(text)\n if columns == DEFAULT_COLUMNS:\n annotation_dataframe = dframcy.to_dataframe(\n doc, separate_entity_dframe=separate_entity_frame\n )\n else:\n annotation_dataframe = dframcy.to_dataframe(\n doc,\n columns=columns.split(\", \"),\n separate_entity_dframe=separate_entity_frame,\n )\n if separate_entity_frame:\n token_annotation_dataframe, entity_dataframe = annotation_dataframe\n else:\n token_annotation_dataframe = annotation_dataframe\n entity_dataframe = None\n\n if convert_type == \"csv\":\n token_annotation_dataframe.to_csv(output_file)\n if separate_entity_frame:\n entity_output_file = Path(\n str(output_file).strip(\".csv\") + \"_entity.csv\"\n )\n entity_dataframe.to_csv(entity_output_file)\n elif convert_type == \"json\":\n annotation_json = token_annotation_dataframe.to_json(orient=\"columns\")\n with open(output_file, \"w\") as outfile:\n json.dump(annotation_json, outfile)\n if separate_entity_frame:\n entity_output_file = Path(\n str(output_file).strip(\".json\") + \"_entity.json\"\n )\n with open(entity_output_file, \"w\") as ent_outfile:\n json.dump(entity_dataframe, ent_outfile)\n else:\n messenger.fail(\n \"Unknown output file format '{}'\".format(convert_type), exits=-1\n )\n else:\n messenger.fail(\"input path {} does not exist\".format(input_file), exits=-1)\n\n\n@main.command()\n@click.option(\"--lang\", \"-l\", required=True, type=str, help=\"Model language.\")\n@click.option(\n \"--output_path\",\n \"-o\",\n required=True,\n type=str,\n help=\"Output directory to store mode in.\",\n)\n@click.option(\n \"--train_path\",\n \"-t\",\n required=True,\n type=str,\n help=\"Path of CSV containing training data.\",\n)\n@click.option(\n \"--dev_path\",\n \"-d\",\n required=True,\n type=str,\n help=\"Path to CSV containing validation 
data\",\n)\n@click.option(\n \"--debug_data_first\",\n \"-debug\",\n default=True,\n show_default=True,\n type=bool,\n help=\"Run spaCy's training \" \"data debugger before \" \"training\",\n)\n@click.option(\n \"--raw_text\",\n \"-rt\",\n default=None,\n show_default=True,\n type=str,\n help=\"Path to jsonl file with unlabelled \" \"text documents.\",\n)\n@click.option(\n \"--base_model\",\n \"-b\",\n default=None,\n show_default=True,\n type=str,\n help=\"Name of model to update\",\n)\n@click.option(\n \"--pipeline\",\n \"-p\",\n default=\"tagger,parser,ner\",\n show_default=True,\n type=str,\n help=\"Comma-separated \" \"names of pipeline \" \"components\",\n)\n@click.option(\n \"--vectors\",\n \"-v\",\n default=None,\n show_default=True,\n type=str,\n help=\"Model to load vectors from\",\n)\n@click.option(\n \"--n_iter\",\n \"-n\",\n default=30,\n show_default=True,\n type=int,\n help=\"Number of iterations\",\n)\n@click.option(\n \"--n_early_stopping\",\n \"-ne\",\n default=None,\n show_default=True,\n type=int,\n help=\"Maximum number of \" \"training epochs without \" \"dev accuracy improvement\",\n)\n@click.option(\n \"--n_examples\",\n \"-ns\",\n default=0,\n show_default=True,\n type=int,\n help=\"Number of examples\",\n)\n@click.option(\n \"--use_gpu\", \"-g\", default=-1, show_default=True, type=int, help=\"Use GPU\"\n)\n@click.option(\n \"--version\",\n \"-v\",\n default=\"0.0.0\",\n show_default=True,\n type=str,\n help=\"Model version\",\n)\n@click.option(\n \"--meta_path\",\n \"-m\",\n default=None,\n show_default=True,\n type=Path,\n help=\"Optional path to meta.json to \" \"use as base.\",\n)\n@click.option(\n \"--init_tok2vec\",\n \"-t2v\",\n default=None,\n show_default=True,\n type=str,\n help=\"Path to pretrained weights \" \"for the token-to-vector \" \"parts of the models\",\n)\n@click.option(\n \"--parser_multitasks\",\n \"-pm\",\n default=\"\",\n show_default=True,\n type=str,\n help=\"Side objectives for parser \" \"CNN, e.g. 'dep' or 'dep,\" \"tag'\",\n)\n@click.option(\n \"--entity_multitasks\",\n \"-em\",\n default=\"\",\n show_default=True,\n type=str,\n help=\"Side objectives for NER \" \"CNN, e.g. 'dep' or 'dep,\" \"tag'\",\n)\n@click.option(\n \"--noise_level\",\n \"-n\",\n default=0.0,\n show_default=True,\n type=float,\n help=\"Amount of corruption for data \" \"augmentation\",\n)\n@click.option(\n \"--orth_variant_level\",\n \"-vl\",\n default=0.0,\n show_default=True,\n type=float,\n help=\"Amount of orthography \" \"variation for data \" \"augmentation\",\n)\n@click.option(\n \"--eval_beam_widths\",\n \"-bw\",\n default=\"\",\n show_default=True,\n type=str,\n help=\"Beam widths to evaluate, \" \"e.g. 
4,8\",\n)\n@click.option(\n \"--gold_preproc\",\n \"-G\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Use gold preprocessing\",\n)\n@click.option(\n \"--learn_tokens\",\n \"-T\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Make parser learn \" \"gold-standard tokenization\",\n)\n@click.option(\n \"--textcat_multilabel\",\n \"-TML\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Textcat classes \" \"aren't mutually \" \"exclusive (\" \"multilabel)\",\n)\n@click.option(\n \"--textcat_arch\",\n \"-ta\",\n default=\"bow\",\n show_default=True,\n type=str,\n help=\"Textcat model architecture\",\n)\n@click.option(\n \"--textcat_positive_label\",\n \"-tpl\",\n default=None,\n show_default=True,\n type=str,\n help=\"Textcat positive \" \"label for binary \" \"classes with two \" \"labels\",\n)\n@click.option(\n \"--verbose\", \"-VV\", default=False, show_default=True, type=bool, help=\"verbosity\"\n)\ndef train(\n lang,\n output_path,\n train_path,\n dev_path,\n debug_data_first,\n raw_text,\n base_model,\n pipeline,\n vectors,\n n_iter,\n n_early_stopping,\n n_examples,\n use_gpu,\n version,\n meta_path,\n init_tok2vec,\n parser_multitasks,\n entity_multitasks,\n noise_level,\n orth_variant_level,\n eval_beam_widths,\n gold_preproc,\n learn_tokens,\n textcat_multilabel,\n textcat_arch,\n textcat_positive_label,\n verbose,\n):\n dframe_trainer = DframeTrainer(\n lang,\n output_path,\n train_path,\n dev_path,\n debug_data_first,\n raw_text,\n base_model,\n pipeline,\n vectors,\n n_iter,\n n_early_stopping,\n n_examples,\n use_gpu,\n version,\n meta_path,\n init_tok2vec,\n parser_multitasks,\n entity_multitasks,\n noise_level,\n orth_variant_level,\n eval_beam_widths,\n gold_preproc,\n learn_tokens,\n textcat_multilabel,\n textcat_arch,\n textcat_positive_label,\n verbose,\n )\n dframe_trainer.begin_training()\n\n\n@main.command()\n@click.option(\"--model\", \"-m\", required=True, type=str, help=\"Model name or path\")\n@click.option(\n \"--data_path\",\n \"-d\",\n required=True,\n type=str,\n help=\"Path of CSV containing validation data\",\n)\n@click.option(\n \"--gpu_id\", \"-g\", default=-1, show_default=True, type=bool, help=\"Use GPU\"\n)\n@click.option(\n \"--gold_preproc\",\n \"-G\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Use gold preprocessing\",\n)\n@click.option(\n \"--displacy_path\",\n \"-dp\",\n default=None,\n show_default=True,\n type=str,\n help=\"Directory to output rendered \" \"parses as HTML\",\n)\n@click.option(\n \"--displacy_limit\",\n \"-dl\",\n default=25,\n show_default=True,\n type=int,\n help=\"Limit of parses to render as \" \"HTML\",\n)\n@click.option(\n \"--return_scores\",\n \"-R\",\n default=False,\n show_default=True,\n type=bool,\n help=\"Return dict containing \" \"model scores\",\n)\ndef evaluate(\n model, data_path, gpu_id, gold_preproc, displacy_path, displacy_limit, return_scores\n):\n dframe_evaluator = DframeEvaluator(\n model,\n data_path,\n gpu_id,\n gold_preproc,\n displacy_path,\n displacy_limit,\n return_scores,\n )\n dframe_evaluator.begin_evaluation()\n\n\n@main.command()\n@click.option(\"--output_path\", \"-o\", required=True, type=str, help=\"Output model path\")\n@click.option(\n \"--train_path\", \"-t\", required=True, type=str, help=\"path to training data csv\"\n)\n@click.option(\n \"--dev_path\",\n \"-d\",\n required=True,\n type=str,\n help=\"path to testing/validation data csv\",\n)\n@click.option(\n \"--model\",\n \"-m\",\n default=None,\n show_default=True,\n type=str,\n 
help=\"language model name\",\n)\n@click.option(\n \"--n_iter\",\n \"-n\",\n default=20,\n show_default=True,\n type=int,\n help=\"Number of training iterations\",\n)\n@click.option(\n \"--init_tok2vec\",\n \"-t2v\",\n default=None,\n show_default=True,\n type=Path,\n help=\"Pretrained tok2vec weights\",\n)\n@click.option(\n \"--exclusive_classes\",\n \"-ec\",\n default=False,\n show_default=True,\n type=bool,\n help=\"classes exclusive\",\n)\n@click.option(\n \"--architecture\",\n \"-a\",\n default=\"ensemble\",\n show_default=True,\n type=str,\n help=\"model architecture\",\n)\n@click.option(\n \"--train_split\",\n \"-s\",\n default=0.8,\n show_default=True,\n type=float,\n help=\"split in case no testing data\",\n)\ndef textcat(\n output_path,\n train_path,\n dev_path,\n model,\n n_iter,\n init_tok2vec,\n exclusive_classes,\n architecture,\n train_split,\n):\n dframe_textcat_train = DframeTrainClassifier(\n output_path,\n train_path,\n dev_path,\n model,\n n_iter,\n init_tok2vec,\n exclusive_classes,\n architecture,\n train_split,\n )\n dframe_textcat_train.begin_training()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"christenurs/dframcy","sub_path":"dframcy/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":11791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"}
+{"seq_id":"22091887080","text":"#!/usr/bin/env python\nimport argparse\nimport time\nimport logging\nimport redis\nimport requests\nimport socket\nimport subprocess\nfrom logging.handlers import RotatingFileHandler\n\nfrom mpd import MPDClient, ConnectionError\n\nimport sys\nsys.path.insert(1, '..')\nimport mpd_env # noqa\n\nMAIN_SWITCH = 'http://omega2.lan:8000/switch/0'\nSUB_SWITCH = 'http://sakura.lan:31337/switch'\n\ntry:\n from subprocess import DEVNULL\nexcept ImportError:\n import os\n DEVNULL = open(os.devnull, 'wb')\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument('--interval', type=int, default=10)\narg_parser.add_argument('--timeout', type=int, default=300)\narg_parser.add_argument('--gap', type=int, default=(4 * 60 * 60))\narg_parser.add_argument('-v', '--verbose', action='store_true')\narg_parser.add_argument('-l', '--log-file', help='Log file')\narg_parser.add_argument('--redis-target', help='Target key for wifi-monitor.')\narg_parser.add_argument('--ping-target', help='Target hostname for ping.')\n\n\nclass MpdManager():\n\n def __init__(self, interval, timeout, gap,\n host='localhost', port=6600, password=None,\n redis_target=None, ping_target=None):\n self.redis_target = redis_target\n self.ping_target = ping_target\n self.interval = interval\n self.timeout = timeout\n self.gap = gap\n self.host = host\n self.port = port\n self.password = password\n self.redis = redis.StrictRedis()\n self.logger = logging.getLogger('MpdManager')\n self.is_mpd_on = False\n\n self.prev_on = True\n self.last_on = time.time()\n\n self.connect()\n\n def connect(self):\n self.logger.info('Connecting MPD')\n self.mpd = MPDClient()\n try:\n self.mpd.connect(self.host, self.port)\n if self.password:\n self.mpd.password(self.password)\n self.is_mpd_on = True\n except Exception as e:\n self.logger.debug(e)\n raise e\n self.logger.info('MPD disabled')\n self.is_mpd_on = False\n\n def check_mpd_connection(self):\n try:\n self.mpd.ping()\n except (ConnectionError, BrokenPipeError):\n self.connect()\n\n def run(self):\n while 1:\n # Multiple tests\n last_seen = max(filter(bool, (\n self.check_alive_ping(),\n self.check_alive_redis(),\n self.last_on,\n )))\n\n now = time.time()\n\n difference = now - last_seen\n\n self.logger.debug('Last seen: {} diff: {}'.format(\n last_seen,\n difference))\n\n if difference < self.timeout:\n if not self.prev_on:\n self.on_connected()\n self.prev_on = True\n else:\n if self.prev_on:\n self.on_disconnected()\n self.prev_on = False\n\n if last_seen:\n self.last_on = last_seen\n\n time.sleep(self.interval)\n\n def on_connected(self):\n self.logger.info('Connected')\n\n if self.is_mpd_on:\n self.check_mpd_connection()\n\n if (time.time() - self.last_on) <= self.gap:\n self.logger.info('Start music')\n self.mpd.play()\n else:\n self.logger.info('Over gap')\n self.mpd.command_list_ok_begin()\n self.mpd.clear()\n self.mpd.setvol(70)\n self.mpd.load('latest')\n self.mpd.play()\n self.mpd.command_list_end()\n\n try:\n requests.put(MAIN_SWITCH)\n except Exception as e:\n self.logger.error(str(e))\n\n def on_disconnected(self):\n self.logger.info('Disconnected')\n\n if self.is_mpd_on:\n self.check_mpd_connection()\n self.logger.info('Stop music')\n self.mpd.stop()\n\n for switch in (MAIN_SWITCH, SUB_SWITCH):\n try:\n requests.delete(switch)\n except Exception as e:\n self.logger.error(str(e))\n\n def check_alive_ping(self):\n p = subprocess.Popen(['ping', '-c1', '-W1', self.ping_target],\n stdout=DEVNULL,\n stderr=DEVNULL)\n return_code = p.wait()\n now = 
if return_code == 0:\n self.logger.debug('Ping {}'.format(now))\n self._last_ping_timestamp = now\n return self._last_ping_timestamp\n else:\n return getattr(self, '_last_ping_timestamp', None)\n\n def check_alive_redis(self):\n # subprocess.Popen(['ping', '-c1', '-W1', self.ping_target],\n # stdout=DEVNULL,\n # stderr=DEVNULL)\n last_seen = self.redis.hget(self.redis_target, 'lastseen')\n if last_seen:\n last_seen = float(last_seen)\n self.logger.debug('Redis {}'.format(last_seen))\n return last_seen\n else:\n return None\n\n\nif __name__ == '__main__':\n args = arg_parser.parse_args()\n\n logging.basicConfig(\n datefmt='%y-%m-%d %H:%M:%S',\n format='%(asctime)s:%(levelname)s: %(message)s')\n\n # create the logger before attaching any handlers to it\n logger = logging.getLogger('MpdManager')\n logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)\n\n if args.log_file:\n handler = RotatingFileHandler(args.log_file)\n formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n logger.info(f'Using {mpd_env.MPD_HOST} {mpd_env.MPD_PORT} {mpd_env.MPD_PASSWORD}')\n\n manager = MpdManager(args.interval, args.timeout, args.gap,\n mpd_env.MPD_HOST, mpd_env.MPD_PORT, mpd_env.MPD_PASSWORD,\n redis_target=args.redis_target,\n ping_target=args.ping_target)\n manager.run()\n","repo_name":"tribela/home-automations","sub_path":"phone-scanner/phone-scanner.py","file_name":"phone-scanner.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"74942738382","text":"from __future__ import annotations\n\nimport discord\n\nfrom constants import Day\nfrom utils import keycap_digit\n\n__all__ = (\"WeekDays\",)\n\n\nclass WeekDays(discord.ui.Select):\n def __init__(self, placeholder=\"Select the weekdays for registrations\", max=7):\n _o = []\n for idx, day in enumerate(Day, start=1):\n _o.append(discord.SelectOption(label=day.name.title(), value=day.name, emoji=keycap_digit(idx)))\n\n super().__init__(placeholder=placeholder, max_values=max, options=_o)\n\n async def callback(self, interaction: discord.Interaction):\n await interaction.response.defer()\n self.view.stop()\n\n self.view.custom_id = [Day(_) for _ in self.values]\n","repo_name":"quotientbot/Quotient-Bot","sub_path":"src/cogs/esports/views/scrims/_days.py","file_name":"_days.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"47"}
+{"seq_id":"37222227864","text":"#!/usr/bin/env python3\n# coding=utf-8\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\ninclude a plot showing the common-source node voltage, V , as a function of V 1 − V 2 for all three values of V 2 . How does the value of V change as V 1 goes from below V 2 to above it?\n\"\"\"\n\ndef clip(xs, ys, xbounds, ybounds):\n pairs = [(x, y) for (x, y) in zip(xs, ys) if (xbounds[0] <= x) and (x <= xbounds[1]) and (ybounds[0] <= y) and (y <= ybounds[1])]\n out = list(zip(*pairs))\n return np.array(out[0]), np.array(out[1])\n\ndef clip_range(xs, ys, bounds):\n return clip(xs, ys, (-np.inf, np.inf), bounds)\n\ndef fit(xs, ys, model, initial_params):\n def err_f(params): return np.mean(np.power(np.log(ys) - np.log(model(xs, params)), 2))\n res = minimize(err_f, x0 = initial_params, method='Nelder-Mead')\n print(res)\n return res.x\n\n\n# Info about trials\nVbs = [.563, .563, .563, 1.076, 1.076, 1.076]\nV2s = [2.497, 3.508, 4.49, 4.49, 3.508, 2.493]\nfileNames = [\"data/T%d.V.csv\" % n for n in range(6)]\n\n# Import data\nVdms = [] #plural of Vdm\nVs = []\n\nfor name in fileNames:\n Vdm = []\n V = []\n with open(name) as f:\n c = csv.reader(f, delimiter=\",\")\n next(c) # Throw away the header\n for row in c:\n Vdm += [float(row[0])]\n V += [float(row[1])] \n Vdms += [Vdm]\n Vs += [V]\n\n\n# Plot things\nfig = plt.figure(figsize=(8,6))\nax = plt.subplot(111)\n\n# First, the ones with lower Vb\nfor (color, V2, Vdm, V) in zip(['b','g','r'], V2s, Vdms, Vs[:3]):\n ax.plot(Vdm, V, color + '.', markersize=1, label=\"V2 = %g V\" % V2)\nplt.title(\"Common source node voltage (Vb = %g V)\" % Vbs[0])\nplt.xlabel(\"Differential voltage (V)\")\nplt.ylabel(\"Common-source voltage (V)\")\nplt.grid(True)\nax.legend()\nplt.savefig(\"source-voltage-low-vb.pdf\")\nplt.cla()\n\n# Now higher Vb. Reverse colors so they match\nfor (color, V2, Vdm, V) in zip(['r','g','b'], V2s[3:], Vdms[3:], Vs[3:]):\n ax.plot(Vdm, V, color + '.', markersize=1, label=\"V2 = %g V\" % V2)\nplt.title(\"Common source node voltage (Vb = %g V)\" % Vbs[3])\nplt.xlabel(\"Differential voltage (V)\")\nplt.ylabel(\"Common-source voltage (V)\")\nplt.grid(True)\nax.legend()\nplt.savefig(\"source-voltage-high-vb.pdf\")\nplt.cla()\n","repo_name":"TShapinsky/circuits-lab-7","sub_path":"source-voltage.py","file_name":"source-voltage.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31536350587","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 6 13:02:01 2017\r\n\r\n@author: Ankurt.04\r\n\"\"\"\r\n\r\n#Chapter 7 - end of chapter execises\r\n\r\n#Ex 1 has already been done as part of inchapter study\r\n\r\nimport math\r\n\r\n#Ex2\r\ndef eval_loop():\r\n while True: \r\n a = input('Enter the calulcation that you want to carry out: ')\r\n if a == 'done':\r\n break\r\n b = eval(a)\r\n print('The result is: ', b)\r\n print('The result is: ', b) #if user inputs 'done' then loop breaks and prints value of last caclulated expression\r\n \r\n \r\neval_loop() \r\n\r\n\r\n#Ex3\r\ndef estimate_pi():\r\n product = (2 * math.sqrt(2)) / 9801\r\n k = 0.0\r\n last_term = 1.0\r\n term_sum = 0.0 #all these variables are initialised to a float value as they will be required to be float during caclulation (to avoid data type mismatch)\r\n while last_term > 1e-15:\r\n last_term = ((math.factorial(4.0 * k)) * (1103.0 + (26390.0 * k))) / ((math.factorial(k)**4.0) * (396.0**(4.0 * k)))\r\n term_sum += last_term\r\n k += 1.0\r\n #when last terms becomes less, loop exists\r\n result = product * term_sum\r\n return 1 / result\r\n \r\na = estimate_pi()\r\nb = math.pi\r\nprint('calculated pi value is:', a)\r\nprint('module pi value is:', b)\r\n","repo_name":"ankurt04/ThinkPython2E","sub_path":"chapter7_exercises.py","file_name":"chapter7_exercises.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"73567027343","text":"import collections\n\nfrom dm_control import mujoco\nfrom dm_control.rl import control\nfrom dm_control.suite import base\nfrom dm_control.suite import common\nfrom dm_control.suite.utils import randomizers\nfrom dm_control.utils import containers\nfrom dm_control.utils import rewards\nfrom lxml import etree\nimport numpy as np\n\n_DEFAULT_TIME_LIMIT = 30\n_CONTROL_TIMESTEP = .03 # (Seconds)\n\nSUITE = containers.TaggedTasks()\n\n\ndef get_model_and_assets(n_joints):\n \"\"\"Returns a tuple containing the model XML string and a dict of assets.\n\n Args:\n n_joints: An integer specifying the number of joints in the swimmer.\n\n Returns:\n A tuple `(model_xml_string, assets)`, where `assets` is a dict consisting of\n `{filename: contents_string}` pairs.\n \"\"\"\n return _make_model(n_joints), common.ASSETS\n\n\n@SUITE.add('benchmarking')\ndef swimmer6(time_limit=_DEFAULT_TIME_LIMIT, random=None,\n environment_kwargs=None):\n \"\"\"Returns a 6-link swimmer.\"\"\"\n return _make_swimmer(6, time_limit, random=random,\n environment_kwargs=environment_kwargs)\n\n\n@SUITE.add('benchmarking')\ndef swimmer15(time_limit=_DEFAULT_TIME_LIMIT, random=None,\n environment_kwargs=None):\n \"\"\"Returns a 15-link swimmer.\"\"\"\n return _make_swimmer(15, time_limit, random=random,\n environment_kwargs=environment_kwargs)\n\n\ndef swimmer(n_links=3, time_limit=_DEFAULT_TIME_LIMIT,\n random=None, environment_kwargs=None):\n \"\"\"Returns a swimmer with n links.\"\"\"\n return _make_swimmer(n_links, time_limit, random=random,\n environment_kwargs=environment_kwargs)\n\n\ndef _make_swimmer(n_joints, time_limit=_DEFAULT_TIME_LIMIT, random=None,\n environment_kwargs=None):\n \"\"\"Returns a swimmer control environment.\"\"\"\n model_string, assets = get_model_and_assets(n_joints)\n physics = Physics.from_xml_string(model_string, assets=assets)\n task = Swimmer(random=random)\n environment_kwargs = environment_kwargs or {}\n return control.Environment(\n physics, task, time_limit=time_limit, control_timestep=_CONTROL_TIMESTEP,\n **environment_kwargs)\n\n\ndef _make_model(n_bodies):\n \"\"\"Generates an xml string defining a swimmer with `n_bodies` bodies.\"\"\"\n if n_bodies < 3:\n raise ValueError('At least 3 bodies required. 
Received {}'.format(n_bodies))\n mjcf = etree.fromstring(common.read_model('swimmer.xml'))\n head_body = mjcf.find('./worldbody/body')\n actuator = etree.SubElement(mjcf, 'actuator')\n sensor = etree.SubElement(mjcf, 'sensor')\n\n parent = head_body\n for body_index in range(n_bodies - 1):\n site_name = 'site_{}'.format(body_index)\n child = _make_body(body_index=body_index)\n child.append(etree.Element('site', name=site_name))\n joint_name = 'joint_{}'.format(body_index)\n joint_limit = 360.0/n_bodies\n joint_range = '{} {}'.format(-joint_limit, joint_limit)\n child.append(etree.Element('joint', {'name': joint_name,\n 'range': joint_range}))\n motor_name = 'motor_{}'.format(body_index)\n actuator.append(etree.Element('motor', name=motor_name, joint=joint_name))\n velocimeter_name = 'velocimeter_{}'.format(body_index)\n sensor.append(etree.Element('velocimeter', name=velocimeter_name,\n site=site_name))\n gyro_name = 'gyro_{}'.format(body_index)\n sensor.append(etree.Element('gyro', name=gyro_name, site=site_name))\n parent.append(child)\n parent = child\n\n # Move tracking cameras further away from the swimmer according to its length.\n cameras = mjcf.findall('./worldbody/body/camera')\n scale = n_bodies / 6.0\n for cam in cameras:\n if cam.get('mode') == 'trackcom':\n old_pos = cam.get('pos').split(' ')\n new_pos = ' '.join([str(float(dim) * scale) for dim in old_pos])\n cam.set('pos', new_pos)\n\n return etree.tostring(mjcf, pretty_print=True)\n\n\ndef _make_body(body_index):\n \"\"\"Generates an xml string defining a single physical body.\"\"\"\n body_name = 'segment_{}'.format(body_index)\n visual_name = 'visual_{}'.format(body_index)\n inertial_name = 'inertial_{}'.format(body_index)\n body = etree.Element('body', name=body_name)\n body.set('pos', '0 .1 0')\n etree.SubElement(body, 'geom', {'class': 'visual', 'name': visual_name})\n etree.SubElement(body, 'geom', {'class': 'inertial', 'name': inertial_name})\n return body\n\n\nclass Physics(mujoco.Physics):\n \"\"\"Physics simulation with additional features for the swimmer domain.\"\"\"\n\n def nose_to_target(self):\n \"\"\"Returns a vector from nose to target in local coordinate of the head.\"\"\"\n nose_to_target = (self.named.data.geom_xpos['target'] -\n self.named.data.geom_xpos['nose'])\n head_orientation = self.named.data.xmat['head'].reshape(3, 3)\n return nose_to_target.dot(head_orientation)[:2]\n\n def nose_to_target_dist(self):\n \"\"\"Returns the distance from the nose to the target.\"\"\"\n return np.linalg.norm(self.nose_to_target())\n\n def body_velocities(self):\n \"\"\"Returns local body velocities: x,y linear, z rotational.\"\"\"\n xvel_local = self.data.sensordata[12:].reshape((-1, 6))\n vx_vy_wz = [0, 1, 5] # Indices for linear x,y vels and rotational z vel.\n return xvel_local[:, vx_vy_wz].ravel()\n\n def joints(self):\n \"\"\"Returns all internal joint angles (excluding root joints).\"\"\"\n return self.data.qpos[3:].copy()\n\n\nclass Swimmer(base.Task):\n \"\"\"A swimmer `Task` to reach the target or just swim.\"\"\"\n\n def __init__(self, random=None):\n \"\"\"Initializes an instance of `Swimmer`.\n\n Args:\n random: Optional, either a `numpy.random.RandomState` instance, an\n integer seed for creating a new `RandomState`, or None to select a seed\n automatically (default).\n \"\"\"\n super().__init__(random=random)\n\n def initialize_episode(self, physics):\n \"\"\"Sets the state of the environment at the start of each episode.\n\n Initializes the swimmer orientation to [-pi, pi) and the relative joint\n 
angle of each joint uniformly within its range.\n\n Args:\n physics: An instance of `Physics`.\n \"\"\"\n # Random joint angles:\n randomizers.randomize_limited_and_rotational_joints(physics, self.random)\n # Random target position.\n close_target = self.random.rand() < .2 # Probability of a close target.\n target_box = .3 if close_target else 2\n xpos, ypos = self.random.uniform(-target_box, target_box, size=2)\n physics.named.model.geom_pos['target', 'x'] = xpos\n physics.named.model.geom_pos['target', 'y'] = ypos\n physics.named.model.light_pos['target_light', 'x'] = xpos\n physics.named.model.light_pos['target_light', 'y'] = ypos\n\n super().initialize_episode(physics)\n\n def get_observation(self, physics):\n \"\"\"Returns an observation of joint angles, body velocities and target.\"\"\"\n obs = collections.OrderedDict()\n obs['joints'] = physics.joints()\n obs['to_target'] = physics.nose_to_target()\n obs['body_velocities'] = physics.body_velocities()\n return obs\n\n def get_reward(self, physics):\n \"\"\"Returns a smooth reward.\"\"\"\n target_size = physics.named.model.geom_size['target', 0]\n return rewards.tolerance(physics.nose_to_target_dist(),\n bounds=(0, target_size),\n margin=5*target_size,\n sigmoid='long_tail')\n","repo_name":"deepmind/dm_control","sub_path":"dm_control/suite/swimmer.py","file_name":"swimmer.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","stars":3200,"dataset":"github-code","pt":"47"}
+{"seq_id":"23013491924","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom .inplace_abn import InPlaceABN, InPlaceABNSync, ABN\nfrom .base_nets.layer import Bottleneck_inplace\n\nclass ResidualBlock(nn.Module):\n def __init__(self, inplanes, planes, abn, stride=1, kernel_size=3, downsample=None,):\n super(ResidualBlock, self).__init__()\n\n self.bn1 = nn.BatchNorm3d(inplanes)\n # self.bn1 = abn(inplanes)\n # self.relu = nn.ReLU6(inplace=True)\n self.relu = nn.ReLU6()\n self.conv1 = nn.Conv3d(inplanes, planes, stride=stride, kernel_size=kernel_size, padding=int((kernel_size - 1)/2))\n \n self.bn2 = nn.BatchNorm3d(planes)\n # self.bn2 = abn(planes)\n self.conv2 = nn.Conv3d(planes, planes, kernel_size=kernel_size, padding=int((kernel_size - 1)/2))\n \n self.downsample = downsample\n self.stride = stride\n \n def forward(self, x):\n \n out = x.clone()\n\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv1(out)\n \n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n if self.downsample is not None:\n x = self.downsample(x)\n \n # 保持正确的梯度流动:x -> out -> x + out \n # out += x \n out = out + x\n\n return out\n\nclass ResidualConnect(nn.Module):\n\n def __init__(self, md, inplanes, planes, res_type='SAME'):\n super(ResidualConnect, self).__init__()\n \"\"\"\n :param md -> nn.Module: 模块\n :param res_type -> str: ['SAME', 'UP', 'DOWN']\n \n \"\"\"\n self.md = md\n self.res_type = res_type\n self.downsample = nn.Conv3d(inplanes, planes, kernel_size=3, stride=2, padding=1)\n self.upsample = nn.ConvTranspose3d(inplanes, planes, kernel_size=3, stride=2, padding=1, output_padding=1)\n\n\n def forward(self, x):\n\n out = x.clone()\n\n out = self.md(out)\n if self.res_type == 'UP':\n x = self.upsample(x)\n elif self.res_type == 'DOWN':\n x = self.downsample(x)\n \n out = out + x\n\n return out\n\n\n\nclass MultiHeadAttention(torch.nn.Module):\n def __init__(self, in_channel, key_filters, value_filters,\n output_filters, num_heads, dropout_prob=0.5, layer_type='SAME'):\n super().__init__()\n \"\"\"\n Multihead scaled-dot-product attention (3d) with input/output transformations.\n\n :param inputs -> tensor: [batch, c, d, h, w]\n :param in_channel -> int: 输入通道数量\n :param key_filters -> int: k-transform后的通道数\n :param value_filters -> int: v-transform后的通道数\n :param output_filters -> int: 输出通道数\n :param num_heads -> int: 需要被key_filters & value_filters整除\n :param layer_type -> str: choose from ['SAME', 'DOWN', 'UP']\n\n Raises:\n ValueError: attention heads的数量不能被通道数整除.\n \"\"\"\n\n if key_filters % num_heads != 0:\n raise ValueError(\"Key depth (%d) must be divisible by the number of \"\n \"attention heads (%d).\" % (key_filters, num_heads))\n if value_filters % num_heads != 0:\n raise ValueError(\"Value depth (%d) must be divisible by the number of \"\n \"attention heads (%d).\" % (value_filters, num_heads))\n if layer_type not in ['SAME', 'DOWN', 'UP']:\n raise ValueError(\"Layer type (%s) must be one of SAME, \"\n \"DOWN, UP.\" % (layer_type))\n\n self.num_heads = num_heads\n self.layer_type = layer_type\n\n self.QueryTransform = None\n if layer_type == 'SAME':\n self.QueryTransform = nn.Conv3d(in_channel, key_filters, kernel_size=1, stride=1,\n padding=0, bias=True)\n elif layer_type == 'DOWN':\n self.QueryTransform = nn.Conv3d(in_channel, key_filters, kernel_size=3, stride=2,\n padding=1, bias=True) # author use bias\n elif layer_type == 'UP':\n self.QueryTransform = nn.ConvTranspose3d(in_channel, key_filters, kernel_size=3, stride=2,\n padding=1, 
bias=True)\n\n        self.KeyTransform = nn.Conv3d(in_channel, key_filters, kernel_size=1, stride=1, padding=0, bias=True)\n        self.ValueTransform = nn.Conv3d(in_channel, value_filters, kernel_size=1, stride=1, padding=0, bias=True)\n        self.attention_dropout = nn.Dropout(dropout_prob)\n\n        self.outputConv = nn.Conv3d(value_filters, output_filters, kernel_size=1, stride=1, padding=0, bias=True)\n\n        self._scale = (key_filters // num_heads) ** 0.5\n\n    def forward(self, inputs):\n        \"\"\"\n        :param inputs: B, C, D, H, W\n        :return: outputs: B, Co, Dq, Hq, Wq\n        \"\"\"\n\n        if self.layer_type == 'SAME' or self.layer_type == 'DOWN':\n            q = self.QueryTransform(inputs)\n        elif self.layer_type == 'UP':\n            q = self.QueryTransform(inputs, output_size=(inputs.shape[2] * 2, inputs.shape[3] * 2, inputs.shape[4] * 2))\n\n        # [B, Dq, Hq, Wq, Ck]\n        k = self.KeyTransform(inputs).permute(0, 2, 3, 4, 1)\n        v = self.ValueTransform(inputs).permute(0, 2, 3, 4, 1)\n        q = q.permute(0, 2, 3, 4, 1)\n\n        Batch, Dq, Hq, Wq = q.shape[0], q.shape[1], q.shape[2], q.shape[3]\n\n        # [B, D, H, W, N, Ck]\n        k = self.split_heads(k, self.num_heads)\n        v = self.split_heads(v, self.num_heads)\n        q = self.split_heads(q, self.num_heads)\n\n        # [(B, D, H, W, N), c]\n        k = torch.flatten(k, 0, 4)\n        v = torch.flatten(v, 0, 4)\n        q = torch.flatten(q, 0, 4)\n\n        # normalize\n        q = q / self._scale\n        # attention\n        # [(B, Dq, Hq, Wq, N), (B, D, H, W, N)]\n        A = torch.matmul(q, k.transpose(0, 1))\n        A = torch.softmax(A, dim=1)\n        A = self.attention_dropout(A)\n\n        # [(B, Dq, Hq, Wq, N), C]\n        O = torch.matmul(A, v)\n        # [B, Dq, Hq, Wq, C]\n        O = O.view(Batch, Dq,Hq, Wq, v.shape[-1] * self.num_heads)\n        # [B, C, Dq, Hq, Wq]\n        O = O.permute(0, 4, 1, 2, 3)\n        # [B, Co, Dq, Hq, Wq]\n        O = self.outputConv(O)\n\n        return O\n\n    def split_heads(self, x, num_heads):\n        \"\"\"\n        Split the channels into several heads\n\n        :param x -> tensor: [batch, h, w, channels]\n        :param num_heads: number of heads\n        :return\n            a Tensor with shape [batch, h, w, num_heads, channels / num_heads]\n        \"\"\"\n\n        channel_num = x.shape[-1]\n        return x.view(x.shape[0], x.shape[1], x.shape[2], x.shape[3], num_heads, int(channel_num / num_heads))\n\n\n\nclass multiClass_unet_nonLocal(nn.Module):\n    def __init__(self, n_inp=1, n_out=3, feats=(32, 64, 128), abn=2, n_encoders=2,):\n        \"\"\"\n        :param n_inp: number of input channels, defaults to 1 for 3D images\n        :param n_out: number of output channels, consistent with n_inp\n        :param feats: channel counts after the convolutions\n        :param abn: one of [0,1,2], selects the ABN type\n        :param n_encoders:\n\n        \"\"\"\n        super().__init__()\n        self.n_inp = n_inp\n        if abn == 0:\n            abnblock = ABN\n        elif abn == 1:\n            abnblock = InPlaceABN\n        elif abn == 2:\n            abnblock = InPlaceABNSync\n        # abnblock = abn\n\n        self.relu = nn.ReLU6(inplace=True)\n\n        self.in_layer = ResidualBlock(inplanes=n_inp, planes=feats[0], kernel_size=3, abn=abnblock,\n                                      downsample=nn.Conv3d(n_inp, feats[0], stride=1, kernel_size=1))\n        self.down_layer1 = ResidualBlock(inplanes=feats[0], planes=feats[1], abn=abnblock, stride=2,\n                                         downsample=nn.Conv3d(feats[0], feats[1], stride=2, kernel_size=1))\n        self.down_layer2 = ResidualBlock(inplanes=feats[1], planes=feats[2], abn=abnblock, stride=2,\n                                         downsample=nn.Conv3d(feats[1], feats[2], stride=2, kernel_size=1))\n        \n        # github:https://github.com/divelab/Non-local-U-Nets\n        # In the original code the three channel counts are identical, head2 = 2\n        self.bottom_layer = ResidualConnect(MultiHeadAttention(in_channel=feats[2], \n                                                               key_filters=feats[2],\n                                                               value_filters=feats[2],\n                                                               output_filters=feats[2],\n                                                               num_heads=2,\n                                                               layer_type='SAME'),\n                                            inplanes=feats[2],\n                                            planes=feats[2],\n                                            res_type='SAME',)\n        \n        # heads = 1\n        self.up_layer2 = 
ResidualConnect(MultiHeadAttention(in_channel=feats[2], \n                                                           key_filters=feats[1],\n                                                           value_filters=feats[1],\n                                                           output_filters=feats[1],\n                                                           num_heads=1,\n                                                           layer_type='UP'),\n                                        inplanes=feats[2],\n                                        planes=feats[1],\n                                        res_type='UP',)\n        \n\n        self.up_layer1 = ResidualConnect(MultiHeadAttention(in_channel=feats[1], \n                                                            key_filters=feats[0],\n                                                            value_filters=feats[0],\n                                                            output_filters=feats[0],\n                                                            num_heads=1,\n                                                            layer_type='UP'),\n                                         inplanes=feats[1],\n                                         planes=feats[0],\n                                         res_type='UP',\n                                         )\n        \n        self.out_layer = ResidualBlock(inplanes=feats[0], planes=n_out, kernel_size=1, abn=abnblock,\n                                       downsample=nn.Conv3d(feats[0], n_out, stride=1, kernel_size=1))\n\n\n    def forward(self, x, *args):\n\n        x0 = self.in_layer(x)\n        x1 = self.down_layer1(x0)\n        x2 = self.down_layer2(x1)\n        # print(x2.shape)\n        \n        xb = self.bottom_layer(x2)\n        # print(xb.shape)\n\n        xr2 = self.up_layer2(xb)\n        xr21 = torch.add(xr2, x1)\n        xr1 = self.up_layer1(xr21)\n        xr11 = torch.add(xr1, x0)\n\n        out = self.out_layer(xr11)\n\n        return out\n\n\nif __name__ == '__main__':\n    device = torch.device('cpu')\n    inputs = torch.rand(1, 24, 60, 60).unsqueeze(0).to(device)\n    # net = MultiHeadAttention(in_channel=1, \n    #                          key_filters=10, \n    #                          value_filters=8, \n    #                          output_filters=5, \n    #                          num_heads=1, \n    #                          dropout_prob=0.5, \n    #                          layer_type='UP',  # 'SAME', 'DOWN', 'UP'\n    #                          )  \n    # net = nn.ConvTranspose3d(in_channels=1, out_channels=10, kernel_size=3, stride=2, padding=1, output_padding=1)\n    net = multiClass_unet_nonLocal(\n        n_inp=1,\n        n_out=3,\n        feats=(32, 64, 128),\n        abn=2,\n    )\n    res = net(inputs)\n    print('input shape: {}'.format(inputs.shape))\n    print('res shape: {}'.format(res.shape))","repo_name":"ypsprimer/3d-segmentaion","sub_path":"models/unets_nonlocal.py","file_name":"unets_nonlocal.py","file_ext":"py","file_size_in_byte":11788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13418891385","text":"\n\"\"\"\nRun hatEval experiments\n\"\"\"\nimport pandas as pd\nimport os\nimport pathlib\nimport torch\nimport numpy as np\nimport logging\nfrom datasets import Dataset, Value, ClassLabel, Features, DatasetDict, load_dataset\nfrom transformers import Trainer\nfrom sklearn.metrics import precision_recall_fscore_support, accuracy_score, f1_score\nfrom .preprocessing import preprocess_tweet, get_preprocessing_args\nfrom .training import train_and_eval, load_model\nfrom .tuning import hyperparameter_sweep, get_training_arguments\n\n\nlogging.basicConfig()\n\nlogger = logging.getLogger('pysentimiento')\nlogger.setLevel(logging.INFO)\n\ntask_name = \"hate_speech\"\n\nproject_dir = pathlib.Path(os.path.dirname(__file__)).parent\ndata_dir = os.path.join(project_dir, \"data\", \"hate\")\n\nlabels_order = [\"HS\", \"TR\", \"AG\"]\n\n# Labels for the Portuguese dataset\npt_labels = ['Sexism', 'Body', 'Racism', 'Ideology', 'Homophobia']\n\n\ndef load_datasets(lang,\n train_path=None, dev_path=None, test_path=None, limit=None,\n preprocess=True, preprocessing_args={}):\n \"\"\"\n Load hate speech datasets\n\n \"\"\"\n if lang == \"it\":\n ds = load_dataset(\"pysentimiento/it_haspeede\")\n ds = ds.map(lambda x: {\"labels\": torch.Tensor(\n [x[\"hs\"], x[\"stereotype\"]])}, batched=False)\n elif lang == \"pt\":\n ds = load_dataset(\"pysentimiento/pt_hate_speech\")\n ds = ds.map(lambda x: {\"labels\": torch.Tensor(\n [x[l] for l in pt_labels])}, batched=False)\n else:\n train_path = train_path or os.path.join(\n data_dir, f\"hateval2019_{lang}_train.csv\")\n dev_path = dev_path or os.path.join(\n data_dir, f\"hateval2019_{lang}_dev.csv\")\n test_path = test_path or os.path.join(\n data_dir, f\"hateval2019_{lang}_test.csv\")\n\n train_df = pd.read_csv(train_path)\n dev_df = pd.read_csv(dev_path)\n test_df = pd.read_csv(test_path)\n\n features = Features({\n 'id': Value('int64'),\n 'text': Value('string'),\n 'HS': ClassLabel(num_classes=2, names=[\"OK\", \"HATEFUL\"]),\n 'TR': ClassLabel(num_classes=2, names=[\"GROUP\", \"INDIVIDUAL\"]),\n \"AG\": ClassLabel(num_classes=2, names=[\"NOT AGGRESSIVE\", \"AGGRESSIVE\"])\n })\n\n train_dataset = Dataset.from_pandas(\n train_df, features=features, preserve_index=False)\n dev_dataset = Dataset.from_pandas(\n dev_df, features=features, preserve_index=False)\n test_dataset = Dataset.from_pandas(\n test_df, features=features, preserve_index=False)\n\n ds = DatasetDict(\n train=train_dataset,\n dev=dev_dataset,\n test=test_dataset\n )\n\n ds = ds.map(lambda x: {\n \"labels\": torch.Tensor([x[\"HS\"], x[\"TR\"], x[\"AG\"]])\n }, batched=False)\n\n if preprocess:\n def preprocess_fn(x):\n return {\n \"text\": preprocess_tweet(x[\"text\"], lang=lang, **preprocessing_args)\n }\n\n ds = ds.map(preprocess_fn, batched=False)\n return ds\n\n\ndef _get_b_metrics(preds, labels):\n ret = {}\n\n f1s = []\n precs = []\n recalls = []\n original_preds = preds.copy()\n\n preds[:, 1] = preds[:, 0] & preds[:, 1]\n preds[:, 2] = preds[:, 0] & preds[:, 2]\n\n for i, cat in enumerate([\"HS\", \"TR\", \"AG\"]):\n cat_labels, cat_preds = labels[:, i], preds[:, i]\n\n precision, recall, f1, _ = precision_recall_fscore_support(\n cat_labels, cat_preds, average='binary', zero_division=0,\n )\n\n f1s.append(f1)\n precs.append(precision)\n recalls.append(recall)\n\n ret[cat.lower()+\"_f1\"] = f1\n ret[cat.lower()+\"_precision\"] = precision\n ret[cat.lower()+\"_recall\"] = recall\n\n neg_hs_f1_score = f1_score(1-(preds[:, 0] > 0), 1 - labels[:, 0])\n\n 
ret[\"macro_hs_f1_score\"] = (f1s[0] + neg_hs_f1_score) / 2\n #\n # We calculate EMR in a gated way\n # Block TR and AG if HS is False\n #\n ret[\"emr_no_gating\"] = accuracy_score(labels, original_preds)\n ret[\"emr\"] = accuracy_score(labels, preds)\n\n ret[\"macro_f1\"] = torch.Tensor(f1s).mean()\n ret[\"macro_precision\"] = torch.Tensor(precs).mean()\n ret[\"macro_recall\"] = torch.Tensor(recalls).mean()\n\n return ret\n\n\ndef get_task_b_metrics(predictions):\n\n outputs = predictions.predictions\n labels = predictions.label_ids\n\n return _get_b_metrics(outputs > 0, labels)\n# Maps combinations to classes\n\n\ncombinatorial_mapping = {\n (0, 0, 0): 0, # not hateful\n (1, 0, 0): 1, # hs, not tr, not ag\n (1, 0, 1): 2, # hs, not tr, ag\n (1, 1, 0): 3, # hs, tr , not ag\n (1, 1, 1): 4, # hs, tr , ag\n}\n\ninverse_combinatorial_mapping = {\n v: k for k, v in combinatorial_mapping.items()}\n\n\ndef get_combinatorial_metrics(predictions):\n outputs = predictions.predictions\n labels = predictions.label_ids\n\n preds = outputs.argmax(1)\n\n normalized_preds = np.array(\n [inverse_combinatorial_mapping[k] for k in preds])\n normalized_labels = np.array(\n [inverse_combinatorial_mapping[k] for k in labels])\n return _get_b_metrics(normalized_preds, normalized_labels)\n\n\nclass HierarchicalTrainer(Trainer):\n \"\"\"\n Hierarchical Cross Entropy loss\n \"\"\"\n\n def __init__(self, gamma=0, *args, **kwargs):\n \"\"\"\n Gamma is the hyperparameter of this loss\n\n B(y) = (1 - y) γ + y * 1\n\n such that\n\n L(y, ypred) = L(y_HS, ypred_HS) + B(y_HS) (L(y_TR, ypred_TR) + L(y_AG, ypred_AG))\n\n If γ = 1, this is equal to standard sum of binary cross-entropies\n If equals to zero, it only sums the losses of the second-stage variables if the previous is one\n \"\"\"\n super().__init__(*args, **kwargs)\n self.gamma = gamma\n\n def compute_loss(self, model, inputs, return_outputs=False):\n labels = inputs.pop(\"labels\")\n outputs = model(**inputs)\n logits = outputs.logits\n\n loss_fct = torch.nn.BCEWithLogitsLoss(reduction='none')\n unmasked_loss = loss_fct(logits, labels)\n # Expand to two columns, as this is the dimension of the second stage\n mask = labels[:, 0].view(-1, 1).expand(labels.shape[0], 2).clone()\n # Esto está mal porque floats pero bueno\n mask[mask < 1] = self.gamma\n\n first_stage_loss = unmasked_loss[:, 0]\n # Mask only the second stage\n second_stage_loss = (unmasked_loss[:, 1:] * mask).sum(1)\n loss = (first_stage_loss + second_stage_loss).sum()\n return (loss, outputs) if return_outputs else loss\n\n\ndef get_trainer_class(hierarchical=False, gamma=.0):\n \"\"\"\n\n \"\"\"\n if hierarchical:\n return (lambda *args, **kwargs:\n HierarchicalTrainer(*\n args, gamma=gamma, **kwargs)) if hierarchical else None\n\n\ndef get_metrics_fun(task_b, combinatorial):\n \"\"\"\n Returns the function that computes the metrics\n \"\"\"\n if task_b:\n if combinatorial:\n return get_combinatorial_metrics\n else:\n return get_task_b_metrics\n else:\n return None\n\n\ndef accepts(lang, **kwargs):\n \"\"\"\n Returns whether the task is defined for the given language\n \"\"\"\n return lang in [\"it\", \"en\", \"es\", \"pt\"]\n\n\ndef get_id2label(lang, task_b, combinatorial):\n \"\"\"\n Returns a dictionary that maps the label id to the label name\n \"\"\"\n if lang == \"it\":\n return {\n 0: \"hateful\",\n 1: \"stereotype\",\n }\n elif lang == \"pt\":\n return dict(enumerate(pt_labels))\n elif task_b:\n if combinatorial:\n return {\n 0: \"not hateful\",\n 1: \"hateful, not tr, not ag\",\n 2: 
\"hateful, not tr, ag\",\n 3: \"hateful, targeted, not ag\",\n 4: \"hateful, targeted, ag\",\n }\n else:\n return {\n 0: \"hateful\",\n 1: \"targeted\",\n 2: \"aggressive\",\n }\n else:\n return {\n 0: 'ok',\n 1: 'hateful',\n }\n\n\ndef train(\n base_model, lang, task_b=True, class_weight=None,\n hierarchical=False, gamma=.0, dev=False,\n combinatorial=False, use_defaults_if_not_tuned=False, **kwargs,\n):\n \"\"\"\n Train function\n\n Arguments:\n ---------\n\n task_b: bool (default False)\n If true, trains model for task_b\n\n combinatorial: bool (default False)\n If task_b true, whether to train a model in a combinatorial fashion\n\n That is, instead of training a different output for each predicted label,\n train a classifier for 5 possible combinations:\n 0: not hateful\n 1: HS, not TR, not AG\n 2: HS, not TR, AG\n 3: HS, TR, not AG\n 4: HS, TR, AG\n \"\"\"\n\n ds = load_datasets(\n lang=lang, preprocessing_args=get_preprocessing_args(base_model, lang=lang))\n\n if dev:\n ds[\"test\"] = ds[\"dev\"]\n\n trainer_class = get_trainer_class(hierarchical, gamma)\n metrics_fun = get_metrics_fun(\n task_b=task_b, combinatorial=combinatorial) if lang not in {\"it\", \"pt\"} else None\n id2label = get_id2label(lang=lang, task_b=task_b,\n combinatorial=combinatorial)\n\n if class_weight:\n class_weight = torch.Tensor([ds[\"train\"][k]\n for k in ds[\"train\"].features[\"label\"].names])\n class_weight = 1 / (2 * class_weight.mean(1))\n\n training_args = get_training_arguments(base_model, task_name=task_name, lang=lang,\n metric_for_best_model=\"eval/macro_f1\", use_defaults_if_not_tuned=use_defaults_if_not_tuned)\n\n return train_and_eval(\n base_model=base_model, dataset=ds, id2label=id2label,\n lang=lang, training_args=training_args,\n class_weight=class_weight, metrics_fun=metrics_fun, trainer_class=trainer_class,\n **kwargs\n )\n\n\ndef hp_tune(model_name, lang, **kwargs):\n \"\"\"\n Hyperparameter tuning with wandb\n \"\"\"\n if lang == \"it\":\n id2label = {\n 0: \"hateful\",\n 1: \"stereotype\",\n }\n\n compute_metrics = None\n\n elif lang == \"pt\":\n id2label = dict(enumerate(pt_labels))\n compute_metrics = None\n else:\n id2label = {\n 0: \"hateful\",\n 1: \"targeted\",\n 2: \"aggressive\",\n }\n compute_metrics = get_task_b_metrics\n\n ds = load_datasets(\n lang=lang, preprocessing_args=get_preprocessing_args(\n model_name, lang=lang)\n )\n\n def model_init():\n model, _ = load_model(model_name, id2label, lang=lang)\n return model\n\n _, tokenizer = load_model(model_name, id2label, lang=lang)\n\n config_info = {\n \"model\": model_name,\n \"task\": task_name,\n \"lang\": lang,\n }\n\n return hyperparameter_sweep(\n name=f\"swp-{task_name}-{lang}-{model_name}\",\n group_name=f\"swp-{task_name}-{lang}\",\n model_init=model_init,\n tokenizer=tokenizer,\n datasets=ds,\n id2label=id2label,\n compute_metrics=compute_metrics,\n config_info=config_info,\n **kwargs\n )\n","repo_name":"pysentimiento/pysentimiento","sub_path":"pysentimiento/hate.py","file_name":"hate.py","file_ext":"py","file_size_in_byte":11186,"program_lang":"python","lang":"en","doc_type":"code","stars":443,"dataset":"github-code","pt":"47"}
+{"seq_id":"23697717490","text":"import argparse\nimport torch\nimport lib\nimport numpy as np\nimport os\nimport datetime\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--hidden_size', default=100, type=int) #Literature uses 100 / 1000 --> better is 100\nparser.add_argument('--num_layers', default=3, type=int) #1 hidden layer\nparser.add_argument('--batch_size', default=50, type=int) #50 in first paper and 32 in second paper\nparser.add_argument('--dropout_input', default=0, type=float) #0.5 for TOP and 0.3 for BPR\nparser.add_argument('--dropout_hidden', default=0.5, type=float) #0.5 for TOP and 0.3 for BPR\nparser.add_argument('--n_epochs', default=5, type=int) #number of epochs (10 in literature)\nparser.add_argument('--k_eval', default=20, type=int) #value of K durig Recall and MRR Evaluation\n# parse the optimizer arguments\nparser.add_argument('--optimizer_type', default='Adagrad', type=str) #Optimizer --> Adagrad is the best according to literature\nparser.add_argument('--final_act', default='tanh', type=str) #Final Activation Function\nparser.add_argument('--lr', default=0.01, type=float) #learning rate (Best according to literature 0.01 to 0.05)\nparser.add_argument('--weight_decay', default=0, type=float) #no weight decay\nparser.add_argument('--momentum', default=0, type=float) #no momentum\nparser.add_argument('--eps', default=1e-6, type=float) #not used\nparser.add_argument(\"-seed\", type=int, default=22, help=\"Seed for random initialization\") #Random seed setting\nparser.add_argument(\"-sigma\", type=float, default=None, help=\"init weight -1: range [-sigma, sigma], -2: range [0, sigma]\") # weight initialization [-sigma sigma] in literature\n\n####### TODO: discover this ###########\nparser.add_argument(\"--embedding_dim\", type=int, default=-1, help=\"using embedding\") \n####### TODO: discover this ###########\n\n# parse the loss type\nparser.add_argument('--loss_type', default='TOP1-max', type=str) #type of loss function TOP1 / BPR / TOP1-max / BPR-max\n# etc\nparser.add_argument('--time_sort', default=False, type=bool) #In case items are not sorted by time stamp\nparser.add_argument('--model_name', default='GRU4REC-CrossEntropy', type=str)\nparser.add_argument('--save_dir', default='models', type=str)\nparser.add_argument('--data_folder', default='../Dataset/RecSys_Dataset_After/', type=str)\nparser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)\nparser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)\nparser.add_argument(\"--is_eval\", action='store_true') #should be used during testing and eliminated during training\nparser.add_argument('--load_model', default=None, type=str)\nparser.add_argument('--checkpoint_dir', type=str, default='checkpoint')\n\n# Get the arguments\nargs = parser.parse_args()\nargs.cuda = torch.cuda.is_available()\n#use random seed defined\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\n\n\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n#Write Checkpoints with arguments used in a text file for reproducibility\ndef make_checkpoint_dir():\n print(\"PARAMETER\" + \"-\"*10)\n now = datetime.datetime.now()\n S = '{:02d}{:02d}{:02d}{:02d}'.format(now.month, now.day, now.hour, now.minute)\n save_dir = os.path.join(args.checkpoint_dir, S)\n if not os.path.exists(args.checkpoint_dir):\n os.mkdir(args.checkpoint_dir)\n\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n args.checkpoint_dir = save_dir\n with open(os.path.join(args.checkpoint_dir, 'parameter.txt'), 'w') as f:\n 
for attr, value in sorted(args.__dict__.items()):\n print(\"{}={}\".format(attr.upper(), value))\n f.write(\"{}={}\\n\".format(attr.upper(), value))\n print(\"---------\" + \"-\"*10)\n\n#weight initialization if it was defined\ndef init_model(model):\n if args.sigma is not None:\n for p in model.parameters():\n if args.sigma != -1 and args.sigma != -2:\n sigma = args.sigma\n p.data.uniform_(-sigma, sigma)\n elif len(list(p.size())) > 1:\n sigma = np.sqrt(6.0 / (p.size(0) + p.size(1)))\n if args.sigma == -1:\n p.data.uniform_(-sigma, sigma)\n else:\n p.data.uniform_(0, sigma)\n\n\ndef main():\n print(\"Loading train data from {}\".format(os.path.join(args.data_folder, args.train_data)))\n print(\"Loading valid data from {}\".format(os.path.join(args.data_folder, args.valid_data)))\n\n train_data = lib.Dataset(os.path.join(args.data_folder, args.train_data))\n valid_data = lib.Dataset(os.path.join(args.data_folder, args.valid_data), itemmap=train_data.itemmap)\n make_checkpoint_dir()\n \n #set all the parameters according to the defined arguments\n input_size = len(train_data.items)\n hidden_size = args.hidden_size\n num_layers = args.num_layers\n output_size = input_size\n batch_size = args.batch_size\n dropout_input = args.dropout_input\n dropout_hidden = args.dropout_hidden\n embedding_dim = args.embedding_dim\n final_act = args.final_act\n loss_type = args.loss_type\n optimizer_type = args.optimizer_type\n lr = args.lr\n weight_decay = args.weight_decay\n momentum = args.momentum\n eps = args.eps\n n_epochs = args.n_epochs\n time_sort = args.time_sort\n #loss function\n loss_function = lib.LossFunction(loss_type=loss_type, use_cuda=args.cuda) #cuda is used with cross entropy only\n if not args.is_eval: #training\n #Initialize the model\n model = lib.GRU4REC(input_size, hidden_size, output_size, final_act=final_act,\n num_layers=num_layers, use_cuda=args.cuda, batch_size=batch_size,\n dropout_input=dropout_input, dropout_hidden=dropout_hidden, embedding_dim=embedding_dim)\n #weights initialization\n init_model(model)\n #optimizer\n optimizer = lib.Optimizer(model.parameters(), optimizer_type=optimizer_type, lr=lr,\n weight_decay=weight_decay, momentum=momentum, eps=eps)\n #trainer class\n trainer = lib.Trainer(model, train_data=train_data, eval_data=valid_data, optim=optimizer,\n use_cuda=args.cuda, loss_func=loss_function, batch_size=batch_size, args=args)\n print('#### START TRAINING....')\n trainer.train(0, n_epochs - 1)\n else: #testing\n if args.load_model is not None:\n print(\"Loading pre-trained model from {}\".format(args.load_model))\n try:\n checkpoint = torch.load(args.load_model)\n except:\n checkpoint = torch.load(args.load_model, map_location=lambda storage, loc: storage)\n model = checkpoint[\"model\"]\n model.gru.flatten_parameters()\n evaluation = lib.Evaluation(model, loss_function, use_cuda=args.cuda, k = args.k_eval)\n loss, recall, mrr = evaluation.eval(valid_data, batch_size)\n print(\"Final result: recall = {:.2f}, mrr = {:.2f}\".format(recall, mrr))\n else:\n print(\"No Pretrained Model was found!\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hungpthanh/GRU4REC-pytorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":229,"dataset":"github-code","pt":"47"}
+{"seq_id":"5939147177","text":"# Unit tests for buttonManager\nimport mock\nfrom mock import call\nfrom mock import Mock\nfrom mock import patch\nimport os\nfrom os import sys, path\n\nimport unittest\n\nsys.path.append(os.path.realpath('..'))\nimport settings\n\n\nclass TestWriteSettingsThread(unittest.TestCase):\n def setUp(self):\n settings.CONFIG_FILE = \"Test/shotmanager.conf\"\n settings.CONFIG_FILE_BACKUP = \"Test/shotmanager.back\"\n self.lock = Mock()\n settings.settingsLock = self.lock\n\n def testLocks(self):\n \"\"\" Make sure we lock/unlock \"\"\"\n settings.writeSettingsThread(\"a\", \"b\")\n self.lock.acquire.assert_called_with()\n self.lock.release.assert_called_with()\n\n def testValueSet(self):\n \"\"\" Make sure we are setting the correct value \"\"\"\n with patch('ConfigParser.SafeConfigParser') as patchedParser:\n parser = Mock()\n patchedParser.return_value = parser\n settings.writeSettingsThread(\"aaa\", \"bbb\")\n parser.read.assert_called_with(\"Test/shotmanager.conf\")\n parser.set.assert_called_with(\"shotManager\", \"aaa\", \"bbb\")\n\n\nclass TestReadSetting(unittest.TestCase):\n def setUp(self):\n mockParser = patch('ConfigParser.SafeConfigParser')\n self.addCleanup(mockParser.stop)\n mock = mockParser.start()\n self.parser = Mock()\n settings.CONFIG_FILE = \"Test/shotmanager.conf\"\n mock.return_value = self.parser\n\n def testReadSetting(self):\n \"\"\" Test that we attempt to read the correct thing \"\"\"\n self.parser.get = Mock(return_value = \"foo\")\n value = settings.readSetting(\"bleh\")\n self.parser.get.assert_called_with(\"shotManager\", \"bleh\")\n self.assertEqual(value, \"foo\")\n\n def testReadBadSetting(self):\n \"\"\" Test that we get an exception from a failed get \"\"\"\n self.parser.get = Mock(return_value = \"foo\", side_effect=KeyError(\"Boo\"))\n try:\n value = settings.readSetting(\"bleh\")\n except:\n pass\n else:\n self.assertFalse(True)\n","repo_name":"OpenSolo/OpenSolo","sub_path":"shotmanager/Test/TestSettings.py","file_name":"TestSettings.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"47"}
+{"seq_id":"41720663684","text":"\"\"\"!\nI couldn't find a good library to write gRPC/protobuf objects to disk so this is my own implementation of a\ngeneralized json converter for said objects. Good for writing the objects themselves but be aware that it will\nwrite any vectors/matrixes/images stored within the objects to disk so it may be a good idea to remove said\ndata before converting to json if you care about performance in your implementations.\n\"\"\"\nimport pdb\nfrom collections import defaultdict\nimport json\n\nfrom briar.briar_grpc import briar_pb2, briar_service_pb2, briar_error_pb2\n\n\n\n# Protobuf generated gRPC objects contain lots of fields which don't need to be saved - this is a blacklist\n# of the fields to ignore\nATTRIB_IGNORE = ['ByteSize', 'Clear', 'ClearExtension', 'ClearField', 'CopyFrom',\n 'DESCRIPTOR', 'DiscardUnknownFields', 'Extensions',\n 'FindInitializationErrors', 'FromString', 'HasExtension',\n 'HasField', 'IsInitialized', 'ListFields', 'MergeFrom',\n 'MergeFromString', 'ParseFromString', 'RegisterExtension',\n 'SerializePartialToString', 'SerializeToString',\n 'SetInParent','UnknownFields', 'WhichOneof',\n '_CheckCalledFromGeneratedFile', '_SetListener',\n '__deepcopy__', '__delattr__', '__dir__', '__doc_',\n '_extensions_by_name', '_extensions_by_number', 'EnumTypeWrapper']\n\n\ndef save(json_obj, save_path,options=None):\n \"\"\"!\n Save a list or dictionary containing protobuf classes to a json file\n\n @param json_obj list||dict: List or dict containing data to save\n @param save_path str: Path to the file to save\n\n Returns: None\n \"\"\"\n with open(save_path, 'w') as fp:\n fp.write(json.dumps(json_obj, default=GrpcEncoder(options).default,\n sort_keys=True, indent=4))\n\nclass GrpcEncoder(json.JSONEncoder):\n \"\"\"!\n Encoder class which extends the normal JSON encoder to allow the encoding of gRPC objects. Inherits from\n json.JSONEncoder\n \"\"\"\n def __init__(self,options=None):\n self.options = options\n\n def default(self, obj):\n \"\"\"!\n Json hook function to convert gRPC objects into a json-serializable object\n\n @param obj Any: General object to convert to a json-serializable object\n @return: Json-Serializable object\n \"\"\"\n # convert bytes to a byte-string\n if isinstance(obj, bytes):\n try:\n o2 = obj.decode(\"ISO-8859-1\")\n o = {\"__class__\":\"bytes\", \"__contents__\":o2}\n return o\n except Exception as e:\n print('got an exception',e)\n return {\"__class__\":\"bytes\", \"__contents__\":bytes()}\n # print(\"Exception\", e)\n # print(\"\\tObject\", obj)\n\n # convert Briar protobuf grpc objects to a dictionary\n if getattr(obj, '__module__', None) in (briar_pb2.__name__, briar_service_pb2.__name__,\n briar_error_pb2.__name__):\n return proto_obj_to_dict(obj,self.options)\n\n if \"Repeated\" in str(type(obj)) and \"Container\" in str(type(obj)):\n return {\"__class__\": str(type(obj)).split(\"'\")[1],\n \"__contents__\":[i for i in obj]}\n\n if \"MapContainer\" in str(type(obj)):\n return {\"__class__\": str(type(obj)).split(\"'\")[1],\n \"__contents__\": dict(obj.items())}\n\n if \"EnumTypeWrapper\" in str(type(obj)):\n return None\n\n return json.JSONEncoder.default(self, obj)\n\n\ndef proto_obj_to_dict(obj,options=None):\n \"\"\"!\n Takes a general gRPC/protobuf object, eliminates the unnecessary fields, and stores the data in a dict.\n\n Classes will be saved as dictionaries with a \"__class__\" attribute. 
This should be the full import path to\n the class within its module.\n\n @param obj: Any gRPC object generated by protobuf files\n @return: A dictionary representing the object\n \"\"\"\n d = defaultdict(list)\n for attrib in dir(obj):\n if hasattr(obj, attrib):\n # ignore a bunch of grpc fields which aren't needed. Ignore method names too\n if (not callable(getattr(obj, attrib))\n and (not attrib.startswith(\"__\"))\n and attrib not in ATTRIB_IGNORE):\n d[attrib] = getattr(obj, attrib)\n\n # split out a callable class name and save it as an attribute\n elif attrib == \"__class__\":\n split_class = str(getattr(obj, attrib)).split(\"'\")[1].split('.')\n # ignore backend protobuf classes\n if \"google\" in split_class \\\n or \"DescriptorMapping\" in split_class \\\n or \"DescriptorSequence\" in split_class \\\n or \"EnumTypeWrapper\" in split_class:\n continue\n\n d[attrib] = str(getattr(obj, attrib)).split()[-1].replace(\"'\", '').replace('>', '')\n return d\n\n\ndef load(load_path,options=None):\n \"\"\"!\n Load the json file at the given directory, reloading dictionaries with \"__class__\" fields into the specified\n objects and initializing them with values defined by key/value pairs within the dictionary\n\n @param load_path str: Path to the json file to load\n\n @return: The contents of the json file deserialized into the appropriate objects\n \"\"\"\n with open(load_path, \"r\") as f:\n return json.loads(''.join(f.readlines()), object_hook=GrpcDecoder(options).default)\n\n\nclass GrpcDecoder(json.JSONDecoder):\n \"\"\"!\n Object which extends the JSONDecoded to allow it to read saved gRPC files. Applied as a hook\n in the json load function. Inherits from json.JSONDecoder\n \"\"\"\n def __init__(self,options):\n self.options = options\n def default(self, obj):\n \"\"\"!\n Takes the given object and convert it into a gRPC object if its a dictionary\n @param obj object||dict: Dictionary which represents an object.\n\n @return: object\n \"\"\"\n if isinstance(obj, dict) and '__class__' in obj:\n cls = obj['__class__']\n import_path = obj['__class__'].split('.')\n module_name = '.'.join(import_path[:-1])\n class_name = import_path[-1]\n\n # convert byte arrays which were written as strings\n if cls == \"bytes\":\n try:\n return obj[\"__contents__\"].encode(\"ISO-8859-1\")\n except Exception as e:\n print('Exception: ', e)\n return bytes()\n\n # convert protobuf lists and dictionaries\n elif (\"Repeated\" in cls and \"Container\" in cls) or (\"MapContainer\" in cls):\n if (\"Repeated\" in cls and \"Container\" in cls):\n l = list()\n for list_item in obj['__contents__']:\n l.append(self.default(list_item))\n # return l\n else: # \"MapContainer\" in cls\n d = dict()\n for k, v in obj['__contents__'].items():\n d[k] = self.default(v)\n # return d\n return obj['__contents__']\n\n # convert protobuf/grpc objects\n elif module_name in (briar_pb2.__name__, briar_service_pb2.__name__):\n return dict_to_proto_obj(obj,self.options)\n\n return obj\n\n\ndef dict_to_proto_obj(obj_dict,options=None):\n \"\"\"!\n Take the object dictionary, read the dict which is saved in in the '__class__' key, and initialize it with\n values stored in the dictionary's key/value pairs\n\n @param obj_dict dict: A dictionary specifically containing a '__class__' key/value pair storing the full module\n path to the object.\n\n @return: A gRPC object defined by '__class__'\n \"\"\"\n cls_path = obj_dict['__class__'].split('.')\n module_name = cls_path[0]\n module = __import__(module_name)\n for sub_module in 
cls_path[1:]:\n module = getattr(module, sub_module)\n class_ref = module\n\n cls_instance = class_ref()\n for attrib, value in obj_dict.items():\n if attrib == '__class__':\n continue\n try:\n if not value == [] and not value == {}:\n setattr(cls_instance, attrib, value)\n except AttributeError:\n # some protobuf objects don't like direct assignment\n # try:\n cls_attrib = getattr(cls_instance, attrib)\n if \"MergeFrom\" in dir(cls_attrib):\n try:\n cls_attrib.MergeFrom(value)\n except:\n for k in value:\n if hasattr(cls_attrib,'get_or_create'):\n o = cls_attrib.get_or_create(k)\n o.CopyFrom(value[k])\n elif \"CopyFrom\" in dir(cls_attrib):\n cls_attrib.CopyFrom(value)\n # except Exception as e:\n # if True:# options is not None and options.verbose:\n # print('Warning: Could not write attribute ', attrib) #TODO: Fix durations and errors to correctly load through\n # print(e)\n return cls_instance\n","repo_name":"ORNL/briar-api","sub_path":"lib/python/briar/grpc_json.py","file_name":"grpc_json.py","file_ext":"py","file_size_in_byte":9320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43611171627","text":"from __future__ import absolute_import, print_function\n\nfrom datetime import datetime\n\nfrom invenio_records.api import Record\n\nfrom zenodo.modules.records.serializers import csl_v1\n\n\ndef test_minimal(db, minimal_record, recid_pid):\n \"\"\"Test minimal record.\"\"\"\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n d = datetime.utcnow().date()\n assert obj == {\n 'id': '123',\n 'DOI': '10.5072/zenodo.123',\n 'type': 'article',\n 'title': 'Test',\n 'abstract': 'My description',\n 'author': [\n {'family': 'Test'},\n ],\n 'issued': {\n 'date-parts': [[d.year, d.month, d.day]]\n }\n }\n\n\ndef test_full(db, full_record, recid_pid):\n \"\"\"Test minimal record.\"\"\"\n obj = csl_v1.transform_record(recid_pid, Record(full_record))\n assert obj == {\n \"publisher_place\": \"Staszkowka\",\n \"type\": \"book\",\n \"author\": [\n {\n \"given\": \"John\",\n \"family\": \"Doe\"\n },\n {\n \"given\": \"Jane\",\n \"family\": \"Doe\"\n },\n {\n \"given\": \"John\",\n \"family\": \"Smith\"\n },\n {\n \"given\": \"Jack\",\n \"family\": \"Nowak\"\n }\n ],\n \"title\": \"Test title\",\n \"ISBN\": \"978-0201633610\",\n \"issue\": \"2\",\n \"language\": \"eng\",\n \"volume\": \"20\",\n \"publisher\": \"Jol\",\n \"version\": \"1.2.5\",\n \"note\": \"notes\",\n \"issued\": {\n \"date-parts\": [[2014, 2, 27]]\n },\n \"abstract\": \"Test Description\",\n \"DOI\": \"10.1234/foo.bar\",\n \"page\": \"20\",\n \"container_title\": \"Bam\",\n \"id\": \"123\",\n \"ISSN\": \"0317-8471\",\n \"event\": \"The 13th Biennial HITRAN Conference (HITRAN13)\",\n \"event-place\": \"Harvard-Smithsonian Center for Astrophysics\",\n }\n\n\ndef test_type(db, minimal_record, recid_pid):\n \"\"\"\"Test type.\"\"\"\n minimal_record.update({\n 'resource_type': {'type': 'publication', 'subtype': 'thesis'}\n })\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['type'] == 'thesis'\n\n minimal_record.update({\n 'resource_type': {'type': 'publication'}\n })\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['type'] == 'article'\n\n minimal_record.update({\n 'resource_type': {'type': 'image'}\n })\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['type'] == 'graphic'\n\n\ndef test_author(db, minimal_record, recid_pid):\n \"\"\"\"Test author.\"\"\"\n minimal_record['creators'] = []\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['author'] == []\n\n minimal_record['creators'] = [\n {'familyname': 'TestFamily1', 'givennames': 'TestGiven1'},\n {'familyname': 'TestFamily2', 'name': 'TestName2'},\n {'name': 'TestName3'},\n ]\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['author'] == [\n {'family': 'TestFamily1', 'given': 'TestGiven1'},\n {'family': 'TestName2'},\n {'family': 'TestName3'},\n ]\n\n\ndef test_identifiers(db, minimal_record, recid_pid):\n \"\"\"\"Test identifiers.\"\"\"\n minimal_record['doi'] = '10.1234/foo'\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['DOI'] == '10.1234/foo'\n assert 'publisher' not in obj\n\n minimal_record['doi'] = '10.5281/foo'\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['DOI'] == '10.5281/foo'\n assert obj['publisher'] == 'Zenodo'\n\n minimal_record['imprint'] = {'isbn': '978-1604598933'}\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['ISBN'] == '978-1604598933'\n\n 
minimal_record['alternate_identifiers'] = [{\n 'identifier': 'ISSN 0264-2875',\n 'scheme': 'issn'\n }]\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['ISSN'] == 'ISSN 0264-2875'\n\n\ndef test_journal(db, minimal_record, recid_pid):\n \"\"\"Test journal record.\"\"\"\n minimal_record['journal'] = {\n 'volume': '42',\n 'issue': '7',\n 'title': 'Journal title',\n 'pages': '10-20',\n }\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['container_title'] == 'Journal title'\n assert obj['volume'] == '42'\n assert obj['issue'] == '7'\n assert obj['page'] == '10-20'\n\n\ndef test_part_of(db, minimal_record, recid_pid):\n \"\"\"Test journal record.\"\"\"\n minimal_record['part_of'] = {\n 'title': 'Conference proceedings title',\n 'pages': '10-20',\n }\n minimal_record['imprint'] = {\n 'publisher': 'The Good Publisher',\n 'place': 'Somewhere',\n }\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['container_title'] == 'Conference proceedings title'\n assert obj['page'] == '10-20'\n assert obj['publisher'] == 'The Good Publisher'\n assert obj['publisher_place'] == 'Somewhere'\n\n\ndef test_other(db, minimal_record, recid_pid):\n \"\"\"Test other fields.\"\"\"\n minimal_record['language'] = 'en'\n minimal_record['notes'] = 'Test note'\n minimal_record['imprint'] = {\n 'publisher': 'Zenodo',\n }\n obj = csl_v1.transform_record(recid_pid, Record(minimal_record))\n assert obj['language'] == 'en'\n assert obj['note'] == 'Test note'\n assert obj['publisher'] == 'Zenodo'\n","repo_name":"zenodo/zenodo","sub_path":"tests/unit/records/test_schemas_csl.py","file_name":"test_schemas_csl.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":847,"dataset":"github-code","pt":"47"}
+{"seq_id":"17047806212","text":"import numpy as np \r\nimport json \r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torchvision\r\nfrom torchvision import models,transforms\r\nimport cv2\r\n\r\nuse_pretrained = True \r\nnet = models.vgg16(use_pretrained)\r\nnet.eval()\r\n\r\nclass BaseTransform():\r\n\r\n def __init__(self,resize,mean,std):\r\n\r\n self.base_transform = transforms.Compose([ \r\n transforms.Resize(resize), \r\n transforms.CenterCrop(resize),\r\n transforms.ToTensor(), \r\n transforms.Normalize(mean,std) \r\n\r\n ])\r\n\r\n def __call__(self,img):\r\n\r\n return self.base_transform(img)\r\n\r\nresize = 224\r\nmean = (0.485,0.456,0.406)\r\nstd = (0.229,0.224,0.225)\r\n\r\nILSVRC_class_index = json.load(open('C:\\\\imagenet_class_index.json','r'))\r\nILSVRC_class_index\r\n\r\nclass ILSVRCPredictor():\r\n\r\n def __init__(self,class_index):\r\n\r\n self.class_index = class_index\r\n\r\n def predict_max(self,out):\r\n\r\n maxid = np.argmax(out.detach().numpy()) \r\n predicted_label_name = self.class_index[str(maxid)][1]\r\n return predicted_label_name\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 224) \r\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 224)\r\n\r\ntransform = BaseTransform(resize,mean,std)\r\npredictor = ILSVRCPredictor(ILSVRC_class_index)\r\nwhile (cap.isOpened()):\r\n ret,frame = cap.read()\r\n frame = Image.fromarray(frame)\r\n img_transformed = transform(frame) \r\n inputs = img_transformed.unsqueeze_(0) \r\n out =net(inputs)\r\n result = predictor.predict_max(out)\r\n\r\n print(\"入力画像の予測結果:\", result)\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"KonanSasaki/CNN_Practice","sub_path":"VGG_camera.py","file_name":"VGG_camera.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"21253462442","text":"## Idea is we will read all pixel files and identify each pixel active or not, map this data to even or odd then observe how wts alter\n## Lets do wt update on each training sample also we will see cost for each step \n\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef sigmoid(i):\n return 1 / (1 + np.exp(-i))\n\nn = 10\nn_train = 8\nn_test = 2\n\n# Reading pixel files and storing active pixels\nx = []\nfor i in range(0,n):\n file = str(i)+'.png'\n im = Image.open(file)\n pix = list(im.getdata())\n a = [int(i == (0,0,0)) for i in pix]\n x.append(a)\n\ntrackCost = []\nnp.random.seed(1)\nW = 2*np.random.rand(1,15)-1 #random wts\n\n# Altering W using backprop\nfor epoch in range(0,100):\n for i in range(0,n_train): # Loop till 7, we will use 8,9 to test \n sig = sigmoid(W.dot(x[i]))\n red = (2*(sig-(i%2))*sig*(1-sig))\n dw = red*x[i]\n W = W - dw\n cost_now = ( sigmoid(W.dot(np.array(x[i]))) - (i%2) )**2\n trackCost.append(cost_now)\n\nplt.plot(trackCost)\nplt.show()\n\nprint(\"========== TESTING ========\")\nprint(sigmoid(W.dot(x[8])))\nprint(sigmoid(W.dot(x[9])))","repo_name":"san7nu/Python_codes","sub_path":"Neural Nwks/pixelNNwk.py","file_name":"pixelNNwk.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"3867336140","text":"from __future__ import annotations\n\nimport abc\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nif TYPE_CHECKING: # pragma: no cover\n from .transactions import RootTransaction\n\nC = TypeVar(\"C\")\n\n\nclass Publish(Generic[C], metaclass=abc.ABCMeta):\n \"\"\"The Publish interface.\n\n This interface is used to define a publish process such as publishing an\n asset, animation cache, etc.\n\n The publish shouldn't need to define an :code:`__init__` method, since the\n context should contain all of the information needed to run the publish,\n and optionally the results of each publish stage. Each publish stage may\n transform the data within the context or session, then use the transactions\n to make the transformation permanent. Transactions could represent a\n filesystem update, publishing to a database, etc. Lastly, transactions can\n be rolled back if one of the publish stages fail.\n \"\"\"\n\n async def pre_publish(self, transaction: RootTransaction, context: C) -> C:\n \"\"\"Pre-publish stage.\n\n This stage should be used to prepare the main publish. For example,\n creating and unlocking the publish directory, preparing a publish\n database entry, etc.\n\n Args:\n transaction: The collections of transactions to run for the\n pre-publish stage.\n context: The context to use for the pre-publish stage.\n\n Returns:\n The context and results of the pre-publish stage.\n \"\"\"\n return context\n\n @abc.abstractmethod\n async def publish(self, transaction: RootTransaction, context: C) -> C:\n \"\"\"Publish stage.\n\n This stage should be used for the main publish work. For example,\n generating caches, transforming rigs into an optimized version, etc.\n Then, the publish stage should use the transactions to make the changes\n permanent.\n\n Args:\n transaction: The collections of transactions to run for the\n publish stage.\n context: The context to use for the publish stage.\n\n Returns:\n The context and results of the publish stage.\n \"\"\"\n ... # pragma: no cover\n\n async def post_publish(self, transaction: RootTransaction, context: C) -> C:\n \"\"\"Post-publish stage.\n\n This stage should be used to finalize the publish. For example,\n generating a metadata file that contains data about the files in the\n publish such as a checksum, stats, etc. Or, finalizing the publish\n database entry.\n\n Args:\n transaction: The collections of transactions to run for the\n post-publish stage.\n context: The context to use for the post-publish stage.\n\n Returns:\n The context and results of the post-publish stage.\n \"\"\"\n return context\n","repo_name":"scott-wilson/publish","sub_path":"bindings/python/pypublish/_publish.py","file_name":"_publish.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"72862043662","text":"from django.urls import path\nfrom django.views.generic import RedirectView\nfrom main_app.views import (\n MockGunMessageViewset,\n MockGunEmailValidateView,\n DataImportExportView,\n TemplatesView,\n TemplateVersionsView,\n MockGunDomainViewset,\n)\n\n\nurlpatterns = [\n path(\"v3/domains\", MockGunDomainViewset.as_view({\"get\": \"list\"})),\n path(\n \"v3/domains/\",\n MockGunDomainViewset.as_view({\"get\": \"get\"}),\n ),\n path(\"v3//messages\", MockGunMessageViewset.as_view({\"post\": \"create\"})),\n path(\n \"v3//templates\",\n TemplatesView.as_view({\"post\": \"create\", \"get\": \"list\"}),\n ),\n path(\n \"v3//templates//versions/\",\n TemplateVersionsView.as_view({\"get\": \"get\"}),\n ),\n path(\n \"v3//templates//versions\",\n TemplateVersionsView.as_view({\"get\": \"list\"}),\n ),\n path(\n \"v3//templates/\",\n TemplatesView.as_view({\"get\": \"get\"}),\n ),\n path(\n \"v4/address/validate\",\n MockGunEmailValidateView.as_view({\"post\": \"create\", \"get\": \"create\"}),\n ),\n path(\"data\", DataImportExportView.as_view({\"post\": \"create\", \"get\": \"get\"})),\n path(\n \"\", RedirectView.as_view(url=\"/admin/main_app/mockgunmessage/\", permanent=False)\n ),\n]\n","repo_name":"dmarkey/mock-gun","sub_path":"src/main_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"28830221350","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport pymzn\nimport csv\nimport csv\nwith open('eggs.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(['Spam'] * 1 + ['Baked Beans'])\n spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])\n\nq = pymzn.Gurobi()\nr = pymzn.Gecode()\n#t = pymzn.\nw = \"Results.csv\"\nfor i in range(0,1):\n\n h = \"/Users/ranjith/Desktop/snmmatching/pyzinc/SNM_Instances/r5_10_10_2/RealInstance\"\n g = \"/Users/ranjith/Desktop/snmmatching/Instances/3_seekers_and_3_donors_Output/OutputGecode\"\n z = \"/Users/ranjith/Desktop/snmmatching/Instances/3_seekers_and_3_donors_Output/OutputMIP\"\n a = \"/Users/ranjith/Desktop/snmmatching/pyzinc/Magic.ozn\"\n h = h + str(i) + \".dzn\"\n g = g + str(i) + \".dzn\"\n z = z + str(i) + \".dzn\"\n\n try:\n # CP_EF\n s = pymzn.minizinc('CP_EF2.mzn', h, solver= r, keep = True, timeout = 10, statistics = True)\n t = pymzn.minizinc('MIP_EF.mzn', h, solver= q, keep = True, timeout = 10, statistics = True)\n #print(\"CP_EF: \",10 - s['s'], i)\n #s = pymzn.dict2dzn(s, fout=g)\n print (s)\n print (t)\n #break\n\n except pymzn.MiniZincUnsatisfiableError:\n print(\"Instance\", i, \"is unsatisfiable on CP_EF\")\n '''\n try:\n # MIP_EF\n w = pymzn.minizinc('MIP_EF.mzn', h, solver= q, timeout=1)[0]\n #e = pymzn.dzn2dict(w, rebase_arrays=False)\n print(\"MIP_EF: \", w['s'], i)\n #break\n\n except pymzn.MiniZincUnsatisfiableError:\n print(\"Instance\", i, \"is unsatisfiable on MIP_EF\")\n\n\n # CP_MaxCard\n try:\n # CP_MaxCard\n s = pymzn.minizinc('CP_MaxCard.mzn', h, solver=r, timeout=1)[0]\n # s = pymzn.dict2dzn(s, fout=g)\n print(\"CP_MaxCard: \",10 - s['s'], i)\n # break\n\n except pymzn.MiniZincUnsatisfiableError:\n print(\"Instance\", i, \"is unsatisfiable on CP_MaxCard\")\n\n # MIP_MaxCard\n try:\n # MIP_EF\n w = pymzn.minizinc('MIP_MaxCard.mzn', h, solver=q, timeout=1)[0]\n # e = pymzn.dzn2dict(w, rebase_arrays=False)\n print(\"MIP_EF: \", w['s'], i)\n # break\n\n except pymzn.MiniZincUnsatisfiableError:\n print(\"Instance\", i, \"is unsatisfiable on MIP_MaxCard\")\n '''\n #x = pymzn.solns2out(s, a)\n # log time start\n\n # log time stop\nprint (\"Cheers! \")\n","repo_name":"ranjithkj/SNM-allocation-problem","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"24463956022","text":"from __future__ import annotations\n\nimport discord\nfrom discord.ext import commands\nfrom bot import Curator\nimport asyncpg\nfrom typing import Optional, List, Union\nimport asyncio\nimport emoji as amoji\nfrom datetime import datetime\n\nfrom .utils import db\nfrom .utils import selecting\nfrom.utils.formats import human_date, human_join\n\nfrom .utils.checks import is_bot_admin, owner_or_guild_permissions\n\n\nclass Rolemenus(db.Table):\n guild = db.Column(db.Integer(big=True)) # Server the menu is in (ID)\n message = db.Column(db.Array(sql_type=db.Integer(big=True)), primary_key=True) # ID of the channel and of the message\n description = db.Column(db.String, default='') # Description at the top of the menu message\n roles = db.Column(db.Array(sql_type=db.Integer(big=True)), default='{}') # The roles to choose from (list of IDs)\n emojis = db.Column(db.Array(sql_type=db.String), default='{}') # The emojis to react with (list of IDs or unicode emojis in string format)\n role_descs = db.Column(db.Array(sql_type=db.String), default='{}') # Short descriptions of all the roles to choose from\n allow_multiple = db.Column(db.Boolean, default=True) # Specifies if you can select multiple roles from the list (true/false)\n created_by = db.Column(db.Integer(big=True)) # User who created the menu (ID)\n created_at = db.Column(db.Datetime, default=\"now() at time zone 'utc'\") # The moment the menu was created\n last_edited_by = db.Column(db.Integer(big=True)) # User who last edited the menu (ID)\n last_edited_at = db.Column(db.Datetime) # The last moment the menu was edited\n enabled = db.Column(db.Boolean, default=True) # Specifies if the menu is enabled/disabled on purpose\n\n\nmenus = {}\n\n\ndef make_message(description, roles, emojis, role_descs, status=True) -> str:\n status_text = '***This menu is currently out of service***\\n\\n' if not status else ''\n message = status_text + description\n for i in range(len(roles)):\n message += f'\\n\\n{emojis[i]}: **{roles[i].name}**'\n if role_descs[i]:\n message += f'\\n{role_descs[i]}'\n return message\n\n\nasync def get_menu_from_link(ctx, url) -> Optional[SelectionMenu]:\n IDs = url.split('/')[-3:]\n IDs[0] = int(IDs[0])\n if IDs[0] not in menus.keys() or (int(IDs[0]) != ctx.guild.id and ctx.author.id not in ctx.bot.admins):\n await ctx.send('Please provide a valid message URL of a menu on this server.')\n return None\n if IDs[1]+','+IDs[2] not in menus[IDs[0]].keys():\n await ctx.send('Please provide a valid message URL of a menu on this server.')\n return None\n return menus[IDs[0]][str(IDs[1])+','+str(IDs[2])]\n\n\nclass SelectionMenu:\n def __init__(self, bot, message, description, roles, emojis, role_descs, allow_multiple, created_by, created_at, last_edited_by=None, last_edited_at=None, status=True, issues=None):\n self.bot: Curator = bot\n self.message: discord.Message = message\n self.description: str = description\n self.roles: List[discord.Role] = roles\n self.emojis: List[Union[discord.Emoji, str]] = emojis\n self.role_descs: List[str] = role_descs # description text per role\n self.allow_multiple: bool = allow_multiple\n self.created_by: discord.User = created_by\n self.created_at: datetime = created_at\n self.last_edited_by: Optional[discord.User] = last_edited_by\n self.last_edited_at: Optional[datetime] = last_edited_at\n self.status: bool = status\n self.issues: List[List[str, Optional[int]]] = issues or []\n self.ignore_next_removal = False # Used to ignore a reaction being deleted when it is 
the bot that deletes it\n\n async def reaction_received(self, emoji: Union[discord.Emoji, discord.PartialEmoji, str], member: discord.Member):\n if emoji not in self.emojis and emoji is not None:\n self.ignore_next_removal = True\n await self.message.remove_reaction(emoji, member)\n return\n\n if not self.status:\n self.ignore_next_removal = True\n if emoji: await self.message.remove_reaction(emoji, member)\n return await member.send(f'**{self.message.guild}:** the menu you tried to use is currently out of service, sorry for the inconvenience.')\n\n role = self.roles[self.emojis.index(emoji)]\n if role in member.roles:\n return await member.send(f'**{self.message.guild}:** you already have the **{role}** role.')\n if not self.allow_multiple:\n for r in self.roles:\n if r in member.roles:\n self.ignore_next_removal = True\n await self.message.remove_reaction(emoji, member)\n return await member.send(f'**{self.message.guild}:** you already have a role from this'\n f' menu, you must first remove **{r}** if you want **{role}**.')\n\n try:\n await member.add_roles(role, reason=f'Selected role ({self.message.jump_url})')\n await member.send(f'**{self.message.guild}:** gave you the **{role}** role.')\n logchannel = self.bot.server_configs[self.message.guild.id].logchannel\n if logchannel:\n await logchannel.send(f'{member} selected {role} in a menu')\n except discord.Forbidden:\n self.ignore_next_removal = True\n await self.message.remove_reaction(emoji, member)\n guild_owner = self.message.guild.owner\n await guild_owner.send(f'**{self.message.guild}:** I do not have the required permissions to give'\n f' **{member}** the **{role}** role on your server. I need \"Manage Roles\"'\n f' permissions and my highest role needs to be higher than the roles you'\n f' want me to add/remove.')\n await member.send(f'**{self.message.guild}:** I don\\'t have permission to give you the **{role}**'\n f' role. I have contacted the server owner about this.')\n\n async def reaction_removed(self, emoji: Union[discord.Emoji, discord.PartialEmoji, str], member: discord.Member):\n if self.ignore_next_removal:\n self.ignore_next_removal = False\n return\n\n if emoji not in self.emojis or emoji is None:\n self.issues.append(['emoji', emoji])\n await self.disable()\n return await member.send(f'**{self.message.guild}:** an issue was found with the role menu. It is likely that the reaction you just removed is not present on the server anymore. If this is the case, please contact the staff of the server and ask them to change the emoji.')\n\n if not self.status:\n await self.message.remove_reaction(emoji, member)\n return await member.send(f'**{self.message.guild}:** the menu you tried to use is currently out of service, sorry for the inconvenience.')\n\n role = self.roles[self.emojis.index(emoji)]\n if role not in member.roles:\n return await member.send(f'**{self.message.guild}:** you do not have the **{role}** role so I couldn\\'t remove it.')\n\n try:\n await member.remove_roles(role, reason=f'Removed role ({self.message.jump_url})')\n await member.send(f'**{self.message.guild}:** removed the **{role}** role.')\n logchannel = self.bot.server_configs[self.message.guild.id].logchannel\n if logchannel:\n await logchannel.send(f'{member} deselected {role} in a menu')\n except discord.Forbidden:\n guild_owner = self.message.guild.owner\n await guild_owner.send(f'**{self.message.guild}:** I do not have the required permissions to'\n f' remove the **{role}** role from **{member}** on your server. 
I need'\n f' \"Manage Roles\" permissions and my highest role needs to be higher than'\n f' the roles you want me to add/remove.')\n await member.send(f'**{self.message.guild}:** I don\\'t have permission to remove the **{role}**'\n f' role from you. I have contacted the server owner about this.')\n\n async def disable(self):\n self.status = False\n query = 'UPDATE rolemenus SET enabled = False WHERE message = $1;'\n await self.bot.pool.fetchval(query, [self.message.channel.id, self.message.id])\n await self.message.edit(content='***This menu is currently out of service***\\n\\n' + self.message.content)\n\n async def enable(self):\n pass\n\n\nclass RoleSelector(commands.Cog):\n def __init__(self, bot: Curator):\n self.bot = bot\n self._task = bot.loop.create_task(self.get_menus())\n\n @commands.group(name='roleselector', aliases=['rselector', 'rolesel', 'rsel', 'rs', 'rolemenu', 'rmenu', 'rm'])\n async def role_selector(self, ctx: commands.Context):\n \"\"\"All commands revolving around the role selection menus.\"\"\"\n if ctx.guild.id not in menus.keys():\n menus[ctx.guild.id] = {}\n\n if not ctx.invoked_subcommand:\n await ctx.send(f'Use `{ctx.prefix}help roleselector` for the possible commands.')\n\n @role_selector.group(name='make', aliases=['create', 'new'], invoke_without_command=True)\n @owner_or_guild_permissions(manage_roles=True)\n async def make_rsel(self, ctx: commands.Context, channel: discord.TextChannel,\n *roles_and_emojis: Union[discord.Role, discord.Emoji, str], allow_multiple=True):\n \"\"\"Create a role selection menu!\n\n Provide the channel for the menu.\n More instructions coming soon.\n\n Use \"roleselector make unique\" instead to restrict members to choosing only one role from the list.\n \"\"\"\n if len(roles_and_emojis) % 2 == 1:\n return await ctx.send(f'Provide a valid list of arguments (`{ctx.prefix}help roleselector make` for info on'\n f' how to use this command)')\n\n roles = []\n emojis = []\n i = 0\n for item in roles_and_emojis:\n if i % 2 == 0: # Item should be a role\n if type(item) == discord.Role:\n if item in roles:\n return await ctx.send('You have put in the same role more than once.')\n elif item >= ctx.author.top_role:\n return await ctx.send(f'**{item}** is (higher than) your highest role so you cannot make it available.')\n else:\n roles.append(item)\n else:\n return await ctx.send(f'`{item}` is invalid.')\n else: # Item should be an emoji\n if type(item) == discord.Emoji or item in amoji.unicode_codes.EMOJI_UNICODE_ENGLISH.values():\n if type(item) == discord.Emoji and item not in ctx.guild.emojis:\n return await ctx.send('You can only use custom emojis from this server.')\n if item in emojis:\n return await ctx.send('You can only use an emoji once.')\n else:\n emojis.append(item)\n else:\n return await ctx.send(f'`{item}` is invalid.')\n i += 1\n\n def check1(message1):\n return message1.author == ctx.author and message1.channel == ctx.channel\n\n await ctx.send('Give the description of the selection menu (times out in 5 minutes).')\n try:\n description = await self.bot.wait_for('message', check=check1, timeout=300)\n except asyncio.TimeoutError:\n return await ctx.send('Timed out')\n description = description.content\n\n \"\"\"def check2(reaction2, user2):\n return user2 == ctx.author and reaction2.message == msg2 and (reaction2.emoji == '✅' or reaction2.emoji == '❌')\n\n msg2 = await ctx.send('Are people allowed to select multiple roles from the list? 
'\n 'React with :white_check_mark: or :x: (times out in 1 minute).')\n await msg2.add_reaction('✅')\n await msg2.add_reaction('❌')\n try:\n r2, u2 = await self.bot.wait_for('reaction_add', check=check2, timeout=60)\n except asyncio.TimeoutError:\n return await ctx.send('Timed out')\n allow_multiple = True if r2.emoji == '✅' else False\"\"\" # Obsoleted due to using a sub-command\n\n def check3_reply(message3):\n if message3.author == ctx.author and message3.channel == ctx.channel:\n nonlocal event\n event = 'message'\n return True\n else:\n return False\n\n def check3_react(reaction3, user3):\n if reaction3.message == msg3 and user3 == ctx.author and reaction3.emoji == '❌':\n nonlocal event\n event = 'reaction'\n return True\n else:\n return False\n\n role_descs = []\n for role in roles:\n event = None\n msg3 = await ctx.send(f'Give a (short) description for the **{role.name}** role. '\n f'React to this message with :x: if you do not wish to add a description. '\n f'Times out in 2 minutes.')\n await msg3.add_reaction('❌')\n pending_tasks = [self.bot.wait_for('message', check=check3_reply),\n self.bot.wait_for('reaction_add', check=check3_react)]\n done_tasks, pending_tasks = await asyncio.wait(pending_tasks, timeout=120, return_when=asyncio.FIRST_COMPLETED)\n for task in pending_tasks: # The event(s) that didn't trigger (both in case of a timeout)\n task.cancel()\n if not event: # timeout on asyncio.wait() does not raise an error, it returns all unfinished tasks in the pending_tasks set\n return await ctx.send('Timed out.')\n for task in done_tasks: # The thing that happened, can only be one. In an iterable because done_tasks is a set\n if event == 'message': # User replied with a description\n role_description = await task\n role_descs.append(role_description.content)\n else: # User reacted with ❌\n role_descs.append(None)\n\n menu_message = await channel.send(make_message(description, roles, emojis, role_descs))\n for emoji in emojis:\n await menu_message.add_reaction(emoji)\n created_at = datetime.utcnow()\n\n query = 'INSERT INTO rolemenus VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NULL, NULL, True);'\n await self.bot.pool.fetchval(query, ctx.guild.id, [channel.id, menu_message.id], description, [role.id for role in roles],\n [str(emoji.id) if type(emoji) != str else emoji for emoji in emojis], role_descs,\n allow_multiple, ctx.author.id, created_at)\n\n await ctx.send(f'Here it is: {menu_message.jump_url}')\n menus[channel.guild.id][str(channel.id)+','+str(menu_message.id)] = SelectionMenu(self.bot, menu_message, description,\n roles, emojis, role_descs,\n allow_multiple, ctx.author, created_at)\n\n @make_rsel.command(name='unique', aliases=['singular', 'limited'])\n @owner_or_guild_permissions(manage_roles=True)\n async def make_rsel_unique(self, ctx: commands.Context, channel: discord.TextChannel,\n *roles_and_emojis: Union[discord.Role, discord.Emoji, discord.PartialEmoji, str]):\n \"\"\"Create a role selection menu from which only one role can be selected.\"\"\"\n await ctx.invoke(self.make_rsel, channel, *roles_and_emojis, allow_multiple=False)\n\n @role_selector.command()\n @owner_or_guild_permissions(manage_roles=True)\n async def enable(self, ctx: commands.Context, menu_url):\n \"\"\"Enable the role menu.\"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if menu.status:\n return await ctx.send('This menu is already enabled:thinking:')\n\n if menu.issues:\n issue_list = '\\n'.join([f'Couldn\\'t find **{issue[0]}** with ID **{issue[1]}**' for 
issue in menu.issues])\n return await ctx.send(f'You cannot do this because the menu is broken.\\n_{issue_list}_\\nUse `changeemoji` and/or `removerole` to fix this.')\n\n menu.status = True\n query = 'UPDATE rolemenus SET enabled = True WHERE message = $1;'\n await self.bot.pool.fetchval(query, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=menu.message.content.split('\\n', 2)[2:][0])\n await ctx.send('Successfully enabled.')\n\n @role_selector.command()\n @owner_or_guild_permissions(manage_roles=True)\n async def disable(self, ctx: commands.Context, menu_url):\n \"\"\"Disable the role menu.\"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if not menu.status:\n return await ctx.send('This menu is already inactive:thinking:')\n\n await menu.disable()\n await ctx.send('Successfully disabled.')\n\n @role_selector.command()\n @owner_or_guild_permissions(manage_roles=True)\n async def addrole(self, ctx: commands.Context, menu_url, role: discord.Role, emoji: Union[discord.Emoji, discord.PartialEmoji, str], *, description: Optional[str]):\n \"\"\"Add a role to the menu.\"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if role in menu.roles:\n return await ctx.send('This role is already in this menu:thinking:')\n\n if role >= ctx.author.top_role:\n return await ctx.send('This role is (higher than) your highest role so you cannot make it available.')\n\n if type(emoji) == str and emoji not in amoji.unicode_codes.EMOJI_UNICODE_ENGLISH.values():\n return await ctx.send('Please give me a valid emoji.')\n\n menu.roles.append(role)\n menu.emojis.append(emoji)\n menu.role_descs.append(description)\n query = 'UPDATE rolemenus SET roles = array_append(roles, $1), emojis = array_append(emojis, $2), role_descs = array_append(role_descs, $3) WHERE message = $4;'\n await self.bot.pool.fetchval(query, role.id, str(emoji.id) if type(emoji) != str else emoji, description, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=menu.message.content+f'\\n\\n{emoji}: **{role.name}**'+(f'\\n{description}' if description else ''))\n await menu.message.add_reaction(emoji)\n await self.update_last_edit(menu, ctx.author)\n await ctx.send(f'Successfully added **{role}**.')\n\n @role_selector.command()\n @owner_or_guild_permissions(manage_roles=True)\n async def removerole(self, ctx: commands.Context, menu_url, role: Union[discord.Role, int]):\n \"\"\"Remove a role from the menu.\"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if type(role) == int:\n if role - 1 not in list(range(len(menu.roles))):\n return await ctx.send(f'Provide a role or one of these numbers: {human_join([str(n) for n in range(1, len(menu.roles) + 1)])}.')\n role = menu.roles[role - 1]\n\n if role >= ctx.author.top_role:\n return await ctx.send('That role is (higher than) your highest role so you cannot remove it from the menu.')\n\n if role not in menu.roles:\n return await ctx.send('That role is already not in this menu:thinking:')\n\n index = menu.roles.index(role)\n menu.roles.pop(index)\n emoji = menu.emojis.pop(index)\n role_desc = menu.role_descs.pop(index)\n query = 'UPDATE rolemenus SET roles = array_remove(roles, $1), emojis = array_remove(emojis, $2), role_descs = array_remove(role_descs, $3) WHERE message = $4;'\n await self.bot.pool.fetchval(query, role.id, str(emoji.id) if type(emoji) != str else emoji, role_desc, [menu.message.channel.id, menu.message.id])\n await 
menu.message.edit(content=make_message(menu.description, menu.roles, menu.emojis, menu.role_descs, status=menu.status))\n await menu.message.clear_reaction(emoji)\n await self.update_last_edit(menu, ctx.author)\n await ctx.send(f'Successfully removed **{role}**.')\n\n @role_selector.command(aliases=['changemoji', 'editemoji', 'newemoji'])\n @owner_or_guild_permissions(manage_roles=True)\n async def changeemoji(self, ctx: commands.Context, menu_url, role: discord.Role, emoji: Union[discord.Emoji, discord.PartialEmoji, str]):\n \"\"\"Change the emoji for a role.\n\n Requires permission to manage messages in the channel of the menu\n \"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if role not in menu.roles:\n return await ctx.send('No such role exists in the menu you provided.')\n\n if emoji == menu.emojis[menu.roles.index(role)]:\n return await ctx.send('That role is already using that emoji:thinking:')\n if emoji in menu.emojis:\n return await ctx.send('This emoji is already used in this menu. You can only use an emoji once.')\n\n if type(emoji) == str and emoji not in amoji.unicode_codes.EMOJI_UNICODE_ENGLISH.values():\n return await ctx.send(f'Please give me a valid emoji.')\n\n old_emoji = menu.emojis[menu.roles.index(role)]\n menu.emojis[menu.roles.index(role)] = emoji\n query = 'UPDATE rolemenus SET emojis = array_replace(emojis, $1, $2) WHERE message = $3;'\n await self.bot.pool.fetchval(query, str(old_emoji.id) if type(old_emoji) != str else old_emoji, str(emoji.id) if type(emoji) != str else emoji, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=make_message(menu.description, menu.roles, menu.emojis, menu.role_descs, status=menu.status))\n await menu.message.clear_reactions()\n for e in menu.emojis:\n await menu.message.add_reaction(e)\n await self.update_last_edit(menu, ctx.author)\n await ctx.send('Successfully changed the emoji.')\n\n @role_selector.command(aliases=['changeroledescription', 'changerdesc', 'editroledesc', 'roledescription', 'roledesc'])\n @owner_or_guild_permissions(manage_roles=True)\n async def changeroledesc(self, ctx: commands.Context, menu_url, role: discord.Role, *, description: Optional[str]):\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if role not in menu.roles:\n return await ctx.send('This role is not in this menu.')\n\n if description == menu.role_descs[menu.roles.index(role)]:\n return await ctx.send('This is already the description for this role:thinking:')\n\n menu.role_descs[menu.roles.index(role)] = description\n query = 'UPDATE rolemenus SET role_descs = $1 WHERE message = $2;' # This query is not using the PostgreSQL array_replace() function because role descriptions do not have to be unique\n await self.bot.pool.fetchval(query, menu.role_descs, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=make_message(menu.description, menu.roles, menu.emojis, menu.role_descs, status=menu.status))\n await self.update_last_edit(menu, ctx.author)\n await ctx.send(f'Successfully {\"changed\" if description else \"removed\"} role description.')\n\n @role_selector.command(aliases=['description'])\n @owner_or_guild_permissions(manage_roles=True)\n async def changedescription(self, ctx: commands.Context, menu_url, *, description):\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if description == menu.description:\n return await ctx.send('But that is already the description:thinking:')\n\n menu.description = 
description\n query = 'UPDATE rolemenus SET description = $1 WHERE message = $2;'\n await self.bot.pool.fetchval(query, description, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=make_message(description, menu.roles, menu.emojis, menu.role_descs, status=menu.status))\n await self.update_last_edit(menu, ctx.author)\n await ctx.send('Successfully changed description.')\n\n @role_selector.command(aliases=['order'])\n @owner_or_guild_permissions(manage_roles=True)\n async def reorder(self, ctx: commands.Context, menu_url, *order: int):\n \"\"\"Re-order the items in the list.\n\n Example for order argument: 1 4 3 2 (swap the second and fourth role in the list)\n \"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n sorted_order = list(order).copy()\n sorted_order.sort()\n if list(range(1, len(menu.roles) + 1)) != sorted_order:\n return await ctx.send(f'Please provide the numbers {human_join([str(n) for n in range(1, len(menu.roles) + 1)], final=\"and\")}, each once.')\n\n menu.roles = [menu.roles[index - 1] for index in order]\n menu.emojis = [menu.emojis[index - 1] for index in order]\n menu.role_descs = [menu.role_descs[index - 1] for index in order]\n query = 'UPDATE rolemenus SET roles = $1, emojis = $2, role_descs = $3 WHERE message = $4;'\n await self.bot.pool.fetchval(query, [role.id for role in menu.roles], [str(emoji.id) if type(emoji) != str else emoji for emoji in menu.emojis], menu.role_descs, [menu.message.channel.id, menu.message.id])\n await menu.message.edit(content=make_message(menu.description, menu.roles, menu.emojis, menu.role_descs, status=menu.status))\n await menu.message.clear_reactions()\n for emoji in menu.emojis:\n await menu.message.add_reaction(emoji)\n await self.update_last_edit(menu, ctx.author)\n await ctx.send('Successfully re-ordered.')\n\n @role_selector.command(aliases=['transport'])\n @owner_or_guild_permissions(manage_channels=True)\n async def move(self, ctx: commands.Context, menu_url, channel: discord.TextChannel):\n \"\"\"Move a selection menu to another channel.\n\n Provide the URL of the message that contains the menu.\n \"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n if channel == menu.message.channel:\n return await ctx.send('The menu is already in that channel:thinking:')\n\n if channel.guild != menu.message.guild:\n return await ctx.send('You have to move it to a channel in the same server.')\n\n new_message = await channel.send(menu.message.content)\n for emoji in menu.emojis:\n await new_message.add_reaction(emoji)\n query = 'UPDATE rolemenus SET message = $1 WHERE message = $2;'\n await self.bot.pool.fetchval(query, [channel.id, new_message.id], [menu.message.channel.id, menu.message.id])\n menus[channel.guild.id][str(channel.id)+','+str(new_message.id)] = menus[menu.message.guild.id].pop(str(menu.message.channel.id)+','+str(menu.message.id))\n await menu.message.delete()\n for emoji in menu.emojis:\n await new_message.add_reaction(emoji)\n menu.message = new_message\n await ctx.send(f'Here it is: {menu.message.jump_url}')\n\n @role_selector.command(aliases=['remove'])\n @owner_or_guild_permissions(manage_roles=True)\n async def delete(self, ctx: commands.Context, menu_url):\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n for role in menu.roles:\n if role >= ctx.author.top_role:\n return await ctx.send(f'**{role}** is (higher than) your highest role so you cannot remove a menu containing it.')\n\n prompt_text = 'Are 
you sure you want to completely delete this role selection menu? This action cannot be undone.'\n confirm = await ctx.prompt(prompt_text, reacquire=False)\n if not confirm:\n return await ctx.send('Timed out.')\n\n query = 'DELETE FROM rolemenus WHERE message = $1;'\n await self.bot.pool.fetchval(query, [menu.message.channel.id, menu.message.id])\n await menu.message.delete()\n del (menus[menu.message.guild.id][str(menu.message.channel.id)+','+str(menu.message.id)])\n await ctx.send('Successfully deleted the menu.')\n\n async def update_last_edit(self, menu: SelectionMenu, user):\n moment = datetime.utcnow()\n query = 'UPDATE rolemenus SET last_edited_by = $1, last_edited_at = $2 WHERE message = $3;'\n await self.bot.pool.fetchval(query, user.id, moment, [menu.message.channel.id, menu.message.id])\n menu.last_edited_by = user\n menu.last_edited_at = moment\n\n @role_selector.command()\n async def info(self, ctx: commands.Context, menu_url):\n \"\"\"Get all the information there is about a role menu.\n\n Provide the URL of the message that contains the menu.\n \"\"\"\n menu = await get_menu_from_link(ctx, menu_url)\n if not menu:\n return\n\n embed = discord.Embed(title='Selector Information', description=f'{\"Multiple\" if menu.allow_multiple else \"Single\"}-choice menu\\nCreated by {menu.created_by} at {human_date(menu.created_at)}{f\" and last edited by {menu.last_edited_by} at {human_date(menu.last_edited_at)}\" if menu.last_edited_by else \"\"}\\nThe menu is [here]({menu.message.jump_url})')\n embed.add_field(name='Menu description', value=menu.description, inline=False)\n for i in range(len(menu.roles)):\n embed.add_field(name=(str(menu.emojis[i]) if menu.emojis[i] else '*Emoji not found*'), value=f'{f\"**{menu.roles[i].name}**\" if menu.roles[i] else \"*Role not found*\"}\\n{menu.role_descs[i] if menu.role_descs[i] else \"No description\"}')\n if menu.issues:\n embed.add_field(name=f'Issue{\"s\" if len(menu.issues) > 1 else \"\"}', value='\\n'.join([f'Couldn\\'t find **{issue[0]}** with ID **{issue[1]}**' for issue in menu.issues]), inline=False)\n embed.set_footer(icon_url=('https://www.iconsdb.com/icons/preview/lime/square-xxl.png' if menu.status else 'https://www.iconsdb.com/icons/preview/red/square-xxl.png'), text=f'Status: {\"in\" if menu.status else \"out of\"} service')\n\n await ctx.send(embed=embed)\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload: discord.RawReactionActionEvent):\n emoji, message, member = await self.info_from_payload(payload)\n if member == self.bot.user:\n return\n\n if message.guild.id in menus.keys():\n if str(message.channel.id)+','+str(message.id) in menus[message.guild.id].keys():\n await menus[message.guild.id][str(message.channel.id)+','+str(message.id)].reaction_received(emoji, member)\n\n @commands.Cog.listener()\n async def on_raw_reaction_remove(self, payload: discord.RawReactionActionEvent):\n emoji, message, member = await self.info_from_payload(payload)\n if message.guild.id in menus.keys():\n if str(message.channel.id)+','+str(message.id) in menus[message.guild.id].keys():\n await menus[message.guild.id][str(message.channel.id)+','+str(message.id)].reaction_removed(emoji, member)\n\n async def info_from_payload(self, payload: discord.RawReactionActionEvent):\n guild: discord.Guild = self.bot.get_guild(payload.guild_id)\n channel: discord.TextChannel = guild.get_channel(payload.channel_id)\n message: discord.Message = await channel.fetch_message(payload.message_id)\n emoji: discord.PartialEmoji = payload.emoji\n if 
emoji.is_custom_emoji():\n try:\n emoji: discord.Emoji = await guild.fetch_emoji(emoji.id)\n except Exception as e:\n #print('Exception received from the emoji thing')\n #print(emoji)\n #raise e\n emoji: None = None\n\n else:\n emoji: str = emoji.name\n member = guild.get_member(payload.user_id)\n return emoji, message, member\n\n async def get_menus(self):\n \"\"\"Get menus from database.\"\"\"\n if not self.bot.is_ready():\n await self.bot.wait_for('ready')\n\n try:\n menus.clear()\n rows = await self.bot.pool.fetch('SELECT * FROM rolemenus')\n for menu in rows:\n guild = self.bot.get_guild(menu['guild'])\n if not guild:\n print(f'Guild with id {menu[\"guild\"]} not found. You can use the `unguild` command to clear '\n f'everything from this guild from the database.')\n continue\n\n channel = guild.get_channel(menu['message'][0])\n if not channel:\n print(f'Channel with id {menu[\"message\"][0]} not found in guild \"{guild}\" ({guild.id}). Contact the '\n f'guild owner ({guild.owner}) to check if I still have permission to this channel, or you can '\n f'use the `unchannel` command to clear everything from this channel from the database.')\n continue\n\n try:\n menu_message = await channel.fetch_message(menu['message'][1])\n except:\n print(f'Couldn\\'t find a role menu message (https://discordapp.com/channels/{guild.id}/{channel.id}/'\n f'{menu[\"message\"][1]}) in guild \"{guild}\" in channel \"{channel}\". Contact the guild owner '\n f'({guild.owner}) to check if this message was removed. You can remove this menu from the database with the command '\n f'`sql DELETE FROM rolemenus WHERE message = \\'{{{channel.id}, {menu[\"message\"][1]}}}\\'`.')\n continue\n\n if guild.id not in menus.keys():\n menus[menu['guild']] = {}\n\n issues = [] # A list of issues with getting roles or emojis. If this list is not empty, the status of this menu will be \"False\".\n\n roles = []\n for role_id in menu['roles']:\n role = guild.get_role(role_id)\n if not role:\n print(f'Couldn\\'t find role with id {role_id} for menu {menu_message.jump_url} in guild \"{guild}\" '\n f'in channel \"{channel}\". Contact the server owner ({guild.owner}) to see if they removed '\n f'this role. You can remove this menu from the database with the command '\n f'`sql DELETE FROM rolemenus WHERE message = \\'{{{channel.id}, {menu_message.id}}}\\'`.')\n issues.append(['role', role_id])\n roles.append(None)\n else:\n roles.append(role)\n\n emojis = []\n for emoji_id in menu['emojis']:\n if emoji_id in amoji.unicode_codes.EMOJI_UNICODE_ENGLISH.values():\n emojis.append(amoji.emojize(emoji_id))\n else:\n emoji_id = int(emoji_id)\n try:\n emojis.append(await guild.fetch_emoji(emoji_id))\n except:\n print(f'Couldn\\'t find emoji with id {emoji_id} for menu {menu_message.jump_url} in guild '\n f'\"{guild}\" in channel \"{channel}\". Contact the server owner ({guild.owner}) to see if '\n f'they removed this emoji. 
You can remove this menu from the database with the command '\n f'`sql DELETE FROM rolemenus WHERE message = \\'{{{channel.id}, {menu_message.id}}}\\'`.')\n issues.append(['emoji', emoji_id])\n emojis.append(None)\n\n created_by = await self.bot.fetch_user(menu['created_by'])\n last_edited_by = None\n if menu['last_edited_by']:\n last_edited_by = await self.bot.fetch_user(menu['last_edited_by'])\n status = True if (not issues) and menu['enabled'] else False\n if (not status) and (not menu_message.content.startswith('***This menu is currently out of service***\\n\\n')):\n await menu_message.edit(content='***This menu is currently out of service***\\n\\n'+menu_message.content)\n menus[guild.id][str(channel.id)+','+str(menu_message.id)] = \\\n SelectionMenu(self.bot, menu_message, menu['description'], roles, emojis, menu['role_descs'],\n menu['allow_multiple'], created_by, menu['created_at'], last_edited_by=last_edited_by,\n last_edited_at=menu['last_edited_at'], status=status, issues=issues)\n except (OSError, discord.ConnectionClosed, asyncpg.PostgresConnectionError):\n self._task.cancel()\n self._task = self.bot.loop.create_task(self.get_menus())\n\n @commands.command(aliases=['printrss', 'printrs'], hidden=True)\n @is_bot_admin()\n async def printmenus(self, ctx: commands.Context):\n \"\"\"Print all role menus.\"\"\"\n print(menus)\n await ctx.send('Check the Python printer output for your results.')\n\n @commands.command(aliases=['iam', 'iwant', 'gimme'])\n async def giveme(self, ctx: commands.Context, role: discord.Role):\n \"\"\"Give yourself a role from the list of roles you can give yourself.\"\"\"\n available_roles = self.bot.server_configs[ctx.guild.id].self_roles\n if role not in available_roles:\n return await ctx.send(f'You can only give yourself {human_join([f\"**{r}**\" for r in available_roles], final=\"and\")}.')\n\n if role in ctx.author.roles:\n return await ctx.send('You already have this role:thinking:')\n\n try:\n await ctx.author.add_roles(role, reason=f'Selected role ({ctx.message.jump_url})')\n await ctx.send('Gave you the role.')\n except discord.Forbidden:\n await ctx.send('I do not have the required permissions to give you the role. I need \"Manage Roles\" '\n 'permissions and my highest role needs to be higher than the roles I am supposed to add/remove.')\n\n @commands.command(aliases=['imnot', 'removerole'])\n async def takerole(self, ctx: commands.Context, role: discord.Role):\n \"\"\"Remove a role from yourself if it is on the list of self-assignable roles.\"\"\"\n available_roles = self.bot.server_configs[ctx.guild.id].self_roles\n if role not in available_roles:\n return await ctx.send(f'You can only remove {human_join([f\"**{r}**\" for r in available_roles], final=\"and\")} from yourself.')\n\n if role not in ctx.author.roles:\n return await ctx.send('You do not have this role:thinking:')\n\n try:\n await ctx.author.remove_roles(role, reason=f'Removed role ({ctx.message.jump_url})')\n await ctx.send('Removed the role from you.')\n except discord.Forbidden:\n await ctx.send('I do not have the required permissions to remove that role from you. I need \"Manage Roles\" '\n 'permissions and my highest role needs to be higher than the roles I am supposed to add/remove.')\n\n @commands.group(name='selfassign', aliases=['selfroles'], invoke_without_command=True)\n #@owner_or_guild_permissions(manage_roles=True)\n async def self_assign(self, ctx: commands.Context):\n \"\"\"See which roles members can assign to themselves using a command. 
Use subcommands to change the list.\"\"\"\n current_roles = self.bot.server_configs[ctx.guild.id].self_roles\n await ctx.send(f'Use `{ctx.prefix}help selfassign` to see the available subcommands. ' +\n ('There are currently no roles available for people to give themselves.'\n if not current_roles else\n f'The currently available role{\"s are\" if len(current_roles) > 1 else \" is\"} '\n f'{human_join([f\"**{role}**\" for role in current_roles], final=\"and\")}.'))\n\n @self_assign.command(name='set', aliases=['choose', 'select'])\n @owner_or_guild_permissions(manage_roles=True)\n async def set_selfroles(self, ctx: commands.Context, *roles: discord.Role):\n \"\"\"Set a list of roles that people can give themselves.\n\n Provide the role IDs, mentions or names as arguments.\n Duplicates will be ignored.\n \"\"\"\n for role in roles:\n if role >= ctx.author.top_role:\n return await ctx.send(f'**{role}** role is (higher than) your highest role so you cannot make it available.')\n\n await selecting.set_roles(ctx, self.bot, 'self_roles', list(roles))\n\n @self_assign.command(name='add', aliases=['include'])\n @owner_or_guild_permissions(manage_roles=True)\n async def add_selfroles(self, ctx: commands.Context, *new_roles: discord.Role):\n \"\"\"Add roles to the list that people can assign themselves.\n\n Provide role IDs, mentions or names as arguments.\n Duplicates and roles that are already on the list will be ignored.\n \"\"\"\n for role in new_roles:\n if role >= ctx.author.top_role:\n return await ctx.send(f'**{role}** role is (higher than) your highest role so you cannot make it available.')\n\n await selecting.add_roles(ctx, self.bot, 'self_roles', list(new_roles))\n\n @self_assign.command(name='remove', aliases=['delete'])\n @owner_or_guild_permissions(manage_roles=True)\n async def remove_selfroles(self, ctx: commands.Context, *roles: discord.Role):\n \"\"\"Remove roles from the list that members can take for themselves.\n\n Provide role IDs, mentions or names as arguments.\n Duplicates and roles that aren't in the list will be ignored.\n \"\"\"\n for role in roles:\n if role >= ctx.author.top_role:\n return await ctx.send(f'**{role}** role is (higher than) your highest role so you cannot remove it from'\n f' the list.')\n\n await selecting.remove_roles(ctx, self.bot, 'self_roles', list(roles))\n\n @self_assign.command(name='clear', aliases=['wipe'])\n @owner_or_guild_permissions(manage_roles=True)\n async def clear_selfroles(self, ctx: commands.Context):\n \"\"\"Clear the entire list of roles that people can give themselves.\"\"\"\n current_roles = self.bot.server_configs[ctx.guild.id].self_roles\n for role in current_roles:\n if role >= ctx.author.top_role:\n return await ctx.send(f'**{role}** role is (higher than) your highest role so you cannot remove it from'\n f' the list. No roles have been removed.')\n\n await selecting.clear_roles(ctx, self.bot, 'self_roles')\n\n\ndef setup(bot: Curator):\n bot.add_cog(RoleSelector(bot))\n","repo_name":"Curator-Discord-Bot/Curator","sub_path":"cogs/roleselector.py","file_name":"roleselector.py","file_ext":"py","file_size_in_byte":43543,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
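The `make_rsel` command in the record above races a text reply against a ❌ reaction using `asyncio.wait(..., return_when=asyncio.FIRST_COMPLETED)`. Below is a minimal, discord-free sketch of that pattern; the two coroutines are hypothetical stand-ins for the `bot.wait_for('message')` and `bot.wait_for('reaction_add')` calls:

```python
import asyncio

# Race two awaitables, cancel the loser, and detect a timeout by the
# absence of any finished task, exactly as make_rsel does above.

async def wait_for_reply():
    await asyncio.sleep(0.1)  # pretend the user typed a description
    return ('message', 'A short role description')

async def wait_for_cross_reaction():
    await asyncio.sleep(5)    # pretend the user never reacted with the X
    return ('reaction', None)

async def main():
    pending = [asyncio.ensure_future(wait_for_reply()),
               asyncio.ensure_future(wait_for_cross_reaction())]
    done, pending = await asyncio.wait(pending, timeout=120,
                                       return_when=asyncio.FIRST_COMPLETED)
    for task in pending:      # cancel whichever event did not fire
        task.cancel()
    if not done:              # asyncio.wait() does not raise on timeout
        print('Timed out.')
        return
    event, payload = done.pop().result()
    print(event, payload)     # -> message A short role description

asyncio.run(main())
```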
+{"seq_id":"26125499080","text":"import logging\n\nfrom exchange import Exchange\nfrom bitstamp import client\n\nlogger = logging.getLogger( __name__ )\n\nclass Bitstamp(Exchange):\n def __init__(self, keyfile):\n f = open(keyfile)\n keys = f.readlines()\n f.close()\n if len(keys) != 3:\n logger.error(\"Incorrect key file\")\n exit()\n\n [user, key, secret] = [keys[0].strip(), keys[1].strip(), keys[2].strip()]\n self.bs_client = client.trading(username=user, key=key, secret=secret)\n self.bs_client_public = client.public()\n\n\n def balance(self):\n # {u'btc_reserved': 1.0, u'fee': 0.25, u'btc_available': 18.00767799, u'usd_reserved': 0.0, u'btc_balance': 19.00767799, u'usd_balance': 399.0, u'usd_available': 399.0}\n balance = self.bs_client.account_balance()\n\n # Convert string to double\n for attr in balance:\n balance[attr] = float(balance[attr])\n return balance\n\n\n def ticker(self):\n # example: {u'volume': u'40457.80751208', u'last': u'384.02', u'timestamp': u'1446878501', u'bid': u'384.02', u'vwap': u'378.59', u'high': u'396.67', u'low': u'362.64', u'ask': u'385.90', u'open': 374.41}\n tick = self.bs_client_public.ticker()\n for attr in tick:\n tick[attr] = float(tick[attr])\n return tick\n\n\n def open_orders(self):\n orders = self.bs_client.open_orders()\n return orders\n\n\n def buy(self, amount, price):\n result = self.bs_client.buy_limit_order(amount=amount, price=price)\n print(\"buy\", result)\n\n\n def sell(self, amount, price):\n result = self.bs_client.sell_limit_order(amount=amount, price=price)\n print(\"sell\", result);\n\n\n def cancel_order(self, order_id):\n self.bs_client.cancel_order(order_id)\n\n\n def cancel_all_orders(self, coin):\n # coin is 'btc' or 'ltc', but unused now\n orders = self.open_orders()\n for order in orders:\n self.bs_client.cancel_order(order['id'])\n","repo_name":"jyunfan/btc-trading","sub_path":"src/Bitstamp.py","file_name":"Bitstamp.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"73700148638","text":"# import libraries\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n\t'''\n\tINPUT: \n\tmessages_filepath - to find the correct current location of the messages .csv file \n\tcategories_filepath - to find the correct current location of the categories .csv file \n\n\tOUTPUT:\n\tdataframe, merging the two input .csv-files containing messages and categories \n\t'''\n\tmessages = pd.read_csv(messages_filepath)\n\tcategories = pd.read_csv(categories_filepath) \n\treturn messages.merge(categories, on=\"id\")\n\n\ndef clean_data(df):\n\t'''\n\tINPUT: \n\tdf - pandas dataframe\n\n\tOUTPUT:\n\tdf - cleaned pandas dataframe\n\t'''\n\t# create a dataframe of the 36 individual category columns\n\tcategories = df[\"categories\"].str.split(\";\", expand=True)\n\t# select the first row of the categories dataframe\n\trow = categories[:1]\n\t# list of new column names for the categories, rename the columns of `categories`\n\tcategory_colnames = [list(row[name])[0][:-2] for name in row]\n\tcategories.columns = category_colnames\n\t# only keep the last char of the string per cell as an integer (0 or 1)\n\tfor column in categories:\n\t\t# set each value to be the last character of the string\n\t\tcategories[column] = categories[column].str[-1]\n\t\t# convert column from string to numeric\n\t\tcategories[column] = categories[column].astype(int)\n\t\n\t# drop the old categories column\n\tdf = df.drop(['categories'], axis=1).copy()\n\t# concatenate the original dataframe with the new `categories` dataframe\n\tdf = pd.concat([df, categories], axis=1)\n\t# drop duplicated rows from the df dataframe\n\tdf = df.drop_duplicates()\n\t\n\treturn df\n\ndef save_data(df, database_filename):\n\t'''\n\tINPUT: \n\tdf - pandas dataframe to be saved into a sqlite db file\n\n\tOUTPUT:\n\t-\n\t'''\n\t# new sqllite dbbase named database_filename\n\tengine = create_engine('sqlite:///'+database_filename)\n\t# write merged and cleaned dataframe df into a new table\n\tdf.to_sql('message_categories', engine, index=False, if_exists='replace') \n\ndef main():\t\n\t'''\n\tThis main function starts to read in user arguments for the different \n\tfilepaths for the messages.csv and categories.csv, as well as one to \n\tdetermine were the resulting sqlite database is to be stored.\n\t\n\tUsing these filepaths, it first loads the messages and categories data.\n\tThen, the data is cleaned and processed to be ready as input for ML algorithms.\n\tAs a last step, this cleaned data gets stored into a sqlite database file.\n\t'''\n\tif len(sys.argv) == 4:\n\n\t\tmessages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n\t\tprint('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n\t\t\t.format(messages_filepath, categories_filepath))\n\t\tdf = load_data(messages_filepath, categories_filepath)\n\n\t\tprint('Cleaning data...')\n\t\tdf = clean_data(df)\n \n\t\tprint('Saving data...\\n DATABASE: {}'.format(database_filepath))\n\t\tsave_data(df, database_filepath)\n \n\t\tprint('Cleaned data saved to database!')\n \n\telse:\n\t\tprint('Please provide the filepaths of the messages and categories '\\\n\t\t\t\t'datasets as the first and second argument respectively, as '\\\n\t\t\t\t'well as the filepath of the database to save the cleaned data '\\\n\t\t\t\t'to as the third argument. 
\\n\\nExample: python process_data.py '\\\n\t\t\t\t'disaster_messages.csv disaster_categories.csv '\\\n\t\t\t\t'DisasterResponse.db')\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"mileier/disaster_response","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
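The `clean_data` step above turns each cell like `'related-1;request-0'` into one 0/1 column per category. A self-contained illustration of that transformation, with two made-up sample rows:

```python
import pandas as pd

# Two invented rows in the same shape as the disaster-response data.
df = pd.DataFrame({'id': [1, 2],
                   'categories': ['related-1;request-0', 'related-0;request-1']})

categories = df['categories'].str.split(';', expand=True)
row = categories[:1]
# strip the trailing '-1'/'-0' to get the bare category names
categories.columns = [list(row[name])[0][:-2] for name in row]
for column in categories:
    categories[column] = categories[column].str[-1].astype(int)

df = pd.concat([df.drop(['categories'], axis=1), categories], axis=1)
print(df)
#    id  related  request
# 0   1        1        0
# 1   2        0        1
```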
+{"seq_id":"70360644639","text":"from rich.console import Console\r\nfrom rich.table import Table\r\nimport json\r\n\r\nconsole = Console()\r\n\r\n\r\nclass announcement():\r\n \"\"\"overview class\"\"\"\r\n\r\n def get_data(self, request):\r\n \"\"\"extract data from overview\"\"\"\r\n data = request.get(endpoint=\"/announcement/\")\r\n return data[0], data[1]\r\n\r\n def generate_announcement_table(self, data):\r\n \"\"\"print top memeber and team information\"\"\"\r\n table = Table(show_header=True, header_style=\"bold green\")\r\n table.add_column(\"title\", style=\"green\")\r\n table.add_column(\"content\", style=\"green\")\r\n table.add_column(\"author\", style=\"green\")\r\n table.add_column(\"date\", style=\"green\")\r\n for i in data:\r\n table.add_row(i[\"title\"],\r\n i[\"content\"], i[\"author\"], i[\"date\"])\r\n console.print(table)\r\n\r\n def generate_dashboard(self, request):\r\n \"\"\"announcement\"\"\"\r\n console.print(\"Announcement\", style=\"bold blue\")\r\n with console.status(\"[bold green]please wait...\\n\") as status:\r\n data, status = self.get_data(request)\r\n # print(data)\r\n if status == 200:\r\n self.generate_announcement_table(data[\"data\"])\r\n else:\r\n pass\r\n","repo_name":"hacksec-in/hacksec-cli","sub_path":"hacksec_cli/mechanism/announcement/announcement.py","file_name":"announcement.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"}
+{"seq_id":"18665801026","text":"import tornado.web, tornado.gen\r\nfrom modules.utils import dict_from_cursor_one\r\nfrom modules.db_utils import db\r\n\r\ndb = db()\r\n\r\nclass BaseHandler(tornado.web.RequestHandler):\r\n\r\n\r\n @property\r\n def log(self):\r\n return self.application.log\r\n\r\n @property\r\n def db(self):\r\n # if db.closed:\r\n # db.connect()\r\n return db\r\n\r\n @tornado.gen.coroutine\r\n def prepare(self):\r\n yield db.connect()\r\n return super(BaseHandler, self).prepare()\r\n\r\n @tornado.gen.coroutine\r\n def on_finish(self):\r\n if not self.db.closed:\r\n self.db.close()\r\n return super(BaseHandler, self).on_finish()\r\n\r\nclass Base(BaseHandler):\r\n\r\n @tornado.gen.coroutine\r\n def fetch(self, Module, id, filter_by=None):\r\n \"\"\"\r\n :param Module: class for object that we return\r\n :param id: get object by id\r\n :param filter_by: attribute is dict where key - field in Module.__tablename__ and value - value for this field\r\n :return Module object:\r\n \"\"\"\r\n SQL = \"SELECT * FROM {0} WHERE id='{1}'\".format(Module.__tablename__, id)\r\n if filter_by:\r\n if not isinstance(filter_by, dict):\r\n raise AttributeError('The filter_by attribute must be dictionary!')\r\n SQL += ' AND'\r\n for fk, fv in filter_by.items():\r\n SQL += \" {0}='{1}' AND \".format(fk, fv)\r\n SQL = SQL[0:-4]\r\n cur = yield self.db.execute(SQL)\r\n return Module.get(cur)\r\n\r\n @tornado.gen.coroutine\r\n def fetch_all(self, Module, filter_by=None, order_by=None):\r\n \"\"\"\r\n :param Module: class for list objects that we return\r\n :param filter_by: attribute is dict where key - field in Module.__tablename__ and value - value for this field\r\n :param order_by: attribute is dict where key 'field' - field in which we will sort\r\n and 'type' - DESC or ASC sorted type\r\n :return list Module objects:\r\n \"\"\"\r\n SQL = \"SELECT * FROM {0}\".format(Module.__tablename__)\r\n if filter_by:\r\n if not isinstance(filter_by, dict):\r\n raise AttributeError('The filter_by attribute must be dictionary!')\r\n SQL += ' WHERE'\r\n for fk, fv in filter_by.items():\r\n SQL += \" {0}='{1}' AND \".format(fk, fv)\r\n SQL = SQL[0:-4]\r\n if order_by:\r\n if not isinstance(order_by, dict):\r\n raise AttributeError('The sort_by attribute must be dictionary!')\r\n SQL += ' ORDER BY'\r\n for ok, ov in order_by.items():\r\n SQL += ' {} {},'.format(ok, ov)\r\n SQL = SQL[0:-1]\r\n cur = yield self.db.execute(SQL)\r\n return Module.get_all(cur)\r\n\r\n @tornado.gen.coroutine\r\n def fetch_by(self, Module, **kwargs):\r\n SQL = \"SELECT * FROM {} WHERE \".format(Module.__tablename__)\r\n for k, v in kwargs.items():\r\n SQL += \"{0}='{1}' AND \".format(k, v)\r\n SQL = SQL[0:-4]\r\n cur = yield self.db.execute(SQL)\r\n return Module.get(cur)\r\n\r\n @tornado.gen.coroutine\r\n def save(self):\r\n if not self.check_required_fields():\r\n return\r\n SELECT_SQL = 'SELECT * FROM {}'.format(self.__class__.__dict__['__tablename__'])+ ' WHERE '\r\n SQL_INSERT = \"INSERT INTO {} (\".format(self.__class__.__dict__['__tablename__'])\r\n VALUES = ' VALUES('\r\n for k in self.__dict__:\r\n if self.__getattribute__(k) != None:\r\n SQL_INSERT += \"{},\".format(k)\r\n SELECT_SQL +=\"{0}='{1}' AND \".format(k, self.__getattribute__(k))\r\n VALUES += \"'{}',\".format(self.__getattribute__(k))\r\n SQL = SQL_INSERT[0:-1]+ ')' + VALUES[0:-1]+')'\r\n yield self.db.execute(SQL)\r\n cur_select = yield self.db.execute(SELECT_SQL[0:-4])\r\n result = self.__class__.get(cur_select)\r\n self.id = result.id\r\n\r\n 
@tornado.gen.coroutine\r\n def update(self, filter=None):\r\n \"\"\"\r\n :param filter: must be dict with field and value key\r\n :return:\r\n \"\"\"\r\n if not self.check_required_fields():\r\n return\r\n SQL_UPDATE = \"UPDATE {} SET \".format(self.__class__.__dict__['__tablename__'])\r\n for k in self.__dict__:\r\n if k != 'id' and self.__getattribute__(k) != None:\r\n SQL_UPDATE += \"{}='{}',\".format(k, self.__getattribute__(k))\r\n if not filter:\r\n SQL = SQL_UPDATE[0:-1] + \" WHERE id='{}'\".format(self.id)\r\n else:\r\n SQL = SQL_UPDATE[0:-1] + \" WHERE {field}='{value}'\".format(**filter)\r\n yield self.db.execute(SQL)\r\n\r\n\r\n @tornado.gen.coroutine\r\n def remove(self):\r\n yield self.db.execute(\"DELETE FROM {} WHERE id='{}'\".format(self.__class__.__dict__['__tablename__'], self.id))\r\n\r\n @classmethod\r\n def get(cls, cursor):\r\n obj = cursor.fetchone()\r\n object = cls()\r\n if obj:\r\n for k in object.__dict__.keys():\r\n if k in obj:\r\n object.__setattr__(k, obj[k])\r\n else:\r\n object.__setattr__(k, None)\r\n if 'id' in obj:\r\n object.__setattr__('id', obj['id'])\r\n return object\r\n\r\n @classmethod\r\n def get_all(cls, cursor):\r\n objs = cursor.fetchall()\r\n list_objs = []\r\n for obj in objs:\r\n object = cls()\r\n keys = object.__dict__.keys()\r\n if obj:\r\n for k in keys:\r\n if k in obj:\r\n object.__setattr__(k, obj[k])\r\n else:\r\n object.__setattr__(k, None)\r\n if 'id' in obj:\r\n object.__setattr__('id', obj['id'])\r\n list_objs.append(object)\r\n return list_objs\r\n\r\n def get_current_user(self):\r\n return self.get_secure_cookie(\"email\")\r\n\r\n def write_error(self, status_code, **kwargs):\r\n if status_code in [403, 404, 500, 503]:\r\n self.render(\"admin/404.html\")\r\n else:\r\n self.write('BOOM!')\r\n\r\n @tornado.gen.coroutine\r\n def get_current_user_dict(self):\r\n email = self.current_user.decode()\r\n cur = yield self.db.execute(\"SELECT * FROM users WHERE email='{}'\".format(email))\r\n return dict_from_cursor_one(cur)\r\n\r\n","repo_name":"Stevee67/portfolio","sub_path":"modules/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
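The `fetch` helpers in the record above splice values into SQL with `str.format`, which breaks on quotes and invites injection. A hedged alternative sketch, assuming the `db.execute` wrapper forwards psycopg2-style `%s` parameter tuples (as momoko does); `fetch_safe` is a hypothetical name, and table/column names must still come from trusted code, never from users:

```python
import tornado.gen

# Same lookup as Base.fetch, but with values passed as driver-side
# parameters instead of being interpolated into the SQL string.
@tornado.gen.coroutine
def fetch_safe(db, Module, id, filter_by=None):
    sql = "SELECT * FROM {0} WHERE id = %s".format(Module.__tablename__)
    params = [id]
    for fk, fv in (filter_by or {}).items():
        sql += " AND {0} = %s".format(fk)  # column names from trusted code only
        params.append(fv)
    cur = yield db.execute(sql, tuple(params))
    return Module.get(cur)
```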
+{"seq_id":"42817680882","text":"from gan import generator_model, discriminator_model, GAN, train\r\nfrom utilities import load_pickled_object\r\n\r\nfeatures_file_path = './studies/flash_crashes/omx30/features/'\r\n\r\n\r\ndef mae_phase():\r\n features_dictionary = load_pickled_object(features_file_path + 'x_c.pickle')\r\n\r\n noise_dimensions = 2\r\n generator = generator_model(noise_dimensions)\r\n discriminator = discriminator_model()\r\n gan = GAN(discriminator, generator)\r\n train(generator, discriminator, gan, noise_dimensions, features_dictionary)\r\n","repo_name":"Refinitiv-API-Samples/Blueprint.RD.AI.FE.SyntheticDataWithGenerativeAdversarialNetworks","sub_path":"mae_module.py","file_name":"mae_module.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"7895498197","text":"import json\n\nfinished = False\nj = 0\nwith open('./examples/res/jiulin.json', 'w', encoding='utf-8') as w:\n data = []\n with open('./examples/res/alphapose-results_jiulin.json', 'r', encoding='utf-8') as r:\n result = json.load(r)\n for i in range(0, 36800):\n if finished:\n obj = {'score': None, 'keypoints': None, 'image_id': None, 'category_id': None}\n data.append(obj)\n break\n elif int(result[j]['image_id'].replace('.jpg', '')) > i:\n obj = {'score': None, 'keypoints': None, 'image_id': None, 'category_id': None}\n data.append(obj)\n elif int(result[j]['image_id'].replace('.jpg', '')) == i:\n data.append(result[j])\n j += 1\n if j >= len(result):\n finished = True\n else:\n j += 1\n if j >= len(result):\n finished = True\n json.dump(data, w)\n","repo_name":"Fight-hawk/RunningPose","sub_path":"pre_processed.py","file_name":"pre_processed.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"41534045038","text":"import subprocess\r\nimport serial\r\nimport serial.tools.list_ports\r\nfrom fastapi import FastAPI, Request, Form, HTTPException\r\nfrom fastapi.templating import Jinja2Templates\r\nimport time\r\nimport os \r\n\r\napp = FastAPI()\r\ntemplates = Jinja2Templates(directory=\"templates/\")\r\nserialInst = serial.Serial('COM3', 9600, timeout=1)\r\n\r\ndef getSerialOutput():\r\n while True:\r\n if serialInst.in_waiting:\r\n packet = serialInst.readline().decode('utf-8').rstrip()\r\n serial_data = list(map(float, packet.split(' ')))\r\n data = {\r\n \"temperature\": serial_data[0],\r\n \"humidity\": serial_data[1],\r\n \"PPM\": serial_data[2]\r\n }\r\n return data\r\n\r\n\r\n\r\ndef blink_led(pin):\r\n serialInst.write(bytes(str(pin), 'utf-8'))\r\n time.sleep(1)\r\n \r\n\r\n@app.get(\"/\")\r\ndef index(request: Request):\r\n data = getSerialOutput()\r\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"temperature\": data['temperature'], \"humidity\": data['humidity'], \"PPM\": data['PPM'],})\r\n \r\n\r\n@app.post(\"/run\")\r\nasync def run_file():\r\n subprocess.run(['python', 'C:/Users/riyesh/OneDrive/Desktop/gaze_controlled_keyboard_p10/gaze_controlled_keyboard_p10.py'])\r\n return {\"message\": \"File ran successfully.\"}\r\n\r\n@app.post(\"/run1\")\r\nasync def run_file1():\r\n subprocess.run(['python', 'C:/Users/riyesh/OneDrive/Desktop/gaze_controlled_keyboard_p10/voiceacct.py'])\r\n return {\"message\": \"File ran successfully.\"}\r\n\r\n@app.post(\"/blink_led_2\")\r\nasync def blink_led_2_endpoint(request: Request):\r\n blink_led('f')\r\n data = getSerialOutput()\r\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"message\": \"AC is On!\",\"temperature\": data['temperature'], \"humidity\": data['humidity'], \"PPM\": data['PPM']})\r\n\r\n@app.post(\"/blink_led_3\")\r\nasync def blink_led_3_endpoint(request: Request):\r\n blink_led('a')\r\n data = getSerialOutput()\r\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"message\": \"Fan is On\",\"temperature\": data['temperature'], \"humidity\": data['humidity'], \"PPM\": data['PPM']})\r\n\r\n@app.post(\"/blink_led_4\")\r\nasync def blink_led_4_endpoint(request: Request):\r\n blink_led('l')\r\n data = getSerialOutput()\r\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"message\": \"Lights are ON!\",\"temperature\": data['temperature'], \"humidity\": data['humidity'], \"PPM\": data['PPM']})\r\n\r\nif __name__ == '__main__':\r\n import uvicorn\r\n uvicorn.run(app, host=\"127.0.0.1\", port=8000)\r\n \r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"Duwal94/HCI-based-PD_Helper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"26107535260","text":"from bayes_opt import BayesianOptimization\r\nfrom lightgbm import LGBMRegressor, LGBMClassifier\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nfrom sklearn.datasets import load_diabetes, load_breast_cancer\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score,r2_score\r\n\r\n#1. 데이터\r\n\r\ndatasets = load_breast_cancer()\r\n\r\nx = datasets.data\r\ny = datasets.target\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x,y,train_size=0.8,random_state=123,shuffle=True)\r\n\r\nscaler = StandardScaler()\r\nx_train = scaler.fit_transform(x_train)\r\nx_test = scaler.transform(x_test)\r\n\r\n#2. 모델\r\nbayesian_params = {\r\n 'max_depth':(6,16),\r\n 'num_leaves' :(24,64),\r\n 'min_child_sample' :(10,200),\r\n 'min_child_weight' : (1,50),\r\n 'subsample':(0.5,1),\r\n 'colsample_bytree':(0.5,1),\r\n 'max_bin' : (420,520),\r\n 'reg_lambda' : (0.0001,10),\r\n 'reg_alpha':(0.01,50)\r\n}\r\n\r\ndef lgb_hamsu(max_depth,num_leaves,min_child_sample,min_child_weight,subsample,colsample_bytree,max_bin,\r\n reg_lambda,reg_alpha) :\r\n params ={ \r\n 'n_estimators':500,\"learning_rate\":0.02,\r\n 'max_depth': int(round(max_depth)), # 정수만 \r\n 'num_leaves':int(round(num_leaves)), # 정수만\r\n 'min_child_sample' :int(round(min_child_sample)), # 정수만\r\n 'min_child_weight' : int(round(min_child_weight)), # 정수만\r\n 'subsample':max(min(subsample,1),0,), # (0~1)사이값\r\n 'colsample_bytree':max(min(colsample_bytree,1),0,), # (0~1)사이값\r\n 'max_bin' : max(int(round(max_bin)),10), # 10 이상\r\n 'reg_lambda' : max(reg_lambda,0), # 0이상(양수)\r\n 'reg_alpha':max(reg_alpha,0) # 0이상(양수)\r\n \r\n }\r\n \r\n \r\n model = LGBMClassifier(**params)\r\n # ** 키워드받겠다(딕셔너리형태)\r\n # * 여러개의인자를 받겠다.\r\n model.fit(x_train,y_train,\r\n eval_set=[(x_train,y_train),(x_test,y_test)],\r\n eval_metric='rmse',\r\n verbose=0,\r\n early_stopping_rounds=50,\r\n )\r\n y_predict = model.predict(x_test)\r\n results = accuracy_score(y_test,y_predict)\r\n \r\n \r\n return results\r\n\r\nlgb_bo = BayesianOptimization(f=lgb_hamsu,\r\n pbounds= bayesian_params,\r\n random_state=123)\r\nlgb_bo.maximize(init_points=5,n_iter=50)\r\n\r\nprint(lgb_bo.max)\r\n\r\n# {'target': 0.9912280701754386,\r\n# 'params': {'colsample_bytree': 0.7919410497955786,\r\n# 'max_bin': 458.2130768347505,\r\n# 'max_depth': 13.011875840403292,\r\n# 'min_child_sample': 90.11618557959497,\r\n# 'min_child_weight': 4.849037464942212,\r\n# 'num_leaves': 42.53863680221558,\r\n# 'reg_alpha': 30.319611432881363,\r\n# 'reg_lambda': 4.135217639511628,\r\n# 'subsample': 0.5181827326249917}}\r\n","repo_name":"junginee/From-ML-to-DL","sub_path":"ml/ml55_BayesianOptimization3_lgbm_cancer.py","file_name":"ml55_BayesianOptimization3_lgbm_cancer.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"10900481143","text":"def encode(some_text):\r\n\tbin_txt=\"\"\r\n\tfor letter in some_text:\r\n\t\tlet_bin = ord(letter)\r\n\t\tres = \"\"\r\n\t\twhile let_bin !=0:\r\n\t\t\tres+=str(let_bin % 2)\r\n\t\t\tlet_bin //= 2\r\n\t\tadd_zero = \"0\"*(8-len(res))+res[::-1] #adds missing zeroes\r\n\t\tbin_txt += add_zero\r\n\treturn bin_txt\r\n\r\ndef decode(bin_text):\r\n\tresult = \"\"\r\n\tfor index in range(0,len(bin_text),8):\r\n\t\tcut =slice(index, index+8)\r\n\t\ti=0\r\n\t\tdecoded = 0\r\n\t\tfor idx in bin_text[cut][::-1]:\r\n\t\t\tdecoded += int(idx)*2**i\r\n\t\t\ti+=1\r\n\t\tresult += chr(decoded)\r\n\treturn result\r\n\r\ndef encode_file(path):\r\n\twith open(path, \"r\") as f:\r\n\t\ttext = f.read()\r\n\t\treturn (encode(text))\r\n\r\ndef decode_file(path1):\r\n\twith open(path1, \"r\") as f:\r\n\t\ttext = f.read()\r\n\t\treturn (decode(text))\r\n\r\n\r\nprint(encode_file(\"mixed_typed_list.py\"))","repo_name":"AnushavanAleksanyan/lessons","sub_path":"encode_file.py","file_name":"encode_file.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"20751672713","text":"from mindsdb_server.namespaces.configs.predictors import ns_conf\n\nfrom flask_restx import fields\nimport datetime\n\npredictor_status = ns_conf.model('PredictorStatus', {\n # Primary key\n 'name': fields.String(required=False, description='The predictor name, NOTE: That primary key is made of name:version'),\n 'version': fields.String(required=False, description='The predictor version to publish under, this is so that we can train multiple predictors for the same problem but expose them via the same name'),\n # other attributes\n 'is_active': fields.Boolean(required=False, description='Only one predictor by public_name can be active'),\n 'data_source': fields.String(required=False, description='The data source it\\'s learning from'),\n 'predict': fields.List(fields.String, required=False, description='The list of columns/fields to be predicted'),\n 'accuracy': fields.Float(description='The current accuracy of the model'),\n 'status': fields.String(required=False, description='The current model status', enum=['training', 'complete', 'error']),\n 'current_phase': fields.String(required=False, description='Current training phase'),\n 'train_end_at': fields.DateTime(required=False, description='The time the predictor finished training'),\n 'updated_at': fields.DateTime(required=False, description='The time the predictor was last updated at'),\n 'created_at': fields.DateTime(required=False, description='The time the predictor was created at')\n})\n\n\n\n\n##EXAMPLES\n\n\n\nEXAMPLES = [\n {\n 'name': 'Price',\n 'version': 1,\n 'is_active': True,\n 'data_source': 'real_estate.csv',\n 'predict': ['price'],\n 'accuracy': '.97',\n 'status': 'training',\n 'train_end_at': datetime.datetime.now(),\n 'updated_at': datetime.datetime.now(),\n 'created_at': datetime.datetime.now()\n },\n {\n 'name': 'Price',\n 'version': 2,\n 'is_active': False,\n 'data_source': 'real_estate_complex.csv',\n 'predict': ['price'],\n 'accuracy': '.64',\n 'status': 'training',\n 'train_end_at': datetime.datetime.now(),\n 'updated_at': datetime.datetime.now(),\n 'created_at': datetime.datetime.now()\n },\n {\n 'name': 'Number of rooms',\n 'version': 1,\n 'is_active': True,\n 'data_source': 'real_estate.csv',\n 'predict': ['number_of_rooms'],\n 'accuracy': '.97',\n 'status': 'complete',\n 'train_end_at': datetime.datetime.now(),\n 'updated_at': datetime.datetime.now(),\n 'created_at': datetime.datetime.now()\n }\n]\n","repo_name":"mindsdb/mindsdb_server","sub_path":"mindsdb_server/namespaces/entitites/predictor_status.py","file_name":"predictor_status.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"41979864200","text":"#!/usr/bin/env python\n# -*- mode: python; coding: koi8-r; -*-\n\nclass TextParagraph:\n def __init__(self, type='text'):\n\n self.type = type # text / title / subtitle / image / footnote\n self.data = ''\n\n self.styles = [] # [(begin, end, style), (begin, end, style), ...]\n # style: emphasis / strong / superscript\n\n self.footnotes = []\n self.href = ''\n\n self.title_level = 0\n\n\n","repo_name":"murminathor/MurReader","sub_path":"murreader/lib/python/murreader/textparagraph.py","file_name":"textparagraph.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"36973093548","text":"while True:\n\ttry:\n\t\tstring = input()\n\texcept Exception as e:\n\t\texit()\n\n\tupper_even = 0\n\tlower_even = 0\n\tupper_odd = 0\n\tlower_odd = 0\n\tfor x in range(0,len(string),2):\n\t\tif string[x] >= 'A' and string[x] <= 'Z':\n\t\t\tupper_even += 1\n\t\telse:\n\t\t\tlower_even += 1\n\tfor x in range(1,len(string),2):\n\t\tif string[x] >= 'A' and string[x] <= 'Z':\n\t\t\tupper_odd += 1\n\t\telse:\n\t\t\tlower_odd += 1\n\n\tprint(min(lower_even+upper_odd, upper_even+lower_odd))","repo_name":"guilhermetiaki/coding-challenges","sub_path":"SPOJ/EDIT.py","file_name":"EDIT.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"25113303271","text":"\"\"\"Add play history update datetime to users\n\nRevision ID: 098938c6cc3e\nRevises: 88e58e97c207\nCreate Date: 2020-04-05 17:32:22.431488\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '098938c6cc3e'\ndown_revision = '88e58e97c207'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint('tracks_id_uniq', 'Tracks', ['id'])\n op.add_column('Users', sa.Column('last_play_history_upd', sa.DateTime(), nullable=True))\n op.create_unique_constraint('users_id_uniq', 'Users', ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('users_id_uniq', 'Users', type_='unique')\n op.drop_column('Users', 'last_play_history_upd')\n op.drop_constraint('tracks_id_uniq', 'Tracks', type_='unique')\n # ### end Alembic commands ###\n","repo_name":"cwmli/ayysmr","sub_path":"migrations/versions/098938c6cc3e_.py","file_name":"098938c6cc3e_.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"9931785242","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n node=end=head\n while end.next:\n end=end.next\n ith=0\n remove=head\n while ith!=n and remove:\n ith+=1\n remove=remove.next\n if not remove:\n return head.next\n while remove:\n if remove==end:\n node.next=node.next.next\n node=node.next\n remove=remove.next\n return head\n \n ","repo_name":"goldsergeant/Algorithm-problem-solving","sub_path":"leetcode/0019-remove-nth-node-from-end-of-list/0019-remove-nth-node-from-end-of-list.py","file_name":"0019-remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"36490225183","text":"import time\nimport torch\nfrom torch import nn, optim\nimport torchvision\nfrom utils import *\n#加载设备\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n#定义网络\nclass AlexNet(nn.Module):\n def __init__(self):\n super(AlexNet, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(1, 96, 11, 4),\n nn.ReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(96, 256, 5, 1, 2),\n nn.ReLU(),\n nn.MaxPool2d(3, 2),\n nn.Conv2d(256, 384, 3, 1, 1),\n nn.ReLU(),\n nn.Conv2d(384, 384, 3, 1, 1),\n nn.ReLU(),\n nn.Conv2d(384, 256, 3, 1, 1),\n nn.ReLU(),\n nn.MaxPool2d(3, 2)\n )\n self.fc = nn.Sequential(\n nn.Linear(256*5*5, 4096),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(4096, 10),\n )\n\n def forward(self, img):\n feature = self.conv(img)\n output = self.fc(feature.view(img.shape[0], -1))\n return output\n\n#实例化\nnet = AlexNet()\n#加载数据集,并resize到224\ntrain_iter, test_iter = load_data_fashion_mnist(batch_size=256, resize=224)\n#测试函数\ndef vaild(data_iter):\n acc_sum, n = 0.0, 0\n with torch.no_grad():\n for X, y in data_iter:\n net.eval()\n acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n net.train()\n n += y.shape[0]\n return acc_sum / n\n\ndef train(net, epochs, train_iter, test_iter, optimazer):\n net.to(device)\n print(\"train on cuda\", device)\n loss = nn.CrossEntropyLoss()\n batch_count = 0\n for epoch in range(epochs):\n train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()\n for X, y in train_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y)\n optimazer.zero_grad()\n l.backward()\n optimazer.step()\n train_l_sum += l.cpu().item()\n train_acc_sum += (net(X).argmax(dim=1) == y).sum().cpu().item()\n n += y.shape[0]\n batch_count += 1\n\n test_acc = vaild(test_iter)\n print(\"epoch %d, loss %4f, train_acc %3f, test_acc %3f, time %1f sec\" % (epoch, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time()-start))\n\nepochs, lr = 200, 0.01\noptimazer = torch.optim.Adam(net.parameters(), lr=lr)\ntrain(net, epochs, train_iter, test_iter, optimazer)\n\n\n","repo_name":"ethan-sui/Personal_Website_Code","sub_path":"Deep learning/Convolutional Neural Network/AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41167650903","text":"import uuid\nfrom collections import defaultdict\nfrom copy import copy\n\nfrom django.db import connections\nfrom django.urls import path\nfrom django.utils.translation import gettext_lazy as _, ngettext\n\nfrom debug_toolbar import settings as dt_settings\nfrom debug_toolbar.forms import SignedDataForm\nfrom debug_toolbar.panels import Panel\nfrom debug_toolbar.panels.sql import views\nfrom debug_toolbar.panels.sql.forms import SQLSelectForm\nfrom debug_toolbar.panels.sql.tracking import wrap_cursor\nfrom debug_toolbar.panels.sql.utils import contrasting_color_generator, reformat_sql\nfrom debug_toolbar.utils import render_stacktrace\n\n\ndef get_isolation_level_display(vendor, level):\n if vendor == \"postgresql\":\n try:\n import psycopg\n\n choices = {\n # AUTOCOMMIT level does not exists in psycopg3\n psycopg.IsolationLevel.READ_UNCOMMITTED: _(\"Read uncommitted\"),\n psycopg.IsolationLevel.READ_COMMITTED: _(\"Read committed\"),\n psycopg.IsolationLevel.REPEATABLE_READ: _(\"Repeatable read\"),\n psycopg.IsolationLevel.SERIALIZABLE: _(\"Serializable\"),\n }\n except ImportError:\n import psycopg2.extensions\n\n choices = {\n psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT: _(\"Autocommit\"),\n psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED: _(\n \"Read uncommitted\"\n ),\n psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED: _(\"Read committed\"),\n psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ: _(\n \"Repeatable read\"\n ),\n psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE: _(\"Serializable\"),\n }\n\n else:\n raise ValueError(vendor)\n return choices.get(level)\n\n\ndef get_transaction_status_display(vendor, level):\n if vendor == \"postgresql\":\n try:\n import psycopg\n\n choices = {\n psycopg.pq.TransactionStatus.IDLE: _(\"Idle\"),\n psycopg.pq.TransactionStatus.ACTIVE: _(\"Active\"),\n psycopg.pq.TransactionStatus.INTRANS: _(\"In transaction\"),\n psycopg.pq.TransactionStatus.INERROR: _(\"In error\"),\n psycopg.pq.TransactionStatus.UNKNOWN: _(\"Unknown\"),\n }\n except ImportError:\n import psycopg2.extensions\n\n choices = {\n psycopg2.extensions.TRANSACTION_STATUS_IDLE: _(\"Idle\"),\n psycopg2.extensions.TRANSACTION_STATUS_ACTIVE: _(\"Active\"),\n psycopg2.extensions.TRANSACTION_STATUS_INTRANS: _(\"In transaction\"),\n psycopg2.extensions.TRANSACTION_STATUS_INERROR: _(\"In error\"),\n psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN: _(\"Unknown\"),\n }\n\n else:\n raise ValueError(vendor)\n return choices.get(level)\n\n\ndef _similar_query_key(query):\n return query[\"raw_sql\"]\n\n\ndef _duplicate_query_key(query):\n raw_params = () if query[\"raw_params\"] is None else tuple(query[\"raw_params\"])\n # repr() avoids problems because of unhashable types\n # (e.g. 
lists) when used as dictionary keys.\n # https://github.com/jazzband/django-debug-toolbar/issues/1091\n return (query[\"raw_sql\"], repr(raw_params))\n\n\ndef _process_query_groups(query_groups, databases, colors, name):\n counts = defaultdict(int)\n for (alias, _key), query_group in query_groups.items():\n count = len(query_group)\n # Queries are similar / duplicates only if there are at least 2 of them.\n if count > 1:\n color = next(colors)\n for query in query_group:\n query[f\"{name}_count\"] = count\n query[f\"{name}_color\"] = color\n counts[alias] += count\n for alias, db_info in databases.items():\n db_info[f\"{name}_count\"] = counts[alias]\n\n\nclass SQLPanel(Panel):\n \"\"\"\n Panel that displays information about the SQL queries run while processing\n the request.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._sql_time = 0\n self._queries = []\n self._databases = {}\n # synthetic transaction IDs, keyed by DB alias\n self._transaction_ids = {}\n\n def new_transaction_id(self, alias):\n \"\"\"\n Generate and return a new synthetic transaction ID for the specified DB alias.\n \"\"\"\n trans_id = uuid.uuid4().hex\n self._transaction_ids[alias] = trans_id\n return trans_id\n\n def current_transaction_id(self, alias):\n \"\"\"\n Return the current synthetic transaction ID for the specified DB alias.\n \"\"\"\n trans_id = self._transaction_ids.get(alias)\n # Sometimes it is not possible to detect the beginning of the first transaction,\n # so current_transaction_id() will be called before new_transaction_id(). In\n # that case there won't yet be a transaction ID. so it is necessary to generate\n # one using new_transaction_id().\n if trans_id is None:\n trans_id = self.new_transaction_id(alias)\n return trans_id\n\n def record(self, **kwargs):\n self._queries.append(kwargs)\n alias = kwargs[\"alias\"]\n if alias not in self._databases:\n self._databases[alias] = {\n \"time_spent\": kwargs[\"duration\"],\n \"num_queries\": 1,\n }\n else:\n self._databases[alias][\"time_spent\"] += kwargs[\"duration\"]\n self._databases[alias][\"num_queries\"] += 1\n self._sql_time += kwargs[\"duration\"]\n\n # Implement the Panel API\n\n nav_title = _(\"SQL\")\n\n @property\n def nav_subtitle(self):\n query_count = len(self._queries)\n return ngettext(\n \"%(query_count)d query in %(sql_time).2fms\",\n \"%(query_count)d queries in %(sql_time).2fms\",\n query_count,\n ) % {\n \"query_count\": query_count,\n \"sql_time\": self._sql_time,\n }\n\n @property\n def title(self):\n count = len(self._databases)\n return ngettext(\n \"SQL queries from %(count)d connection\",\n \"SQL queries from %(count)d connections\",\n count,\n ) % {\"count\": count}\n\n template = \"debug_toolbar/panels/sql.html\"\n\n @classmethod\n def get_urls(cls):\n return [\n path(\"sql_select/\", views.sql_select, name=\"sql_select\"),\n path(\"sql_explain/\", views.sql_explain, name=\"sql_explain\"),\n path(\"sql_profile/\", views.sql_profile, name=\"sql_profile\"),\n ]\n\n def enable_instrumentation(self):\n # This is thread-safe because database connections are thread-local.\n for connection in connections.all():\n wrap_cursor(connection)\n connection._djdt_logger = self\n\n def disable_instrumentation(self):\n for connection in connections.all():\n connection._djdt_logger = None\n\n def generate_stats(self, request, response):\n colors = contrasting_color_generator()\n trace_colors = defaultdict(lambda: next(colors))\n similar_query_groups = defaultdict(list)\n duplicate_query_groups 
= defaultdict(list)\n\n if self._queries:\n sql_warning_threshold = dt_settings.get_config()[\"SQL_WARNING_THRESHOLD\"]\n\n width_ratio_tally = 0\n factor = int(256.0 / (len(self._databases) * 2.5))\n for n, db in enumerate(self._databases.values()):\n rgb = [0, 0, 0]\n color = n % 3\n rgb[color] = 256 - n // 3 * factor\n nn = color\n # XXX: pretty sure this is horrible after so many aliases\n while rgb[color] < factor:\n nc = min(256 - rgb[color], 256)\n rgb[color] += nc\n nn += 1\n if nn > 2:\n nn = 0\n rgb[nn] = nc\n db[\"rgb_color\"] = rgb\n\n # the last query recorded for each DB alias\n last_by_alias = {}\n for query in self._queries:\n alias = query[\"alias\"]\n\n similar_query_groups[(alias, _similar_query_key(query))].append(query)\n duplicate_query_groups[(alias, _duplicate_query_key(query))].append(\n query\n )\n\n trans_id = query.get(\"trans_id\")\n prev_query = last_by_alias.get(alias, {})\n prev_trans_id = prev_query.get(\"trans_id\")\n\n # If two consecutive queries for a given DB alias have different\n # transaction ID values, a transaction started, finished, or both, so\n # annotate the queries as appropriate.\n if trans_id != prev_trans_id:\n if prev_trans_id is not None:\n prev_query[\"ends_trans\"] = True\n if trans_id is not None:\n query[\"starts_trans\"] = True\n if trans_id is not None:\n query[\"in_trans\"] = True\n\n if \"iso_level\" in query:\n query[\"iso_level\"] = get_isolation_level_display(\n query[\"vendor\"], query[\"iso_level\"]\n )\n if \"trans_status\" in query:\n query[\"trans_status\"] = get_transaction_status_display(\n query[\"vendor\"], query[\"trans_status\"]\n )\n\n query[\"form\"] = SignedDataForm(\n auto_id=None, initial=SQLSelectForm(initial=copy(query)).initial\n )\n\n if query[\"sql\"]:\n query[\"sql\"] = reformat_sql(query[\"sql\"], with_toggle=True)\n\n query[\"is_slow\"] = query[\"duration\"] > sql_warning_threshold\n query[\"is_select\"] = (\n query[\"raw_sql\"].lower().lstrip().startswith(\"select\")\n )\n\n query[\"rgb_color\"] = self._databases[alias][\"rgb_color\"]\n try:\n query[\"width_ratio\"] = (query[\"duration\"] / self._sql_time) * 100\n except ZeroDivisionError:\n query[\"width_ratio\"] = 0\n query[\"start_offset\"] = width_ratio_tally\n query[\"end_offset\"] = query[\"width_ratio\"] + query[\"start_offset\"]\n width_ratio_tally += query[\"width_ratio\"]\n query[\"stacktrace\"] = render_stacktrace(query[\"stacktrace\"])\n\n query[\"trace_color\"] = trace_colors[query[\"stacktrace\"]]\n\n last_by_alias[alias] = query\n\n # Close out any transactions that were in progress, since there is no\n # explicit way to know when a transaction finishes.\n for final_query in last_by_alias.values():\n if final_query.get(\"trans_id\") is not None:\n final_query[\"ends_trans\"] = True\n\n group_colors = contrasting_color_generator()\n _process_query_groups(\n similar_query_groups, self._databases, group_colors, \"similar\"\n )\n _process_query_groups(\n duplicate_query_groups, self._databases, group_colors, \"duplicate\"\n )\n\n self.record_stats(\n {\n \"databases\": sorted(\n self._databases.items(), key=lambda x: -x[1][\"time_spent\"]\n ),\n \"queries\": self._queries,\n \"sql_time\": self._sql_time,\n }\n )\n\n def generate_server_timing(self, request, response):\n stats = self.get_stats()\n title = \"SQL {} queries\".format(len(stats.get(\"queries\", [])))\n value = stats.get(\"sql_time\", 0)\n self.record_server_timing(\"sql_time\", title, 
value)\n","repo_name":"jazzband/django-debug-toolbar","sub_path":"debug_toolbar/panels/sql/panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":11850,"program_lang":"python","lang":"en","doc_type":"code","stars":7689,"dataset":"github-code","pt":"51"}
+{"seq_id":"41015092934","text":"import cv2\n\n\ndef showImage():\n imgFile = \"resource/ipad.jpeg\"\n img = cv2.imread(imgFile, cv2.IMREAD_GRAYSCALE)\n\n cv2.namedWindow('ipad', cv2.WINDOW_NORMAL)\n cv2.imshow('ipad', img)\n cv2.waitKey(10000000)\n cv2.destroyAllWindows()\n cv2.imwrite(\"resource/ipad_copy.jpeg\", img)\n\nshowImage()\n","repo_name":"dl57934/ML-Books","sub_path":"openCv/1. imageReadWrite/1imageReadingAndWriting.py","file_name":"1imageReadingAndWriting.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"7174879603","text":"import sys, os\nsys.path.append(os.path.join(sys.path[0], \"../..\"))\n\nimport condition\nimport ls\n\nclass ComparisonCondition (condition.Condition):\n ''' a condition that compares two variables with a given comparator\n ( ==, !=, <, >, <=, >= ).\n '''\n def __init__ ( self, id, a, b, aType, bType, operand):\n self.id = id\n self.type = \"comparison\"\n self.a = a\n self.b = b\n self.aType = aType\n self.bType = bType\n self.operand = operand\n\n def check ( self, vars, conds, locs=None, userLoc=None ):\n ''' check the condition. '''\n a = self.value(self.a, self.aType, vars)\n b = self.value(self.b, self.bType, vars)\n\n # in js, ( int undefined ) = false. in Python, it throws an error.\n if (a is None and type(b) is int) or (b is None and type(a) is int):\n return False\n\n if self.operand == \"==\":\n return a == b\n elif self.operand == \"!=\":\n return a != b\n elif self.operand == \"<\":\n return a < b\n elif self.operand == \">\":\n return a > b\n elif self.operand == \"<=\":\n return a <= b\n elif self.operand == \">=\":\n return a >= b\n\n def value ( self, value, type, vars ):\n ''' get the value of a / b. '''\n if type == \"Variable\":\n variable = ls.get(vars, value)\n return variable.value if variable else None\n if type == \"Integer\":\n return int(value)\n if type == \"String\":\n if value == \"true\" or value == \"false\":\n return value == \"true\"\n elif value.isdigit():\n return int(value)\n else:\n return value\n return value\n","repo_name":"charlie-wt/user-model","sub_path":"models/conditions/comparisonCondition.py","file_name":"comparisonCondition.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"71022364318","text":"import numpy as np\nfrom world import get_neighbours, calc_score, find_disjoint_components, DIRECTIONS, MOVES, roll_coordinates, update_direction\nimport time\n\nLIMIT = 0.9\nARROW_IS_HOT = True\n\ndef HC_1NB_CELLS(_map):\n result = []\n for y, x in np.argwhere(_map == '.'):\n nb = get_neighbours(_map, x, y)\n nb_ground = [e for e in nb if e[4] == False]\n if len(nb_ground) == 0:\n result.append((x, y, 'U'))\n if len(nb_ground) == 1:\n result.append((x, y, nb_ground[0][0]))\n return result\n\n\ndef any_arrow(nb):\n for n in nb:\n if n[3] in DIRECTIONS:\n return True\n return False\n\n\ndef find_variants_for_mc(_map):\n variants = []\n for y, x in np.argwhere(_map == '.'):\n nb = get_neighbours(_map, x, y)\n hot = nb[0][4] != nb[1][4] or nb[2][4] != nb[3][4]\n if not hot and ARROW_IS_HOT:\n hot = any_arrow(nb)\n if not hot and np.random.random() > 0.55:\n hot = True\n has = nb[0][4] == nb[1][4] or nb[2][4] == nb[3][4]\n if not hot:\n continue\n k = ([], [''])[has] + [e[0] for e in nb if not e[4]]\n if len(k) > 0:\n variants.append(((x, y), k))\n return variants\n\n\ndef DO_MONTE_CARLO(_map, robots, start_time):\n score = calc_score(_map, robots)\n variants = find_variants_for_mc(_map)\n result = []\n if len(variants) == 0:\n return result\n d = dict((v[0], '') for v in variants)\n while True:\n elapsed_time = time.time() - start_time\n if elapsed_time > 0.8:\n break\n for v in variants:\n d[v[0]] = v[1][np.random.randint(0, len(v[1]))]\n sc = calc_score(_map, robots, d)\n if sc > score:\n result = []\n for k in d:\n if d[k] != '':\n result.append((k[0], k[1], d[k]))\n score = sc\n return result\n\n\ndef move_till_hp_or_death(w, x, y, d, _map, visited_states, arrows, hot_points):\n sp = (x, y)\n arrows[sp] = w\n x, y, d, is_live, new_visited_states = move_till_hp_or_death2(x, y, d, _map, visited_states, arrows, hot_points)\n del arrows[sp]\n return w, x, y, d, is_live, new_visited_states\n\n\ndef move_till_hp_or_death2(x, y, d, _map, visited_states, arrows, hot_points):\n new_visited_states = set()\n d = update_direction(_map, x, y, d, arrows)\n is_live = True\n while is_live and (x, y) not in hot_points:\n new_visited_states.add((x, y, d))\n x += MOVES[d][0]\n y += MOVES[d][1]\n x, y = roll_coordinates(x, y)\n d = update_direction(_map, x, y, d, arrows)\n is_live = _map[y, x] != '#' and (x, y, d) not in visited_states and (x, y, d) not in new_visited_states\n return x, y, d, is_live, new_visited_states\n\n\ndef DO_SPLITTED_MONTE_CARLO(_map, robots, start_time, cutoff_score=0):\n components = find_disjoint_components(_map, robots)\n p2c = {}\n for i, cmp in enumerate(components):\n for p in cmp[1]:\n p2c[p] = i\n variants = find_variants_for_mc(_map)\n variants_splitted = [[] for i in range(len(components))]\n for v in variants:\n if v[0] in p2c:\n variants_splitted[p2c[v[0]]].append(v)\n scores = [calc_score(_map, c[0]) for c in components]\n results = [[]] * len(components)\n ds = [dict((v[0], '') for v in variants) for variants in variants_splitted]\n while True:\n elapsed_time = time.time() - start_time\n if elapsed_time > LIMIT:\n break\n for i in range(len(components)):\n variants = variants_splitted[i]\n d = ds[i]\n for v in variants:\n d[v[0]] = v[1][np.random.randint(0, len(v[1]))]\n sc = calc_score(_map, components[i][0], d)\n if sc > scores[i]:\n result = []\n for k in d:\n if d[k] != '':\n result.append((k[0], k[1], d[k]))\n scores[i] = sc\n results[i] = result\n if sum(scores) < cutoff_score:\n return []\n result = [item for sublist in results 
for item in sublist]\n    return result\n\n\ndef rec(x, y, d, lv, _map, visited, arrows, hot_points, depth):\n if depth == 5 or not lv:\n return None, lv, len(visited)\n\n sp = (x, y)\n if sp in hot_points:\n hot_point = hot_points[sp]\n del hot_points[sp]\n _max = (False, 0)\n _wmax = None\n for w in hot_point:\n arrows[sp] = w\n _x, _y, _d, _lv, _vs = move_till_hp_or_death2(x, y, d, _map, visited, arrows, hot_points)\n e1, e2, e3 = rec(_x, _y, _d, _lv, _map, visited.union(_vs), arrows, hot_points, depth + 1)\n r = (e2, e3)\n if r > _max:\n _wmax, _max = w, r\n del arrows[sp]\n hot_points[sp] = hot_point\n return _wmax, _max[0], _max[1]\n else:\n _x, _y, _d, _lv, _vs = move_till_hp_or_death2(x, y, d, _map, visited, arrows, hot_points)\n # unpack the recursive result so this branch also returns a 3-tuple;\n # returning the nested tuple directly broke callers that unpack three values\n _, sub_lv, sub_n = rec(_x, _y, _d, _lv, _map, visited.union(_vs), arrows, hot_points, depth + 1)\n return None, sub_lv, sub_n\n\n\ndef DO_GREED(_map, robots):\n variants = find_variants_for_mc(_map)\n hot_points = {}\n for v in variants:\n hot_points[v[0]] = v[1]\n arrows = {}\n\n is_live = []\n states = []\n visited = []\n for x, y, d in robots:\n states.append((x, y, d))\n visited.append(set())\n is_live.append(True)\n\n while any(is_live):\n for i in range(len(robots)):\n x, y, d = states[i]\n sp = (x, y)\n if sp in hot_points:\n hot_point = hot_points[sp]\n del hot_points[sp]\n ways = []\n for w in hot_point:\n if w == '':\n continue\n ways.append(move_till_hp_or_death(w, x, y, d, _map, visited[i], arrows, hot_points))\n w, x, y, d, is_live[i], new_visited_states = max(ways, key=lambda way: (way[4], len(way[5])))\n arrows[sp] = w\n else:\n x, y, d, is_live[i], new_visited_states = move_till_hp_or_death2(x, y, d, _map, visited[i], arrows, hot_points)\n visited[i] = visited[i].union(new_visited_states)\n states[i] = (x, y, d)\n\n result = []\n for e in arrows:\n result.append((e[0], e[1], arrows[e]))\n return result\n\n\ndef DO_GREED_DEPTH(_map, robots):\n variants = find_variants_for_mc(_map)\n hot_points = {}\n for v in variants:\n hot_points[v[0]] = v[1]\n arrows = {}\n\n is_live = []\n states = []\n visited = []\n for x, y, d in robots:\n states.append((x, y, d))\n visited.append(set())\n is_live.append(True)\n\n while any(is_live):\n for i in range(len(robots)):\n x, y, d = states[i]\n sp = (x, y)\n if sp in hot_points:\n w, _, _ = rec(x, y, d, is_live[i], _map, visited[i], arrows, hot_points, 0)\n hot_point = hot_points[sp]\n del hot_points[sp]\n arrows[sp] = w\n x, y, d, is_live[i], new_visited_states = move_till_hp_or_death2(x, y, d, _map, visited[i], arrows, hot_points)\n else:\n x, y, d, is_live[i], new_visited_states = move_till_hp_or_death2(x, y, d, _map, visited[i], arrows, hot_points)\n visited[i] = visited[i].union(new_visited_states)\n states[i] = (x, y, d)\n\n result = []\n for e in arrows:\n if arrows[e] != '':\n result.append((e[0], e[1], arrows[e]))\n return result","repo_name":"empathy87/contests","sub_path":"A_Craft/solution/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"39918953019","text":"\"\"\"\nTheConstruct - NEO smart contract - https://github.com/nickazg/TheConstruct\n\nAuthor: Nick Grobler\nEmail: nickazg@gmail.com\nDate: Feb 25 2017\n\"\"\"\n\nVERSION = \"1.0.0\"\n\n# BOA\nfrom boa.blockchain.vm.Neo.Runtime import GetTrigger, CheckWitness\nfrom boa.blockchain.vm.Neo.TriggerType import Application, Verification\nfrom boa.blockchain.vm.System.ExecutionEngine import GetScriptContainer, GetExecutingScriptHash\nfrom boa.blockchain.vm.Neo.Transaction import Transaction, GetReferences, GetOutputs, GetInputs, GetUnspentCoins\nfrom boa.blockchain.vm.Neo.Action import RegisterAction\n\n\n# THE CONSTRUCT - PLATFORMS\nfrom construct.platform.SmartTokenShare import SmartTokenShare, sts_get_attr, sts_create, sts_get, get_total_in_circulation, sts_total_available_amount \nfrom construct.platform.FundingStage import FundingStage, fs_get_attr, fs_create, fs_get, fs_contribute, fs_status, fs_can_exchange, fs_add_to_circulation, fs_calculate_can_exchange, get_in_circulation, fs_claim_contributions, fs_refund, fs_get_addr_balance, fs_set_addr_balance, fs_claim_system_fee, fs_calculate_system_fee, fs_available_amount\nfrom construct.platform.Milestone import Milestone, ms_create, ms_get, ms_get_attr, ms_update_progress, ms_get_progress\nfrom construct.platform.FundingRoadmap import FundingRoadmap, fr_list_append, fr_add_list, fr_get_list, fr_add_funding_stage, fr_get_funding_stages, fr_add_milestone, fr_get_milestones, fr_add_project_admin, fr_get_project_admins, fr_set_active_index, fr_get_active_index, fr_update_milestone_progress\nfrom construct.platform.KYC import KYC\n\n# THE CONSTRUCT - COMMON\nfrom construct.common.StorageManager import StorageManager\nfrom construct.common.Txio import Attachments, get_asset_attachments, get_asset_attachments_for_prev\n\nOnOperationInvoke = RegisterAction('operations_invoke','op_name')\n\nGAS_ASSET_ID = b'\\xe7\\x2d\\x28\\x69\\x79\\xee\\x6c\\xb1\\xb7\\xe6\\x5d\\xfd\\xdf\\xb2\\xe3\\x84\\x10\\x0b\\x8d\\x14\\x8e\\x77\\x58\\xde\\x42\\xe4\\x16\\x8b\\x71\\x79\\x2c\\x60'\n\ndef Main(operation, args):\n \"\"\"Entry point for the smart contract.\n Args:\n operation (str):\n UUID used as the first part of the key for Storage.Put().\n args (str):\n UUID used as the second part of the key for Storage.Put().\n Return:\n (bytearray): The result of the operation\n \"\"\"\n\n # Gets the transaction trigger\n trigger = GetTrigger()\n storage = StorageManager()\n\n invalid_args_msg = 'INVALID ARGS'\n invaild_op_msg = 'INVALID OPERATION'\n\n if trigger == Verification:\n print('Verification')\n\n attachments = get_asset_attachments()\n prev_attachments = get_asset_attachments_for_prev()\n\n gas_requested = prev_attachments.gas_attached - attachments.gas_attached\n print(gas_requested)\n\n # Get amount avaliable for address\n claim_amount = storage.get_double('CLAIM', attachments.receiver_addr)\n\n # If the request is the EXACT amount (not less), approve the tx\n if claim_amount == gas_requested:\n print('Successfully send claim tx')\n return True \n\n elif trigger == Application:\n print('Application')\n\n kyc = KYC() \n\n # F U N D I N G R O A D M A P #\n \n project_id = args[0]\n\n sts = sts_get(project_id)\n \n # ARGS: project_id, refund_addr\n if operation == 'check_claim_owed':\n OnOperationInvoke('check_claim_owed')\n print('execute:check_claim_owed')\n if len(args) == 2:\n refund_addr = args[1]\n return storage.get_double('CLAIM', refund_addr)\n\n # ARGS: project_id, refund_addr\n if operation == 'reset_claim_owed':\n 
OnOperationInvoke('reset_claim_owed')\n print('execute:reset_claim_owed')\n if len(args) == 2:\n refund_addr = args[1]\n return storage.put_double('CLAIM', refund_addr, 0)\n\n # ARGS: project_id, new_admin\n if operation == 'add_project_admins':\n OnOperationInvoke('add_project_admins')\n print('execute:add_project_admins')\n if len(args) == 2:\n if CheckWitness(sts.owner):\n new_admin = args[1]\n fr_add_project_admin(project_id, new_admin)\n return True\n return invalid_args_msg\n\n # ARGS: project_id\n if operation == 'get_active_index':\n OnOperationInvoke('get_active_index')\n print('execute:get_active_index')\n if len(args) == 1:\n return fr_get_active_index(project_id) \n return invalid_args_msg\n \n # ARGS: project_id\n if operation == 'get_funding_stages':\n OnOperationInvoke('get_funding_stages')\n print('execute:get_funding_stages')\n if len(args) == 1:\n funding_stages = fr_get_funding_stages(project_id)\n return funding_stages\n return invalid_args_msg\n\n # ARGS: project_id\n if operation == 'get_active_fs':\n OnOperationInvoke('get_active_fs')\n print('execute:get_active_fs')\n if len(args) == 1:\n active_idx = fr_get_active_index(project_id)\n funding_stages = fr_get_funding_stages(project_id)\n active_funding_stage = funding_stages[active_idx]\n return active_funding_stage\n return invalid_args_msg\n\n # ARGS: project_id\n if operation == 'get_milestones':\n OnOperationInvoke('get_milestones')\n print('execute:get_milestones')\n if len(args) == 1:\n milestones = fr_get_milestones(project_id)\n return milestones\n return invalid_args_msg\n \n # ARGS: project_id\n if operation == 'get_active_ms':\n OnOperationInvoke('get_active_ms')\n print('execute:get_active_ms')\n if len(args) == 1:\n active_idx = fr_get_active_index(project_id)\n milestones = fr_get_milestones(project_id)\n active_milestone = milestones[active_idx]\n return active_milestone\n return invalid_args_msg\n\n # ARGS: project_id, updated_progress\n if operation == 'update_active_ms_progress':\n OnOperationInvoke('update_active_ms_progress')\n print('execute:update_active_ms_progress')\n if len(args) == 2:\n if CheckWitness(sts.owner):\n updated_progress = args[1]\n\n progress = fr_update_milestone_progress(project_id, updated_progress) \n \n return progress\n return invalid_args_msg\n \n \n \n # S M A R T T O K E N S H A R E #\n \n # ARGS: project_id, symbol, decimals, owner, total_supply\n if operation == 'create_sts':\n OnOperationInvoke('create_sts')\n print('execute:create_sts') \n if len(args) == 5:\n symbol = args[1]\n decimals = 8 # hardcoded to 8\n owner = args[3]\n total_supply = args[4] \n \n sts_create(project_id, symbol, decimals, owner, total_supply)\n fr_set_active_index(project_id, 0)\n return project_id\n return invalid_args_msg\n \n # ARGS: project_id, attribute: {'project_id', 'symbol', 'decimals', 'owner', 'total_supply', 'total_in_circulation'}\n if operation == 'sts_attribute':\n OnOperationInvoke('sts_attribute')\n print('execute:sts_attribute')\n if len(args) == 2:\n attr = args[1]\n \n sts = sts_get(project_id)\n return sts_get_attr(sts, attr)\n return invalid_args_msg\n\n # ARGS: project_id\n if operation == 'total_tokens_available':\n OnOperationInvoke('total_tokens_available')\n print('execute:total_tokens_available')\n if len(args) == 1:\n\n sts = sts_get(project_id)\n return sts_total_available_amount(sts)\n return invalid_args_msg\n \n \n \n # F U N D I N G S T A G E #\n \n funding_stage_id = args[1] \n\n # ARGS: project_id, funding_stage_id, start_block, end_block, supply, 
tokens_per_gas\n if operation == 'create_fs':\n OnOperationInvoke('create_fs')\n print('execute:create_fs')\n if len(args) == 6:\n if CheckWitness(sts.owner):\n start_block = args[2]\n end_block = args[3]\n supply = args[4]\n tokens_per_gas = args[5]\n\n fs_create(project_id, funding_stage_id, start_block, end_block, supply, tokens_per_gas)\n fr_add_funding_stage(project_id, funding_stage_id)\n return funding_stage_id\n return invalid_args_msg\n \n # ARGS: project_id, funding_stage_id, attribute: {'project_id', 'funding_stage_id', 'start_block', 'end_block', 'supply', 'tokens_per_gas', 'in_circulation'}\n if operation == 'fs_attribute':\n OnOperationInvoke('fs_attribute')\n print('execute:fs_attribute')\n if len(args) == 3:\n attr = args[2] \n\n fs = fs_get(project_id, funding_stage_id)\n return fs_get_attr(fs, attr)\n\n # ARGS: project_id, funding_stage_id \n if operation == 'fs_tokens_available':\n OnOperationInvoke('fs_tokens_available')\n print('execute:fs_tokens_available')\n if len(args) == 2: \n\n fs = fs_get(project_id, funding_stage_id)\n return fs_available_amount(fs)\n return invalid_args_msg\n \n # ARGS: project_id, funding_stage_id \n if operation == 'fs_status':\n OnOperationInvoke('fs_status')\n print('execute:fs_status')\n if len(args) == 2:\n\n fs = fs_get(project_id, funding_stage_id)\n return fs_status(fs) \n return invalid_args_msg \n \n # ARGS: project_id, funding_stage_id \n if operation == 'fs_contribute':\n OnOperationInvoke('fs_contribute')\n print('execute:fs_contribute')\n if len(args) == 2:\n\n fs = fs_get(project_id, funding_stage_id)\n return fs_contribute(fs) \n return invalid_args_msg \n\n # ARGS: project_id, funding_stage_id, addr \n if operation == 'fs_addr_balance':\n OnOperationInvoke('fs_addr_balance')\n print('execute:fs_addr_balance')\n if len(args) == 3:\n addr = args[2]\n fs = fs_get(project_id, funding_stage_id)\n return fs_get_addr_balance(fs, addr)\n return invalid_args_msg \n\n\n # M I L E S T O N E #\n\n milestone_id = args[1] \n\n # ARGS: project_id, milestone_id, title, subtitle, extra_info_hash\n if operation == 'create_ms':\n OnOperationInvoke('create_ms')\n print('execute:create_ms')\n if len(args) == 5:\n if CheckWitness(sts.owner):\n title = args[2]\n subtitle = args[3] \n extra_info_hash = args[4]\n\n ms_create(project_id, milestone_id, title, subtitle, extra_info_hash)\n fr_add_milestone(project_id, milestone_id)\n return milestone_id\n return invalid_args_msg \n\n # ARGS: project_id, milestone_id, attribute: {'project_id', 'milestone_id', 'title', 'subtitle', 'extra_info_hash', 'progress'}\n if operation == 'ms_attribute':\n OnOperationInvoke('ms_attribute')\n print('execute:ms_attribute')\n if len(args) == 3:\n attr = args[2] \n\n ms = ms_get(project_id, milestone_id)\n return ms_get_attr(ms, attr)\n\n # ARGS: project_id, milestone_id\n if operation == 'get_ms_progess':\n OnOperationInvoke('get_ms_progess')\n print('execute:get_ms_progess')\n if len(args) == 2:\n ms = ms_get(project_id, milestone_id)\n return ms_get_progress(ms)\n return invalid_args_msg \n\n\n # C L A I M S # \n\n funding_stage_id = args[1] \n \n # ARGS: project_id, funding_stage_id, refund_addr \n if operation == 'claim_fs_refund':\n OnOperationInvoke('claim_fs_refund')\n print('execute:claim_fs_refund')\n if len(args) == 3:\n refund_addr = args[2] \n\n fs = fs_get(project_id, funding_stage_id)\n return fs_refund(fs, refund_addr)\n return invalid_args_msg \n\n # ARGS: project_id, funding_stage_id, owner_addr\n if operation == 'claim_fs_contributions':\n 
OnOperationInvoke('claim_fs_contributions')\n print('execute:claim_fs_contributions')\n if len(args) == 3:\n owner_addr = args[2]\n\n fs = fs_get(project_id, funding_stage_id)\n return fs_claim_contributions(fs, owner_addr) \n return invalid_args_msg \n\n # ARGS: project_id, funding_stage_id, system_owner_addr\n if operation == 'claim_fs_system_fee':\n OnOperationInvoke('claim_fs_system_fee')\n print('execute:claim_fs_system_fee')\n if len(args) == 3: \n system_owner_addr = args[2]\n\n fs = fs_get(project_id, funding_stage_id)\n return fs_claim_system_fee(fs, system_owner_addr)\n return invalid_args_msg\n\n \n \n # K Y C #\n\n # ARGS: project_id, address, phys_address, first_name, last_name, id_type, id_number, id_expiry, file_location, file_hash\n if operation == 'kyc_submit':\n OnOperationInvoke('kyc_submit')\n print('execute:kyc_submit')\n if len(args) == 10:\n address = args[1]\n phys_address = args[2]\n first_name = args[3]\n last_name = args[4]\n id_type = args[5]\n id_number = args[6]\n id_expiry = args[7]\n file_location = args[8]\n file_hash = args[9]\n\n kyc.kyc_submit(project_id, address, phys_address, first_name, last_name, id_type, id_number, id_expiry, file_location, file_hash)\n return address\n return invalid_args_msg\n \n # ARGS: project_id, addresses -> \n if operation == 'kyc_register':\n if CheckWitness(sts.owner):\n OnOperationInvoke('kyc_register')\n print('execute:kyc_register')\n if len(args) > 1:\n # addresses = args[1:]\n\n return kyc.kyc_register(project_id, args)\n return invalid_args_msg\n \n # ARGS: project_id, address\n if operation == 'kyc_status':\n OnOperationInvoke('kyc_status')\n print('execute:kyc_status')\n if len(args) == 2:\n address = args[1]\n\n return kyc.kyc_status(project_id, address)\n return invalid_args_msg\n\n # ARGS: project_id, address\n if operation == 'get_kyc_submission':\n OnOperationInvoke('get_kyc_submission')\n print('execute:get_kyc_submission')\n if len(args) == 2:\n address = args[1]\n\n return kyc.get_kyc_submission(project_id, address)\n return invalid_args_msg\n\n \n return invalid_op_msg\n","repo_name":"nickazg/TheConstruct","sub_path":"construct/main/TheConstruct.py","file_name":"TheConstruct.py","file_ext":"py","file_size_in_byte":15988,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"51"}
+{"seq_id":"32389414333","text":"import discord\nfrom discord.ext import commands\nimport time\nimport random\n\nclass molestar(commands.Cog):\n\n\n\tdef __init__(self, client):\n\t\tself.client = client\n\n\t\n\n\t@commands.command()\n\tasync def tula(self, ctx):\n\t\tawait ctx.send('Calculando tula... Espere')\n\t\ttime.sleep(2)\n\t\tmida = tulam()\n\t\tembed = discord.Embed(title=\"Tula\", colour=discord.Colour(0x42ff68))\n\n\t\tembed.add_field(name='Tula', value=f'Tu tula mide {mida}cm, ¡espectacular!')\n\n\t\tawait ctx.send(embed=embed)\n\n\t@commands.command()\n\tasync def mira_que_tengo(self, ctx):\n\t\tawait ctx.send(':ok_hand:')\n\ndef setup(client):\n\tclient.add_cog(molestar(client))\n\ndef tulam():\n\tprobabilitat = random.randint(0,25)\n\tif probabilitat == 0 or probabilitat == 4 or probabilitat == 5:\n\t\tmida = random.randint(9, 99)\n\t\treturn(mida)\n\telif probabilitat == 1:\n\t\tmida1 = 0\n\t\treturn(mida1)\n\telif probabilitat == 2:\n\t\tmida2 = 100\n\t\treturn(mida2)\n\telse:\n\t\tmidae = random.randint(1, 8)\n\t\treturn(midae)","repo_name":"AnthrLine/M.Rajoy","sub_path":"cogs/per_molestar.py","file_name":"per_molestar.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40388860935","text":"n=int(input())\ns=input()\nk=-1\ncnt=0\nfor i in range(n):\n if int(s[i])!=k:\n cnt+=1\n k=int(s[i])\nif s[0]=='0':\n cnt-=1\nprint(cnt//2)\n \n \n","repo_name":"qpwoeirut/competitive-programming","sub_path":"StanfordProco/2019/cloning/solutions_testdata/scarletwitch/solutions/scarletwitch_py.py","file_name":"scarletwitch_py.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"51"}
+{"seq_id":"32653319391","text":"from neuron import h\n\nh('''\n\nx=5\ndouble y[4]\n\nfunc f() { return 6 }\n\nbegintemplate A\nproc call() {\n $o1._()\n}\nendtemplate A\n''')\n\ndef callback():\n print('callback')\n print(h.f())\n h.x = 4\n print(h.x)\n h.y[2] = 6\n print(h.y[2])\n\n\na = h.A()\na.call(callback)\n\nfrom neuron import h, nonvint_block_supervisor\nh.load_file('stdrun.hoc')\n\n_callbacks = [None]*11\n_callbacks[1] = callback\nnonvint_block_supervisor.register(_callbacks)\n\n#h.cvode_active(0)\nh('''\nbegintemplate B\nproc call() {\n // anything that ends up calling a nonvint_block_supervisor callback\n // that calls 'def callback' within this context\n finitialize()\n}\n\nendtemplate B\n''')\n\nh.finitialize()\nprint('ok so far')\nb = h.B()\nb.call()\n","repo_name":"neuronsimulator/nrntest","sub_path":"npy/hobjcontext2.py","file_name":"hobjcontext2.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"19640530468","text":"import os\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-enum\", \"--entities-num\",\n default=100, type=int, help=\"number of entities\")\nparser.add_argument(\"--dir\",\n default='../data/', type=str, help=\"directory\")\n\n# entities_num = 100\n# dataset_name = './drugbank_{}'.format(entities_num)\n\ndef main():\n global args\n args = parser.parse_args()\n entities_num = args.entities_num\n dataset_name = os.path.join(args.dir, 'subsample_{}'.format(entities_num))\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n df = pd.read_csv('./drugbank_data_subset{}.txt'.format(entities_num), sep='\\t', header=None)\n num_samples = df.shape[0]\n\n entities = df.iloc[:, 0].tolist() + df.iloc[:, 2].tolist()\n entities = set(entities)\n entities = pd.DataFrame(entities)\n entities.reset_index(inplace=True)\n # entities.to_csv(dataset_name + '/id2entity.txt', sep='\\t', index=False, header=None)\n entities = entities.iloc[:, [1, 0]]\n entities.to_csv(dataset_name + '/entity2id.txt', sep='\\t',index=False, header=None)\n\n relations = df.iloc[:, 1].tolist()\n relations = set(relations)\n relations = pd.DataFrame(relations)\n relations.reset_index(inplace=True)\n # relations.to_csv(dataset_name + '/idrelation.txt', sep='\\t', index=False, header=None)\n relations = relations.iloc[:, [1, 0]]\n relations.to_csv(dataset_name + '/relation2id.txt', sep='\\t', index=False, header=None)\n\n train_size = int(0.7 * num_samples)\n val_size = int(0.1 * num_samples)\n test_size = int(0.2 * num_samples)\n print('Data size', train_size, val_size, test_size)\n relations_set = set(df.iloc[:, 1].tolist())\n count = 0\n # get all relation at least once\n drop_set = []\n train_data = []\n for r in relations_set:\n drug_interactions = df.loc[df.iloc[:, 1] == r]\n drug_interactions = drug_interactions.iloc[0]\n drop_set.append(drug_interactions.name)\n train_data.append(drug_interactions.tolist())\n\n df = df.loc[~df.index.isin(drop_set)]\n print('After processed', len(train_data))\n train_size = train_size - len(train_data)\n print('Train size left', train_size)\n train_data = pd.DataFrame(train_data)\n train_data = train_data.append(df.sample(n=train_size, replace=False))\n total_r_in_train = len(set(train_data.iloc[:,1].tolist()))\n print('Sampling condition:', len(relations_set), total_r_in_train)\n\n df = df.loc[~df.index.isin(train_data.index)]\n val_data = df.sample(n=val_size, replace=False)\n df = df.loc[~df.index.isin(val_data.index)]\n test_data = df.copy()\n print(train_data.shape, val_data.shape, test_data.shape)\n print(f'Train has {len(set(train_data.iloc[:,1].tolist()))}')\n print(f'Original dataset has {len(relations_set)}')\n print('Check overlap', train_data.index.isin(test_data.index).sum())\n train_data.to_csv(dataset_name + '/' + 'train.txt', sep='\\t', index=False, header=None)\n val_data.to_csv(dataset_name + '/' + 'valid.txt', sep='\\t', index=False, header=None)\n test_data.to_csv(dataset_name + '/' + 'test.txt', sep='\\t', index=False, header=None)\n\nif __name__=='__main__':\n main()\n","repo_name":"bofei5675/KGs_inferences","sub_path":"inferences/create_dataset_kbat.py","file_name":"create_dataset_kbat.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"12975296890","text":"\"\"\" Unit tests for `WithdrawalForecast`. \"\"\"\n\nimport unittest\nfrom decimal import Decimal\nfrom forecaster import (\n Person, Tax, Timing, WithdrawalForecast, TransactionStrategy,\n Account, canada, recorded_property)\nfrom tests.util import TestCaseTransactions\n\nclass WithholdingAccount(Account):\n \"\"\" Testing account. 50% of withdrawals withheld. \"\"\"\n\n @recorded_property\n def tax_withheld(self):\n \"\"\" Always withhold 50% \"\"\"\n return self.outflows() / 2\n\nclass TestWithdrawalForecast(TestCaseTransactions):\n \"\"\" Tests WithdrawalForecast. \"\"\"\n\n def setUp(self):\n \"\"\" Builds stock variables to test with. \"\"\"\n self.initial_year = 2000\n # Simple tax treatment: 50% tax rate across the board.\n tax = Tax(tax_brackets={\n self.initial_year: {0: 0.5}})\n # Accounts need an owner:\n timing = Timing(frequency='BW')\n self.person = Person(\n initial_year=self.initial_year,\n name=\"Test\",\n birth_date=\"1 January 1980\",\n retirement_date=\"31 December 1999\", # last year\n gross_income=5200,\n tax_treatment=tax,\n payment_timing=timing)\n # We want at least two accounts which are withdrawn from\n # in different orders depending on the strategy.\n self.account = Account(\n owner=self.person,\n balance=60000) # $60,000 <- BIGGER!\n self.rrsp = canada.accounts.RRSP(\n owner=self.person,\n contribution_room=1000,\n balance=6000) # $6,000\n\n # Assume there are $2000 in inflows and $22,000 in outflows,\n # for a net need of $20,000:\n self.available = {\n 0.25: 1000,\n 0.5: -11000,\n 0.75: 1000,\n 1: -11000\n }\n\n # Now we can set up the big-ticket items:\n self.strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_ordered,\n weights={\"RRSP\": 1, \"Account\": 2})\n self.forecast = WithdrawalForecast(\n initial_year=self.initial_year,\n people={self.person},\n accounts={self.account, self.rrsp},\n transaction_strategy=self.strategy)\n\n # Set up another forecast for testing withholding behaviour:\n self.withholding_account = WithholdingAccount(\n owner=self.person,\n balance=100000)\n self.withholding_strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_ordered,\n weights={\"WithholdingAccount\": 1})\n self.withholding_forecast = WithdrawalForecast(\n initial_year=self.initial_year,\n people={self.person},\n accounts={self.withholding_account},\n transaction_strategy=self.withholding_strategy)\n\n def setUp_decimal(self):\n \"\"\" Builds stock variables to test with. 
\"\"\"\n # pylint: disable=invalid-name\n # Pylint doesn't like `setUp_decimal`, but it's not our naming\n # convention, so don't complain to us!\n # pylint: enable=invalid-name\n\n self.initial_year = 2000\n # Simple tax treatment: 50% tax rate across the board.\n tax = Tax(tax_brackets={\n self.initial_year: {Decimal(0): Decimal(0.5)}},\n high_precision=Decimal)\n # Accounts need an owner:\n timing = Timing(frequency='BW',high_precision=Decimal)\n self.person = Person(\n initial_year=self.initial_year,\n name=\"Test\",\n birth_date=\"1 January 1980\",\n retirement_date=\"31 December 1999\", # last year\n gross_income=Decimal(5200),\n tax_treatment=tax,\n payment_timing=timing,\n high_precision=Decimal)\n # We want at least two accounts which are withdrawn from\n # in different orders depending on the strategy.\n self.account = Account(\n owner=self.person,\n balance=Decimal(60000), # $60,000 <- BIGGER!\n high_precision=Decimal)\n self.rrsp = canada.accounts.RRSP(\n owner=self.person,\n contribution_room=Decimal(1000),\n balance=Decimal(6000), # $6,000\n high_precision=Decimal)\n\n # Assume there are $2000 in inflows and $22,000 in outflows,\n # for a net need of $20,000:\n self.available = {\n Decimal(0.25): Decimal(1000),\n Decimal(0.5): Decimal(-11000),\n Decimal(0.75): Decimal(1000),\n Decimal(1): Decimal(-11000)\n }\n\n # Now we can set up the big-ticket items:\n self.strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_ordered,\n weights={\"RRSP\": Decimal(1), \"Account\": Decimal(2)})\n self.forecast = WithdrawalForecast(\n initial_year=self.initial_year,\n people={self.person},\n accounts={self.account, self.rrsp},\n transaction_strategy=self.strategy,\n high_precision=Decimal)\n\n # Set up another forecast for testing withholding behaviour:\n self.withholding_account = WithholdingAccount(\n owner=self.person,\n balance=Decimal(100000),\n high_precision=Decimal)\n self.withholding_strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_ordered,\n weights={\"WithholdingAccount\": Decimal(1)},\n high_precision=Decimal)\n self.withholding_forecast = WithdrawalForecast(\n initial_year=self.initial_year,\n people={self.person},\n accounts={self.withholding_account},\n transaction_strategy=self.withholding_strategy,\n high_precision=Decimal)\n\n def test_account_trans_ordered(self):\n \"\"\" Test account transactions under ordered strategy. \"\"\"\n # Set up forecast:\n self.forecast.transaction_strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_ordered,\n weights={\"RRSP\": 1, \"Account\": 2})\n self.forecast(self.available)\n # We are withdrawing $20,000. We'll withdraw the whole balance\n # of `rrsp` ($6000), with the rest from `account`:\n self.assertTransactions(\n self.forecast.account_transactions[self.rrsp], -6000)\n self.assertTransactions(\n self.forecast.account_transactions[self.account], -14000)\n\n def test_account_trans_weighted(self):\n \"\"\" Test account transactions under weighted strategy. \"\"\"\n # Set up forecast:\n self.forecast.transaction_strategy = TransactionStrategy(\n strategy=TransactionStrategy.strategy_weighted,\n weights={\"RRSP\": 3000, \"Account\": 17000})\n self.forecast(self.available)\n # We are withdrawing $20,000. 
We'll withdraw $3000 from\n # `rrsp`, with the rest from `account`:\n self.assertTransactions(\n self.forecast.account_transactions[self.rrsp], -3000)\n self.assertTransactions(\n self.forecast.account_transactions[self.account], -17000)\n\n def test_gross_withdrawals(self):\n \"\"\" Test total withdrawn from accounts. \"\"\"\n # Set up forecast:\n self.forecast(self.available)\n\n # For default `available`, should withdraw $20,000.\n self.assertEqual(\n self.forecast.gross_withdrawals, 20000)\n\n def test_tax_withheld(self):\n \"\"\" Test tax withheld from accounts. \"\"\"\n # Set up forecast:\n self.withholding_forecast(self.available)\n\n # Total withholdings are $10000 (half of $20,000 withdrawn)\n self.assertAlmostEqual(\n self.withholding_forecast.tax_withheld, -10000)\n\n def test_net_withdrawals(self):\n \"\"\" Test total withdrawn from accounts, net of taxes. \"\"\"\n # Set up forecast:\n self.withholding_forecast(self.available)\n\n # Total withdrawals are $20,000 and total withheld is $10,000,\n # for total of $10,000 in net withdrawals:\n self.assertAlmostEqual(\n self.withholding_forecast.net_withdrawals, 10000)\n\n def test_mutate_available(self):\n \"\"\" Invoke __call__ on `available`. \"\"\"\n # Invoke __call__:\n self.withholding_forecast(self.available)\n\n # The amount withdrawn should zero out `available`,\n # subject to 50% withholding taxes (i.e. `available` should\n # only be reduced to -$10,000):\n self.assertTransactions(self.available, -10000)\n\n def test_decimal(self):\n \"\"\" Test WithdrawalStrategy with Decimal inputs. \"\"\"\n # Convert values to Decimal:\n self.setUp_decimal()\n\n # This test is based on test_mutate_available:\n # Invoke __call__:\n self.withholding_forecast(self.available)\n\n # The amount withdrawn should zero out `available`,\n # subject to 50% withholding taxes (i.e. `available` should\n # only be reduced to -$10,000):\n self.assertTransactions(self.available, Decimal(-10000))\n\n\nif __name__ == '__main__':\n unittest.TextTestRunner().run(\n unittest.TestLoader().loadTestsFromName(__name__))\n","repo_name":"ChrisCScott/forecaster","sub_path":"tests/forecast/test_withdrawal.py","file_name":"test_withdrawal.py","file_ext":"py","file_size_in_byte":9137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"72113946398","text":"import warnings\nwarnings.simplefilter(action=\"ignore\", category=RuntimeWarning)\nwarnings.simplefilter(action=\"ignore\", category=PendingDeprecationWarning)\nimport pytest\nimport os\nimport numpy.testing as nt\n\nfrom tempfile import NamedTemporaryFile, mkdtemp\n\nfrom hicmatrix import HiCMatrix as hm\n\n\nfrom schicexplorer import scHicCreateBulkMatrix\nROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"test-data/\")\n\n\ndef test_correct_matrices():\n outfile = NamedTemporaryFile(suffix='.scool', delete=False)\n\n outfile.close()\n args = \"--matrix {} --outFileName {} -t {} \".format(ROOT + 'test_matrix.scool',\n outfile.name, 1).split()\n scHicCreateBulkMatrix.main(args)\n\n test_data_matrix = ROOT + 'scHicCreateBulkMatrix/test_matrix_bulk.cool'\n\n test = hm.hiCMatrix(test_data_matrix)\n created = hm.hiCMatrix(outfile.name)\n nt.assert_almost_equal(test.matrix.data, created.matrix.data, decimal=5)\n nt.assert_equal(test.cut_intervals, created.cut_intervals)\n\n os.unlink(outfile.name)\n\n\ndef test_version():\n args = \"--version\".split()\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n scHicCreateBulkMatrix.main(args)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n\n\ndef test_help():\n args = \"--help\".split()\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n scHicCreateBulkMatrix.main(args)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n","repo_name":"joachimwolff/scHiCExplorer","sub_path":"schicexplorer/test/test_scHicCreateBulkMatrix.py","file_name":"test_scHicCreateBulkMatrix.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"51"}
+{"seq_id":"74539949599","text":"import numpy as np\r\nfrom ._sampling import get_distribution_template, get_sample\r\nimport scipy.fftpack\r\n\r\n\r\ndef _fftconvolve1d(in1, in2):\r\n \"\"\"1D convolution along the first dimension\"\"\"\r\n m, n1 = in1.shape\r\n k, n2 = in2.shape\r\n rlen = m + k - 1\r\n rlen_p2 = scipy.fftpack.helper.next_fast_len(int(rlen))\r\n\r\n XX = np.fft.rfft(in1, rlen_p2, axis=0)\r\n YY = np.fft.rfft(in2, rlen_p2, axis=0)\r\n ret = np.fft.irfft(XX * YY, rlen_p2, axis=0)\r\n\r\n # the use of rfft seems to be faster than the rfftn used in scipy.signal.fftconvolve on CPU and numpy 1.16.2. Not\r\n # sure about the reason.\r\n #sp1 = np.fft.rfftn(in1, [rlen_p2,1], axes=(0,1))\r\n #sp2 = np.fft.rfftn(in2, [rlen_p2,1], axes=(0,1))\r\n #ret2 = np.fft.irfftn(sp1 * sp2, [rlen_p2,1], axes=(0,1))\r\n\r\n return ret[:rlen, :]\r\n\r\n\r\ndef _comp_noise_scale_given_snr(signal, noise, snr):\r\n Px = np.mean(signal ** 2)\r\n Pn = np.mean(noise ** 2)\r\n scale = np.sqrt(Px / Pn * 10 ** ((-snr) / 10))\r\n return scale\r\n\r\n\r\nclass _NoiseSampler:\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def sample_noise(noise, required_length):\r\n \"\"\"Sample noise start point.\r\n If noise is longer than required length, sample a sub-segment of noise whose length is the same as the required length.\r\n If noise is shorter than required length, sample a starting point for noise. \"\"\"\r\n required_length = int(required_length)\r\n n_sample, n_ch = noise.shape\r\n\r\n # repeat noise if necessary\r\n if n_sample <= required_length:\r\n n_extra = required_length - n_sample\r\n sampler_config = get_distribution_template('none', max=n_extra, min=0, mean=None, std=None, distribution='uniform_int')\r\n start = get_sample(sampler_config)[0]\r\n noise_sampled = np.zeros((required_length,n_ch))\r\n noise_sampled[start:start+n_sample,:] = noise\r\n\r\n else:\r\n n_extra = n_sample - required_length\r\n sampler_config = get_distribution_template('none', max=n_extra, min=0, mean=None, std=None, distribution='uniform_int')\r\n start = get_sample(sampler_config)[0]\r\n noise_sampled = noise[start:start+required_length,:]\r\n\r\n return noise_sampled, start\r\n\r\n @staticmethod\r\n def repeat_noise(noise, required_length):\r\n \"\"\"Randomly sample a noise of required length.\r\n If noise is shorter than required length, repeat noise first. \"\"\"\r\n required_length = int(required_length)\r\n n_sample, n_ch = noise.shape\r\n\r\n # repeat noise if necessary\r\n if n_sample < required_length:\r\n repeat_times = int(np.ceil(required_length / n_sample))\r\n noise = np.tile(noise, (repeat_times, 1))\r\n n_sample = noise.shape[0]\r\n\r\n # randomly sample an initial point\r\n if n_sample==required_length:\r\n start = 0\r\n else:\r\n start = np.random.randint(0, high=n_sample - required_length, size=1)[0]\r\n noise_repeated = noise[start:start + required_length,:]\r\n\r\n return noise_repeated, start\r\n\r\n\r\nclass Distorter:\r\n \"\"\"\r\n Apply room impuse response to input signals. 
Add noise.\r\n \"\"\"\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def apply_rir_and_noise():\r\n pass\r\n\r\n @staticmethod\r\n def add_noise(signal, noise, snr, noise_position_scheme='repeat_noise'):\r\n \"\"\"\r\n Add additive noise to signal\r\n :param signal: TxC matrix, where T is the number of samples and C is the number of channel\r\n :param noise: T2xC matrix, where T2 is the number of samples of the noise\r\n :param snr: a scalar that specifies signal-to-noise ratio (SNR)\r\n :param noise_position_scheme: specify how to position the noise in the final waveform.\r\n :return: distorted signal waveform\r\n \"\"\"\r\n n_sample,n_ch = signal.shape\r\n scale = _comp_noise_scale_given_snr(signal, noise, snr)\r\n noise_scaled = noise * scale\r\n if noise_position_scheme == 'repeat_noise':\r\n noise_positioned, idx = _NoiseSampler.repeat_noise(noise_scaled, n_sample)\r\n elif noise_position_scheme == 'sample_noise':\r\n noise_positioned, idx = _NoiseSampler.sample_noise(noise_scaled, n_sample)\r\n else:\r\n raise Exception(\"Unknown noise position scheme %s\" % (noise_position_scheme))\r\n\r\n distorted = signal + noise_positioned\r\n\r\n return distorted, noise_positioned\r\n\r\n @staticmethod\r\n def apply_rir(wav, rir, fs=16000, sync=True, get_early_reverb=False, early_reverb_cutoff_time=0.04):\r\n \"\"\"\r\n Apply room impulse response to the input signal.\r\n\r\n :param wav: 1D array of source signal waveform\r\n :param rir: TxC matrix, where T is the number of samples in RIR waveform, and C is the number of channels.\r\n :param fs: sampling rate\r\n :param sync: if set to True, output signal will be sample-synchrnoized to input signal. Note that RIR typically\r\n causes a time shift of convolved signal.\r\n :param get_early_reverb: whether to also return early reverbed signal. Early reverbed signal is the obtained by\r\n convoling the signal with only the early reverb part of the RIR. See definition of early_reverb_cutoff_time\r\n :param early_reverb_cutoff_time: the duration in terms of seconds of early reverb responses in RIR.\r\n :return: both reverb and early_reverb signals\r\n \"\"\"\r\n n_sample = wav.size\r\n n_sample_rir = rir.shape[0]\r\n wav = wav.reshape(n_sample, 1)\r\n delay = int(np.argmax(rir,axis=0)[0])\r\n\r\n # generate reverberant speech\r\n reverb = _fftconvolve1d(rir, wav)\r\n\r\n if get_early_reverb:\r\n rir_cutoff = int(np.minimum(n_sample_rir, early_reverb_cutoff_time*fs+delay))\r\n rir_early = rir[:rir_cutoff, :]\r\n early_reverb = _fftconvolve1d(rir_early, wav)\r\n else:\r\n early_reverb = None\r\n\r\n if sync:\r\n reverb = reverb[delay - 1:delay + n_sample - 1, :]\r\n if early_reverb is not None:\r\n early_reverb = early_reverb[delay - 1:delay + n_sample - 1, :]\r\n\r\n return reverb, early_reverb\r\n\r\n\r\ndef _test_speed():\r\n rir = np.random.randn(8000,1)\r\n speech = np.random.randn(160000,1)\r\n\r\n def tic():\r\n import time\r\n return time.time()\r\n\r\n def toc(start_time):\r\n import time\r\n print(\"Elapsed time is %s seconds.\" % (str(time.time() - start_time)))\r\n\r\n t1 = tic()\r\n for i in range(100):\r\n result1 = _fftconvolve1d(rir, speech)\r\n toc(t1)\r\n return_size = result1.size\r\n\r\n t2 = tic()\r\n for i in range(100):\r\n result2 = scipy.signal.fftconvolve(rir, speech)[:return_size, :]\r\n toc(t2)\r\n\r\n t3 = tic()\r\n # scipy convolve automatically select freq. or time domain convolution. 
May be much faster than scipy fftconvolve\r\n # for short inputs.\r\n for i in range(100):\r\n result3 = scipy.signal.convolve(rir, speech)[:return_size, :]\r\n toc(t3)\r\n\r\n import matplotlib.pyplot as plt\r\n plt.plot(result1 - result2)\r\n\r\n\r\n#_test_speed()","repo_name":"jzlianglu/pykaldi2","sub_path":"simulation/_distorter.py","file_name":"_distorter.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"51"}
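# Editor's aside (illustrative sketch, not part of the dataset record above): the
# scale factor in _comp_noise_scale_given_snr follows from solving
# snr = 10*log10(Px / (scale**2 * Pn)) for scale, which gives
# scale = sqrt(Px/Pn * 10**(-snr/10)). A minimal numerical check:
import numpy as np

signal = np.random.randn(16000, 1)
noise = np.random.randn(16000, 1)
snr = 10.0
scale = np.sqrt(np.mean(signal ** 2) / np.mean(noise ** 2) * 10 ** (-snr / 10))
achieved = 10 * np.log10(np.mean(signal ** 2) / np.mean((scale * noise) ** 2))
print(achieved)  # ~10.0 dB, matching the requested SNR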
+{"seq_id":"16040290964","text":"\"\"\"\n File: 1342.py\n Title: Number of Steps to Reduce a Number to Zero\n Difficulty: Easy\n URL: https://leetcode.com/problems/number-of-steps-to-reduce-a-number-to-zero/\n\"\"\"\n\nimport unittest\n\n\nclass Solution:\n def numberOfSteps(self, num: int) -> int:\n count = 0\n n = num\n while n > 0:\n if (n % 2) == 0:\n n //= 2\n else:\n n -= 1\n count += 1\n return count\n\n\nclass SolutionTestCase(unittest.TestCase):\n def test_example1(self):\n # Input\n num = 14\n # Output\n output = 6\n\n solution = Solution()\n self.assertEqual(solution.numberOfSteps(num), output)\n\n def test_example2(self):\n # Input\n num = 8\n # Output\n output = 4\n\n solution = Solution()\n self.assertEqual(solution.numberOfSteps(num), output)\n\n def test_example3(self):\n # Input\n num = 123\n # Output\n output = 12\n\n solution = Solution()\n self.assertEqual(solution.numberOfSteps(num), output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ghkim3221/algorithm","sub_path":"leetcode/1342.py","file_name":"1342.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"18464828430","text":"#!/usr/bin/env python\nimport os\n\nfrom portal import get_portal_ip_list\nfrom cluster import get_cluster_ip_list\n\n\nif __name__ == \"__main__\":\n\tportals = get_portal_ip_list()\n\tclusters = get_cluster_ip_list()\n\tprint(\"portals\", portals)\n\tprint(\"clusters\", clusters)\n\n\tfor i,ip in enumerate(portals):\n\t\tdst = \"portal%02d\" % (1+i)\n\t\tcmd = \"rsync -avze ssh --exclude='*.app' --partial --exclude='.DS_Store' ~/personal/sync/projects/brahman/portal/media/ %s:~/media &\" % dst\n\n\t\t#cmd = \"ssh %s\" % dst\n\t\t#os.system(cmd)\n\t\t# print(cmd)\n\t\tos.system(cmd)\n","repo_name":"vaporstack/brahman","sub_path":"portal/etc/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"24351780450","text":"from __future__ import annotations\nfrom typing import Optional\nfrom collections.abc import Iterator\nfrom time import sleep\n\nfrom stoppy import Stopwatch\n\nfrom interval_timer.interval import Interval\n\n\n# Affects timer precision, but also prevents high CPU usage\n_CPU_SLEEP_S = 0.0001\n\n\ndef interval_timer(period: float, start: int = 0, stop: Optional[int] = None) -> Iterator[Interval]:\n \"\"\"\n An interval timer iterator that synchronises iterations to within specific time intervals.\n\n The time taken for code execution within each iteration will not affect the interval timing, provided that the\n execution time is not longer than the interval period. The caller can check if this is the case by checking the\n `missed` attribute on the returned `Interval` instance.\n\n :param period: The interval period, in seconds.\n :param start: The number of iterations to delay starting by.\n :param stop: The number of iterations to automatically stop after.\n \"\"\"\n index = start\n\n with Stopwatch() as stopwatch:\n while True:\n if stop is not None and index >= stop:\n return\n\n # Starts the stopwatch on first call, guaranteeing that the first call of stopwatch.time returns 0\n count = Interval(index, period, stopwatch.time(True))\n index += 1\n\n # Block the iteration until the next count time\n while stopwatch.time() < count.time:\n sleep(_CPU_SLEEP_S)\n\n yield count\n\n\nclass IntervalError(Exception):\n pass\n","repo_name":"morefigs/interval-timer","sub_path":"interval_timer/interval_timer.py","file_name":"interval_timer.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"51"}
+{"seq_id":"34578759602","text":"from typing import List, Dict, Any\n\nfrom pydantic import Field\n\nfrom recommender_system.utils.base_model import BaseModel\n\n\nclass EASEConfig(BaseModel):\n l2_options: List[float] = Field(\n default=[1, 10, 100],\n alias=\"l2Options\",\n title=\"L2 regularization options\",\n description=\"\"\"All options of the L2 regularization parameter that are tried to obtain the best results.\"\"\",\n )\n reviews_multiplier: float = Field(\n default=0.5,\n alias=\"reviewsMultiplier\",\n title=\"Reviews multiplier\",\n description=\"\"\"At least how many reviews must exist to let this model be trained, as a multiplier of the product\n variants in the database. E.g. at least 100 reviews for 200 product variants with a value of 0.5.\"\"\",\n )\n\n @property\n def info(self) -> Dict[str, Any]:\n return {\n field.alias: {\n \"title\": field.field_info.title,\n \"description\": field.field_info.description,\n }\n for field in self.__fields__.values()\n }\n\n\nclass GRU4RecConfig(BaseModel):\n num_epochs_options: List[int] = Field(\n default=[1],\n alias=\"numEpochsOptions\",\n title=\"Number of epochs options\",\n description=\"\"\"All options of the number of epochs that are tried to obtain the best results.\"\"\",\n )\n batch_size_options: List[int] = Field(\n default=[64],\n alias=\"batchSizeOptions\",\n title=\"Batch size options\",\n description=\"\"\"All options of the batch size that are tried to obtain the best results.\"\"\",\n )\n embedding_size_options: List[int] = Field(\n default=[100],\n alias=\"embeddingSizeOptions\",\n title=\"Number of epochs options\",\n description=\"\"\"All options of the embedding layer size that are tried to obtain the best results.\"\"\",\n )\n hidden_size_options: List[int] = Field(\n default=[100],\n alias=\"hiddenSizeOptions\",\n title=\"Hidden layer size options\",\n description=\"\"\"All options of the hidden layer size that are tried to obtain the best results.\"\"\",\n )\n learning_rate_options: List[float] = Field(\n default=[0.0001],\n alias=\"learningRateOptions\",\n title=\"Learning rate options\",\n description=\"\"\"All options of the learning rate that are tried to obtain the best results.\"\"\",\n )\n incremental_trainings: int = Field(\n default=10,\n alias=\"incrementalTrainings\",\n title=\"Number of incremental trainings\",\n description=\"\"\"Number of incremental trainings between two full trainings. The incremental training only selects\n new enters of the product details and runs one epoch with the currently used parameters.\"\"\",\n )\n events_multiplier: float = Field(\n default=10,\n alias=\"eventsMultiplier\",\n title=\"Events multiplier\",\n description=\"\"\"At least how many visits of product detail must exist to let this model be trained, as a\n multiplier of the product variants in the database. E.g. at least 1000 reviews for 100 product variants with a\n value 10.\"\"\",\n )\n\n @property\n def info(self) -> Dict[str, Any]:\n return {\n field.alias: {\n \"title\": field.field_info.title,\n \"description\": field.field_info.description,\n }\n for field in self.__fields__.values()\n }\n","repo_name":"ecoseller/ecoseller","sub_path":"src/recommender_system/app/recommender_system/models/prediction/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"}
+{"seq_id":"6778013017","text":"import time\nfrom typing import NamedTuple\nfrom .agent_based_api.v1 import (\n get_value_store,\n Metric,\n register,\n Service,\n)\nfrom .utils import diskstat\nfrom .utils.dell_storage import (\n DSResult\n)\n\n\nclass ScDisk(NamedTuple):\n name: str\n status: str\n statusMessage: str\n allocatedSpace: str\n totalSpace: str\n readIops: str\n readBps: str\n readLatency: str\n writeIops: str\n writeBps: str\n writeLatency: str\n\n\ndef parse_dell_storage_disk(string_table):\n return [ScDisk(*disk) for disk in string_table]\n\n\nregister.agent_section(\n name='dell_storage_disk',\n parse_function=parse_dell_storage_disk,\n)\n\n\ndef discovery_dell_storage_disk(section):\n for disk in section:\n yield Service(item=disk.name)\n\n\ndef check_dell_storage_disk(item, params, section):\n for disk in section:\n if not disk.name == item:\n continue\n\n yield from DSResult(disk)\n\n yield Metric('usage',\n int(disk.allocatedSpace),\n boundaries=(0, int(disk.totalSpace)))\n\n value_store = get_value_store()\n try:\n yield from diskstat.check_diskstat_dict(\n params=params,\n disk={\n 'read_ios': int(disk.readIops),\n 'read_throughput': int(disk.readBps),\n 'read_latency': float(disk.readLatency),\n 'write_ios': int(disk.writeIops),\n 'write_throughput': int(disk.writeBps),\n 'write_latency': float(disk.writeLatency),\n },\n value_store=value_store,\n this_time=time.time(),\n )\n except ValueError:\n pass\n\n return\n\n\nregister.check_plugin(\n name='dell_storage_disk',\n service_name='Disk %s',\n discovery_function=discovery_dell_storage_disk,\n check_function=check_dell_storage_disk,\n check_ruleset_name='diskstat',\n check_default_parameters={},\n)\n","repo_name":"jiuka/checkmk_dell_storage","sub_path":"agent_based/dell_storage_disk.py","file_name":"dell_storage_disk.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"12904305180","text":"#!/usr/bin/python3\n\nfrom distutils.core import setup, Extension\n\nFloatToHexModule = Extension('FloatToHex',\n sources = ['floattohexmodule.c'])\n\nsetup (name = 'FloatToHex',\n version = '1.0',\n description = 'Converts float to hex and back',\n ext_modules = [FloatToHexModule])\n\n","repo_name":"gregstoll/floattohex","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"13609937833","text":"def create_user():\r\n sayac = 0\r\n username = input(\"Kullanıcı Adınızı Oluşturunuz :\")\r\n print(\"Giriş Yap\")\r\n while sayac < 3:\r\n while True:\r\n usr = input(\"Kullanıcı Adı :\")\r\n if usr == username :\r\n print(\"Merhaba\", usr)\r\n sayac += 3\r\n break\r\n if usr != username:\r\n print(\"Kullanıcı adı hatalı...\")\r\n sayac += 1\r\n break\r\ncreate_user()\r\ndersler =input(\"almak istediğiniz dersleri yazınız:\")\r\na_ders=list (dersler)\r\nbosluk_sayac=0\r\nfor i in a_ders:\r\n if i == \" \":\r\n bosluk_sayac += 1\r\n #print(\"boşluk adedi\",bosluk_sayac)\r\n # adet=print(bosluk_sayac+1)\r\n adet = bosluk_sayac+1\r\n print(dersler)\r\nif adet <=5 and adet >3 :\r\n print(dersler)\r\nelif adet < 3:\r\n print(\"sınıfta başarısız oldunuz\")\r\n \r\n\r\ndef notlar():\r\n while True:\r\n arasınav = int(input(\"arasınav notunuzu girin:\"))\r\n final = int (input(\"finaş notunu girin:\"))\r\n proje = int (input(\"proje notunu gir:\"))\r\n ort = (arasınav*0.3)+(final*0.5)+(proje*0.2)\r\n if (ort>90):\r\n print (\"AA\",ort)\r\n break\r\n elif (ort> 70 and ort <=90):\r\n print (\"BB\",ort)\r\n break\r\n elif (ort> 50 and ort <=70):\r\n print (\"CC\",ort)\r\n break\r\n elif (ort> 30 and ort <=50):\r\n print (\"DD\",ort)\r\n break\r\n else:\r\n print(\"DD\",ort)\r\n break\r\n \r\n\r\nnotlar()","repo_name":"Bedriyecaglayan/GlobalAIHubPythonHomework","sub_path":"ogrenci_sistemi.py","file_name":"ogrenci_sistemi.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"25841155400","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport time\n#from datetime import datetime, date, time\nfrom selenium.webdriver.common.by import By\n\nfrom selenium import webdriver\nimport page\nimport os\nimport sys\n\n\nclass WebSearch(unittest.TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.get(\"http://websearch.rakuten.co.jp/\")\n# self.driver.maximize_window()\n self.driver.implicitly_wait(3)\n# self.results_file = open(\"results.txt\", \"w\")\n# self.radio_results_file = open(\"radio_results.txt\", \"w\")\n \n def first_page(self):\n self.main_page = page.MainPage(self.driver)\n\n self.main_page.click_login()\n self.main_page.username = \"username\"\n self.main_page.password = \"password\"\n self.main_page.click_submit()\n\n keyword_jp = \"ご飯\"\n self.main_page.keyword = unicode(keyword_jp, 'utf-8') \n self.main_page.click_search()\n\n time.sleep(5)\n\n\n\n\n\n def test_search_in_python_org(self):\n self.first_page()\n\n def tearDown(self):\n self.driver.close()\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n","repo_name":"ouyangqinjp/selenium-projects","sub_path":"webSearch/webSerch.py","file_name":"webSerch.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"37767189744","text":"import schedule\nimport time\nimport os\nimport subprocess\nimport logging\nimport datetime\nimport pytz\nimport shutil\nfrom urllib.parse import urlparse\n\ntime.sleep(2) # container delay\n\n###############################################################################\n\nmax_attempts = 10\nsearch_query = \"iphone\"\ntimezone = pytz.timezone('US/Central')\ninitialize_mysql_tables = False\ndelay_job = False\n# 10 to 15 minutes\ndelay_in_minutes = 120\ndelay_in_minutes_2 = 150\nrun_it_once = False # If False job will repeat based on delay_in_minutes.\njob_counter_max = 1 # You can customize the amount of times job will run before\n# breaking while run_it_once is true\n\n# Author: Gavin Kondrath | gav.ink\n###############################################################################\n\nutc_now = datetime.datetime.now(tz=pytz.timezone('UTC'))\ncurrent_time = utc_now.astimezone(timezone).strftime(\"(%Y-%m-%d %H:%M)\")\ntime_file = utc_now.astimezone(timezone).strftime(\"%Y%m%d%H%M%S\")\n\nlauncher_path = os.path.dirname(os.path.abspath(__file__))\nscripts_folder = os.path.join(launcher_path, 'scripts')\nlog_folder = f'{launcher_path}/logs'\ntemp_folder = f'{launcher_path}/temp'\njob_error_log = f'{log_folder}/job_error.log'\ncl_urls = f'{launcher_path}/craigslist_urls.txt'\n\nif not os.path.exists(log_folder):\n os.makedirs(log_folder)\n\nif not os.path.exists(temp_folder):\n os.makedirs(temp_folder)\n\nwith open(job_error_log, 'w'):\n pass\nlogging.basicConfig(filename=job_error_log, level=logging.DEBUG)\nprint(\"Logging Setup Complete\")\n\njob_running = False\njob_counter = 0\n\n\ndef clear_temp():\n print(\"clearing the temp folder\")\n temp_items = os.listdir(temp_folder)\n for item in temp_items:\n temp_item_path = os.path.join(temp_folder, item)\n if os.path.isfile(temp_item_path):\n os.remove(temp_item_path)\n elif os.path.isdir(temp_item_path):\n shutil.rmtree(temp_item_path)\n\n\ndef run_script(script_path, file_name, launcher_path, search_query, url, max_retries=max_attempts):\n for retry in range(max_retries + 1):\n try:\n subprocess.run(['python3', script_path, file_name, launcher_path, search_query, url], check=True)\n print(f\"Script completed: {script_path}\")\n break\n\n except subprocess.CalledProcessError as e:\n print(f\"Error running script {script_path}: {e}\")\n logging.error(f\"({current_time}) {script_path} failed and is attempting to recover: {e}\")\n\n script_name = file_name[:-3]\n parsed_url = urlparse(url)\n parts_url = parsed_url.netloc.split('.')\n if len(parts_url) > 0:\n city_name = parts_url[0].lower()\n script_name_log = f'{temp_folder}/{script_name}.log'\n script_city_log = f'{temp_folder}/{script_name}_{city_name}.log'\n\n if os.path.isfile(script_name_log):\n if retry >= 1:\n new_log_name = f'{temp_folder}/{time_file}_{script_name}_{retry}.log'\n else:\n new_log_name = f'{temp_folder}/{time_file}_{script_name}.log'\n os.rename(script_name_log, new_log_name)\n shutil.move(new_log_name, f'{log_folder}/')\n\n elif os.path.isfile(script_city_log):\n if retry >= 1:\n new_log_name = f'{temp_folder}/{time_file}_{script_name}_{city_name}_{retry}.log'\n else:\n new_log_name = f'{temp_folder}/{time_file}_{script_name}_{city_name}.log'\n os.rename(script_city_log, new_log_name)\n shutil.move(new_log_name, f'{log_folder}/')\n\n else:\n print(\"No log files found for recovery.\")\n\n if retry < max_retries:\n print(f\"Retrying script... 
(attempt {retry + 2}/{max_retries + 1})\")\n\n else:\n print(f\"Script {script_path} failed.\")\n logging.error(f\"({current_time}) Max retries reached for script {script_path}: {e}\")\n\n send_email = os.path.join(scripts_folder, 'send_email.py')\n subprocess.run(['python3', send_email, launcher_path], check=True)\n with open(job_error_log, 'w'):\n pass\n clear_temp()\n break\n\n\ndef run_craigslist_scripts(urls):\n url_counter = 0\n for url in urls:\n url_counter += 1\n try:\n file_name = 'craigslist.py'\n script_path = os.path.join(scripts_folder, file_name)\n print(f\"Scraping Craiglist ({url_counter}/{len(urls)}): {url}\")\n run_script(script_path, file_name, launcher_path, search_query, url, max_retries=max_attempts)\n except Exception as e:\n logging.error(f\"({current_time}) Error running Craigslist script: {e}\")\n print(f\"Error running Craigslist script: {e}\")\n\n\ndef job(): # 25.1MiB\n global job_running\n global job_counter\n job_counter += 1\n if not job_running:\n job_running = True\n script_counter = 0\n\n if initialize_mysql_tables is True:\n print(\"Initializing MYSQL Tables in DB: webscrapes\")\n file_name = 'init_mysql.py'\n url = 'example.com' # Placeholder to get the script to run\n script_path = os.path.join(scripts_folder, file_name)\n run_script(script_path, file_name, launcher_path, search_query, url, max_retries=max_attempts)\n\n###############################################################################\n\n try:\n print(\"Starting Job...\")\n\n with open(cl_urls, 'r') as file:\n urls = file.read().splitlines()\n run_craigslist_scripts(urls)\n\n ordered_scripts = [\n 'filter_csv.py',\n 'remove_extra_images.py',\n 'to_mysql.py',\n 'to_cloudinary.py'\n ]\n\n###############################################################################\n\n script_paths = [os.path.join(scripts_folder, file_name) for file_name in ordered_scripts]\n url = 'https://craigslist.org/'\n\n for script_path in script_paths:\n script_counter += 1\n file_name = os.path.basename(script_path)\n print(f\"Running script ({script_counter}/{len(script_paths)}): {script_path}\")\n run_script(script_path, file_name, launcher_path, search_query, url, max_retries=max_attempts)\n\n print(\"Job Complete!\")\n\n except Exception as e:\n logging.error(f\"({current_time}) Error in job: {e}\")\n print(f\"Error: {e}\")\n job()\n\n finally:\n clear_temp()\n time.sleep(1)\n job_running = False\n\n\nif delay_job is False:\n job()\n if run_it_once is False:\n schedule.every(delay_in_minutes).to(delay_in_minutes_2).minutes.do(job)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\nif delay_job is True:\n print(f\"Waiting for {delay_in_minutes}-{delay_in_minutes_2} minutes before beginning\")\n schedule.every(delay_in_minutes).to(delay_in_minutes_2).minutes.do(job)\n\n while True:\n if run_it_once is True:\n if job_counter == job_counter_max:\n break\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"gavink97/cl-search","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":7361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"72899263839","text":"import random\n\n# List of Hangman ASCII art representations for different stages\nHANGMANPICS = ['''\n +---+\n | |\n |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\n# List of words for the game\nguess_words = [\"banana\", \"apple\", \"pineapple\", \"watermelon\", \"computer\", \"rice\", \"bean\"]\n\n# Select a random word from the list\nrandom_choice = random.choice(guess_words)\n\n# Create a list to represent the hidden word with underscores\nsecret_word = [\"_\"] * len(random_choice)\n\n# Initialize game variables\ngame_over = False\nguess_count = 0 # Track incorrect guesses\nguess_limit = 6 # Maximum allowed incorrect guesses\n\n# Function to display the Hangman figure based on incorrect guesses\ndef display_hangman(num_incorrect_guesses):\n if num_incorrect_guesses < len(HANGMANPICS):\n print(HANGMANPICS[num_incorrect_guesses])\n\n# Main game loop\nwhile not game_over:\n # Display the current state of the secret word\n print(\"\".join(secret_word))\n\n # Ask the player for a letter guess (convert to lowercase for consistency)\n guess_letter = input(\"Enter a letter: \").lower()\n\n if guess_letter in random_choice:\n # Check if the guessed letter is in the random choice\n for i, letter in enumerate(random_choice):\n # Update the secret word if the guessed letter is correct\n if letter != \"_\" and guess_letter == letter:\n secret_word[i] = letter\n else:\n # Increment the guess count and display the Hangman figure\n guess_count += 1\n display_hangman(guess_count)\n \n if guess_count == guess_limit:\n # End the game if the guess limit is reached\n game_over = True\n print(\"You lost the game. Try again.\")\n print(\"The correct word was:\", random_choice)\n\n if \"\".join(secret_word) == random_choice:\n # End the game and display a win message if the word is guessed\n print(\"Congratulations! You've guessed the word:\", random_choice)\n game_over = True\n\n if game_over:\n # Ask if the player wants to play again and reset the game if they do\n play_again = input(\"Do you want to play again? (yes/no): \").lower()\n if play_again != \"yes\":\n break\n else:\n # Reset game variables and choose a new random word\n random_choice = random.choice(guess_words)\n secret_word = [\"_\"] * len(random_choice)\n game_over = False\n guess_count = 0\n","repo_name":"enthsz/jogo_da_velha","sub_path":"challenger02_hangman.py","file_name":"challenger02_hangman.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"15747054062","text":"import math\n\ndef calcs(d1_var_yards, d2_var_feet, h_var_yard, v_sand, n_var_slowdown):\n theta_idx = 0\n time_minimum = 1000000\n theta1_optimum = 0\n true = False\n while theta_idx < 90:\n theta_idx += 0.001\n x = d1_var_yards * 3 * math.tan(math.radians(theta_idx))\n l1 = math.sqrt(x ** 2 + (d1_var_yards * 3) ** 2)\n l2 = math.sqrt((h_var_yard * 3 - x) ** 2 + d2_var_feet ** 2)\n time_flag = (l1 + n_var_slowdown * l2) / (v_sand * 5280 / 3600)\n theta2_flag = math.atan2((h_var_yard * 3 - x), d2_var_feet)\n if time_minimum <= time_flag:\n continue\n time_minimum = time_flag\n theta1_optimum = theta_idx\n theta2 = math.degrees(theta2_flag)\n value1 = round(math.sin(math.radians(theta1_optimum)), 3)\n value2 = round(n_var_slowdown * math.sin(math.radians(theta2)), 3)\n true = value1 == value2\n\n print(\n f\"Если спасатель начнёт движение под углом theta1, равным {theta1_optimum:.0f} градусам, он достигнет утопащего\"\n f\" через минимальное время равное {time_minimum:.1f} секунды\")\n\n if true:\n print('Формула определеляющая оптимальный угол движения спасателя: \"Theta1 = n * Theta 2\" выполняется')\n\n\ncalcs(8, 10, 50, 5, 2)\n","repo_name":"AlexeyAllen/ITMO2022.Python","sub_path":"ITMO2022.Python/Practice02/Ex03/Loops.py","file_name":"Loops.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"71673947997","text":"import sys\nimport platform\nsys.path.append('./')\nimport pymqi\nqueue_manager = 'CSQ9'\nqmgr = pymqi.connect(queue_manager)\nqueue = pymqi.Queue(qmgr, 'CP0000')\nmessage = queue.put(\"COLINSMESSAGE\")\nqueue2= pymqi.Queue(qmgr, 'CP0000', pymqi.CMQC.MQOO_INPUT_SHARED +pymqi.CMQC.MQOO_INQUIRE+pymqi.CMQC.MQOO_SET)\nmxd = queue2.inquire(pymqi.CMQC.MQIA_INHIBIT_GET)\nprint(\"mxd\",mxd)\nprint(\"=======\")\nmx2 = queue2.set(pymqi.CMQC.MQIA_INHIBIT_GET,0 )\nprint(\"m2\",mx2)\nprint(\"=======\")\nprint(\"inh get \",mxd,mx2)\nmessage = queue2.get()\nprint(\"Message\", message)\nod = queue.getOD()\nprint(\"=OD==\")\nfor o in od:\n print(o,\":\",od[o])\n#rint(\"OD\",od)\n#q = od[\"DynamicQName\"]\n#rint(\"type rq\",type(rq))\n#rint(\"RQ\",rq)\n#rint(\"RQ2\",rq2)\n#rint(\"----------__\")\n#rint (queue.getOD())\nmd = (queue.getMD())\nprint(\"==MD===\")\nfor m in md:\n print(m,\":\",md[m])\nqueue.close()\nqueue2.close()\nqueuei= pymqi.Queue(qmgr, 'CP0000')\nprint(\"Queuei\",queuei)\nd = queuei.inquire(pymqi.CMQC.MQIA_CURRENT_Q_DEPTH)\nprint(\"depth\",d)\ntry:\n d = queuei.inquire(pymqi.CMQC.MQIA_INHIBIT_EVENT )\n#xcept MQMIError as e:\n# print(\"30 exception is \",e)\nexcept pymqi.MQMIError as e:\n e2 = sys.exc_info()\n v = e2[1].errorAsString()\n if v == \"FAILED: MQRC_SELECTOR_ERROR\":\n print(\"It gave the correct return code\")\n else:\n print(\"It gave the WRONG return code\",e2)\nexcept:\n e = sys.exc_info()\n print(\"35 exception is \",e)\n\nqueuei.close()\nqmgr.disconnect()\n","repo_name":"colinpaicemq/zpymqi","sub_path":"tests/mq4.py","file_name":"mq4.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"2880805701","text":"from urllib import parse\nimport copy\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.core import serializers\nimport json\nimport math\n\nfrom Recommender.handlers.util import dbOptions, package\n\n'''书本详细信息 需要连表查询 标签 、 是否已被该用户评分等'''\n\n\n@require_http_methods([\"GET\"])\ndef handle_book_detail(request):\n userId = request.GET.get(\"userId\") # userId的可能为空===》 用户未登录 不直接从cookie中获取,避免session错误\n bookId = request.GET.get(\"bookId\")\n\n sql = 'SELECT bookId, bookName, subjectUrl, imgUrl, author, pubDate, publisher, ratingScore, ratingNum, price, ISBN, summary FROM br_books WHERE bookId = %s'\n sql_tag = 'SELECT tagName, bookTagRank FROM br_tags WHERE bookId = %s ORDER BY bookTagRank'\n\n result_code, result = dbOptions.detail_query(sql, sql_tag, bookId)\n\n # 返回一个loginState字段, 用于判断是否展示用户对该书本的的评分\n\n if userId == '' or userId is None:\n data = {\n 'loginState': 0,\n 'loginMsg': '未登录',\n 'bookMsg': result\n }\n else:\n sql_favor_star = 'SELECT starNum FROM favor WHERE userId=%s AND bookId=%s'\n result_favor_code, starMsg = dbOptions.star_query(sql_favor_star, userId, bookId)\n data = {\n 'loginState': 1,\n 'loginMsg': '已登录',\n 'bookMsg': result,\n 'starMsg': starMsg,\n }\n\n if result_code == 0:\n return JsonResponse(package.successPack(data))\n elif result_code == 1:\n return JsonResponse(package.successPack(data))\n else:\n return JsonResponse(package.errorPack(data))\n","repo_name":"luchengLC/BooksRecommender","sub_path":"Recommender/handlers/book/detail.py","file_name":"detail.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"16233059169","text":"import string\nimport random\n\n\"\"\"\nEvery comment I add here is a comment on this code\n\n\"\"\"\n\nclass VehicleRegistry:\n # this class just looks like a container for these 2 helper methods\n\n def generate_vehicle_id(self, length):\n return ''.join(random.choices(string.ascii_uppercase, k=length))\n\n def generate_vehicle_license(self, id):\n return f\"{id[:2]}-{''.join(random.choices(string.digits, k=2))}-{''.join(random.choices(string.ascii_uppercase, k=2))}\"\n\n\nclass Application:\n # register_vehicle seems to do a lot of stuff\n # low cohesion that means\n # has high coupling as it directly relies on VehicleRegistry implementations\n # if we change VehicleRegistry, then I would have to change register_vehicle\n def register_vehicle(self, brand: string):\n # create a registry instance\n registry = VehicleRegistry()\n\n \"\"\"TO MUCH COUPLING HERE, ON methods of VehicleRegistry\"\"\"\n # generate a vehicle id of length 12\n vehicle_id = registry.generate_vehicle_id(12)\n\n # now generate a license plate for the vehicle\n # using the first two characters of the vehicle id\n license_plate = registry.generate_vehicle_license(vehicle_id)\n\n\n \"\"\"\n Weak cohesion is also affected our ability to add a new car, a new if needs to be added\n and we would need to change the if of electric car to make a change there\n \n Also there is coupling b/w brand name and catalogue price\n for electric tax is depending on car brands that currently signify electric, what if we want to add another electric\n brand to our code\n \"\"\"\n # compute the catalogue price\n catalogue_price = 0\n if brand == \"Tesla Model 3\":\n catalogue_price = 60000\n elif brand == \"Volkswagen ID3\":\n catalogue_price = 35000\n elif brand == \"BMW 5\":\n catalogue_price = 45000\n\n # compute the tax percentage (default 5% of the catalogue price, except for electric cars where it is 2%)\n tax_percentage = 0.05\n if brand == \"Tesla Model 3\" or brand == \"Volkswagen ID3\":\n tax_percentage = 0.02\n\n # compute the payable tax\n payable_tax = tax_percentage * catalogue_price\n\n # print out the vehicle registration information\n print(\"Registration complete. Vehicle information:\")\n print(f\"Brand: {brand}\")\n print(f\"Id: {vehicle_id}\")\n print(f\"License plate: {license_plate}\")\n print(f\"Payable tax: {payable_tax}\")\n\n\napp = Application()\napp.register_vehicle(\"Volkswagen ID3\")\n","repo_name":"arknandan25/Python101","sub_path":"solid/cohesion_coupling/cc_before.py","file_name":"cc_before.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"74346613277","text":"# coding=utf-8\n\"\"\"\nLEX è un modulo che costituisce una base di dati per le legislature italiane.\nDeve essere aggiornato manualmente ogni volta che inizia un nuova legislatura.\n\"\"\"\nfrom collections import namedtuple\nfrom datetime import date\n\n__author__ = 'daniele'\n\nLex = namedtuple('Legislatura', ['number', 'name', 'voting_date', 'start_date', 'end_date', 'database'])\n\nLEGISLATURE = [\n Lex(0, 'Assemblea Costituente', date(1946, 6, 2), date(1946, 6, 25), date(1948, 1, 31), None),\n Lex(1, 'I Legislatura', date(1948, 4, 18), date(1948, 5, 8), date(1953, 6, 24), None),\n Lex(2, 'II Legislatura', date(1953, 6, 7), date(1953, 6, 25), date(1958, 6, 11), None),\n Lex(3, 'III Legislatura', date(1958, 5, 25), date(1958, 6, 12), date(1963, 5, 15), None),\n Lex(4, 'IV Legislatura', date(1963, 4, 28), date(1963, 5, 16), date(1968, 6, 4), None),\n Lex(5, 'V Legislatura', date(1968, 5, 19), date(1968, 6, 5), date(1972, 5, 24), None),\n Lex(6, 'VI Legislatura', date(1972, 5, 7), date(1972, 5, 25), date(1976, 7, 4), None),\n Lex(7, 'VII Legislatura', date(1976, 6, 20), date(1976, 7, 5), date(1979, 6, 19), None),\n Lex(8, 'VIII Legislatura', date(1979, 6, 3), date(1979, 6, 20), date(1983, 7, 11), None),\n Lex(9, 'IX Legislatura', date(1983, 6, 26), date(1983, 7, 12), date(1987, 7, 1), None),\n Lex(10, 'X Legislatura', date(1987, 6, 14), date(1987, 7, 2), date(1992, 4, 22), None),\n Lex(11, 'XI Legislatura', date(1992, 4, 5), date(1992, 4, 23), date(1994, 4, 14), None),\n Lex(12, 'XII Legislatura', date(1994, 3, 27), date(1994, 4, 15), date(1996, 5, 8), None),\n Lex(13, 'XIII Legislatura', date(1996, 4, 21), date(1996, 5, 9), date(2001, 5, 29), None),\n Lex(14, 'XIV Legislatura', date(2001, 5, 13), date(2001, 5, 30), date(2006, 4, 27), None),\n Lex(15, 'XV Legislatura', date(2006, 4, 9), date(2006, 4, 28), date(2008, 4, 28), None),\n Lex(16, 'XVI Legislatura', date(2008, 4, 13), date(2008, 4, 29), date(2013, 3, 14), 'parlamento16'),\n Lex(17, 'XVII Legislatura', date(2013, 2, 24), date(2013, 3, 15), date(2018, 3, 23), 'parlamento17'),\n Lex(18, 'XVIII Legislatura', date(2018, 3, 4), date(2018, 4, 24), None, 'parlamento18')\n]\n\n\ndef get_legislature():\n \"\"\"\n This method returns all legislature instances.\n \"\"\"\n return LEGISLATURE\n\n\ndef get_legislatura(number=None):\n \"\"\"\n prova a recuperare una legislatura per dato il numero.\n per convenzione l'assemblea costituente è 0\n \"\"\"\n if number is None:\n number = -1\n return get_legislature()[int(number)]\n\n\ndef get_legislatura_per_data(day):\n \"\"\"\n cerca una legislatura in cui ricade il giorno fornito come parametro.\n se non esiste ritorna None\n \"\"\"\n for lex in get_legislature():\n if lex['start_date'] > day:\n continue\n if lex['end_date'] and lex['end_date'] < day:\n continue\n return lex\n return None","repo_name":"openpolis/op_api3","sub_path":"api_project/parlamento/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"10816929742","text":"#bubble sort\ndef bubble_sort():\n list = [12,56,28,46,89,64,18]\n print(list)\n for i in range(len(list)):\n for j in range(len(list) - 1):\n if list[j] > list[j+1]:\n list[j], list[j+1] = list[j+1], list[j] \n return list\n\n#insertion sort\ndef insertion_sort():\n list = [12,56,28,46,89,51,18]\n \n print(list)\n for i in range(len(list)):\n val = list[i]\n j = i-1\n while j >= 0 and val < list[j]:\n list[j+1] = list[j]\n j -= 1\n list[j+1] = val\n return list\n\n#selection sort\ndef selection_sort():\n list = [12,56,28,46,89,51,18]\n\n print(list)\n for i in range(len(list)):\n min_val_index = i\n for j in range(i+1, len(list)):\n if list[min_val_index] > list[j]:\n min_val_index = j\n list[i], list[min_val_index] = list[min_val_index], list[i]\n return list\n\n\n \n\na = input(\"Enter 1 to perform bubble sort \\nEnter 2 to perform insertion sort \\nEnter 3 to perform selection sort:\")\nif a=='1':\n print(\"bubble sort: \",bubble_sort())\nelif a=='2':\n print(\"insertion sort: \", insertion_sort())\nelif a=='3':\n print(\"selection sort: \", selection_sort())\nelse:\n print(\"invalid input\")\n","repo_name":"gunveen-bindra/Data-Structures","sub_path":"sorting_algos.py","file_name":"sorting_algos.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"35939966986","text":"#!/usr/bin/python\n# coding: utf8\n\nimport unittest\nimport logging\nimport os\nimport time\n\n\nfrom pytsdb.events import RedisPubSub\n\n\nclass EventTest(unittest.TestCase):\n def setUp(self):\n redis_host = os.getenv('REDIS_HOST', 'localhost')\n redis_port = os.getenv('REDIS_PORT', 6379)\n self.r = RedisPubSub(host=redis_host, port=redis_port, db=0)\n\n def tearDown(self):\n self.r.close()\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n @classmethod\n def setUpClass(cls):\n logging.basicConfig(level=logging.INFO)\n\n def test_callback(self):\n messages = []\n def test_function1(key, event):\n messages.append((key, event))\n self.r.register_callback(\"device.*\", test_function1)\n\n self.assertEqual(len(messages), 0)\n self.r.publish_event(\"xyz\", ts_min=1, ts_max=2, count=2)\n time.sleep(0.1)\n self.assertEqual(len(messages), 0)\n\n self.r.publish_event(\"device.xyz\", ts_min=1, ts_max=2, count=2)\n time.sleep(0.1)\n self.assertEqual(len(messages), 1)\n self.assertEqual(messages[-1][0], \"device.xyz\")\n self.assertEqual(messages[-1][1].ts_min, 1)\n self.assertEqual(messages[-1][1].ts_max, 2)\n self.assertEqual(messages[-1][1].count, 2)\n\n self.r.publish_event(\"device.abc\", ts_min=2, ts_max=2, count=1)\n time.sleep(0.1)\n self.assertEqual(len(messages), 2)\n self.assertEqual(messages[-1][0], \"device.abc\")\n self.assertEqual(messages[-1][1].ts_min, 2)\n self.assertEqual(messages[-1][1].ts_max, 2)\n self.assertEqual(messages[-1][1].count, 1)\n\n time.sleep(0.5)\n self.r.close()\n\n def test_raisingcallback(self):\n def test_function2(key, event):\n raise ValueError(\"Mein Fehler ...\")\n self.r.register_callback(\"device.*\", test_function2)\n self.assertEqual(self.r._last_error, None)\n self.r.publish_event(\"device.xyz\", ts_min=1, ts_max=2, count=2)\n time.sleep(0.1)\n self.assertIn(\"Mein Fehler\", self.r._last_error)\n","repo_name":"wuttem/pytsdb","sub_path":"tests/test_events.py","file_name":"test_events.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"37356496777","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom IPython import embed\nfrom daisy.model.GCE.gce import GCE\n\n\nclass PointMF(nn.Module):\n def __init__(self, \n user_num, \n max_dim,\n factors=100,\n optimizer='adam',\n epochs=20, \n lr=0.01, \n reg_1=0.001,\n reg_2=0.001,\n loss_type='CL', \n gpuid='0',\n X = None,\n A = None,\n reindex=False,\n GCE_flag=False,\n dropout=0,\n early_stop=True):\n \"\"\"\n Point-wise MF Recommender Class\n Parameters\n ----------\n user_num : int, the number of users\n max_dim : int, the number of items or context max dimension\n factors : int, the number of latent factor\n epochs : int, number of training epochs\n lr : float, learning rate\n reg_1 : float, first-order regularization term\n reg_2 : float, second-order regularization term\n loss_type : str, loss function type\n gpuid : str, GPU ID\n early_stop : bool, whether to activate early stop mechanism\n \"\"\"\n super(PointMF, self).__init__()\n\n os.environ['CUDA_VISIBLE_DEVICES'] = gpuid\n cudnn.benchmark = True\n\n self.lr = lr\n self.reg_1 = reg_1\n self.reg_2 = reg_2\n self.epochs = epochs\n self.optimizer = optimizer\n self.dropout = dropout\n self.reindex = reindex\n self.GCE_flag = GCE_flag\n\n if GCE_flag:\n print('GCE EMBEDDINGS DEFINED')\n self.embeddings = GCE(max_dim, factors, X, A) if reindex else ValueError(f'Can not use GCE with'\n f'reindex=False')\n else:\n if reindex:\n self.embeddings = nn.Embedding(max_dim, factors)\n nn.init.normal_(self.embeddings.weight, std=0.01)\n else:\n self.embed_user = nn.Embedding(user_num, factors)\n self.embed_item = nn.Embedding(max_dim, factors)\n nn.init.normal_(self.embed_user.weight, std=0.01)\n nn.init.normal_(self.embed_item.weight, std=0.01)\n\n self.loss_type = loss_type\n\n def forward(self, user, item, context):\n\n if self.reindex:\n # embed()\n if context is None:\n embeddings = self.embeddings(torch.stack((user, item), dim=1))\n else:\n embeddings = self.embeddings(torch.stack((user, item, context), dim=1))\n\n nn.functional.dropout(embeddings, p=self.dropout, training=self.training, inplace=True)\n # ix = torch.bmm(embeddings[:, :1, :], embeddings[:, 1:, :].permute(0, 2, 1))\n pred = embeddings.prod(dim=1).sum(dim=1)\n return pred\n else:\n embed_user = self.embed_user(user)\n embed_item = self.embed_item(item)\n pred = (embed_user * embed_item).sum(dim=-1)\n return pred\n\n def predict(self, u, i, c):\n pred = self.forward(u, i, c).cpu()\n return pred","repo_name":"paulagd/GCE","sub_path":"daisy/model/point/MFRecommender.py","file_name":"MFRecommender.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"11217374095","text":"import turtle\nimport globals as g\n\n\nUP = 90\nDOWN = 270\nLEFT = 180\nRIGHT = 0\n\nSNAKE_HEAD_COLOR = \"brown\"\nSNAKE_COLOR = \"orange\"\n\n\nclass Snake:\n\n def __init__(self):\n self.body = []\n self.add_body_part(head=True)\n self.add_body_part()\n self.add_body_part()\n self.head = self.body[0]\n\n def add_body_part(self, head=False):\n if head:\n part = turtle.Turtle(\"circle\")\n part.penup()\n part.color(SNAKE_HEAD_COLOR)\n else:\n part = self.body[-1].clone()\n part.color(SNAKE_COLOR)\n\n self.body.append(part)\n\n def forward(self):\n for i in range(len(self.body)-1, 0, -1):\n new_x = self.body[i-1].xcor()\n new_y = self.body[i-1].ycor()\n self.body[i].goto(new_x, new_y)\n self.head.forward(g.STEP_SIZE)\n\n def left(self):\n if self.head.heading() != RIGHT:\n self.head.setheading(LEFT)\n\n def right(self):\n if self.head.heading() != LEFT:\n self.head.setheading(RIGHT)\n\n def up(self):\n if self.head.heading() != DOWN:\n self.head.setheading(UP)\n\n def down(self):\n if self.head.heading() != UP:\n self.head.setheading(DOWN)\n","repo_name":"JacquelineBashta/Py_SnakeGame","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"27863672318","text":"import streamlit as st\r\nfrom langchain.document_loaders.csv_loader import CSVLoader\r\nfrom langchain.vectorstores import FAISS\r\nfrom langchain.prompts import PromptTemplate\r\nfrom langchain.chains import LLMChain\r\nfrom langchain.embeddings import HuggingFaceInstructEmbeddings\r\nfrom langchain.llms import HuggingFaceHub\r\nfrom dotenv import load_dotenv\r\nfrom langchain.embeddings.openai import OpenAIEmbeddings\r\nimport os\r\n\r\nload_dotenv()\r\n\r\n# 1. Vectorise the responses csv data\r\nloader = CSVLoader(file_path=\"dataset.csv\")\r\ndocuments = loader.load()\r\n\r\n\r\nembeddings = OpenAIEmbeddings()\r\ndb = FAISS.from_documents(documents, embeddings)\r\n\r\n\r\n# 2. Function for similarity search\r\n\r\n\r\ndef retrieve_info(query):\r\n similar_response = db.similarity_search(query, k=3)\r\n\r\n page_contents_array = [doc.page_content for doc in similar_response]\r\n\r\n return page_contents_array\r\n\r\n\r\n# 3. Setup LLMChain & prompts\r\n\r\nrepo_id = \"google/flan-t5-xxl\"\r\n\r\nllm = HuggingFaceHub(\r\n repo_id=repo_id, model_kwargs={\"temperature\": 0.5, \"max_length\": 64}\r\n)\r\n\r\ntemplate = \"\"\"\r\nYou will help me provide medical related advice for common diseases \r\nI will share a user's message with you and you will give me the best answer that \r\nI should send to this user based on past responses, \r\nand you will follow ALL of the rules below:\r\n\r\n1/ Response should be very similar or even identical to the past responses\r\n\r\n2/ If the responses are irrelevant, then try to mimic the style of the past responses to user's message\r\n\r\n\r\nBelow is a message I received from theuser:\r\n{message}\r\n\r\nHere is a list of past responses of how we normally respond to user in similar scenarios:\r\n{past_responses}\r\n\r\nPlease write the best response that I should send to this user:\r\n\"\"\"\r\n\r\nprompt = PromptTemplate(\r\n input_variables=[\"message\", \"past_responses\"], template=template\r\n)\r\n\r\nchain = LLMChain(llm=llm, prompt=prompt)\r\n\r\n\r\n# 4. Retrieval augmented generation\r\n\r\n\r\ndef generate_response(message):\r\n past_responses = retrieve_info(message)\r\n response = chain.run(message=message, past_responses=past_responses)\r\n return response\r\n\r\n\r\n# 5. Build an app with streamlit\r\ndef main():\r\n st.set_page_config(page_title=\"Medical Chatbot\", page_icon=\":books:\")\r\n\r\n st.header(\"Medical Chatbot :book:\")\r\n message = st.text_area(\"user query\")\r\n\r\n if message:\r\n st.write(\"Generating best advicee...\")\r\n\r\n result = generate_response(message)\r\n\r\n st.info(result)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Vedant2k03/Medical-Chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"28884438609","text":"from pathlib import Path\nimport logging\n\ndef start():\n\n path_socket=Path('/amniotic.socket')\n\n if path_socket.exists():\n msg=f'Pulse socket found at \"{path_socket}\". Using host-shared audio.'\n logging.info(msg)\n import os\n os.environ['PULSE_SERVER'] = \"unix://amniotic.socket\"\n os.environ['PULSE_COOKIE'] = \"/amniotic.cookie\"\n else:\n msg = f'No Pulse socket found at \"{path_socket}\". Using dedicated audio.'\n logging.warning(msg)\n\n from amniotic.mqtt import loop\n loop.start()\n\nif __name__ == '__main__':\n start()\n","repo_name":"fmtr/amniotic","sub_path":"amniotic/mqtt/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"55"}
+{"seq_id":"11413862396","text":"# importing the requests library\nimport json\nimport time\nimport requests\n\n# api-endpoint\nURL = \"http://192.168.1.6/api/pWH0J9PLMzz5bRzy3wPgHfTOuUz2UKf3XNW13owt\"\n\nLIGHT_GET = \"/lights/1\"\nLIGHT_PUT = \"/lights/1/state\"\nREMOTE_GET = \"/sensors/2\"\n\nHEADERS = {\"Content-Type\": \"application/json\"}\n\n# Json preformatting\non = {\"on\": True}\noff = {\"on\": False}\nalert = {\"alert\": \"select\"}\n\n# Remote codes\nI_button = 1002\nbright = 2002\ndim = 3002\nO_button = 4002\n\n# Last button press\nmax_t = 0\n\n# blinker values\nwanted = 0\nblinked = 0\nloops = 0\ncounter_step = 10 #once per x seconds? so 60 gives blink per remaining minutes\n\n# running\nrunning = False\nblinking = False\nstopped = True\nlighted = False\n\n# timer values in secs\nloop_speed = 1 # 0.5 is fastest that works with blinking\non_delay = 60 # time from buttonpress to light on\nt = 0 # running timer\noff_delay = 60 # from start to off\ncounter_delay = 10 # blink every minute?\n\ndef switch(direction):\n \"\"\"\n Switch light (on)/(off)\n \"\"\"\n path = LIGHT_PUT\n requests.put(URL + path, data=json.dumps(direction), headers=HEADERS)\n return 1\n\n\ndef last_stamp():\n \"\"\"\n Gets the last stamp from the remote\n \"\"\"\n path = REMOTE_GET\n res = requests.get(URL + path)\n sensor = res.json()\n stamp = sensor['state']['lastupdated']\n return stamp\n\n\ndef button_pressed():\n \"\"\"\n Detect button presses\n Save time of latest press, and compare to that\n \"\"\"\n global max_t\n path = REMOTE_GET\n res = requests.get(URL + path)\n dictionary = res.json()\n last = dictionary['state']['buttonevent']\n timestamp = dictionary['state']['lastupdated']\n\n if timestamp > max_t:\n max_t = timestamp\n if last == I_button:\n print(\"Started\")\n return 1\n if last == bright:\n return 0\n if last == dim:\n return 0\n if last == O_button:\n return 0\n else:\n return 0\n else:\n return 0\n\ndef flash():\n \"\"\"\n Inbuilt flashing ability\n \"\"\"\n path = \"/lights/1/state\"\n requests.put(URL + path, data=json.dumps(alert), headers=HEADERS)\n return 1\n\n\ndef blinks_wanted():\n \"\"\"\n Check and save the amount of blinks wanted\n \"\"\"\n global lastblink\n global wanted\n global blinking\n\n lastblink = t\n print(t)\n wanted = int((1 + t) / counter_step) # want to blink correct amount (once for minute)\n blinking = True # blink with the first press too\n print(\"Blinking \", wanted, \" times\")\n print(\"Started\")\n return 1\n\n\ndef control_loop():\n \"\"\"\n Here the program looks at the user inputs and controls lights accordingly\n \"\"\"\n # Timers\n global on_delay\n global t\n global off_delay\n global counter_delay\n global lastblink\n global loops\n\n # States\n global running\n global stopped\n global blinking\n global lighted\n\n # Counters\n global blinked\n global wanted\n\n t -= 1\n\n if button_pressed():\n # set current to timer\n t = on_delay\n lastblink = on_delay\n print(\"On delay: \", on_delay)\n stopped = False\n running = True\n lighted = False\n loops = 0 #reset loops\n blinks_wanted() # sets counters and timers and blinking = true\n\n if stopped:\n t = 0\n\n if blinking:\n if blinked < wanted:\n blinked += 1\n print(\"Blink number: \", blinked)\n flash()\n if blinked >= wanted:\n blinked = 0\n blinking = False\n\n if running:\n if t <= 0:\n switch(on)\n print(\"Off delay: \", off_delay)\n running = False\n blinking = False\n stopped = False\n lighted = True\n # if last blink was 60s ago\n if (t <= on_delay - (loops * counter_delay)) & (t > 0):\n 
loops += 1\n blinks_wanted() # sets counters and timers and blinking = true\n\n\n if lighted:\n #if light is on 60sec?\n #stay on for off_delay\n if t <= -off_delay:\n switch(off)\n print(\"Switched off\")\n running = False\n blinking = False\n stopped = True\n lighted = False\n\n\n# get last stamp first so we dont start immediately\n# also works to find that the we have access to bridge?\nmax_t = last_stamp()\n\n#also switch off before loop?\nswitch(off)\n\nwhile 1:\n # control loop here\n control_loop()\n print(t)\n time.sleep(loop_speed)\n","repo_name":"oja89/python_controlled_Hue","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"13448726406","text":"# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: sherlockliao01@gmail.com\n\"\"\"\nimport copy\nimport logging\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom .evaluator import DatasetEvaluator\nfrom .query_expansion import aqe\nfrom .rank import evaluate_rank\nfrom .roc import evaluate_roc\nfrom .rerank import re_ranking\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReidEvaluator(DatasetEvaluator):\n def __init__(self, cfg, num_query, output_dir=None):\n self.cfg = cfg\n self._num_query = num_query\n self._output_dir = output_dir\n\n self.features = []\n self.pids = []\n self.camids = []\n\n def reset(self):\n self.features = []\n self.pids = []\n self.camids = []\n\n def process(self, outputs):\n self.features.append(outputs[0].cpu())\n self.pids.extend(outputs[1].cpu().numpy())\n self.camids.extend(outputs[2].cpu().numpy())\n\n @staticmethod\n def cal_dist(metric: str, query_feat: torch.tensor, gallery_feat: torch.tensor):\n assert metric in [\"cosine\", \"euclidean\"], \"must choose from [cosine, euclidean], but got {}\".format(metric)\n if metric == \"cosine\":\n query_feat = F.normalize(query_feat, dim=1)\n gallery_feat = F.normalize(gallery_feat, dim=1)\n dist = 1 - torch.mm(query_feat, gallery_feat.t())\n else:\n m, n = query_feat.size(0), gallery_feat.size(0)\n xx = torch.pow(query_feat, 2).sum(1, keepdim=True).expand(m, n)\n yy = torch.pow(gallery_feat, 2).sum(1, keepdim=True).expand(n, m).t()\n dist = xx + yy\n dist.addmm_(1, -2, query_feat, gallery_feat.t())\n dist = dist.clamp(min=1e-12).sqrt() # for numerical stability\n return dist.cpu().numpy()\n\n def evaluate(self):\n features = torch.cat(self.features, dim=0)\n\n # query feature, person ids and camera ids\n query_features = features[:self._num_query]\n query_pids = np.asarray(self.pids[:self._num_query])\n query_camids = np.asarray(self.camids[:self._num_query])\n\n # gallery features, person ids and camera ids\n gallery_features = features[self._num_query:]\n gallery_pids = np.asarray(self.pids[self._num_query:])\n gallery_camids = np.asarray(self.camids[self._num_query:])\n\n self._results = OrderedDict()\n\n if self.cfg.TEST.AQE.ENABLED:\n logger.info(\"Test with AQE setting\")\n qe_time = self.cfg.TEST.AQE.QE_TIME\n qe_k = self.cfg.TEST.AQE.QE_K\n alpha = self.cfg.TEST.AQE.ALPHA\n query_features, gallery_features = aqe(query_features, gallery_features, qe_time, qe_k, alpha)\n\n dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, gallery_features)\n\n if self.cfg.TEST.RERANK.ENABLED:\n logger.info(\"Test with rerank setting\")\n k1 = self.cfg.TEST.RERANK.K1\n k2 = self.cfg.TEST.RERANK.K2\n lambda_value = self.cfg.TEST.RERANK.LAMBDA\n q_q_dist = self.cal_dist(self.cfg.TEST.METRIC, query_features, query_features)\n g_g_dist = self.cal_dist(self.cfg.TEST.METRIC, gallery_features, gallery_features)\n dist = re_ranking(dist, q_q_dist, g_g_dist, k1, k2, lambda_value)\n\n cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)\n mAP = np.mean(all_AP)\n mINP = np.mean(all_INP)\n for r in [1, 5, 10]:\n self._results['Rank-{}'.format(r)] = cmc[r - 1]\n self._results['mAP'] = mAP\n self._results['mINP'] = mINP\n\n tprs = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)\n fprs = [1e-4, 1e-3, 1e-2]\n for i in range(len(fprs)):\n self._results[\"TPR@FPR={}\".format(fprs[i])] = tprs[i]\n return 
copy.deepcopy(self._results)\n","repo_name":"JDAI-CV/Partial-Person-ReID","sub_path":"fastreid/evaluation/reid_evaluation.py","file_name":"reid_evaluation.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"55"}
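# Editor's aside (illustrative check, not part of the record above): the euclidean
# branch of cal_dist uses the identity ||q - g||^2 = ||q||^2 + ||g||^2 - 2*q.g,
# assembled as xx + yy - 2 * q @ g.T. A minimal verification against torch.cdist:
import torch

q, g = torch.randn(4, 8), torch.randn(5, 8)
xx = q.pow(2).sum(1, keepdim=True).expand(4, 5)
yy = g.pow(2).sum(1, keepdim=True).expand(5, 4).t()
dist = (xx + yy - 2 * q @ g.t()).clamp(min=1e-12).sqrt()
assert torch.allclose(dist, torch.cdist(q, g), atol=1e-4)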
+{"seq_id":"13059668957","text":"import numpy as np\r\nimport cv2\r\nfrom PIL import Image\r\nimport logging\r\n\r\n\r\n#***********************************************************************\r\n# Constants\r\n#**********************************************************************\r\ndroste_scale = 256\r\nTWO_PI_i = 2j * np.pi\r\nalpha = (TWO_PI_i + np.log(droste_scale)) / TWO_PI_i\r\nalpha_inv = TWO_PI_i / (TWO_PI_i + np.log(droste_scale))\r\n\r\n\r\n#***********************************************************************\r\n# Mapping functions\r\n#**********************************************************************\r\n\r\ndef cartesian_to_complex(x,y):\r\n '''\r\n :param x: x-coordinates in Cartesian plane\r\n :param y: y-coordinates in Cartesian plane\r\n :return: one grid representing the complex plane\r\n '''\r\n Z = x + y * 1j\r\n return Z\r\n\r\ndef complex_to_cartesian(Z):\r\n x = np.real(Z).astype(np.float32)\r\n y = np.imag(Z).astype(np.float32)\r\n return x,y\r\n\r\n\r\n#***********************************************************************\r\n# Twisting functions\r\n#**********************************************************************\r\nclass Escher:\r\n\r\n def __init__(self, coeff_):\r\n self.coeff = coeff_\r\n\r\n def log_z(self, Z):\r\n '''\r\n :param Z: grid of complex coordinates\r\n :return: log of the complex coordinates\r\n '''\r\n return np.log(Z)\r\n\r\n def rotation(self, Z):\r\n '''\r\n :param Z: grid of complex coordinates\r\n :param alpha: complex constant\r\n :return: rotated complex coordinates rotated by \"alpha\" degrees\r\n '''\r\n return Z* self.coeff\r\n\r\n def exp_z(self,Z):\r\n '''\r\n :param Z: grid of complex coordinates\r\n :return: exponential of the complex coordinates\r\n '''\r\n return np.exp(Z)\r\n\r\n def twist(self,Z):\r\n '''\r\n Function to receive straight grid of complex coordinates and warps them according to a coefficient\r\n Input: Takes complex coordinates, applies transform\r\n Output: Returns a single grid of complex coordinates\r\n '''\r\n #Log\r\n lnz = self.log_z(Z)\r\n #Rotation\r\n lnz_alpha = self.rotation(lnz)\r\n #Exponentiation\r\n ez = self.exp_z(lnz_alpha)\r\n return ez\r\n\r\n\r\n#***********************************************************************\r\n# Grid handling functions\r\n#***********************************************************************\r\n\r\ndef meshgrid(src, resolution = 10):\r\n '''\r\n Function to meshgrid\r\n :param src:\r\n :return: meshgrid of X,Y coordinates in Cartesian plane\r\n '''\r\n src_row, src_col = src.shape[0], src.shape[1]\r\n out_row = src_row * resolution # multiply by 20 to get higher resolution results\r\n out_col = src_row * resolution\r\n range_ = 1 # increasing this value from 1 puts more of the circle on the deformed output but also rotates it\r\n vec1 = np.linspace(-range_, range_, num=out_row, endpoint=True)\r\n vec2 = np.linspace(-range_, range_, num=out_col, endpoint=True)\r\n x, y = np.meshgrid(vec2, vec1)\r\n return x,y\r\n\r\n\r\n\r\ndef droste_transformation(x, y, c=1):\r\n for _ in range(2):\r\n #pixels out of bounds\r\n indx = (np.abs(x) >= c) | (np.abs(y) >= c)\r\n x[indx] *= 1. / droste_scale\r\n y[indx] *= 1. 
/ droste_scale\r\n\r\n # Pixels close to zero\r\n indx = (np.abs(x) < c / droste_scale) & (np.abs(y) < c / droste_scale)\r\n x[indx] *= droste_scale\r\n y[indx] *= droste_scale\r\n return x, y\r\n\r\n\r\n\r\ndef out_file(x,y,src,outFile,c=1,resolution = 10):\r\n x,y = droste_transformation(x, y, c=1)\r\n #Center grid\r\n x2 = (x / c + 1) * src.shape[0] / 2\r\n y2 = (y / c + 1) * src.shape[1] / 2\r\n #Generate output image\r\n out2 = cv2.remap(src, x2, y2, interpolation=cv2.INTER_CUBIC, borderValue=200)\r\n cv2.imwrite(outFile, out2)\r\n\r\n\r\ndef main():\r\n\r\n # ****************************************************************\r\n # Input files\r\n # ****************************************************************\r\n inFile = 'images/escher_straight2.jpg'\r\n #outFile = 'images/circle_LOG_jim2.png'\r\n outFile = 'images/grid_straight2_try.png'\r\n\r\n #Image cropping\r\n #TODO: crop images (no margin)\r\n #TODO: make dimensions square (same hght and length)\r\n src = np.array(cv2.imread(inFile))\r\n\r\n # ****************************************************************\r\n # Escher Transformation\r\n # ****************************************************************\r\n\r\n #Create complex grid\r\n x, y = meshgrid(src) #center coordinates at (0,0)\r\n Z = cartesian_to_complex(x, y) #convert Cartesian plane to complex\r\n\r\n\r\n # TODO: SELECT ONE: to do the forward deformation, use the reverse function (remap logic)\r\n #coeff = alpha #From twisted to straight\r\n coeff = alpha_inv #From straight to twisted\r\n\r\n # Apply Transformation\r\n escher = Escher(coeff)\r\n #Znew = escher.twist(Z)\r\n\r\n #region\r\n #***************************************************************\r\n # Select for Individual operations (only for debugging)\r\n #*************************************************************\r\n\r\n #TODO: Don't forget to select the right coeff from above!!!\r\n #Znew = escher.log_z(Z) #Log\r\n #Znew = escher.rotation(Znew, coeff) #Rotation\r\n Znew = escher.exp_z(Z) #Exponentiation\r\n #endregion\r\n\r\n\r\n # ****************************************************************\r\n # Display output picture\r\n # *****************************************************************\r\n X2, Y2 = complex_to_cartesian(Znew) # Back to Cartesians for display\r\n out_file(X2,Y2,src,outFile,c=1,resolution = 10) # Create output image\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"karynaur/escher","sub_path":"escher.py","file_name":"escher.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
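# Editor's aside (illustrative, not part of the record above): the Droste map is
# z -> exp(alpha * log z) with alpha = (2*pi*i + ln(s)) / (2*pi*i), chosen so that
# one full turn around the origin multiplies the radius by s = droste_scale,
# which is why the wrap-around in droste_transformation rescales by s. Quick check:
import numpy as np

s = 256
alpha = (2j * np.pi + np.log(s)) / (2j * np.pi)
z = 0.5 + 0.3j
f_z = np.exp(alpha * np.log(z))
f_z_turn = np.exp(alpha * (np.log(z) + 2j * np.pi))  # same point, one turn later
print(abs(f_z_turn / f_z))  # ~256.0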
+{"seq_id":"16120609194","text":"class SimulatorSymbolic(SystemSParametersSymbolic, Simulator):\n def __init__(self,sd=None,**args):\n SystemSParametersSymbolic.__init__(self,sd,**args)\n Simulator.__init__(self, sd)\n def LaTeXTransferMatrix(self):\n self.Check()\n self._LaTeXSi()\n veosi = MatrixMultiply(\n self.VoltageExtractionMatrix(self.pOutputList), self.SIPrime(True))\n veosil = Matrix2LaTeX(veosi, self._SmallMatrix())\n if len(veosi)==1:\n if len(veosi[0])==1:\n veosil='\\\\left('+veosil+'\\\\right)'\n on=Matrix2LaTeX([[D+'_{'+str(P)+'}'] for (D,P) in self.pOutputList])\n sv=Matrix2LaTeX(SubscriptedVector(self.SourceVector()))\n ssm=self.SourceToStimsPrimeMatrix(False)\n if len(ssm) == len(ssm[0]): # matrix is square\n isidentity=True\n for r in range(len(ssm)):\n for c in range(len(ssm[0])):\n if r==c:\n if ssm[r][c]!=1.:\n isidentity=False\n break\n else:\n if ssm[r][c]!=0.:\n isidentity=False\n break\n else: isidentity=False\n if isidentity:\n sm = ''\n else:\n sm=' \\\\cdot '+Matrix2LaTeX(self.SourceToStimsPrimeMatrix(True))\n line = on + '=' + veosil+sm+'\\\\cdot '+sv\n self._AddEq(line)\n return self\n def LaTeXEquations(self):\n self.LaTeXSystemEquation()\n self.LaTeXTransferMatrix()\n return self","repo_name":"TeledyneLeCroy/SignalIntegrity","sub_path":"Test/TestSignalIntegrity/SimulatorSymbolic_SimulatorSymbolic___init__.py","file_name":"SimulatorSymbolic_SimulatorSymbolic___init__.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"55"}
+{"seq_id":"32622640088","text":"import sys\n\nclass Queue:\n def __init__(self, max_size):\n self.__max_size = max_size\n self.__items = [0] * self.__max_size\n self.__size = 0\n self.__head = self.__tail = 0\n \n def push(self, item):\n if self.__size == self.__max_size:\n raise ValueError('overflow')\n self.__size += 1\n self.__items[self.__tail] = item\n self.__tail = (self.__tail + 1) % self.__max_size\n \n def pop(self):\n if self.__size == 0:\n raise ValueError('underflow')\n self.__size -= 1\n item = self.__items[self.__head]\n self.__head = (self.__head + 1) % self.__max_size\n return item\n \n def print(self, out):\n if self.__size == 0:\n out.write('empty\\n')\n return\n out.write(f'{self.__items[self.__head]}')\n it = (self.__head + 1) % self.__max_size\n while it != self.__tail:\n out.write(f' {self.__items[it]}')\n it = (it + 1) % self.__max_size\n out.write('\\n')\n\ntry:\n fin = open(sys.argv[1], 'r')\n fout = open(sys.argv[2], 'w')\nexcept:\n quit()\n\nqueue = None\nfor line in fin:\n if len(line) == 0 or line == '\\n':\n continue\n if line.startswith('set_size '):\n try:\n size = int(line[9:])\n if size >= 0:\n queue = Queue(size)\n break\n except:\n fout.write('error\\n')\n continue\n fout.write('error\\n')\nif queue is None:\n quit()\n\nfor line in fin:\n line = line.strip('\\n')\n if len(line) == 0:\n continue\n if line.startswith('push '):\n command = line.split()\n if len(command) == 2:\n try:\n queue.push(command[1])\n except ValueError as e:\n fout.write(f'{e}\\n')\n else:\n fout.write('error\\n')\n elif line == 'pop':\n try:\n fout.write(f'{queue.pop()}\\n')\n except ValueError as e:\n fout.write(f'{e}\\n')\n elif line == 'print':\n queue.print(fout)\n else:\n fout.write('error\\n')\nfin.close()\nfout.close()","repo_name":"Heinene/Algoritms_and_data_structures","sub_path":"3_Queue/_3_Queue.py","file_name":"_3_Queue.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"3323482889","text":"from django.urls import include, path\n\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import (\n ArticleViewSet, ArticlesFavoriteAPIView, CommentsListCreateAPIView,\n CommentsDestroyAPIView\n)\n\nrouter = DefaultRouter(trailing_slash=False)\nrouter.register(r'articles', ArticleViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n\n path('articles//favorite',\n ArticlesFavoriteAPIView.as_view()),\n\n path('articles//comments',\n CommentsListCreateAPIView.as_view()),\n\n path('articles//comments/',\n CommentsDestroyAPIView.as_view()),\n]\n","repo_name":"Ralst0n/conduit-django","sub_path":"conduit/apps/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"72452293291","text":"# Identifies wacom tablet model\nfrom wacom_data import TabletIdentities\nimport os\n\n\nclass TabletIdClass:\n def __init__(self, cloak=\"\"):\n self.TabletIds = TabletIdentities()\n self.tablets = []\n\n def identify(self, cloak=\"\"):\n if cloak != \"\":\n self.data = [\"iProduct \" + cloak]\n else:\n # self.Data = os.popen(\"lsusb -v | grep 'iProduct'\").readlines()\n self.data = os.popen(\"lsusb\").readlines()\n\n for item in self.data:\n if item.count(\"iProduct\"): # Identify by model name\n model = item.split(\" \")[-1].replace(\"\\n\", \"\")\n tablet = self.identify_by_model(model)\n if tablet:\n self.tablets.append(tablet)\n else: # Identify by USB device code (more reliable)\n code = item.split(\" \")[5].split(\":\")\n tablet = self.identify_by_usb_id(code[0], code[1])\n if tablet:\n self.tablets.append(tablet)\n return self.tablets\n\n def identify_by_model(self, model):\n for item in self.TabletIds.Tablets:\n if item.Model == model:\n return item\n\n def identify_by_usb_id(self, vendor_id, device_id):\n if int(vendor_id, 16) == 0x56a:\n for item in self.TabletIds.Tablets:\n if item.ProductId == int(device_id, 16):\n return item\n","repo_name":"lubosz/wacom-utility","sub_path":"wacom_identify.py","file_name":"wacom_identify.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"55"}
+{"seq_id":"17696220","text":"def selection_sort(nLst):\n ''' 选择排序 '''\n for i in range(len(nLst) - 1):\n index = i # 最小值的索引\n for j in range(i + 1, len(nLst)): # 找最小值的索引\n if nLst[index][2] < nLst[j][2]:\n index = j\n if i == index: # 如果目前索引是最小值索引\n pass # 不更动\n else:\n nLst[i], nLst[index] = nLst[index], nLst[i] # 资料对调\n return nLst\n\n\nmusic = [('李宗盛', '山丘', 24740000),\n ('赵传', '我是一只小小鸟', 8310000),\n ('五佰', '挪威的森林', 34130000),\n ('林忆莲', '听说爱情回来过', 12710000)\n ]\n\nprint(\"YouTube点播排行\")\nselection_sort(music)\nfor i in range(len(music)):\n print(\"{}:{} {} -- 点播次数 {}\".format(i + 1, music[i][0], music[i][1], music[i][2]))","repo_name":"wuuuuji/dedexq","sub_path":"蓝桥杯/算法零基础/第9章_排序/ch9_7.py","file_name":"ch9_7.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"2709062683","text":"def zipper(a, b):\n \"\"\"\n Zips the two lists 'a' and 'b' together. The lists must be of\n equal length.\n :param a: The first list to be zipped.\n :param b: The second list to be zipped.\n :return: The newly generated list of 'a' and 'b' zipped.\n \"\"\"\n if len(a) != len(b):\n print(\"Lists are not the same length!\")\n return\n new_list = []\n for element in a:\n new_list.append([element, b[a.index(element)]])\n return new_list\n\n\n# Testing the function\nprint(zipper([1, 2, 3, 4, 5], ['A', 'B', 'C', 'D', 'E']))\n","repo_name":"MattJonesDev/PythonChallenges","sub_path":"Challenge_15/zipper.py","file_name":"zipper.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"27343864794","text":"import tkinter as tk\nfrom tkinter import filedialog\nimport os\nimport pygame\nimport PyPDF2\nfrom gtts import gTTS\n\nclass PDFToAudioConverterApp:\n def __init__(self, window):\n self.window = window\n self.window.geometry('400x200')\n self.window.title('PDF to Audio Converter')\n self.window.configure(bg='#FFFAFF')\n\n self.heading = tk.Label(window, text='PDF to AUDIO', font=('Helvetica', 18, \"bold\"), bg='#FFFAFF', fg='#B10F2E')\n self.heading.grid(row=0, column=2, columnspan=3, pady=30, padx=10)\n\n self.button = tk.Button(window, text=\"Browse\", command=self.browse_file)\n self.button.grid(row=1, column=1, padx=40)\n\n self.file_path = tk.Text(window, height=1, width=30, bg='#FAFFFF')\n self.file_path.grid(row=1, column=4)\n\n self.audio_button = tk.Button(window, text=\"PDF to audio\", command=self.pdf_to_speech)\n self.audio_button.grid(row=2, column=4, pady=10)\n self.init_audio()\n\n def init_audio(self):\n pygame.mixer.init()\n\n def browse_file(self):\n path = filedialog.askopenfilename(filetypes=[(\"PDF Files\", \"*.pdf\")])\n if path:\n self.file_path.delete(\"1.0\", tk.END)\n self.file_path.insert(tk.END, path)\n else:\n self.file_path.delete(\"1.0\", tk.END)\n self.file_path.insert(tk.END, \"No PDF selected\")\n\n def pdf_to_speech(self):\n pdf_path = self.file_path.get(\"1.0\", tk.END).strip()\n if pdf_path.lower().endswith('.pdf'):\n file = open(pdf_path, 'rb')\n pdf_reader = PyPDF2.PdfReader(pdf_path)\n\n full_text = ''\n for page_num in range(len(pdf_reader.pages)):\n page = pdf_reader.pages[page_num]\n full_text += page.extract_text()\n print(full_text)\n\n file.close()\n\n speech = gTTS(full_text)\n pdf_name = os.path.basename(pdf_path)\n audio_filename = pdf_name.replace('.pdf', '_output.mp3')\n\n audio_filename = str(audio_filename)\n\n speech.save(audio_filename)\n\n pygame.mixer.music.load(audio_filename)\n print(audio_filename)\n pygame.mixer.music.play()\n\n else:\n self.file_path.delete(\"1.0\", tk.END)\n self.file_path.insert(tk.END, \"Invalid PDF file\")\n","repo_name":"rupaltyagi/pdf-to-audio","sub_path":"PDF_to_audio/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"73499501930","text":"import fiona\nfrom collections import OrderedDict\nfrom fiona.crs import from_epsg\nfrom shapely.geometry import Polygon\n\n\nwith fiona.open(\"../data/ne_10m_urban_areas/ne_10m_urban_areas.shp\") as src:\n\n print(src.meta)\n print(src.meta['driver'])\n print(src.meta['schema']['properties']['scalerank'])\n print(src.meta['schema']['properties']['featurecla'])\n print(src.meta['schema']['properties']['area_sqkm'])\n print(src.meta['schema']['properties']['min_zoom'])\n print(src.meta['schema']['geometry'])\n # ...etc\nprint(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\np = Polygon([(0,0),(0,11),(11,11),(11,0)])\nprint(p.__dict__)\npoly = {\n 'type': 'Polygon',\n 'coordinates': [[\n (1179091.1646903288, 712782.8838459781),\n (1161053.0218226474, 667456.2684348812),\n (1214704.933941905, 641092.8288590391),\n (1228580.428455506, 682719.3123998424),\n (1218405.0658121984, 721108.1805541387),\n (1179091.1646903288, 712782.8838459781) ]]}\nsch = {\n 'geometry': 'Polygon',\n 'type':'str',\n 'coordinates':'list',\n 'properties': OrderedDict([\n ('id', 4),\n ('objectid', 5)])\n}\n\ncrs = from_epsg(25831)\n\n\n\nwith fiona.open('test.geojson','w',driver=\"GeoJSON\",schema=sch,crs=crs) as dst:\n dst.write(poly)\n dst.write(p)\n","repo_name":"SylvainDeker/Distributed-Systems","sub_path":"draft/draft_fiona.py","file_name":"draft_fiona.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"29421717689","text":"from typing import List\nfrom collections import OrderedDict\n\n#\n# @lc app=leetcode.cn id=164 lang=python3\n#\n# [164] 最大间距\n#\n\n# @lc code=start\nclass Solution:\n def maximumGap(self, nums: List[int]) -> int:\n nums.sort()\n max_sub = 0\n for i in range(len(nums) - 1):\n max_sub = max(max_sub, abs(nums[i+1] - nums[i]))\n return max_sub\n# @lc code=end\n\nprint(Solution().maximumGap([3,6,9,1]))\n","repo_name":"pauvrepetit/leetcode","sub_path":"164/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"40003626304","text":"from django.shortcuts import get_object_or_404\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom rest_framework import serializers, status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework_jwt.authentication import JSONWebTokenAuthentication\nfrom .models import Movie, Article, Comment, Likes\nfrom .serializers import MovieSerializer, ArticleSerializer, CommentSerializer, CommentDonateSerializer, ArticlePointSerializer, ArticleUpdateSerializer, LikesSerializer, UserCuratorSerializer\n\n# user 모델 가져오기\nfrom django.contrib.auth import get_user_model\n\n# 이번 주 내에 적힌 평가 필터용\nfrom datetime import datetime, timedelta\n\n\n\n# 영화 단일 데이터\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef movie_detail(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n serializer = MovieSerializer(movie)\n return Response(serializer.data)\n\n# 머신러닝 프로세스 기반 추천\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef movie_recommend(request, movie_pk):\n movie = get_object_or_404(Movie, pk=movie_pk)\n movies_recommend =[]\n for pk in movie.movie_reference_overview:\n answer = get_object_or_404(Movie, pk=pk)\n movies_recommend.append(answer)\n serializer = MovieSerializer(movies_recommend, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 머신러닝 프로세스 기반 영화 추천\n# 현재는 인기순으로 영화 제목 보내기 (selectBox)\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef home(request):\n # 선정 알고리즘은 동봉된 01.ML_recommend.py와 기술서 참조\n movies = Movie.objects.filter(pk__in=[588228, 508943, 438631, 436969, 566525, 550988, 522402, 497698, 451048, 370172, 459151, 482373])\n # 만약을 위한 대비코드\n # movies = Movie.objects.order_by('-popularity')[:12]\n\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 영화 장르별/최신순/평점순/인기순\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication]) # JWT가 유효한지\n# @permission_classes([IsAuthenticated]) # 인증 여부를 확인\n@permission_classes([AllowAny])\ndef movie_list(request):\n filters = request.GET.get('filter')\n # 최신순/평점순/인기순\n if filters in ('release_date', 'vote_average', 'popularity'):\n movies = Movie.objects.order_by(f'-{filters}')[:30]\n # 장르별\n else:\n movies = Movie.objects.filter(genre_ids__name=filters)\n movies = movies.order_by('-popularity')[:30]\n #movies = Movie.objects.filter(genre_ids__name=filters)[:30]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data)\n\n# 영화 이름으로 검색\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef movie_search(request):\n searchKeyword = request.GET.get('searchKeyword')\n # 한글 제목이나 원본 제목이 사용자의 입력를 포함하는 영화들을 반환\n movies = Movie.objects.filter(Q(title__icontains=searchKeyword)|Q(original_title__icontains=searchKeyword))[:25]\n serializer = MovieSerializer(movies, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n# 단일 평가 생성, 조회, 삭제, 
수정\n@api_view(['GET', 'POST', 'PUT', 'DELETE'])\n@authentication_classes([JSONWebTokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef article_detail(request, movie_pk):\n # 평가 생성, 조회\n movie = get_object_or_404(Movie, pk=movie_pk)\n if request.method == 'GET':\n article = Article.objects.filter(user__pk=request.user.pk, movie__pk=movie_pk).first()\n serializer = ArticleSerializer(article)\n return Response(serializer.data, status=status.HTTP_200_OK)\n elif request.method == 'POST':\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save(user=request.user, movie=movie)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n # 평가 삭제, 수정 (작성자인지 아닌지 체크)\n article = Article.objects.filter(user__pk=request.user.pk, movie__pk=movie_pk).first()\n articleId = request.data.get('id')\n if articleId == article.pk:\n # 평가 제거\n if request.method == 'DELETE':\n article.delete()\n data = {\n 'delete' : '평가가 삭제되었습니다.'\n }\n return Response(data, status=status.HTTP_204_NO_CONTENT)\n # 평가 수정\n elif request.method == 'PUT':\n serializer = ArticleUpdateSerializer(article, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n return Response({ 'Unauthorized': '작성자가 아닙니다.'}, status=status.HTTP_403_FORBIDDEN)\n\n\n# 평가 번호로 평가 내용 가져오기\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef get_article(request, article_pk):\n article = get_object_or_404(Article, pk=article_pk)\n serializer = ArticleSerializer(article)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 해당 영화에 적힌 평가 다 가져오기\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef article_list(request, movie_pk):\n # 현재는 작성순이지만, 마일리지 추가로 알고리즘 변동\n articles = Article.objects.filter(movie__pk=movie_pk).order_by('-points','-pk')\n serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 해당 작성자가 적은 평가 최신 3개\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef article_curator(request, user_pk):\n articles = Article.objects.filter(user__pk=user_pk).order_by('-pk')[:3]\n serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 해당 작성자가 적은 평가(일단 전부)\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef article_curator_all(request, user_pk):\n articles = Article.objects.filter(user__pk=user_pk).order_by('-pk')\n serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n# 이번주에 적힌 모든 평가 중 상위 6개\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef article_home(request):\n # 필터 : 이번 주\n one_week_ago = datetime.today() - timedelta(days=7)\n\n articles = Article.objects.filter(created_at__gte=one_week_ago).order_by('-points')[:6]\n serializer = ArticleSerializer(articles, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n# 단일 댓글 생성, 조회(?), 삭제, 수정\n@api_view(['GET', 'POST', 'PUT', 
'DELETE'])\n@authentication_classes([JSONWebTokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef comment_detail(request, article_pk):\n # 댓글 생성, 조회\n article = get_object_or_404(Article, pk=article_pk)\n if request.method == 'GET':\n comment = Comment.objects.filter(user__pk=request.user.pk, article__pk=article_pk).first()\n serializer = CommentSerializer(comment)\n return Response(serializer.data, status=status.HTTP_200_OK)\n elif request.method == 'POST':\n # 도네이션 코멘트인지 아닌지에 따라 직렬화가 바뀐다.\n mileage = request.data.get('mileage')\n if mileage:\n serializer = CommentDonateSerializer(data=request.data)\n else:\n serializer = CommentSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save(user=request.user, article=article)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n # 댓글 삭제, 수정 (작성자인지 아닌지 체크)\n comment = get_object_or_404(Comment, pk=request.data.get('commentId'))\n if request.user == comment.user or request.user == article.user:\n # 댓글 제거\n if request.method == 'DELETE':\n comment.delete()\n data = {\n 'delete' : '댓글이 삭제되었습니다.'\n }\n return Response(data, status=status.HTTP_204_NO_CONTENT)\n # 댓글 수정\n elif request.method == 'PUT':\n serializer = CommentSerializer(comment, data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n return Response({ 'Unauthorized': '권한이 없습니다.'}, status=status.HTTP_403_FORBIDDEN)\n\n\n# 해당 평가에 적힌 댓글 다 가져오기\n@api_view(['GET'])\n# @authentication_classes([JSONWebTokenAuthentication])\n# @permission_classes([IsAuthenticated])\n@permission_classes([AllowAny])\ndef comment_list(request, article_pk):\n # 마일리지 > 최신 순으로 출력\n comments = Comment.objects.filter(article__pk=article_pk).order_by('-mileage','-pk')\n serializer = CommentDonateSerializer(comments, many=True)\n return Response(serializer.data)\n\n\n# 평가 포인트 갱신 (마일리지)\n@api_view(['PUT'])\n@authentication_classes([JSONWebTokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef point_change(request, article_pk):\n article = get_object_or_404(Article, pk=article_pk)\n points = article.points + request.data.get('mileage')\n data = {\n 'points': points\n }\n serializer = ArticlePointSerializer(article, data=data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(serializer.data)\n\n\n# 좋아요 단일 조회, 생성(추가), 삭제\n@api_view(['GET', 'POST', 'DELETE'])\n@authentication_classes([JSONWebTokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef likes(request, article_pk):\n if request.method == 'GET':\n likes = Likes.objects.filter(user__pk=request.user.pk, article__pk=article_pk).first()\n serializer = LikesSerializer(likes)\n return Response(serializer.data)\n elif request.method == 'POST':\n article = get_object_or_404(Article, pk=article_pk)\n\n # 포인트 관련\n points = article.points + 1000\n data = {\n 'points': points\n }\n serializer = ArticlePointSerializer(article, data=data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n # 저장 및 반환\n movie = Movie.objects.filter(articles__pk=article_pk).first() \n serializer = LikesSerializer(data=request.data)\n if serializer.is_valid():\n # 판별용, \n serializer.save(user=request.user, article=article, backdrop_path=movie.backdrop_path, title=movie.title)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n elif request.method == 'DELETE':\n # 포인트 관련\n article = get_object_or_404(Article, pk=article_pk)\n points = article.points - 1000\n data = {\n 
'points': points\n }\n serializer = ArticlePointSerializer(article, data=data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n\n # 삭제 및 반환\n likes = Likes.objects.filter(user__pk=request.user.pk, article__pk=article_pk).first()\n likes.delete()\n data = {\n 'delete' : '좋아요를 해제하셨습니다.'\n }\n return Response(data, status=status.HTTP_204_NO_CONTENT)\n\n# 좋아요 조회(이 유저가 좋아요 한 모든 평가를 최신순으로 가져오자.)\n# 프로필 인증 요청 GET은 인증 필요\n@api_view(['GET'])\n@authentication_classes([JSONWebTokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef likes_list(request):\n likes = Likes.objects.filter(user__pk=request.user.pk).order_by('-pk')[:12]\n serializer = LikesSerializer(likes, many=True)\n return Response(serializer.data)\n","repo_name":"limgeonho/MovieCurators","sub_path":"pjt-final-drf/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"30085128120","text":"# AtCoder Beginner Contest 030 C - 飛行機乗り\n# https://atcoder.jp/contests/abc030/tasks/abc030_c\n# tag: 愚直 高橋君\n\n# そこまで難しい問題ではないが、時間をループ変数として取ると\n# 10^9 の制限によって時間切れになるので、飛行機の時刻表を\n# そのまま使用し、時間を加算していく。\n# また、時刻表をどこまで使用したかを保持しておかないと、\n# いちいち最初から見直していくことになり、計算量が O(N^2)に\n# なってしまうので注意。\n\ndef main():\n N, M = map(int, input().split())\n X, Y = map(int, input().split())\n A = list(map(int, input().split()))\n B = list(map(int, input().split()))\n\n # 0なら空港Aに、1なら空港Bにいるとする\n airport_now = 0\n\n # どこまで時刻表をチェックしたかを保持しておく。\n index_a = 0\n index_b = 0\n\n time = 0\n result = 0\n\n while True:\n # 現在の時刻で乗れるところまで時刻表を進める\n if airport_now == 0:\n while time > A[index_a]:\n index_a += 1\n # 時刻表が終了したら、ループを終わる\n if index_a >= len(A):\n break\n # 進めきったら、飛行機に乗る\n else:\n time = A[index_a] + X\n airport_now = 1\n continue\n # while ~ else を利用した多重ループ脱出\n break\n # 以下同じ\n else:\n while time > B[index_b]:\n index_b += 1\n if index_b >= len(B):\n break\n else:\n time = B[index_b] + Y\n airport_now = 0\n # B → A の移動で一往復なので、答えに1を足す\n result += 1\n continue\n break\n \n print(result)\n\nmain()\n","repo_name":"scrblbug/atcoder","sub_path":"python/abc030_c.py","file_name":"abc030_c.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21815336507","text":"from django.core.management.base import BaseCommand\nfrom apps.podcasts.models import Podcast\nfrom apps.podcast_sources.models import PodcastSource\nfrom apps.podcast_source_types.models import PodcastSourceType\n\nfrom urllib.request import Request, urlopen\nfrom time import mktime\nimport json\nimport feedparser\nfrom datetime import datetime, timedelta\n\nclass Command(BaseCommand):\n args = ''\n help = 'our help string comes here'\n\n def clear_html_tags(self, s):\n if(s.find(\"<\") == -1):\n if len(s)>200:\n s = s[:190]\n return s\n print(s)\n else:\n s = s[s.find(\">\")+1:]\n return self.clear_html_tags(s)\n\n\n def get_podcasts(self, *args, **kwargs):\n sources = PodcastSource.objects.all()\n podcasts = Podcast.objects.all()\n for source in sources:\n d = feedparser.parse(source.link)\n for item in d['items']:\n if not podcasts.filter(title=item['title']).exists():\n obj = {}\n dur1 = 0;\n dur2 = 0;\n print(item)\n obj[\"title\"] = item['title']\n obj[\"description\"] = self.clear_html_tags(item['summary'])\n obj[\"image\"] = \"\"\n if \"image\" in item:\n obj[\"image\"] = item['image']['href']\n obj[\"source\"] = d['channel']['title']\n\n\n if \"links\" in item:\n for link in item['links']:\n print(\"______________\")\n print(link)\n print(\"______________\")\n if link['type'] == \"audio/mpeg\":\n obj['link'] = link['href']\n dur1 = int(link['length'])\n if \"itunes_duration\" in link:\n dur2 = link[\"itunes_duration\"]\n if \"itunes_duration\" in item:\n s=item[\"itunes_duration\"]\n if \":\" in s:\n if (len(s[:s.find(\":\")])) != 3:\n hours = int(s[:s.find(\":\")])\n s = (s[s.find(\":\")+1:])\n minutes = int(s[:s.find(\":\")])\n dur2 = hours*60 + minutes\n else:\n dur2 = int(s[s.find(\":\"):])\n else:\n dur2 = int(s)/60\n\n if dur2 != 0:\n obj[\"duration\"] = dur2\n elif dur1 !=0:\n obj[\"duration\"] = dur1\n\n obj[\"published\"] = datetime.fromtimestamp(mktime(item[\"published_parsed\"]))\n if obj[\"published\"]>=datetime.now()-timedelta(days=30) and 'link' in obj:\n new_podcast = Podcast.objects.create(\n title=obj['title'],\n link=obj['link'],\n image=obj['image'],\n source=obj['source'],\n duration=obj['duration'],\n published= obj['published'],\n description = obj['description']\n )\n for tag in source.tags.all():\n new_podcast.tags.add(tag)\n print(new_podcast.title)\n\n def handle(self, *args, **options):\n self.get_podcasts()\n","repo_name":"andrewdbass/wickwockpy","sub_path":"apps/podcasts/management/commands/get_podcasts.py","file_name":"get_podcasts.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"27770792000","text":"#!/usr/bin/env python3\n\nimport rclpy\nfrom rclpy.node import Node\nfrom sensor_msgs.msg import LaserScan\n\nclass SubLaser(Node):\n def __init__(self):\n super().__init__(\"laser_node\")\n subscriber = self.create_subscription(LaserScan, 'robot_boat/laser_scan', self.clb, 2)\n\n\n def clb(self, msg):\n print(msg.ranges)\n\ndef main():\n \n rclpy.init()\n\n xx = SubLaser()\n\n while rclpy.ok():\n rclpy.spin(xx)\n \n xx.destroy_node()\n \n rclpy.shutdown()\n # MAIN\n\nif __name__=='__main__':\n main()","repo_name":"Hercogs/robot_boat","sub_path":"robot_boat_navigation_pkg/scripts/print_scan.py","file_name":"print_scan.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"39648828688","text":"import os\nimport re\nimport unittest\nimport webtest\n\nfrom core.domain import config_domain\nfrom core.platform import models\n(base_models, exp_models, file_models, stats_models, user_models) = (\n models.Registry.import_models([\n models.NAMES.base_model,\n models.NAMES.exploration, models.NAMES.file, models.NAMES.statistics,\n models.NAMES.user\n ])\n)\ncurrent_user_services = models.Registry.import_current_user_services()\nimport feconf\nimport main\n\nimport json\n\n\nCSRF_REGEX = (\n r'csrf_token: JSON\\.parse\\(\\'\\\\\\\"([A-Za-z0-9/=_-]+)\\\\\\\"\\'\\)')\n\n\ndef empty_environ():\n os.environ['AUTH_DOMAIN'] = 'example.com'\n os.environ['SERVER_NAME'] = 'localhost'\n os.environ['HTTP_HOST'] = 'localhost'\n os.environ['SERVER_PORT'] = '8080'\n os.environ['USER_EMAIL'] = ''\n os.environ['USER_ID'] = ''\n os.environ['USER_IS_ADMIN'] = '0'\n\n\nclass TestTags(object):\n \"\"\"Tags for labelling particular tests.\"\"\"\n\n # Tag that is used to flag tests which take a long time to run, so that\n # they can be excluded via a command-line argument.\n SLOW_TEST = 1\n\n\nclass TestBase(unittest.TestCase):\n \"\"\"Base class for all tests.\"\"\"\n\n maxDiff = 2500\n\n TAGS = []\n\n DEFAULT_USERNAME = 'defaultusername'\n\n def _delete_all_models(self):\n versioned_model_classes = frozenset([\n exp_models.ExplorationModel,\n exp_models.ExplorationRightsModel,\n file_models.FileMetadataModel,\n file_models.FileModel,\n ])\n\n unversioned_model_classes = frozenset([\n exp_models.ExplorationSnapshotMetadataModel,\n exp_models.ExplorationSnapshotContentModel,\n exp_models.ExplorationRightsSnapshotMetadataModel,\n exp_models.ExplorationRightsSnapshotContentModel,\n file_models.FileMetadataSnapshotMetadataModel,\n file_models.FileMetadataSnapshotContentModel,\n file_models.FileSnapshotMetadataModel,\n file_models.FileSnapshotContentModel,\n stats_models.StateCounterModel,\n stats_models.StateRuleAnswerLogModel,\n stats_models.FeedbackItemModel,\n user_models.UserSettingsModel,\n ])\n\n for clazz in versioned_model_classes:\n for entity in clazz.get_all(include_deleted_entities=True):\n entity.delete(\n feconf.ADMIN_COMMITTER_ID, '', force_deletion=True)\n\n for clazz in unversioned_model_classes:\n for entity in clazz.get_all(include_deleted_entities=True):\n entity.delete()\n\n def setUp(self):\n self.testapp = webtest.TestApp(main.app)\n\n def tearDown(self): # pylint: disable-msg=g-bad-name\n self._delete_all_models()\n\n def shortDescription(self):\n \"\"\"Additional information logged during unit test invocation.\"\"\"\n # Suppress default logging of docstrings.\n return None\n\n def get_expected_login_url(self, slug):\n \"\"\"Returns the expected login URL.\"\"\"\n return current_user_services.create_login_url(slug)\n\n def get_expected_logout_url(self, slug):\n \"\"\"Returns the expected logout URL.\"\"\"\n return current_user_services.create_logout_url(slug)\n\n def _parse_json_response(self, json_response, expect_errors=False):\n \"\"\"Convert a JSON server response to an object (such as a dict).\"\"\"\n if not expect_errors:\n self.assertEqual(json_response.status_int, 200)\n\n self.assertEqual(\n json_response.content_type, 'application/javascript')\n self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))\n\n return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])\n\n def get_json(self, url):\n \"\"\"Get a JSON response, transformed to a Python object.\"\"\"\n json_response = self.testapp.get(url)\n 
self.assertEqual(json_response.status_int, 200)\n return self._parse_json_response(json_response, expect_errors=False)\n\n def post_json(self, url, payload, csrf_token=None, expect_errors=False,\n expected_status_int=200, upload_files=None):\n \"\"\"Post an object to the server by JSON; return the received object.\"\"\"\n data = {'payload': json.dumps(payload)}\n if csrf_token:\n data['csrf_token'] = csrf_token\n\n json_response = self.testapp.post(\n str(url), data, expect_errors=expect_errors,\n upload_files=upload_files)\n\n self.assertEqual(json_response.status_int, expected_status_int)\n return self._parse_json_response(\n json_response, expect_errors=expect_errors)\n\n def put_json(self, url, payload, csrf_token=None, expect_errors=False,\n expected_status_int=200):\n \"\"\"Put an object to the server by JSON; return the received object.\"\"\"\n data = {'payload': json.dumps(payload)}\n if csrf_token:\n data['csrf_token'] = csrf_token\n\n json_response = self.testapp.put(\n str(url), data, expect_errors=expect_errors)\n\n self.assertEqual(json_response.status_int, expected_status_int)\n return self._parse_json_response(\n json_response, expect_errors=expect_errors)\n\n def get_csrf_token_from_response(self, response):\n \"\"\"Retrieve the CSRF token from a GET response.\"\"\"\n return re.search(CSRF_REGEX, response.body).group(1)\n\n def register_editor(self, email, username=None):\n \"\"\"Register a user with the given username as an editor.\"\"\"\n if username is None:\n username = self.DEFAULT_USERNAME\n\n self.login(email)\n\n response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)\n csrf_token = self.get_csrf_token_from_response(response)\n\n response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {\n 'csrf_token': csrf_token,\n 'payload': json.dumps({\n 'username': username,\n 'agreed_to_terms': True\n })\n })\n self.assertEqual(response.status_int, 200)\n\n self.logout()\n\n def set_admins(self, admin_emails):\n \"\"\"Set the ADMIN_EMAILS property.\"\"\"\n self.login('superadmin@example.com', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.ADMIN_EMAILS.name: admin_emails,\n }\n }, csrf_token)\n self.logout()\n\n def set_moderators(self, moderator_emails):\n \"\"\"Set the MODERATOR_EMAILS property.\"\"\"\n self.login('superadmin@example.com', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.MODERATOR_EMAILS.name: moderator_emails,\n }\n }, csrf_token)\n self.logout()\n\n def get_current_logged_in_user_id(self):\n return os.environ['USER_ID']\n\n def get_user_id_from_email(self, email):\n return current_user_services.get_user_id_from_email(email)\n\n\nclass AppEngineTestBase(TestBase):\n \"\"\"Base class for tests requiring App Engine services.\"\"\"\n\n def login(self, email, is_super_admin=False):\n os.environ['USER_EMAIL'] = email\n os.environ['USER_ID'] = self.get_user_id_from_email(email)\n os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'\n\n def logout(self):\n # TODO(sll): Move this to the tearDown() method of the generic test\n # base?\n os.environ['USER_EMAIL'] = ''\n os.environ['USER_ID'] = ''\n del os.environ['USER_IS_ADMIN']\n\n def setUp(self): # pylint: 
disable-msg=g-bad-name\n empty_environ()\n\n from google.appengine.datastore import datastore_stub_util\n from google.appengine.ext import testbed\n\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n\n # Configure datastore policy to emulate instantaneously and globally\n # consistent HRD; we also patch dev_appserver in main.py to run under\n # the same policy.\n policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(\n probability=1)\n\n # Declare any relevant App Engine service stubs here.\n self.testbed.init_user_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_datastore_v3_stub(consistency_policy=policy)\n self.testbed.init_taskqueue_stub()\n self.taskq = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n\n # Set up the app to be tested.\n self.testapp = webtest.TestApp(main.app)\n\n def tearDown(self): # pylint: disable-msg=g-bad-name\n os.environ['USER_IS_ADMIN'] = '0'\n self.testbed.deactivate()\n\n\nif feconf.PLATFORM == 'gae':\n GenericTestBase = AppEngineTestBase\nelse:\n raise Exception('Invalid platform: expected one of [\\'gae\\']')\n","repo_name":"aldeka/oppia","sub_path":"core/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"}
+{"seq_id":"31378743805","text":"import math\ndef solution(n, stations, w):\n answer = 0\n check = (w*2) + 1\n start = 1\n for i in stations:\n answer += max(math.ceil((i-w-start)/check),0)\n start = i + w + 1\n \n if n >= start:\n answer += math.ceil((n-start+1)/check)\n return answer","repo_name":"JShistory/Programmers","sub_path":"프로그래머스/lv3/12979. 기지국 설치/기지국 설치.py","file_name":"기지국 설치.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"15055358432","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# 计算求和的前 n 项\nn=7 \nx = np.linspace(1, n, n)\ny = np.zeros(n)\ny[0] = 1\nfor i in range(n-1):\n y[i+1] = y[i] + (-1)**(i+1) * 1/(i+2) \n\n# 进行 Shanks transform\ndef shanks_trans(y):\n \"\"\"Shanks transform\"\"\"\n n = y.shape[0]\n # 计算了求和的前 n 项, 最多可做 order 次 Shanks transform\n order = int(np.floor((n - 1) / 2)) \n st = np.full((order+1, n), np.NaN) \n st[0] = y\n for i in range(order):\n i += 1\n for j in range(n - 2*i):\n j += i\n st[i, j] = ((st[i-1, j]**2 - st[i-1, j+1]*st[i-1, j-1])\n /(2*st[i-1, j] - st[i-1, j-1] - st[i-1, j+1]))\n return st\nst = shanks_trans(y)\n\n# 画图\nfor i in range(st.shape[0]):\n plt.plot(x, st[i], 'o', label=f'N = {i:.0f}')\nplt.plot(x, x*0+np.log(2), label=f'$\\ln 2$')\nplt.title('after N times Shanks transform')\nplt.legend()\nplt.savefig('ShanksTrans.jpg')\nplt.show()\n","repo_name":"phyer219/old_version_blog","sub_path":"2020/10/07/CarlBenderMathematicalPhysicsLecture3and4/ShanksTrans.py","file_name":"ShanksTrans.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"55"}
+{"seq_id":"32309532225","text":"\n# coding: utf-8\n\n# # This script is used for getting the features of essays to build a machine learning model\n# '''\n# This script includes several functions, including:\n# 1. grammar error number check\n# 2. Spelling error number check \n# 3. Word Count\n# 4. Number of sentence \n# 5. Average length of sentences \n# 6. Paragraph number \n# '''\n\n# # Read data\n\n# In[1]:\n\n# import pandas as pd\n# import numpy as np\n\n\n# In[2]:\n\n# read data \n# training_set = pd.read_csv(\"data/comments.csv\", )\n\n\n# In[3]:\n\n# X = training_set['comment_text']\n# y = training_set['commenter_credibility']\n\n\n# In[4]:\n\n# print (X[0])\n# print (y[0])\n\n\n# # Extract features\n\n# In[5]:\n\nimport re\nfrom collections import Counter\nimport nltk\nimport math\nimport re\n\n\n# ## Grammar errors features\n# Grammar errors are currently not considered due to the computation time and accuracy.\n\n# ## Spelling \n# ### Some words that not appear in the en-spelling.txt are considered as errors. \n\n# In[6]:\n\n# function 2: spelling error number checking. This function can be updated with nltk.\ndef get_words(text):\n ''' Get all the words in a text \n :param text: raw text\n :return: all the words in the text\n Example: \n print (get_words('I amd the .. . /3'))\n '''\n return re.findall(r'\\w+', text.lower())\n\n# Get all the words in en-spelling.txt which contains most words in English dictionary\n# words.txt is from https://github.com/dwyl/english-words\n# WORDS = Counter(get_words(open('en-spelling.txt').read()))\nWORDS = Counter(get_words(open('words.txt').read()))\n\n\n# In[7]:\n\ndef get_spelling_error_number(text):\n '''\n 1. Check if it is in the words.txt\n 2. Check if it is a number\n Get the number of errors in the text. The spelling error defined here is the words not in en-spelling.txt\n :param text: the raw text\n :return: the number of spelling errors. \n Example:\n print (get_spelling_error_number(\"he kk uu in jsas ssssss\"))\n '''\n text_words = get_words(text)\n error_numer = 0\n for w in text_words:\n if w not in WORDS:\n if w.isdigit():\n pass\n else:\n error_numer += 1\n# print (w)\n return float(error_numer)\n\n\n# In[8]:\n\n# print (get_spelling_error_number(\"he kk uu in jsas ssssss\"))\n\n\n# ## Wors Number \n\n# In[9]:\n\n# Function 3: word count\ndef get_word_number(text):\n '''\n Get the number of words (including numbers) in the raw text \n :param text: the raw text\n :return: the number of words\n Example: \n print (get_word_number(\"this is a number 's \")) \n '''\n return float(len(re.findall(r'\\w+', text)))\n\n\n# In[10]:\n\n# print (get_word_number(\"This is a . @@@ 555 good point\"))\n\n\n# ## Sentence number \n\n# In[11]:\n\n# Function 4: Number of sentence\ndef get_sent_number(text):\n '''\n Get the number of sentences in the text\n :param text: the raw text\n :return: the number of sentences \n Example: \n print (get_sent_number(\"This is number one. This ise number2, and number 2.\"))\n '''\n paraList = text.splitlines()\n paraList[:] = [element for element in paraList if element != \"\"]\n count = 0 \n for t in paraList:\n count = count + len(nltk.sent_tokenize(t))\n return float(count)\n\n\n\n# In[12]:\n\n# s = \"This a first . and the Second! 
The \"\n# print (get_sent_number(s))\n\n\n# In[13]:\n\n## Get average sentence length\n\n\n# In[14]:\n\n# Function 5: Average length of sentences\n\ndef get_average_sent_length(text):\n '''\n Get the average sentence length of the given text.\n :param text: the raw text \n :return: the average length of the sentences\n Example: \n print (get_average_sent_length(\"this is. This is two. Three.\"))\n '''\n\n sents = nltk.sent_tokenize(text)\n num = get_sent_number(text)\n \n return round(get_word_number(text)/num,2)\n\n\n# In[15]:\n\n# s = \"This ia first. .\"\n# print (get_sent_number(s))\n# print (get_average_sent_length(s))\n\n\n# # Get Paragraph number\n\n# In[16]:\n\n# Function 6: Paragraph number\ndef get_para_number(text):\n '''\n Get the number of paragraphs.\n :param text: the raw text.\n :return: the number of paragraphs.\n Example: \n s = \"This is first. this is first. \\n This is sencond. \\n\\n this is third.\" \\\n print (get_para_number(s))\n '''\n paraList = text.splitlines()\n paraList[:] = [element for element in paraList if element != \"\"]\n return len(paraList)\n\n\n# In[17]:\n\n# i = 1\n# print (X[i])\n# print (get_para_number(X[i])) \n\n\n# ## Summary of features\n\n# In[18]:\n\n# summarize all the return value of the functions above.\ndef features_summary(text):\n '''\n Summary the function values of the functions above, so that don't need \n to call funtions one by one. \n :param text: the raw text \n :return: a list contains all the information mentioned above.\n Examples:\n s = \"This is a text. This is a text.\"\n print (inf_summary(s))\n '''\n values = [\n # text_grammar_check(text),\n get_spelling_error_number(text),\n get_word_number(text),\n get_sent_number(text),\n get_average_sent_length(text),\n get_para_number(text)\n ]\n return values\n\n\n# In[19]:\n\n# i = 111\n# print (X[i])\n# print (y[i])\n# print (features_summary(X[i]))\n\n","repo_name":"robspringles/Argument-Scoring-System","sub_path":"3. prediction model/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"12230465757","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\"\"\"\nCommands work with servers. (Hiss, boo.)\n\"\"\"\n\nimport copy\nimport logging\n\nfrom fabric.api import local, put, settings, require, run, sudo, task\nfrom fabric.state import env\nfrom jinja2 import Template\n\nimport app_config\n\nlogging.basicConfig(format=app_config.LOG_FORMAT)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(app_config.LOG_LEVEL)\n\n\"\"\"\nSetup\n\"\"\"\n\n@task\ndef setup():\n \"\"\"\n Setup servers for deployment.\n\n This does not setup services or push to S3. Run deploy() next.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n require('branch', provided_by=['stable', 'master', 'branch'])\n\n if not app_config.DEPLOY_TO_SERVERS:\n logger.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py before setting up the servers.')\n\n return\n\n install_google_oauth_creds()\n create_directories()\n create_virtualenv()\n clone_repo()\n checkout_latest()\n install_requirements()\n setup_logs()\n\ndef create_directories():\n \"\"\"\n Create server directories.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)\n run('mkdir -p /var/www/uploads/%(PROJECT_FILENAME)s' % app_config.__dict__)\n\ndef create_virtualenv():\n \"\"\"\n Setup a server virtualenv.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)\n run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)\n\ndef clone_repo():\n \"\"\"\n Clone the source repository.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)\n\n if app_config.REPOSITORY_ALT_URL:\n run('git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)\n\n@task\ndef checkout_latest(remote='origin'):\n \"\"\"\n Checkout the latest source.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n require('branch', provided_by=['stable', 'master', 'branch'])\n\n run('cd %s; git fetch %s' % (app_config.SERVER_REPOSITORY_PATH, remote))\n run('cd %s; git checkout %s; git pull %s %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, remote, env.branch))\n\n@task\ndef install_requirements():\n \"\"\"\n Install the latest requirements.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n run('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U -r %(SERVER_REPOSITORY_PATH)s/requirements.txt' % app_config.__dict__)\n run('cd %(SERVER_REPOSITORY_PATH)s; npm install' % app_config.__dict__)\n\n@task\ndef setup_logs():\n \"\"\"\n Create log directories.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n sudo('mkdir %(SERVER_LOG_PATH)s' % app_config.__dict__)\n sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % app_config.__dict__)\n\n@task\ndef install_crontab():\n \"\"\"\n Install cron jobs script into cron.d.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n sudo('cp %(SERVER_REPOSITORY_PATH)s/crontab /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)\n\n@task\ndef uninstall_crontab():\n \"\"\"\n Remove a previously install cron jobs script from cron.d\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n sudo('rm /etc/cron.d/%(PROJECT_FILENAME)s' % app_config.__dict__)\n\n@task\ndef 
install_google_oauth_creds():\n \"\"\"\n Install Google Oauth credentials file (global) from workinprivate repo\n \"\"\"\n run('git clone git@github.com:nprapps/workinprivate.git /tmp/workinprivate-tmp')\n run('cp /tmp/workinprivate-tmp/.google_oauth_credentials %s' % app_config.GOOGLE_OAUTH_CREDENTIALS_PATH)\n run('rm -Rf /tmp/workinprivate-tmp')\n\n@task\ndef remove_google_oauth_creds():\n \"\"\"\n Remove Google oauth credentials file (global)\n \"\"\"\n run('rm %s' % app_config.GOOGLE_OAUTH_CREDENTIALS_PATH)\n\ndef delete_project():\n \"\"\"\n Remove the project directory. Invoked by shiva.\n \"\"\"\n run('rm -rf %(SERVER_PROJECT_PATH)s' % app_config.__dict__)\n\n\"\"\"\nConfiguration\n\"\"\"\n\ndef _get_template_conf_path(service, extension):\n \"\"\"\n Derive the path for a conf template file.\n \"\"\"\n return 'confs/%s.%s' % (service, extension)\n\ndef _get_rendered_conf_path(service, extension):\n \"\"\"\n Derive the rendered path for a conf file.\n \"\"\"\n return 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_FILENAME, service, extension)\n\ndef _get_installed_conf_path(service, remote_path, extension):\n \"\"\"\n Derive the installed path for a conf file.\n \"\"\"\n return '%s/%s.%s.%s' % (remote_path, app_config.PROJECT_FILENAME, service, extension)\n\ndef _get_installed_service_name(service):\n \"\"\"\n Derive the init service name for an installed service.\n \"\"\"\n return '%s.%s' % (app_config.PROJECT_FILENAME, service)\n\n@task\ndef render_confs():\n \"\"\"\n Renders server configurations.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n with settings(warn_only=True):\n local('mkdir confs/rendered')\n\n # Copy the app_config so that when we load the secrets they don't\n # get exposed to other management commands\n context = copy.copy(app_config.__dict__)\n context.update(app_config.get_secrets())\n\n for service, remote_path, extension in app_config.SERVER_SERVICES:\n template_path = _get_template_conf_path(service, extension)\n rendered_path = _get_rendered_conf_path(service, extension)\n\n with open(template_path, 'r') as read_template:\n\n with open(rendered_path, 'wb') as write_template:\n payload = Template(read_template.read())\n write_template.write(payload.render(**context))\n\n@task\ndef deploy_confs():\n \"\"\"\n Deploys rendered server configurations to the specified server.\n This will reload nginx and the appropriate uwsgi config.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n render_confs()\n\n with settings(warn_only=True):\n for service, remote_path, extension in app_config.SERVER_SERVICES:\n rendered_path = _get_rendered_conf_path(service, extension)\n installed_path = _get_installed_conf_path(service, remote_path, extension)\n\n a = local('md5 -q %s' % rendered_path, capture=True)\n b = run('md5sum %s' % installed_path).split()[0]\n\n if a != b:\n logging.info('Updating %s' % installed_path)\n put(rendered_path, installed_path, use_sudo=True)\n\n if service == 'nginx':\n sudo('service nginx reload')\n elif service == 'uwsgi':\n service_name = _get_installed_service_name(service)\n sudo('initctl reload-configuration')\n sudo('service %s restart' % service_name)\n elif service == 'app':\n run('touch %s' % app_config.UWSGI_SOCKET_PATH)\n sudo('chmod 644 %s' % app_config.UWSGI_SOCKET_PATH)\n sudo('chown www-data:www-data %s' % app_config.UWSGI_SOCKET_PATH)\n else:\n logging.info('%s has not changed' % rendered_path)\n\n@task\ndef nuke_confs():\n \"\"\"\n DESTROYS rendered server configurations from the 
specified server.\n This will reload nginx and stop the uwsgi config.\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n for service, remote_path, extension in app_config.SERVER_SERVICES:\n with settings(warn_only=True):\n installed_path = _get_installed_conf_path(service, remote_path, extension)\n\n sudo('rm -f %s' % installed_path)\n\n if service == 'nginx':\n sudo('service nginx reload')\n elif service == 'uwsgi':\n service_name = _get_installed_service_name(service)\n sudo('service %s stop' % service_name)\n sudo('initctl reload-configuration')\n elif service == 'app':\n sudo('rm %s' % app_config.UWSGI_SOCKET_PATH)\n\n\"\"\"\nFabcasting\n\"\"\"\n\n@task\ndef fabcast(command):\n \"\"\"\n Actually run specified commands on the server specified\n by staging() or production().\n \"\"\"\n require('settings', provided_by=['production', 'staging'])\n\n if not app_config.DEPLOY_TO_SERVERS:\n logging.error('You must set DEPLOY_TO_SERVERS = True in your app_config.py and setup a server before fabcasting.')\n\n run('cd %s && bash run_on_server.sh fab $DEPLOYMENT_TARGET branch:%s %s' % (app_config.SERVER_REPOSITORY_PATH, env.branch, command))\n","repo_name":"nprapps/app-template","sub_path":"fabfile/servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":8697,"program_lang":"python","lang":"en","doc_type":"code","stars":1535,"dataset":"github-code","pt":"55"}
+{"seq_id":"27718355954","text":"from mock import Mock\nfrom mock import patch\n\n\nclass TestRequestCache(object):\n\n @property\n def cache_decorator(self):\n from kotti.util import request_cache\n return request_cache\n\n def test_it(self, dummy_request):\n from kotti.util import clear_cache\n\n called = []\n\n @self.cache_decorator(lambda a, b: (a, b))\n def my_fun(a, b):\n called.append((a, b))\n\n my_fun(1, 2)\n my_fun(1, 2)\n assert len(called) == 1\n my_fun(2, 1)\n assert len(called) == 2\n\n clear_cache()\n my_fun(1, 2)\n assert len(called) == 3\n\n def test_dont_cache(self, dummy_request):\n from kotti.util import DontCache\n called = []\n\n def dont_cache(a, b):\n raise DontCache\n\n @self.cache_decorator(dont_cache)\n def my_fun(a, b):\n called.append((a, b))\n\n my_fun(1, 2)\n my_fun(1, 2)\n assert len(called) == 2\n\n\nclass TestLRUCache(TestRequestCache):\n\n @property\n def cache_decorator(self):\n from kotti.util import lru_cache\n return lru_cache\n\n\nclass TestTitleToName:\n def setUp(self):\n from pyramid.threadlocal import get_current_registry\n from kotti.url_normalizer import url_normalizer\n r = get_current_registry()\n settings = r.settings = {}\n settings['kotti.url_normalizer'] = [url_normalizer]\n settings['kotti.url_normalizer.map_non_ascii_characters'] = False\n\n def test_max_length(self):\n self.setUp()\n from kotti.util import title_to_name\n assert len(title_to_name(u'a' * 50)) == 40\n\n def test_normal(self):\n self.setUp()\n from kotti.util import title_to_name\n assert title_to_name(u'Foo Bar') == u'foo-bar'\n\n def test_disambiguate_name(self):\n from kotti.util import disambiguate_name\n assert disambiguate_name(u'foo') == u'foo-1'\n assert disambiguate_name(u'foo-3') == u'foo-4'\n\n\nclass TestCommand:\n def test_it(self):\n from kotti.util import command\n\n func = Mock()\n closer = Mock()\n with patch('kotti.util.docopt') as docopt:\n with patch('kotti.util.bootstrap') as bootstrap:\n docopt.return_value = {'': 'uri'}\n bootstrap.return_value = {'closer': closer}\n assert command(func, 'doc') == 0\n func.assert_called_with({'': 'uri'})\n docopt.assert_called_with('doc')\n bootstrap.assert_called_with('uri')\n assert closer.call_count == 1\n","repo_name":"mindreframer/python-pyramid-stuff","sub_path":"kotti__kotti/kotti/tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"55"}
+{"seq_id":"2222689225","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom netex.empty_type_1 import EmptyType1\n\n__NAMESPACE__ = \"http://www.siri.org.uk/siri\"\n\n\n@dataclass(unsafe_hash=True, kw_only=True)\nclass TerminateSubscriptionRequestBodyStructure:\n \"\"\"Type for Body of Terminate Subscription Request content.\n\n Used in WSDL.\n\n :ivar subscriber_ref: Participant identifier of Subscriber.\n Subscription ref will be unique within this.\n :ivar all_or_subscription_ref:\n \"\"\"\n subscriber_ref: Optional[str] = field(\n default=None,\n metadata={\n \"name\": \"SubscriberRef\",\n \"type\": \"Element\",\n \"namespace\": \"http://www.siri.org.uk/siri\",\n }\n )\n all_or_subscription_ref: List[object] = field(\n default_factory=list,\n metadata={\n \"type\": \"Elements\",\n \"choices\": (\n {\n \"name\": \"All\",\n \"type\": EmptyType1,\n \"namespace\": \"http://www.siri.org.uk/siri\",\n },\n {\n \"name\": \"SubscriptionRef\",\n \"type\": str,\n \"namespace\": \"http://www.siri.org.uk/siri\",\n },\n ),\n }\n )\n","repo_name":"skinkie/reference","sub_path":"gtfs-netex-test/netex/terminate_subscription_request_body_structure.py","file_name":"terminate_subscription_request_body_structure.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"5570803104","text":"#!/usr/bin/python3\n\nfrom django.db.models import Max\nfrom django.utils import timezone\nfrom logs.models import *\nfrom monitor.settings import IP_GEOLOCATION_API_KEY\nimport logs.models\nimport sys\nimport re\nimport datetime\nimport urllib.parse\nimport urllib.request\nimport json\nimport logging\nimport shlex\nimport ipaddress\n\n\ndef parse(logger, line):\n line = re.sub(r\"[\\[\\]]\", \"\", line)\n data = shlex.split(line)\n return {\n \"domain\": data[0],\n \"ip\": data[1],\n \"datetime\": datetime.datetime.strptime(data[4] + \" \" + data[5], \"%d/%b/%Y:%X %z\"),\n \"request\": data[6],\n \"status_code\": data[7],\n \"referrer\": data[9],\n \"user_agent\": data[10],\n }\n\n\ndef get_ip(logger, ip):\n try:\n ip = IP.objects.get(address=ip)\n return ip\n except logs.models.IP.DoesNotExist:\n try:\n page = urllib.request.urlopen(\"https://api.ipgeolocation.io/ipgeo?apiKey=%s&ip=%s\" % (IP_GEOLOCATION_API_KEY, ip))\n data = json.loads(page.read().decode(\"utf-8\"))\n\n country, _ = Country.objects.get_or_create(name=data[\"country_name\"])\n city, _ = City.objects.get_or_create(name=data[\"city\"], country=country)\n provider, _ = Provider.objects.get_or_create(name=data[\"organization\"])\n ip, _ = IP.objects.get_or_create(address=ip, city=city, provider=provider)\n logger.debug(\"+%s\" % ip)\n return ip\n except urllib.error.HTTPError as e:\n logger.warning(\"Can not get IP details: %s, %s\" % (ip, e))\n country, _ = Country.objects.get_or_create(name=\"\")\n city, _ = City.objects.get_or_create(name=\"\", country=country)\n provider, _ = Provider.objects.get_or_create(name=\"\")\n ip, _ = IP.objects.get_or_create(address=ip, city=city, provider=provider)\n logger.debug(\"+%s\" % ip)\n return ip\n\n\ndef run(*args):\n logger = logging.getLogger(\"logs\")\n logger.setLevel(logging.INFO)\n latest_date = Request.objects.aggregate(Max(\"posted_date\"))[\"posted_date__max\"]\n latest_date = latest_date.astimezone(timezone.get_current_timezone())\n logger.info(\"latest log date: %s\" % latest_date.strftime(\"%Y-%m-%d %H:%M:%S\"))\n processed = 0\n skipped = 0\n for line in sys.stdin:\n data = parse(logger, line)\n try:\n if latest_date < data[\"datetime\"]:\n if ipaddress.ip_address(data[\"ip\"]).is_private:\n skipped += 1\n continue\n try:\n [method, resource, protocol] = data[\"request\"].split(\" \")\n except ValueError:\n [method, resource, protocol] = [\"\", data[\"request\"], \"\"]\n domain, _ = Domain.objects.get_or_create(name=data[\"domain\"])\n ip = get_ip(logger, data[\"ip\"])\n method, _ = Method.objects.get_or_create(name=method)\n protocol, _ = Protocol.objects.get_or_create(name=protocol)\n referrer, _ = Referrer.objects.get_or_create(name=data[\"referrer\"])\n resource, _ = Resource.objects.get_or_create(name=resource)\n user_agent, _ = UserAgent.objects.get_or_create(name=data[\"user_agent\"])\n processed += 1\n Request.objects.get_or_create(\n domain=domain,\n ip=ip,\n method=method,\n posted_date=data[\"datetime\"],\n protocol=protocol,\n referrer=referrer,\n resource=resource,\n user_agent=user_agent,\n )\n logger.debug(\"+%s\" % line.strip())\n else:\n skipped += 1\n except Exception as e:\n logger.error(\"Error, can not parse line: %s\" % data)\n logger.exception(e)\n logger.info(\"processed logs: %d/%d\" % (processed, processed + 
skipped))\n","repo_name":"shajen/monitor","sub_path":"scripts/logs_worker.py","file_name":"logs_worker.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
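+For reference, parse() above appears to expect a combined-style access log line prefixed with the virtual host; a self-contained check on a made-up sample line (the indices match the fields parse() extracts):
+```python
+import datetime
+import re
+import shlex
+
+line = ('example.com 203.0.113.7 - - [10/Oct/2020:13:55:36 +0000] '
+        '"GET /index.html HTTP/1.1" 200 2326 "https://ref.example/" "curl/7.68.0"')
+line = re.sub(r"[\[\]]", "", line)
+data = shlex.split(line)
+print(data[0], data[1], data[6], data[7])  # domain, ip, request, status code
+dt = datetime.datetime.strptime(data[4] + " " + data[5], "%d/%b/%Y:%X %z")
+print(dt.isoformat())  # 2020-10-10T13:55:36+00:00
+```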
+{"seq_id":"8600746469","text":"from flask import Flask, render_template, request, redirect\nfrom flask_mail import Mail, Message\nimport csv\napp = Flask(__name__)\n\n#confuguring the Flask Mail\n# app.config['MAIL_SERVER'] = 'smtp.gmail.com'\n# app.config['MAIL_PORT'] = 587\n# app.config['MAIL_USERNAME']=\"@gmail.com\"\n# app.config['MAIL_PASSWORD'] = \"xxxxxxxx\"\n# app.config['MAIL_USE_TLS'] = True\n\n# mail = Mail(app)\n\n@app.route('/')\ndef my_home():\n return render_template('home.html')\n\ndef write_to_file(data):\n with open('database.txt', mode='a') as database:\n email = data[\"email\"]\n subject = data[\"subject\"]\n message = data[\"message\"]\n \n file = database.write(f'\\n{email},{subject},{message}')\n\ndef write_to_csv(data):\n with open('database.csv', newline = '', mode='a') as database2:\n name = data[\"name\"]\n email = data[\"email\"]\n # subject = data[\"subject\"]\n message = data[\"message\"]\n \n csv_writer = csv.writer(database2, delimiter=',', quotechar='\"',quoting = csv.QUOTE_MINIMAL)\n csv_writer.writerow([name,email,message])\n\n\n@app.route('/')\ndef html_page(page_name):\n return render_template(page_name)\n\n\n@app.route('/submit_form', methods=['POST', 'GET'])\ndef submit_form():\n if request.method == 'POST':\n try:\n data = request.form.to_dict()\n write_to_csv(data)\n\n #Send an email\n # msg = Message('New form Submission', sender=\"santosh@gamil.com\",recipients=data['email'])\n # msg.body = f\"Name: {data['name']}\\nEmail: {data['email']}\\nMessage:; {data['message']}\"\n # mail.send(msg)\n return redirect('/thankyou.html')\n except:\n return 'did not save to databases'\n else:\n return 'Something went wrong.Try Again !!'\n\n\n\n\n\n\n#Manual step by step code below for one step dynamic function above\n\n# @app.route('/index.html')\n# def my_home2():\n# return render_template('index.html')\n\n# @app.route('/elements.html')\n# def blog():\n\n# return render_template('elements.html')\n\n# @app.route('/generic.html')\n# def blog2():\n# return render_template('generic.html')","repo_name":"Santooos/Personal_Portfolio","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"36466833074","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport io, os, sys, random\nimport numpy as np\nimport pickle\nimport torch\nfrom torch.utils.data import IterableDataset, DataLoader\nfrom tqdm import trange\nfrom itertools import chain\n\nfrom spender.flow import NeuralDensityEstimator\n\ndef _train(self, n_epochs: int = 2000, suffix: str = \"nde\"):\n \"\"\"\n Train the neural density estimator based on input data.\n Here we use the ``log(P)`` loss. This function is not used in the ``popsed`` project.\n Parameters\n ----------\n n_epochs: int.\n Number of epochs to train.\n display: bool.\n Whether to display the training loss.\n suffix: str.\n Suffix to add to the output file.\n \"\"\"\n min_loss = -19\n patience = 5\n self.best_loss_epoch = 0\n self.net.train()\n\n for epoch in trange(n_epochs, desc='Training NDE', unit='epochs'):\n self.optimizer.zero_grad()\n loss = -self.net.log_prob(self.batch_x).mean()\n loss.backward()\n self.optimizer.step()\n self.train_loss_history.append(loss.item())\n\n if loss.item() < min_loss:\n min_loss = loss.item()\n if epoch - self.best_loss_epoch > patience:\n # Don't save model too frequently\n self.best_loss_epoch = epoch\n self.save_model(\n f'best_loss_model_{suffix}_{self.method}.pkl')\n\n if min_loss == -18:\n raise Warning('The training might be failed, try more epochs')\n\n\nclass CPU_Unpickler(pickle.Unpickler):\n def find_class(self, module, name):\n if module == 'torch.storage' and name == '_load_from_bytes':\n return lambda b: torch.load(io.BytesIO(b), map_location='cpu')\n else: return super().find_class(module, name)\n\nclass BatchedFilesDataset(IterableDataset):\n\n def __init__(self, file_list, load_fct, shuffle=False, shuffle_instance=False):\n assert len(file_list), \"File list cannot be empty\"\n self.file_list = file_list\n self.shuffle = shuffle\n self.shuffle_instance = shuffle_instance\n self.load_fct = load_fct\n\n def process_data(self, idx):\n if self.shuffle:\n idx = random.randint(0, len(self.file_list) -1)\n batch_name = self.file_list[idx]\n data = self.load_fct(batch_name)\n data = list(zip(*data))\n if self.shuffle_instance:\n random.shuffle(data)\n for x in data:\n yield x\n\n def get_stream(self):\n return chain.from_iterable(map(self.process_data, range(len(self.file_list))))\n\n def __iter__(self):\n return self.get_stream()\n\n def __len__(self):\n return len(self.file_list)\n \nfrom functools import partial\n\ndef load_batch(batch_name):\n #print(\"batch_name:\",batch_name)\n with open(batch_name, 'rb') as f:\n if torch.cuda.is_available():\n batch = pickle.load(f)\n else:\n batch = CPU_Unpickler(f).load()\n batch = [item.detach().to(device) for item in batch]\n return batch\n\ndef get_latent_data_loader(dir, which=None, batch_size=10000, shuffle=False, shuffle_instance=True,latent_tag=None):\n files = [\"%s/%s\"%(dir,item) for item in os.listdir(dir)]\n if latent_tag is not None:files=[item for item in files if latent_tag in item]\n NBATCH = len(files)\n train_batches = files[:int(0.85*NBATCH)]\n valid_batches = files[int(0.85*NBATCH):]\n\n if which == \"valid\":files = valid_batches\n elif which == \"train\": files = train_batches\n\n load_fct = partial(load_batch)\n data = BatchedFilesDataset(files, load_fct, shuffle=shuffle, shuffle_instance=shuffle_instance)\n return DataLoader(data, batch_size=batch_size)\n\n\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ns_dir = \"runtime\" # directory that saves latent vectors\nmodel_tag = \"star_k2\" # spender model 
+ data tag\ndata_tag = \"DESIStars\" # DESI data\nlatent_tag = \"%s-%s\"%(model_tag,data_tag)\n\nflow_file = sys.argv[1]\n\nprint(\"flow_file:\",flow_file)\nprint(\"latent data:\",latent_tag)\n\ndata_loader = get_latent_data_loader(s_dir,which=\"train\",latent_tag=latent_tag)\nvalid_data_loader = get_latent_data_loader(s_dir,which=\"valid\",latent_tag=latent_tag)\n\nfor k,batch in enumerate(data_loader):\n sample = batch[0]\n break\n\nprint(\"sample to infer dimensionality\",\n sample.shape,sample.device)\nprint(\"device:\", device)\nprint(\"torch.cuda.device_count():\",torch.cuda.device_count())\nn_latent = 6\n\nif \"new\" in sys.argv:\n NDE_theta = NeuralDensityEstimator(normalize=False,initial_pos={'bounds': [[0, 0]] * n_latent, 'std': [0.05] * n_latent}, method='maf')\n sample = torch.Tensor(sample).to(device)\n NDE_theta.build(sample)\nelse: NDE_theta = torch.load(flow_file,map_location=device)\n\nn_epoch = 100\nn_steps = 20\n\nscheduler = torch.optim.lr_scheduler.OneCycleLR(NDE_theta.optimizer,max_lr=2e-3,\n steps_per_epoch=n_steps,\n epochs=n_epoch)\nfor i, epoch in enumerate(range(n_epoch)):\n print(' Epoch {0}'.format(epoch))\n print(' lr:', NDE_theta.optimizer.param_groups[0]['lr'])\n \n train_loss = []\n #t = trange(100,desc='Training NDE_theta',unit='epochs')\n for k,batch in enumerate(data_loader):\n NDE_theta.optimizer.zero_grad()\n latent = batch[0]\n loss = -NDE_theta.net.log_prob(latent).mean()\n loss.backward()\n NDE_theta.optimizer.step()\n train_loss.append(loss.item())\n if k>=n_steps:continue\n train_loss = np.mean(train_loss)\n NDE_theta.train_loss_history.append(train_loss)\n\n valid_loss = []\n for k,batch in enumerate(valid_data_loader):\n latent = batch[0]\n loss = -NDE_theta.net.log_prob(latent).mean()\n valid_loss.append(loss.item())\n valid_loss = np.mean(valid_loss)\n NDE_theta.valid_loss_history.append(valid_loss)\n print(f'Loss = {train_loss:.3f} (train), {valid_loss:.3f} (valid)')\n scheduler.step()\n\n if epoch%10 ==0 or epoch==n_epoch-1:\n NDE_theta.save_model(flow_file)\n","repo_name":"pmelchior/spender","sub_path":"train/train_DESI_flow.py","file_name":"train_DESI_flow.py","file_ext":"py","file_size_in_byte":6041,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"55"}
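+Note that OneCycleLR is sized in optimizer steps (epochs * steps_per_epoch) and is conventionally stepped once per batch; a standalone sketch of that per-step schedule, with no spender dependencies:
+```python
+import torch
+
+param = torch.nn.Parameter(torch.zeros(1))
+opt = torch.optim.Adam([param], lr=1e-3)
+sched = torch.optim.lr_scheduler.OneCycleLR(
+    opt, max_lr=2e-3, steps_per_epoch=20, epochs=100)
+
+lrs = []
+for _ in range(100 * 20):  # exactly epochs * steps_per_epoch steps
+    opt.step()
+    sched.step()
+    lrs.append(opt.param_groups[0]["lr"])
+print(f"peak lr ~ {max(lrs):.2e}, final lr ~ {lrs[-1]:.2e}")
+```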
+{"seq_id":"73089935211","text":"import math as m\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# BARRA = INSTANCIA DE BARRA\r\n# TODAS AS POSICOES SAO EM RELACAO À ORIGEM\r\n# barra.tamanho => tamanho da barra\r\n# RELATIVO AOS APOIOS\r\n# barra.apoio[\"tipo\"] => 1 - Engastamento\r\n# 2 - Simples e fixo\r\n# barra.apoio[\"pos_engaste\"] => auto explicativo\r\n# barra.apoio[\"pos_simples\"] => auto explicativo\r\n# barra.apoio[\"pos_fixo\"] => auto explicativo\r\n# RELATIVO ÀS FORCAS\r\n# barra.forcas[\"tipo\"] => 1 - Pontal\r\n# 2 - Distribuida\r\n# barra.forcas[\"quant\"] => quant de forcas\r\n\r\n# PARA FORCAS PONTUAIS\r\n# barra.forcas[\"pos_\"+NUMERO(COMECA EM 1)] => posicao da força NUMERO\r\n# barra.forcas[\"int_\"+NUMERO(COMECA EM 1)] => intensidade\r\n\r\n# PARA FORCAS DISTRIBUIDA\r\n# barra.forcas[\"pos_ini_\"+NUMERO(COMECA EM 1)] => posicao inicial \r\n# barra.forcas[\"pos_fim_\"+NUMERO(COMECA EM 1)] => posicao final\r\n# barra.forcas[\"int_ini_\"+NUMERO(COMECA EM 1)] => intensidade inicial\r\n# barra.forcas[\"int_fin_\"+NUMERO(COMECA EM 1)] => intensidade final\r\n# RELATIVO AOS MOMENTOS\r\n# barra.momentos[\"quant\"] => quant de momentos\r\n# barra.momentos[\"pos_\"+NUMERO(COMECA EM 1)] => posicao do momento NUMERO\r\n# barra.momentos[\"int_\"+NUMERO(COMECA EM 1)] => intensidade\r\n\r\n# PARA BARRA ANGULADA\r\n# angulo => angulo entre a primeira barra e o eixo horizontal\r\n# tamanho é baseado em lei dos cossenos\r\n\r\n\r\nclass Barra():\r\n def __init__(self):\r\n self.apoio = []\r\n self.forcas = []\r\n self.momentos = []\r\n self.inclinacao = 0\r\n self.tamanho = 0\r\n\r\n def set_comprimento(self):\r\n while True:\r\n alfa = float(input('Defina um comprimento (em metros 0<=L<=100): '))\r\n if alfa > 0.0 and alfa <= 100.0:\r\n self.tamanho = alfa\r\n break\r\n else:\r\n print(\"Comprimento invalido!\")\r\n def set_inclinacao(self):\r\n while True:\r\n alfa = float(input('Defina um inclinacao (em graus 0º<=α<90º): '))\r\n if alfa>= 0.0 and alfa <= 90.0:\r\n self.inclinacao = alfa\r\n break\r\n else:\r\n print('Inclinação inválida')\r\n def set_apoio(self):\r\n print(\"Defina um apoio: \")\r\n while True:\r\n alfa = input(\"1 - Engastamento\\n2- Simples e fixo\\n\")\r\n if alfa == \"1\":\r\n while True:\r\n beta = int(input(\"Defina a posicao do engastamento:\\n1 - Origem\\n2 - Fim\\n\"))\r\n if beta == 1:\r\n self.apoio.append(engaste(0,self.inclinacao))\r\n break\r\n elif beta == 2:\r\n self.apoio.append(engaste(self.tamanho,self.inclinacao))\r\n break\r\n break\r\n elif alfa == \"2\":\r\n apoioS = apoioSimples()\r\n apoioS.set_posicao(self.tamanho,self.inclinacao)\r\n self.apoio.append(apoioS)\r\n apoioF = apoioFixo()\r\n apoioF.set_posicao(self.tamanho,self.inclinacao)\r\n self.apoio.append(apoioF)\r\n break\r\n\r\n def set_forcas(self):\r\n while True:\r\n alfa = int(input(\"Digite o numero de forcas(0= 0 or alfa <= 100:\r\n break\r\n else:\r\n print('Número inválido')\r\n for i in range(0, alfa):\r\n print('Força número %d' %(i+1))\r\n f = forca()\r\n f.set_intensidade()\r\n f.set_posicao(self.tamanho,self.inclinacao)\r\n f.set_angulo()\r\n self.forcas.append(f)\r\n \r\n def set_momentos(self):\r\n while True:\r\n alfa = int(input(\"Digite o numero de momentos(0<=x<=100): \"))\r\n if alfa >= 0 or alfa <= 100:\r\n break\r\n else:\r\n print('Número inválido')\r\n for i in range(0, alfa):\r\n print('Momento número %d' %(i+1))\r\n m = momento()\r\n m.set_intensidade()\r\n self.momentos.append(m)\r\n \r\n \r\nclass engaste():\r\n def 
__init__(self,pos,inclinacao):\r\n self.reacaoX = 0\r\n self.reacaoY = 0\r\n self.reacaoM = 0\r\n self.posicaoX = pos*m.cos(m.radians(inclinacao))\r\n self.posicaoY = pos*m.sin(m.radians(inclinacao))\r\n\r\n \r\n \r\nclass apoioFixo():\r\n def __init__(self):\r\n self.reacaoX = 0\r\n self.reacaoY = 0\r\n self.posicaoX = 0\r\n self.posicaoY = 0\r\n\r\n def set_posicao(self,tamanho,inclinacao):\r\n while True:\r\n alfa = float(input('Digite a posição na barra do apoio fixo, que deve ser dentro dos limites da barra: '))\r\n if alfa >= 0.0 and alfa <= tamanho:\r\n self.posicaoX = m.cos(m.radians(inclinacao))*alfa\r\n self.posicaoY = m.sin(m.radians(inclinacao)) * alfa\r\n break\r\n else:\r\n print('Posição inválida')\r\n\r\nclass apoioSimples():\r\n def __init__(self):\r\n self.reacaoY = 0\r\n self.posicaoX = 0\r\n self.posicaoY = 0\r\n\r\n def set_posicao(self,tamanho,inclinacao):\r\n while True:\r\n alfa = float(input('Digite a posição na barra do apoio simples, que deve ser dentro dos limites da barra: '))\r\n if alfa >= 0.0 and alfa <= tamanho:\r\n self.posicaoX = m.cos(m.radians(inclinacao))*alfa\r\n self.posicaoY = m.sin(m.radians(inclinacao)) * alfa\r\n break\r\n else:\r\n print('Posição inválida')\r\n\r\n\r\nclass forca():\r\n def __init__(self):\r\n self.intensidade = 0\r\n self.posicaoX = 0\r\n self.posicaoY = 0\r\n self.angulo = 0\r\n\r\n def set_intensidade(self):\r\n while True:\r\n alfa = float(input('Digite a intensidade (-1000000<=I<=1000000): '))\r\n if alfa >= -1000000.0 and alfa <= 1000000.0:\r\n self.intensidade = alfa\r\n break\r\n else:\r\n print('Intensidade inválida')\r\n\r\n def set_posicao(self,tamanho,inclinacao):\r\n while True:\r\n alfa = float(input('Digite a posição na barra da força, que deve ser dentro dos limites da barra: '))\r\n if alfa >= 0.0 and alfa <= tamanho:\r\n self.posicaoX = m.cos(m.radians(inclinacao))*alfa\r\n self.posicaoY = m.sin(m.radians(inclinacao)) * alfa\r\n break\r\n else:\r\n print('Posição inválida')\r\n\r\n def set_angulo(self):\r\n while True:\r\n alfa = float(input('Digite o ângulo de aplicação da força (0º<=β<=180º): '))\r\n if alfa >= 0.0 and alfa <= 180.0:\r\n self.angulo = alfa\r\n break\r\n else:\r\n print('Ângulo de aplicação inválido')\r\n self.FX = self.intensidade * m.cos(m.radians(self.angulo))\r\n self.FY = self.intensidade * m.sin(m.radians(self.angulo))\r\n\r\nclass momento():\r\n def __init__(self):\r\n self.intensidade = 0\r\n\r\n def set_intensidade(self):\r\n while True:\r\n alfa = float(input('Digite a intensidade (-1000000<=I<=1000000): '))\r\n if alfa >= -1000000.0 and alfa <= 1000000.0:\r\n self.intensidade = alfa\r\n break\r\n else:\r\n print('Intensidade inválida')\r\n\r\n\r\n\r\nprint(\"Bem vindo à calculadora de esforcos do Grupo 19!\\nIntegrantes:\\nEugenio Sabatini\\nGabriel dos Anjos\\nVitor Duque\\nPedro Rabelo Chato\\n\")\r\nbarra = Barra()\r\nbarra.set_comprimento()\r\nbarra.set_inclinacao()\r\nbarra.set_apoio()\r\nbarra.set_forcas()\r\nbarra.set_momentos()\r\nif len(barra.apoio) == 1:\r\n plt.plot([0,barra.tamanho*m.cos(m.radians(barra.inclinacao))],[0,barra.tamanho*m.sin(m.radians(barra.inclinacao))],[barra.apoio[0].posicaoX],[barra.apoio[0].posicaoY],'ro',)\r\nelse:\r\n plt.plot([0,barra.tamanho*m.cos(m.radians(barra.inclinacao))],[0,barra.tamanho*m.sin(m.radians(barra.inclinacao))],[barra.apoio[0].posicaoX],[barra.apoio[0].posicaoY],'b^',[barra.apoio[1].posicaoX],[barra.apoio[1].posicaoY],'r^',)\r\nplt.show()\r\nsumFX = 0\r\nsumFY = 0\r\npassa = input('Então vamos lá: para começar a resolução digite 
qualquer coisa...')\r\nprint()\r\nprint('Primeiramente vamos calcular as componentes das forças em cada direção, sendo:\\nFX = intensidade * cos(ângulo de aplicação)\\nFY = intensidade* sen(ângulo de aplicação)')\r\nfor i in range(0, len(barra.forcas)): #somatoria das forcas dadas (x e y)\r\n print('Força %d' %(i+1))\r\n print('\\t FX = %dN' %barra.forcas[i].FX)\r\n print('\\t FY = %dN' %barra.forcas[i].FY)\r\n sumFX += barra.forcas[i].FX\r\n sumFY += barra.forcas[i].FY\r\nprint()\r\nprint('Agora podemos calcular a resultante em cada direção')\r\nprint('FX = %dN' %sumFX)\r\nprint('FY = %dN' %sumFY)\r\nprint()\r\npassar = input('Agora vamos para os momentos das forças! Para continuar digite qualquer coisa...')\r\nprint()\r\nprint('Como o problema só tem duas dimensões todos os momentos serão na direção z. Assim a resultante dos momentos aplicados é a somente a soma das intesidades')\r\nsumM = 0\r\nfor i in range(0, len(barra.momentos)): #somatoria dos momentos dados\r\n sumM += barra.momentos[i].intensidade\r\nprint('Momento total é M = %d' %sumM)\r\nprint()\r\nif len(barra.apoio) == 1:\r\n barra.apoio[0].reacaoX, barra.apoio[0].reacaoY = -sumFX, -sumFY\r\n M = 0\r\n print('O momento de cada força em relação a uma referência é o produto vetorial da distância entre do ponto de aplicação e a referência e o vetor força. Como referência adotaremos o engastamento')\r\n print()\r\n for i in range(0, len(barra.forcas)): #soma dos momentos provocados pelas forças\r\n print('Momento da Força %d:' %(i+1))\r\n print('\\t M = %dN.m' %((barra.forcas[i].posicaoX - barra.apoio[0].posicaoX)*barra.forcas[i].FY - (barra.forcas[i].posicaoY - barra.apoio[0].posicaoY)*barra.forcas[i].FX))\r\n M += (barra.forcas[i].posicaoX - barra.apoio[0].posicaoX)*barra.forcas[i].FY - (barra.forcas[i].posicaoY - barra.apoio[0].posicaoY)*barra.forcas[i].FX\r\n print()\r\n print('A soma é Mforças = %dN.m' %M)\r\n barra.apoio[0].reacaoM = -(sumM + M)\r\n print()\r\n passar = input('Agora montaremos as equações! Digite qualquer coisa para continuar...')\r\n print()\r\n print('Nós podemos dizer que:\\nXengastamento = -FX\\nYengastamento = -FY\\nMengastamento = -(Mtotal + Mforças)')\r\n print()\r\n passar = input('Para ver a resposta digite qualquer coisa...')\r\n print()\r\n print('Reação X do engastamento: %dN' %barra.apoio[0].reacaoX)\r\n print('Reação Y do engastamento: %dN' %barra.apoio[0].reacaoY)\r\n print('Reação de momento do engastamento: %dN.m' %barra.apoio[0].reacaoM)\r\n\r\nelse:\r\n M = 0\r\n print('O momento de cada força em relação a uma referência é o produto vetorial da distância entre do ponto de aplicação e a referência e o vetor força. Como referência adotaremos o apoio fixo')\r\n for i in range(0, len(barra.forcas)): #soma dos momentos provocados pelas forças\r\n print('Momento da Força %d:' %(i+1))\r\n print('\\t M = %dN.m' %((barra.forcas[i].posicaoX - barra.apoio[1].posicaoX)*barra.forcas[i].FY - (barra.forcas[i].posicaoY - barra.apoio[1].posicaoY)*barra.forcas[i].FX))\r\n M += (barra.forcas[i].posicaoX - barra.apoio[1].posicaoX)*barra.forcas[i].FY - (barra.forcas[i].posicaoY - barra.apoio[1].posicaoY)*barra.forcas[i].FX\r\n print()\r\n print('A soma é Mforças = %dN.m' %M)\r\n print()\r\n passar = input('Agora montaremos o sistema de equações! 
Digite qualquer coisa para continuar...')\r\n print()\r\n print('Nós temos o seguinte sistma:\\nXfixo = -FX\\nYsimples + Yfixo = -FY\\nL^Ysimples = -(Mtotal + Mforças)\\n\\nSendo L o vetor distância do apoio simples ao apoio fixo')\r\n print()\r\n passar = input('Para ver a resposta digite qualquer coisa...')\r\n print()\r\n matriz = np.array([[0,1,0],[1,0,1],[(barra.apoio[0].posicaoX - barra.apoio[1].posicaoX),0,0]])\r\n resp = np.array([-sumFX, -sumFY, -(sumM + M)])\r\n x = np.linalg.solve(matriz,resp)\r\n barra.apoio[0].reacaoY = x[0]\r\n barra.apoio[1].reacaoX = x[1]\r\n barra.apoio[1].reacaoY = x[2]\r\n\r\n print('Reação Y do apoio simples: %dN' %barra.apoio[0].reacaoY)\r\n print('Reação X do apoio fixo: %dN' %barra.apoio[1].reacaoX)\r\n print('Reação Y do apoio fixo: %dN' %barra.apoio[1].reacaoY)\r\n\r\nplt.close()\r\n\r\n\r\n\r\n","repo_name":"Kamorst/TrabalhoPEFBoladao","sub_path":"python_pef.py","file_name":"python_pef.py","file_ext":"py","file_size_in_byte":12553,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
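+The walkthrough above computes each moment as the z component of the 2-D cross product r x F; the same formula as a standalone helper (the names here are illustrative):
+```python
+def momento_z(r, F):
+    """z component of r x F for 2-D vectors given as (x, y) tuples."""
+    rx, ry = r
+    fx, fy = F
+    return rx * fy - ry * fx
+
+# A 10 N upward force applied 2 m to the right of the reference point:
+print(momento_z((2.0, 0.0), (0.0, 10.0)))  # 20.0 N.m, counter-clockwise
+```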
+{"seq_id":"71808767211","text":"import streamlit as st\nimport numpy as np\nimport operator\nimport pandas as pd\ncol1, col2,col3, col4 = st.beta_columns(4)\n\n\nServicios=[\"Guión\",\"Modelos\",\"VFX\"]\nCantidades=[1,2,5]\nPrecios=[100,60,50]\nTotal = list(map(operator.mul, Cantidades, Precios))\n\npal=[]\npalc=[]\npalp=[]\nwith col1:\n st.markdown(\"**Incluye**\")\n for i in Servicios :\n i=st.checkbox(i)\n pal.append(i)\n\nwith col2:\n st.markdown(\"**Cantidad**\")\n for i in Cantidades :\n st.markdown(i)\n # palc.append(i)\n\nwith col3:\n st.markdown(\"**Precio**\")\n for i in Precios :\n st.markdown(i)\n\ndf=pd.DataFrame({\"check\":pal,\"Servicio\":Servicios,\"Cantidades\":Cantidades,\"Precios\":Precios,\"Total\":Total})\n\ndf.loc[df[\"check\"] == 0, [\"Total\"]] = 0\n\nwith col4: \n col4.markdown(\"**Total**\")\n for i in df[\"Total\"]:\n st.markdown(i) \n\n\nst.markdown(\"# Total\")\ndfs=df.loc[df['check'] == 1]\n\nsuma=dfs[\"Total\"].sum()\n\nst.write(\" #\",suma)\n\ndff=df.drop(\"check\", axis=1)\n\n\n","repo_name":"diegopenaloza/Planes","sub_path":"plan.py","file_name":"plan.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21959014785","text":"#!/usr/bin/env python\n\n# Imports\nimport re\n\nimport requests as r\nfrom bs4 import BeautifulSoup\n\n\ndef returnHandleAvailability():\n lookupHandleUrl = \"https://www.twitter.com/\"\n loadSite = r.get(lookupHandleUrl)\n soup = BeautifulSoup(loadSite.content, \"html.parser\")\n searchForTag = soup.find(\"meta\", attrs={\"name\":\"apple-mobile-web-app-title\"})\n\n # Exctracting site title\n pattern = r'\"(.+?)\"' # First element between two parenthesis (\"\")\n result = re.search(pattern, str(searchForTag))\n return result.group(1)\n\ndef main():\n siteTitle = returnHandleAvailability()\n print(siteTitle)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"imparpaing/misc","sub_path":"handle-checker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21960093474","text":"from xgboost import XGBClassifier\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.cross_validation import StratifiedKFold\n\ndataset = pd.read_csv('data/MatchOverviewTraining.csv')\ntestset = pd.read_csv('data/MatchOverviewTest.csv')\ndetails = pd.read_csv('data/MatchDetail.csv')\n\nX = dataset.ix[: , :11]\nY = dataset.ix[: , 11:]\nX_t = testset.ix[: , :11]\n\ndet = np.array(details)\nprint(\"Done loading data.\")\nprint(len(dataset), \"rows in dataset.\")\nprint(len(det), \"rows in details.\")\n\ngames = {}\nfor row in det:\n key = int(row[0]); # match_id\n if key in games:\n # if match already in games\n games[key].update({row[1]:row[2:]}) \n else:\n # add match to games\n games.update({key:{row[1]:row[2:]}})\n\nfullset = []\nfor row in np.array(X):\n insert = []\n key = int(row[0]) # match_id\n if key not in games:\n player = {}\n else:\n player = games[key]\n for i in range(1, 11): # hero_1 - 10\n replace = [0] * 21; # default null values\n if row[i] in player: replace = list(player[row[i]])\n # add null values\n if len(replace) < 21: replace.extend([0] * (21 - len(replace)))\n \n replace.insert(row[i], 0)\n insert.extend(replace)\n fullset.append(insert)\n\nX = np.array(fullset) \n\nfullset = []\nfor row in np.array(X_t):\n insert = []\n key = int(row[0]) # match_id\n if key not in games:\n player = {}\n else:\n player = games[key]\n for i in range(1, 11): # hero_1 - 10\n replace = [0] * 21; # default null values\n if row[i] in player: replace = list(player[row[i]])\n # add null values\n if len(replace) < 21: replace.extend([0] * (21 - len(replace)))\n \n replace.insert(row[i], 0)\n insert.extend(replace)\n fullset.append(insert)\n\nX_t = np.array(fullset)\nY = (np.array(Y)).flatten()\n\nmodel = XGBClassifier(\n learning_rate =0.2,\n n_estimators=750,\n max_depth=5,\n min_child_weight=1,\n gamma=0,\n subsample=0.8,\n colsample_bytree=0.8,\n objective= 'binary:logistic',\n nthread=64,\n scale_pos_weight=1,\n seed=27,\n silent=1\n)\n\nmodel.fit(X, Y)\n\ny_pred = model.predict(X_t)\npredictions = [round(value) for value in y_pred]\n\nlabels = np.array(testset.ix[:,:1])\nfor k, prediction in enumerate(predictions):\n print(str(labels[k][0]) + \",\" + (\"TRUE\" if prediction else \"FALSE\"))\n","repo_name":"msohcw/dota-2-match-prediction","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"74529678570","text":"import logging\nimport sys\n\nfrom datetime import datetime\n\nfrom sqlalchemy.exc import DatabaseError\nfrom sqlalchemy.sql.expression import and_, or_\n\nimport rucio.core.rule\nimport rucio.core.did\n\nfrom rucio.common.config import config_get\nfrom rucio.core.lifetime_exception import define_eol\nfrom rucio.core.rse import get_rse_name, get_rse_id\nfrom rucio.db.sqla import models\nfrom rucio.db.sqla.constants import LockState, RuleState, RuleGrouping, DIDType, RuleNotification\nfrom rucio.db.sqla.session import read_session, transactional_session, stream_session\n\nlogging.basicConfig(stream=sys.stdout,\n level=getattr(logging, config_get('common', 'loglevel').upper()),\n format='%(asctime)s\\t%(process)d\\t%(levelname)s\\t%(message)s')\n\n\n@stream_session\ndef get_dataset_locks(scope, name, session=None):\n \"\"\"\n Get the dataset locks of a dataset\n\n :param scope: Scope of the dataset.\n :param name: Name of the dataset.\n :param session: The db session.\n :return: List of dicts {'rse_id': ..., 'state': ...}\n \"\"\"\n\n query = session.query(models.DatasetLock.rse_id,\n models.DatasetLock.scope,\n models.DatasetLock.name,\n models.DatasetLock.rule_id,\n models.DatasetLock.account,\n models.DatasetLock.state,\n models.DatasetLock.length,\n models.DatasetLock.bytes,\n models.DatasetLock.accessed_at).filter_by(scope=scope, name=name)\n\n dict = {}\n for rse_id, scope, name, rule_id, account, state, length, bytes, accessed_at in query.yield_per(500):\n if rse_id not in dict:\n dict[rse_id] = get_rse_name(rse_id, session=session)\n yield {'rse_id': rse_id,\n 'rse': dict[rse_id],\n 'scope': scope,\n 'name': name,\n 'rule_id': rule_id,\n 'account': account,\n 'state': state,\n 'length': length,\n 'bytes': bytes,\n 'accessed_at': accessed_at}\n\n\n@stream_session\ndef get_dataset_locks_by_rse_id(rse_id, session=None):\n \"\"\"\n Get the dataset locks of an RSE.\n\n :param rse_id: RSE id to get the locks from.\n :param session: The db session.\n :return: List of dicts {'rse_id': ..., 'state': ...}\n \"\"\"\n query = session.query(models.DatasetLock.rse_id,\n models.DatasetLock.scope,\n models.DatasetLock.name,\n models.DatasetLock.rule_id,\n models.DatasetLock.account,\n models.DatasetLock.state,\n models.DatasetLock.length,\n models.DatasetLock.bytes,\n models.DatasetLock.accessed_at).filter_by(rse_id=rse_id).\\\n with_hint(models.DatasetLock, \"index(DATASET_LOCKS DATASET_LOCKS_RSE_ID_IDX)\", 'oracle')\n\n dict = {}\n for rse_id, scope, name, rule_id, account, state, length, bytes, accessed_at in query.yield_per(500):\n if rse_id not in dict:\n dict[rse_id] = get_rse_name(rse_id, session=session)\n yield {'rse_id': rse_id,\n 'rse': dict[rse_id],\n 'scope': scope,\n 'name': name,\n 'rule_id': rule_id,\n 'account': account,\n 'state': state,\n 'length': length,\n 'bytes': bytes,\n 'accessed_at': accessed_at}\n\n\n@read_session\ndef get_replica_locks(scope, name, nowait=False, restrict_rses=None, session=None):\n \"\"\"\n Get the active replica locks for a file\n\n :param scope: Scope of the did.\n :param name: Name of the did.\n :param nowait: Nowait parameter for the FOR UPDATE statement.\n :param restrict_rses: Possible RSE_ids to filter on.\n :param session: The db session.\n :return: List of dicts {'rse': ..., 'state': ...}\n :raises: NoResultFound\n \"\"\"\n\n query = session.query(models.ReplicaLock).filter_by(scope=scope, name=name)\n if restrict_rses is not None:\n rse_clause = []\n for rse_id in restrict_rses:\n 
rse_clause.append(models.ReplicaLock.rse_id == rse_id)\n if rse_clause:\n query = query.filter(or_(*rse_clause))\n\n return query.with_for_update(nowait=nowait).all()\n\n\n@read_session\ndef get_replica_locks_for_rule_id(rule_id, session=None):\n \"\"\"\n Get the active replica locks for a rule_id.\n\n :param rule_id: Filter on rule_id.\n :param session: The db session.\n :return: List of dicts {'scope':, 'name':, 'rse': ..., 'state': ...}\n :raises: NoResultFound\n \"\"\"\n\n locks = []\n\n query = session.query(models.ReplicaLock).filter_by(rule_id=rule_id)\n for row in query:\n locks.append({'scope': row.scope,\n 'name': row.name,\n 'rse_id': row.rse_id,\n 'rse': get_rse_name(rse_id=row.rse_id, session=session),\n 'state': row.state,\n 'rule_id': row.rule_id})\n\n return locks\n\n\n@read_session\ndef get_replica_locks_for_rule_id_per_rse(rule_id, session=None):\n \"\"\"\n Get the active replica locks for a rule_id per rse.\n\n :param rule_id: Filter on rule_id.\n :param session: The db session.\n :return: List of dicts {'rse_id':, 'rse':}\n :raises: NoResultFound\n \"\"\"\n\n locks = []\n\n query = session.query(models.ReplicaLock.rse_id).filter_by(rule_id=rule_id).group_by(models.ReplicaLock.rse_id)\n for row in query:\n locks.append({'rse_id': row.rse_id,\n 'rse': get_rse_name(rse_id=row.rse_id, session=session)})\n\n return locks\n\n\n@read_session\ndef get_files_and_replica_locks_of_dataset(scope, name, nowait=False, restrict_rses=None, only_stuck=False, session=None):\n \"\"\"\n Get all the files of a dataset and, if existing, all locks of the file.\n\n :param scope: Scope of the dataset\n :param name: Name of the datset\n :param nowait: Nowait parameter for the FOR UPDATE statement\n :param restrict_rses: Possible RSE_ids to filter on.\n :param only_stuck: If true, only get STUCK locks.\n :param session: The db session.\n :return: Dictionary with keys: (scope, name)\n and as value: [LockObject]\n :raises: NoResultFound\n \"\"\"\n # with_hint(models.ReplicaLock, \"INDEX(LOCKS LOCKS_PK)\", 'oracle').\\\n query = session.query(models.DataIdentifierAssociation.child_scope,\n models.DataIdentifierAssociation.child_name,\n models.ReplicaLock).\\\n with_hint(models.DataIdentifierAssociation, \"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)\", 'oracle').\\\n outerjoin(models.ReplicaLock,\n and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,\n models.DataIdentifierAssociation.child_name == models.ReplicaLock.name))\\\n .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)\n\n if restrict_rses is not None:\n rse_clause = []\n for rse_id in restrict_rses:\n rse_clause.append(models.ReplicaLock.rse_id == rse_id)\n if rse_clause:\n query = session.query(models.DataIdentifierAssociation.child_scope,\n models.DataIdentifierAssociation.child_name,\n models.ReplicaLock).\\\n with_hint(models.DataIdentifierAssociation, \"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)\", 'oracle').\\\n outerjoin(models.ReplicaLock,\n and_(models.DataIdentifierAssociation.child_scope == models.ReplicaLock.scope,\n models.DataIdentifierAssociation.child_name == models.ReplicaLock.name,\n or_(*rse_clause)))\\\n .filter(models.DataIdentifierAssociation.scope == scope,\n models.DataIdentifierAssociation.name == name)\n\n if only_stuck:\n query = query.filter(models.ReplicaLock.state == LockState.STUCK)\n\n query = query.with_for_update(nowait=nowait, of=models.ReplicaLock.state)\n\n locks = {}\n\n 
for child_scope, child_name, lock in query:\n if (child_scope, child_name) not in locks:\n if lock is None:\n locks[(child_scope, child_name)] = []\n else:\n locks[(child_scope, child_name)] = [lock]\n else:\n locks[(child_scope, child_name)].append(lock)\n\n return locks\n\n\n@transactional_session\ndef successful_transfer(scope, name, rse_id, nowait, session=None):\n \"\"\"\n Update the state of all replica locks because of an successful transfer\n\n :param scope: Scope of the did\n :param name: Name of the did\n :param rse_id: RSE id\n :param nowait: Nowait parameter for the for_update queries.\n :param session: DB Session.\n \"\"\"\n\n locks = session.query(models.ReplicaLock).with_for_update(nowait=nowait).filter_by(scope=scope, name=name, rse_id=rse_id)\n for lock in locks:\n if lock.state == LockState.OK:\n continue\n logging.debug('Marking lock %s:%s for rule %s on rse %s as OK' % (lock.scope, lock.name, str(lock.rule_id), str(lock.rse_id)))\n # Update the rule counters\n rule = session.query(models.ReplicationRule).with_for_update(nowait=nowait).filter_by(id=lock.rule_id).one()\n logging.debug('Updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))\n\n if lock.state == LockState.REPLICATING:\n rule.locks_replicating_cnt -= 1\n elif lock.state == LockState.STUCK:\n rule.locks_stuck_cnt -= 1\n rule.locks_ok_cnt += 1\n lock.state = LockState.OK\n logging.debug('Finished updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))\n\n # Insert UpdatedCollectionReplica\n if rule.did_type == DIDType.DATASET:\n models.UpdatedCollectionReplica(scope=rule.scope,\n name=rule.name,\n did_type=rule.did_type,\n rse_id=rse_id).save(flush=False, session=session)\n elif rule.did_type == DIDType.CONTAINER:\n # Resolve to all child datasets\n for dataset in rucio.core.did.list_child_datasets(scope=rule.scope, name=rule.name, session=session):\n models.UpdatedCollectionReplica(scope=dataset['scope'],\n name=dataset['name'],\n did_type=dataset['type'],\n rse_id=rse_id).save(flush=False, session=session)\n\n # Update the rule state\n if rule.state == RuleState.SUSPENDED:\n pass\n elif rule.locks_stuck_cnt > 0:\n pass\n elif rule.locks_replicating_cnt == 0 and rule.state == RuleState.REPLICATING:\n rule.state = RuleState.OK\n # Try to update the DatasetLocks\n if rule.grouping != RuleGrouping.NONE:\n ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)\n for ds_lock in ds_locks:\n ds_lock.state = LockState.OK\n session.flush()\n rucio.core.rule.generate_message_for_dataset_ok_callback(rule=rule, session=session)\n if rule.notification == RuleNotification.YES:\n rucio.core.rule.generate_email_for_rule_ok_notification(rule=rule, session=session)\n # Try to release potential parent rules\n rucio.core.rule.release_parent_rule(child_rule_id=rule.id, session=session)\n\n # Insert rule history\n rucio.core.rule.insert_rule_history(rule=rule, recent=True, longterm=False, session=session)\n session.flush()\n\n\n@transactional_session\ndef failed_transfer(scope, name, rse_id, error_message=None, broken_rule_id=None, broken_message=None, nowait=True, session=None):\n \"\"\"\n Update the state of all replica locks because of a failed transfer.\n If a transfer is permanently broken for a rule, the broken_rule_id should be filled which puts this rule into the SUSPENDED state.\n\n :param scope: Scope of the did.\n 
:param name: Name of the did.\n :param rse_id: RSE id.\n :param error_message: The error why this transfer failed.\n :param broken_rule_id: Id of the rule which will be suspended.\n :param broken_message: Error message for the suspended rule.\n :param nowait: Nowait parameter for the for_update queries.\n :param session: The database session in use.\n \"\"\"\n\n locks = session.query(models.ReplicaLock).with_for_update(nowait=nowait).filter_by(scope=scope, name=name, rse_id=rse_id)\n for lock in locks:\n if lock.state == LockState.STUCK:\n continue\n logging.debug('Marking lock %s:%s for rule %s on rse %s as STUCK' % (lock.scope, lock.name, str(lock.rule_id), str(lock.rse_id)))\n # Update the rule counters\n rule = session.query(models.ReplicationRule).with_for_update(nowait=nowait).filter_by(id=lock.rule_id).one()\n logging.debug('Updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))\n if lock.state == LockState.REPLICATING:\n rule.locks_replicating_cnt -= 1\n elif lock.state == LockState.OK:\n rule.locks_ok_cnt -= 1\n rule.locks_stuck_cnt += 1\n lock.state = LockState.STUCK\n logging.debug('Finished updating rule counters for rule %s [%d/%d/%d]' % (str(rule.id), rule.locks_ok_cnt, rule.locks_replicating_cnt, rule.locks_stuck_cnt))\n\n # Update the rule state\n if rule.state == RuleState.SUSPENDED:\n pass\n elif lock.rule_id == broken_rule_id:\n rule.state = RuleState.SUSPENDED\n rule.error = (broken_message[:245] + '...') if len(broken_message) > 245 else broken_message\n # Try to update the DatasetLocks\n if rule.grouping != RuleGrouping.NONE:\n ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)\n for ds_lock in ds_locks:\n ds_lock.state = LockState.STUCK\n elif rule.locks_stuck_cnt > 0:\n if rule.state != RuleState.STUCK:\n rule.state = RuleState.STUCK\n # Try to update the DatasetLocks\n if rule.grouping != RuleGrouping.NONE:\n ds_locks = session.query(models.DatasetLock).with_for_update(nowait=nowait).filter_by(rule_id=rule.id)\n for ds_lock in ds_locks:\n ds_lock.state = LockState.STUCK\n if rule.error != error_message:\n rule.error = (error_message[:245] + '...') if len(error_message) > 245 else error_message\n\n # Insert rule history\n rucio.core.rule.insert_rule_history(rule=rule, recent=True, longterm=False, session=session)\n\n\n@transactional_session\ndef touch_dataset_locks(dataset_locks, session=None):\n \"\"\"\n Update the accessed_at timestamp of the given dataset locks + eol_at.\n\n :param replicas: the list of dataset locks.\n :param session: The database session in use.\n\n :returns: True, if successful, False otherwise.\n \"\"\"\n\n rse_ids, now = {}, datetime.utcnow()\n for dataset_lock in dataset_locks:\n if 'rse_id' not in dataset_lock:\n if dataset_lock['rse'] not in rse_ids:\n rse_ids[dataset_lock['rse']] = get_rse_id(rse=dataset_lock['rse'], session=session)\n dataset_lock['rse_id'] = rse_ids[dataset_lock['rse']]\n\n eol_at = define_eol(dataset_lock['scope'], dataset_lock['name'], rses=[{'id': dataset_lock['rse_id']}], session=session)\n try:\n session.query(models.DatasetLock).filter_by(scope=dataset_lock['scope'], name=dataset_lock['name'], rse_id=dataset_lock['rse_id']).\\\n update({'accessed_at': dataset_lock.get('accessed_at') or now}, synchronize_session=False)\n session.query(models.ReplicationRule).filter_by(scope=dataset_lock['scope'], name=dataset_lock['name']).update({'eol_at': eol_at}, synchronize_session=False)\n except 
DatabaseError:\n return False\n\n return True\n","repo_name":"zzaiin/Rucio","sub_path":"lib/rucio/core/lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":16993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
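+A hedged usage sketch for the generator-style accessor above (assumes a configured Rucio database; the scope and name values are placeholders):
+```python
+from rucio.core.lock import get_dataset_locks
+
+# The @stream_session decorator supplies the db session; results stream lazily.
+for lock in get_dataset_locks(scope="mc16", name="my.dataset"):
+    print(lock["rse"], lock["state"], lock["bytes"])
+```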
+{"seq_id":"70735260971","text":"#!/usr/bin/env python3\n###################################################################\n#\n# CSSE1001/7030 - Assignment 2\n#\n# Student Username: s4378702\n#\n# Student Name: Youwen Mao\n#\n###################################################################\n\n###################################################################\n#\n# The following is support code. DO NOT CHANGE.\n\nfrom a2_support import *\n\n# End of support code\n################################################################\n\n# Write your code here\n\nclass GameObject(object):\n \"\"\"\n Manage all the object in the game.\n \"\"\"\n def __init__(self,name,position):\n \"\"\"\n Consturctor\n \n GameObject.__init__(GameObject, str, tuple)\n \"\"\"\n self._name = name\n self._position = position\n\n def set_position(self,position):\n \"\"\"\n Set gameobject position.\n\n GameObject.set_position(GameObject, tuple)\n \"\"\"\n x,y = position\n if x%1 == 0 and y%1 ==0:\n self._position = position\n\n def get_position(self):\n \"\"\"\n Get gameobject position.\n\n GameObject.get_position(GameObject) -> (int, int)\n \"\"\"\n return self._position\n\n def set_name(self,name):\n \"\"\"\n Set gameobject name.\n\n GameObject.set_position(GameObject, str)\n \"\"\"\n self._name = name\n\n def get_name(self):\n \"\"\"\n Get gameobject name.\n\n GameObject.get_name(GameObject) -> str\n \"\"\"\n return self._name\n\n def __str__(self):\n \"\"\"\n Returns a human readable representation of this instance.\n\n GameObject.__str__(GameObject) -> str\n \"\"\"\n return GAME_OBJECT_FORMAT.format(self._name,self._position)\n\nclass Pokemon(GameObject):\n \"\"\"\n Manage pokemons.\n \"\"\"\n def __init__(self,name,position,terrain):\n \"\"\"\n Consturctor\n \n Pokemon.__init__(Pokemon, str, tuple, str)\n \"\"\"\n super().__init__(name,position)\n self._terrain = terrain\n\n def set_terrain(self,terrain):\n \"\"\"\n Set pokemon terrain.\n\n Pokemon.set_Terrain(Pokemon, str)\n \"\"\"\n self._terrain = terrain\n\n def get_terrain(self):\n \"\"\"\n Get pokemon terrain.\n\n Pokemon.get_Terrain(Pokemon) -> str\n \"\"\"\n return self._terrain\n\n def __str__(self):\n \"\"\"\n Returns a human readable representation of this instance.\n\n Pokemon.__str__(Pokemon) -> str\n \"\"\"\n return POKEMON_FORMAT.format(self._name,self._position,self._terrain)\n\nclass Wall(GameObject):\n \"\"\"\n Manage pokemons.\n \"\"\"\n pass\n\nclass Player(GameObject):\n \"\"\"\n Manage the player\n \"\"\"\n def __init__(self,name):\n \"\"\"\n Consturctor\n \n Plyaer.__init__(Player, str)\n \"\"\"\n self._name = name\n self._position = None\n self._list = []\n self._dex = Dex(self._list)\n\n def get_pokemons(self):\n \"\"\"\n Get pokemon caught by player.\n\n Player.get_pokemons(Player) -> list\n \"\"\"\n return self._list\n\n def get_dex(self):\n \"\"\"\n Get dex of player.\n\n Player.get_dxe(Player) -> Dex\n \"\"\"\n return self._dex\n \n def reset_pokemons(self):\n \"\"\"\n Reset the list of pokemon player caught and the dex of player.\n\n Player.reset_pokemons(Player)\n \"\"\"\n self._list.clear()\n self._dex = Dex(self._list)\n\n def register_pokemon(self, pokemon):\n \"\"\"\n Register the pokemon.\n\n Player.register_pokemon(Player, Pokemon)\n \"\"\"\n self._list.append(pokemon)\n self._dex.register(pokemon.get_name())\n \n def __str__(self):\n \"\"\"\n Returns a human readable representation of this instance.\n\n Player.__str__(Player) -> str\n \"\"\"\n return PLAYER_FORMAT.format(self._name,self._position,len(self._list))\n 
\nclass Dex(object):\n \"\"\"\n Manage the dex.\n \"\"\"\n def __init__(self, pokemon_names):\n \"\"\"\n Consturctor\n \n Dex.__init__(Dex, list)\n \"\"\"\n self._pokemons ={}\n self.expect_pokemons(pokemon_names)\n\n def expect_pokemons(self, pokemon_names):\n \"\"\"\n Expect the pokemons.\n\n Dex.expect_pokemons(Dex, list)\n \"\"\"\n for i in pokemon_names:\n self._pokemons[i] = False\n \n def expect_pokemons_from_dex(self,other_dex):\n \"\"\"\n Expect the pokemons for other dex.\n\n Dex.expect_pokemons_from_dex(Dex, Dex)\n \"\"\"\n pokemonslist = []\n for i in other_dex.get_pokemons():\n pokemonslist.append(i[0])\n self.expect_pokemons(pokemonslist)\n\n def register(self,pokemon_name):\n \"\"\"\n Register the pokemons in dex and return True iff the pokemon has been registered, else False.\n\n Dex.register(Dex, list) -> bool\n \"\"\"\n try:\n if self._pokemons[pokemon_name]:\n return True\n else:\n self._pokemons[pokemon_name] = True\n return False\n except Exception as s:\n raise UnexpectedPokemonError(str(DexError(s))+\" is not expected by this Dex\")\n\n def register_from_dex(self,other_dex):\n \"\"\"\n Register the pokemons from other dex in this dex.\n\n Dex.register(Dex, Dex)\n \"\"\"\n other_dex = other_dex.get_registered_pokemons()\n exist = []\n for i in other_dex:\n if i in self.get_unregistered_pokemons():\n exist.append(i)\n for x in exist:\n self.register(x)\n\n def get_pokemons(self):\n \"\"\"\n Get all pokemons in the dex.\n\n Dex.get_pokemons(Dex) -> tuple\n \"\"\"\n pokemon_list = []\n for i in sorted(list(self._pokemons)):\n pokemon_list.append((i,self._pokemons[i]))\n return pokemon_list\n def get_registered_pokemons(self):\n \"\"\"\n Get all registered pokemons in the dex.\n\n Dex.get_get_registered_pokemons(Dex) -> list\n \"\"\"\n rgd_pokemons = []\n for i in sorted(list(self._pokemons)):\n if self._pokemons[i]:\n rgd_pokemons.append(i)\n return rgd_pokemons\n def get_unregistered_pokemons(self):\n \"\"\"\n Get all unregistered pokemons in the dex.\n\n Dex.get_get_unregistered_pokemons(Dex) -> list\n \"\"\"\n urgd_pokemons = []\n for i in sorted(list(self._pokemons)):\n if not self._pokemons[i]:\n urgd_pokemons.append(i)\n return urgd_pokemons\n def __len__(self):\n \"\"\"\n Return the numbers of pokemons in the dex.\n\n Dex.__len__(Dex) -> int\n \"\"\"\n return len(self._pokemons)\n\n def __contains__(self,name):\n \"\"\"\n Return True iff the name of pokemon exist in Dex, else False.\n\n Dex.__contains__(Dex, str) -> bool\n \"\"\"\n if self._pokemons[name]:\n return True\n else:\n return False\n\n def __str__(self):\n \"\"\"\n Returns a human readable representation of this instance.\n\n Dex.__str__(Dex) -> str\n \"\"\"\n rgd_pokemons = \"\"\n urgd_pokemons = \"\"\n for i in self.get_registered_pokemons():\n if i is self.get_registered_pokemons()[-1]:\n rgd_pokemons += i\n else:\n rgd_pokemons += i\n rgd_pokemons += \", \"\n for s in self.get_unregistered_pokemons():\n if s is self.get_unregistered_pokemons()[-1]:\n urgd_pokemons += s\n else:\n urgd_pokemons += s\n urgd_pokemons += \", \"\n self._string = str(DEX_FORMAT.format(len(self.get_registered_pokemons()),rgd_pokemons,len(self.get_unregistered_pokemons()),urgd_pokemons))\n return self._string\n\nclass Level(object):\n \"\"\"\n Manage the Level.\n \"\"\"\n def __init__(self,player,data):\n \"\"\"\n Consturctor\n \n Level.__init__(Level, Player, dict)\n \"\"\"\n self._player = player\n self._data = data\n player_position = (self._data[\"player\"][0], self._data[\"player\"][0])\n pkml = []\n for i in 
self._data[\"pokemons\"]:\n pkml.append(i[\"name\"])\n self._dex = Dex(pkml)\n self._player.get_dex().expect_pokemons(pkml)\n self._cell = {}\n for s in self._data[\"pokemons\"]:\n self._cell[(s[\"position\"][0],s[\"position\"][1])] = Pokemon(s[\"name\"],s[\"position\"],self._data[\"terrain\"])###\n self._wall = {}\n for x in self._data[\"walls\"]:\n self._wall[x] = \"wall\"\n if not is_position_valid(player_position, self.get_size()):\n raise InvalidPositionError\n for n in list(self._wall):\n if not is_wall_position_valid(n, self.get_size()):\n raise InvalidPositionError\n xcount = 0\n ycount = 0\n x, y= self.get_size()\n while xcount != x:\n self._wall[(xcount,-0.5)] = \"wall\"\n self._wall[(xcount,y-0.5)] = \"wall\"\n xcount += 1\n while ycount != y:\n self._wall[(-0.5,ycount)] = \"wall\"\n self._wall[(x-0.5,ycount)] = \"wall\"\n ycount += 1\n def get_size(self):\n \"\"\"\n Get the grid size of this level.\n \n Level.get_size(Level) -> tuple\n \"\"\"\n return (self._data[\"rows\"], self._data[\"columns\"])\n \n def get_terrain(self):\n \"\"\"\n Get the terrain of this level.\n \n Level.get_terrain(Level) -> str\n \"\"\"\n return self._data[\"terrain\"]\n\n def get_dex(self):\n \"\"\"\n Get the Dex of this level\n \n Level.get_dex(Level) -> Dex\n \"\"\"\n return self._dex\n\n\n def get_starting_position(self):\n \"\"\"\n Get the starting point of player of this level.\n \n Level.get_starting_position(Level) -> str\n \"\"\"\n return self._data[\"player\"]\n\n def is_obstacle_at(self,position):\n \"\"\"\n Return True iff there is a wall, else False.\n \n Level.is_obstacle_at(Level. tuple) -> bool\n \"\"\"\n if position in self.get_obstacles():\n return True\n else:\n return False\n\n def get_obstacles(self):\n \"\"\"\n Get all the walls existed in this level.\n \n Level.get_obstacles(Level) -> list\n \"\"\"\n return list(self._wall)\n\n def get_pokemons(self):\n \"\"\"\n Get all the pokemons existed in this level.\n \n Level.get_pokemons(Level) -> Pokemon\n \"\"\"\n pkl = []\n for i in list(self._cell):\n pkl.append(self._cell[(i[0],i[1])])\n return pkl\n\n def get_pokemon_at(self,position):\n \"\"\"\n Return the pokemon exist in this position, else None.\n \n Level.get_pokemon_at(Level, tuple) -> Pokemon\n \"\"\"\n pkm = None\n if position in self._cell:\n pkm = self._cell[position]\n return pkm\n def catch_pokemon_at(self,position):\n \"\"\"\n Catch and register the pokemon in this position and return it.\n \n Level.catch_pokemon_at(Level, tuple) -> Pokemon\n \"\"\"\n try:\n if is_position_valid(position, self.get_size()):\n pass\n else:\n raise InvalidPositionError(x)\n pkm = self.get_pokemon_at(position)\n if pkm is not None:\n self.get_dex().register(pkm.get_name())\n self._player.register_pokemon(pkm)\n x,y = pkm.get_position()\n del self._cell[(x,y)]\n return pkm\n except Exception as x:\n raise InvalidPositionError(x)\n\n def is_complete(self):\n \"\"\"\n Returns True iff pokemon with name is registered in this Dex, else False.\n \n Level.is_complete(Level) -> bool\n \"\"\"\n if self.get_dex().get_unregistered_pokemons() == []:\n return True\n else:\n return False\n\n \nclass Game(object):\n \"\"\"\n Manage the game.\n \"\"\"\n def __init__(self):\n \"\"\"\n Consturctor\n \n Game.__init__(Game)\n \"\"\"\n self._player = Player(DEFAULT_PLAYER_NAME)\n self._levels_list = []\n self._level = -1\n self._game_file = None\n self._data = []\n self._pkmc = {}\n def load_file(self,game_file):\n \"\"\"\n Loads a game from a file.\n \n Game.load_file(Game, str)\n \"\"\"\n try:\n 
self._levels_list = []\n self._game_file = load_game_file(game_file)\n for i in self._game_file[\"levels\"]:\n self._data.append(i)\n self._levels_list.append(Level(self._player,i))\n self._level = -1\n self._pkmc = {}\n except:\n pass\n \n def load_url(self,game_url):\n \"\"\"\n Loads a game from a url.\n \n Game.load_erl(Game, str)\n \"\"\"\n try:\n self._levels_list = []\n self._game_file = load_game_url(game_url)\n for i in self._game_file[\"levels\"]:\n self._data.append(i)\n self._levels_list.append(Level(self._player,i))\n self._level = -1\n self._pkmc = {}\n except:\n pass\n \n def start_next_level(self):\n \"\"\"\n Attempts to start the next level of the game, Returns True iff the game is completed, else False.\n \n Game.start_next_level(Game) -> bool\n \"\"\"\n if self.is_complete():\n return True\n else:\n if (self._level+1) != len(self._levels_list):\n previous = None\n if self.get_level() != None:\n previous = self.get_level().get_dex().get_registered_pokemons()\n self._level += 1\n ###\n \n for i in self._data[self._level]['walls']:\n if not is_wall_position_valid(i,self.get_level().get_size()):\n raise InvalidPositionError()\n for s in self._data[self._level]['pokemons']:\n if not is_cell_position_valid(s['position'],self.get_level().get_size()):\n raise InvalidPositionError()\n if not is_cell_position_valid(self._data[self._level]['player'],self.get_level().get_size()):\n raise InvalidPositionError()\n \n ###\n self._player.get_dex().expect_pokemons(self.get_level().get_dex().get_unregistered_pokemons())\n if previous != None:\n '''for x in previous:'''\n for x in list(self._pkmc):\n if x in self.get_level().get_dex().get_unregistered_pokemons():\n self.get_level().get_dex().register(x)\n self._player.set_position(self.get_level().get_starting_position())\n return False\n\n def get_player(self):\n \"\"\"\n Returns the player of the game.\n \n Game.get_player(Game) -> Player\n \"\"\"\n return self._player\n\n def get_level(self):\n \"\"\"\n Returns the current level, an instance of Level, else None if the game hasn’t started.\n \n Game.get_level(Game) -> bool\n Game.get_level(Game) -> dict\n \"\"\"\n if self._level == -1:\n return None\n else:\n return self._levels_list[self._level]\n\n def __len__(self):\n \"\"\"\n Returns the total number of levels in the game.\n \n Game.__len__(Game) -> int\n \"\"\"\n return len(self._game_file[\"levels\"])\n \n def is_complete(self):\n \"\"\"\n Returns True iff no levels remain incomplete, else False.\n \n Game.is_complete(Game) -> bool\n \"\"\"\n if len(self._levels_list)-1 == self._level:\n return True\n else:\n return False\n\n def move_player(self,direction):\n \"\"\"\n Attempts to move the player in the given direction. 
Returns whatever the player would hit in attempting to move, else None.\n \n Game.move_player(Game, str) -> Pokemon\n Game.move_player(Game, str) -> Wall\n \"\"\"\n try:\n position = None\n x1,y1 = DIRECTION_DELTAS[direction]\n x2,y2 = DIRECTION_WALL_DELTAS[direction]\n x,y = self._player.get_position()\n position_wall = (x2+x, y2+y)\n if_wall = False\n if self.get_level().is_obstacle_at(position_wall):\n if_wall = True\n position = Wall(\"#\",position_wall)\n if not if_wall:\n if is_position_valid((x1+x,y1+y),self.get_level().get_size()):\n position_pokemon = (x1+x, y1+y)\n position = self.get_level().get_pokemon_at(position_pokemon)\n if position is not None:\n self.get_level().catch_pokemon_at(position_pokemon)\n self._pkmc[position.get_name()] = None\n self._player.set_position(position_pokemon)\n return position\n except Exception as x:\n raise DirectionError(x)\n\n\"\"\"\nif __name__ == \"__main__\":\n import gui\n gui.main()\n\"\"\"\n","repo_name":"brakchen/CSSE1001_2016s2_assignment2","sub_path":"a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":17467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
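+A small illustration of the Dex bookkeeping defined above (run in the same module, since Dex lives there):
+```python
+dex = Dex(["pikachu", "eevee"])
+print(dex.register("pikachu"))          # False: first registration
+print(dex.register("pikachu"))          # True: already registered
+print(dex.get_registered_pokemons())    # ['pikachu']
+print(dex.get_unregistered_pokemons())  # ['eevee']
+```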
+{"seq_id":"29915332628","text":"\"\"\"\nAn encoder-decoder structure tries to encode a feature map to a lower dimensional representation\nusing the encoder network and then reconstructs the feature map with the decoder network.\n\nMathematically, the encoder: z = f(h_e(x))\n the decoder: x_hat = f(h_d(z))\n\nThe encoder and the decoder can be implemented with a neural network which learns the important features\nto be encoded and decoded (auto encoder).\n\nReference: https://medium.com/pytorch/implementing-an-autoencoder-in-pytorch-19baa22647d1\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass AutoEncoder(nn.Module):\n def __init__(self, in_features):\n super(AutoEncoder, self).__init__()\n\n # encoder layers\n self.in_1 = nn.Linear(in_features=in_features, out_features=256)\n self.in_2 = nn.Linear(in_features=256, out_features=128)\n\n # decoder layers\n self.out_1 = nn.Linear(in_features=128, out_features=256)\n self.out_2 = nn.Linear(in_features=256, out_features=in_features)\n\n def forward(self, x):\n x = self.in_1(x)\n x = F.relu(x)\n x = self.in_2(x)\n x = F.relu(x)\n\n x = self.out_1(x)\n x = F.relu(x)\n x = self.out_2(x)\n x = F.relu(x)\n\n return x\n\n\ndef train_model(num_epochs):\n # prepare data\n input_size = 784\n tf = transforms.Compose([transforms.ToTensor()])\n\n train_data = torchvision.datasets.MNIST(\n root='../data',\n train=True,\n transform=tf,\n download=True\n )\n train_loader = torch.utils.data.DataLoader(\n train_data,\n batch_size=64,\n shuffle=True,\n num_workers=0,\n pin_memory=True\n )\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = AutoEncoder(input_size)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n criterion = nn.MSELoss()\n\n # train auto encoder\n for epoch in range(num_epochs):\n cum_loss = 0\n for i, (data, label) in enumerate(train_loader):\n data = data.view(-1, input_size).to(device)\n\n optimizer.zero_grad()\n output = model(data)\n\n # learn to reconstruct\n loss = criterion(output, data)\n loss.backward()\n\n optimizer.step()\n cum_loss += loss.item()\n\n if (i + 1) % 100 == 0:\n print(f\"Epoch: {epoch + 1}; Iteration: {i + 1}; Avg loss: {cum_loss / (i+1)}.\")\n\n return model\n\n\ndef evaluate(model):\n input_size = 784\n tf = transforms.Compose([transforms.ToTensor()])\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n test_data = torchvision.datasets.MNIST(\n root='../data',\n train=False,\n transform=tf,\n download=True\n )\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=64,\n shuffle=True,\n num_workers=0,\n pin_memory=True\n )\n\n outputs = []\n\n with torch.no_grad():\n for i, (data, label) in enumerate(test_loader):\n data = data.view(-1, input_size).to(device)\n output = model(data).cpu().numpy()\n outputs.append(output)\n\n outputs = np.concatenate(outputs, axis=0)\n return outputs\n\n\nif __name__ == '__main__':\n model = train_model(20)\n res = evaluate(model)\n\n # visualize a few images\n sample_img = res[0].reshape(28,28)\n plt.imshow(sample_img, interpolation='nearest')\n plt.show()\n","repo_name":"BobbyZhouZijian/AI-Algo-Implmentations","sub_path":"unsupervised/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"14319127018","text":"import abc\n\n\nclass Environment(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return (\n hasattr(subclass, \"initialize\")\n and callable(subclass.initialize)\n and hasattr(subclass, \"get_all_possible_states\")\n and callable(subclass.get_all_possible_states)\n and hasattr(subclass, \"get_possible_actions\")\n and callable(subclass.get_possible_actions)\n and hasattr(subclass, \"take_action\")\n and callable(subclass.take_action)\n or NotImplemented\n )\n\n @abc.abstractmethod\n def initialize(self, method, state):\n \"\"\"Initialize environment\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_all_possible_states(self) -> list:\n \"\"\"Get all possible states in the environment\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def get_possible_actions(self, state=None) -> list:\n \"\"\"Get all possible actions of the current state or of a specific one\"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def take_action(self) -> tuple:\n \"\"\"Sends action to the environment and receives new state and reward\"\"\"\n raise NotImplementedError\n","repo_name":"rafabr4/gridworld","sub_path":"src/base_env.py","file_name":"base_env.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"38856330545","text":"def solve_puzzle(board, source, destination):\n \"\"\"\n :param board: M X N matrix, representing a Puzzle with obstacles\n :param source: tuple of starting cell\n :param destination: tuple of ending cell\n :return: directions in the form of \"LRUD\" and a list\n of tuples representing the path taken\n \"\"\"\n\n # create output variables and BFS queue\n visited = [(source[0], source[1])]\n path = ''\n q = [(source[0], source[1], path)]\n if source == destination:\n return [source], path\n shortest_path = None\n\n # traverse through queue to neighboring cells\n while q:\n r, c, path = q.pop(0)\n if (r, c) == destination:\n shortest_path = path\n break\n\n # possible path options, will verify move is within bounds\n moves = [(r, c - 1, 'L'), (r, c + 1, 'R'), (r - 1, c, 'U'), (r + 1, c, 'D')]\n for i, j, k in moves:\n if 0 <= i < len(board) and 0 <= j < len(board[0]):\n if board[i][j] != '#':\n if (i, j) not in visited:\n q.append((i, j, path + k))\n visited.append((i, j))\n\n if not shortest_path:\n return None\n # retrace path from source\n res = []\n move = {'L': (0, -1), 'R': (0, 1), 'U': (-1, 0), 'D': (1, 0)}\n res.append((source[0], source[1]))\n x = source[0]\n y = source[1]\n for letter in range(len(shortest_path)):\n d = shortest_path[letter]\n e, f = move[d]\n resB = (x + e, f + y)\n x, y = resB[0], resB[1]\n res.append(resB)\n\n return res, shortest_path\n\n\npuzzle = [\n ['-', '-', '#'],\n ['#', '-', '-'],\n ['-', '#', '-']\n]\n\nPuzzle = [\n ['-', '-', '-', '-', '-'],\n ['-', '-', '#', '-', '-'],\n ['-', '-', '-', '-', '-'],\n ['#', '-', '#', '#', '-'],\n ['-', '#', '-', '-', '-']\n]\nstart = (0, 2)\nend = (2, 2)\n","repo_name":"welchmea/algorithms_portfolio","sub_path":"Puzzle.py","file_name":"Puzzle.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"9992487849","text":"# Emre YİĞİT 150116056\nimport numpy as np\nimport math\n\ne = math.e\n\nlrate = 0.1 # learning rate\n\n# the points\nu = 1\nv = 1\n# this loop to find the error\nfor iter in range(15):\n # following lines are for finding partial derivative, calculating the gradient and update the points\n dE_du = 2*(u*e**v - 2*v*e**(-u)) * (e**v + 2*v*e**(-u))\n gradient = np.array([dE_du, 0])\n\n u = u - lrate * gradient[0]\n\n dE_dv = 2*(u*e**v - 2*v*e**(-u)) * (u*e**v - 2*e**(-u))\n gradient = np.array([0, dE_dv])\n v = v - lrate * gradient[1]\n\nerror = (u*e**v - 2*v*e**(-u))**2\n\nprint(\"error = \", error)","repo_name":"mr-ygt/ML-Homeworks","sub_path":"Problem7.py","file_name":"Problem7.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"31469536745","text":"import time\nfrom selfcaffeinate import SelfCaffeinate\n\n\ndef main():\n print(\"Self caffeinating\")\n SLEEP_PERIOD = 60\n sc = SelfCaffeinate()\n for i in range(0, 60):\n print(\"Sleeping {}\".format(SLEEP_PERIOD))\n time.sleep(SLEEP_PERIOD)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zcutlip/py-self-caffeinate","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"15902202709","text":"#!/usr/bin/env python\n\"\"\"\nPymodbus Synchronous Server Example\n--------------------------------------------------------------------------\n\nThe synchronous server is implemented in pure python without any third\nparty libraries (unless you need to use the serial protocols which require\npyserial). This is helpful in constrained or old environments where using\ntwisted is just not feasible. What follows is an example of its use:\n\"\"\"\n# --------------------------------------------------------------------------- #\n# import the various server implementations\n# --------------------------------------------------------------------------- #\nfrom pymodbus.server.sync import StartTcpServer\nfrom pymodbus.server.sync import StartTlsServer\nfrom pymodbus.server.sync import StartUdpServer\nfrom pymodbus.server.sync import StartSerialServer\n\nfrom pymodbus.device import ModbusDeviceIdentification\nfrom pymodbus.datastore import ModbusSequentialDataBlock, ModbusSparseDataBlock\nfrom pymodbus.datastore import ModbusSlaveContext, ModbusServerContext\n\nfrom pymodbus.transaction import ModbusRtuFramer, ModbusBinaryFramer\n\nfrom pymodbus.client.sync import ModbusTcpClient as ModbusClient\n\n# --------------------------------------------------------------------------- #\n# configure the service logging\n# --------------------------------------------------------------------------- #\nimport logging\nimport os\n\nFORMAT = ('%(asctime)-15s %(threadName)-15s'\n ' %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s')\nlogging.basicConfig(format=FORMAT)\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n\n\nclass ModbusRelayHoldingRegisterDataBlock:\n \n def __init__(self, address, length, serveraddress, serverport ):\n ''' Initializes the datastore\n\n :param address: The starting address of the datastore\n :param values: Either a list or a dictionary of values\n '''\n self.address = address\n self.length = length\n self.serveraddress = serveraddress\n self.serverport = serverport\n self.client = ModbusClient(serveraddress, serverport)\n self.client.connect()\n self.client\n\n\n def validate(self, address, count=1):\n ''' Checks to see if the request is in range\n\n :param address: The starting address\n :param count: The number of values to test for\n :returns: True if the request in within range, False otherwise\n '''\n\n address-=1\n result = (self.address <= address)\n result &= ((self.address + self.length) >= (address + count))\n log.debug(\"validate for address {0} count {1}: result {2}\".format(address, count, result))\n return result\n\n def getValues(self, address, count=1):\n ''' Returns the requested values of the datastore\n\n :param address: The starting address\n :param count: The number of values to retrieve\n :returns: The requested values from a:a+c\n '''\n\n address-=1\n rr = self.client.read_holding_registers(address, count, unit=0x1)\n log.debug(\"getValues for address {0} count {1}:{2}\".format(address, count, rr.registers))\n return rr.registers\n\n def setValues(self, address, values):\n ''' Sets the requested values of the datastore\n\n :param address: The starting address\n :param values: The new values to be set\n '''\n\n address-=1\n log.debug(\"setValues for address {0} values {1}\".format(address, values))\n rq = self.client.write_registers(address,values, unit=0x1)\n log.debug(\"setValues returned with:{0}\".format(rq))\n\n def __del__(self): \n self.client.close()\n\ndef run_server():\n # 
----------------------------------------------------------------------- #\n # initialize your data store\n # ----------------------------------------------------------------------- #\n # The datastores only respond to the addresses that they are initialized to\n # Therefore, if you initialize a DataBlock to addresses of 0x00 to 0xFF, a\n # request to 0x100 will respond with an invalid address exception. This is\n # because many devices exhibit this kind of behavior (but not all)::\n #\n # block = ModbusSequentialDataBlock(0x00, [0]*0xff)\n #\n # Continuing, you can choose to use a sequential or a sparse DataBlock in\n # your data context. The difference is that the sequential has no gaps in\n # the data while the sparse can. Once again, there are devices that exhibit\n # both forms of behavior::\n #\n # block = ModbusSparseDataBlock({0x00: 0, 0x05: 1})\n # block = ModbusSequentialDataBlock(0x00, [0]*5)\n #\n # Alternately, you can use the factory methods to initialize the DataBlocks\n # or simply do not pass them to have them initialized to 0x00 on the full\n # address range::\n #\n # store = ModbusSlaveContext(di = ModbusSequentialDataBlock.create())\n # store = ModbusSlaveContext()\n #\n # Finally, you are allowed to use the same DataBlock reference for every\n # table or you may use a separate DataBlock for each table.\n # This depends if you would like functions to be able to access and modify\n # the same data or not::\n #\n # block = ModbusSequentialDataBlock(0x00, [0]*0xff)\n # store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)\n #\n # The server then makes use of a server context that allows the server to\n # respond with different slave contexts for different unit ids. By default\n # it will return the same context for every unit id supplied (broadcast\n # mode).\n # However, this can be overloaded by setting the single flag to False and\n # then supplying a dictionary of unit id to context mapping::\n #\n # slaves = {\n # 0x01: ModbusSlaveContext(...),\n # 0x02: ModbusSlaveContext(...),\n # 0x03: ModbusSlaveContext(...),\n # }\n # context = ModbusServerContext(slaves=slaves, single=False)\n #\n # The slave context can also be initialized in zero_mode which means that a\n # request to address(0-7) will map to the address (0-7). 
The default is\n # False which is based on section 4.4 of the specification, so address(0-7)\n # will map to (1-8)::\n #\n # store = ModbusSlaveContext(..., zero_mode=True)\n # ----------------------------------------------------------------------- #\n\n targethost = os.environ.get('MODBUS_RELAY_TARGET_HOST') if (os.environ.get('MODBUS_RELAY_TARGET_HOST') is not None ) else 'localhost'\n targetport = int(os.environ.get('MODBUS_RELAY_TARGET_PORT') if (os.environ.get('MODBUS_RELAY_TARGET_PORT') is not None) else 5020)\n serverinterface = os.environ.get('MODBUS_RELAY_SERVER_INTERFACE') if (os.environ.get('MODBUS_RELAY_SERVER_INTERFACE') is not None ) else \"\"\n serverport = int(os.environ.get('MODBUS_RELAY_SERVER_PORT') if (os.environ.get('MODBUS_RELAY_SERVER_PORT') is not None) else 5021)\n startaddress = int(os.environ.get('MODBUS_RELAY_START_ADDRESS') if (os.environ.get('MODBUS_RELAY_START_ADDRESS') is not None) else 40071)\n addresscount = int(os.environ.get('MODBUS_RELAY_ADDRESS_COUNT') if (os.environ.get('MODBUS_RELAY_ADDRESS_COUNT') is not None) else 180)\n\n\n store = ModbusSlaveContext(\n hr= ModbusRelayHoldingRegisterDataBlock(startaddress,addresscount,targethost,targetport))\n\n context = ModbusServerContext(slaves=store, single=True)\n\n # ----------------------------------------------------------------------- #\n # initialize the server information\n # ----------------------------------------------------------------------- #\n # If you don't set this or any fields, they are defaulted to empty strings.\n # ----------------------------------------------------------------------- #\n identity = ModbusDeviceIdentification()\n identity.VendorName = 'Pymodbus'\n identity.ProductCode = 'PM'\n identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'\n identity.ProductName = 'Pymodbus Server'\n identity.ModelName = 'Pymodbus Server'\n identity.MajorMinorRevision = '2.3.0'\n\n # ----------------------------------------------------------------------- #\n # run the server you want\n # ----------------------------------------------------------------------- #\n # Tcp:\n StartTcpServer(context, identity=identity, address=(serverinterface, serverport))\n #\n # TCP with different framer\n # StartTcpServer(context, identity=identity,\n # framer=ModbusRtuFramer, address=(\"0.0.0.0\", 5020))\n\n # TLS\n # StartTlsServer(context, identity=identity, certfile=\"server.crt\",\n # keyfile=\"server.key\", address=(\"0.0.0.0\", 8020))\n\n # Udp:\n # StartUdpServer(context, identity=identity, address=(\"0.0.0.0\", 5020))\n\n # socat -d -d PTY,link=/tmp/ptyp0,raw,echo=0,ispeed=9600 PTY,link=/tmp/ttyp0,raw,echo=0,ospeed=9600\n # Ascii:\n # StartSerialServer(context, identity=identity,\n # port='/dev/ttyp0', timeout=1)\n\n # RTU:\n # StartSerialServer(context, framer=ModbusRtuFramer, identity=identity,\n # port='/tmp/ttyp0', timeout=.005, baudrate=9600)\n\n # Binary\n # StartSerialServer(context,\n # identity=identity,\n # framer=ModbusBinaryFramer,\n # port='/dev/ttyp0',\n # timeout=1)\n\n\nif __name__ == \"__main__\":\n run_server()\n\n","repo_name":"goergch/ModbusRelay","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
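A sketch of exercising the relay from a second terminal, assuming the default ports above (the relay listens on 5021 and forwards to the target on 5020) and the synchronous client API the record already imports. Note the datablock subtracts 1 from incoming addresses (the 1-based convention from section 4.4), so the first request address that validates is startaddress + 1.

from pymodbus.client.sync import ModbusTcpClient

# Point a client at the relay (MODBUS_RELAY_SERVER_PORT defaults to 5021).
client = ModbusTcpClient("localhost", 5021)
client.connect()

# Reads inside the relayed window are validated by
# ModbusRelayHoldingRegisterDataBlock and forwarded to the target server;
# anything outside returns an invalid-address exception.
rr = client.read_holding_registers(40072, 4, unit=0x1)
if not rr.isError():
    print(rr.registers)

client.close()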
+{"seq_id":"32472188518","text":"import jax\nimport optax\nimport jax.numpy as jnp\n\nfrom rl_zoo.utils.common import Transition\nfrom rl_zoo.utils.model import build_mlp, hard_update\nfrom collections import namedtuple\nfrom typing import List, Tuple, NamedTuple\n\nQParameters = namedtuple(\"QParameters\", \"q q_target\")\nPolicyParameters = namedtuple(\"PolicyParameters\", \"policy\")\nOutput = namedtuple(\"Output\", \"action q\")\nOptimizerState = namedtuple(\"OptimizerState\", \"q policy\")\nLoss = namedtuple(\"Loss\", \"q_loss policy_loss\")\n\n\nclass Parameters(NamedTuple):\n q: QParameters\n policy: PolicyParameters\n\n\nclass DDPG:\n \"\"\"\n An implementation of DDPG\n Reference:\n https://arxiv.org/abs/1509.02971\n \"\"\"\n\n def __init__(self,\n obs_dim: int,\n action_dim: int,\n hiddens: List[int] = [64, 64],\n gamma: float = 0.95,\n learning_rate: float = 1e-3):\n self.gamma = gamma\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n\n # q network\n self.q = build_mlp(hiddens, action_dim)\n self.q_opt = optax.adam(learning_rate)\n\n # policy network\n self.policy = build_mlp(hiddens, action_dim)\n self.policy_opt = optax.adam(learning_rate)\n\n # self.update_parameters = self.update_parameters\n # self.get_action = self.get_action\n\n self.update_parameters = jax.jit(self.update_parameters)\n self.get_action = jax.jit(self.get_action)\n self.get_random_action = jax.jit(self.get_random_action)\n\n def initial_parameters(self, rng) -> Parameters:\n # q network\n sample_input = jnp.zeros((1, self.obs_dim + self.action_dim))\n\n q_rng, q_target_rng = jax.random.split(rng, 2)\n q_params = self.q.init(q_rng, sample_input)\n q_target_params = self.q.init(q_target_rng, sample_input)\n\n # policy network\n sample_input = jnp.zeros((1, self.obs_dim))\n\n policy_params = self.policy.init(rng, sample_input)\n\n return Parameters(\n QParameters(q_params, q_target_params),\n PolicyParameters(policy_params)\n )\n\n def initial_optimizer(self, params: Parameters) -> OptimizerState:\n # q network\n opt_state = self.q_opt.init(params.q.q)\n\n # policy network\n policy_opt_state = self.policy_opt.init(params.policy.policy)\n\n return OptimizerState(\n opt_state,\n policy_opt_state\n )\n\n def q_loss(self, q_params: QParameters, policy_params: PolicyParameters, data: Transition) -> float:\n obs, action, reward, next_obs, done = data\n\n q = self.q.apply(q_params.q, jnp.concatenate([obs, action], axis=1))\n\n next_action = self.policy.apply(policy_params.policy, next_obs)\n next_q_target = self.q.apply(q_params.q_target, jnp.concatenate([next_obs, next_action], axis=1))\n\n target = reward + self.gamma * (1 - done) * jax.lax.stop_gradient(next_q_target)\n\n return jnp.mean(jnp.square(q - target))\n\n def policy_loss(self, policy_params: PolicyParameters, q_params: QParameters, data: Transition) -> float:\n obs, _, _, _, _ = data\n\n action = self.policy.apply(policy_params.policy, obs)\n q = self.q.apply(q_params.q, jnp.concatenate([obs, action], axis=1))\n\n return -jnp.mean(q)\n\n def update_parameters(self,\n params: Parameters,\n opt: OptimizerState,\n data: Transition) -> Tuple[Parameters, OptimizerState, Loss]:\n\n # update q network\n q_grad_fn = jax.value_and_grad(self.q_loss)\n q_loss, q_grads = q_grad_fn(params.q, params.policy, data) # q_grads is for both q and q_target\n q_updates, q_opt_state = self.q_opt.update(q_grads.q, opt.q)\n q_params = optax.apply_updates(params.q.q, q_updates)\n\n new_q_params = QParameters(q_params, params.q.q_target)\n\n # update policy network\n 
policy_grad_fn = jax.value_and_grad(self.policy_loss)\n policy_loss, policy_grads = policy_grad_fn(params.policy, params.q, data)\n policy_updates, policy_opt_state = self.policy_opt.update(policy_grads.policy, opt.policy)\n policy_params = optax.apply_updates(params.policy.policy, policy_updates)\n\n new_policy_params = PolicyParameters(policy_params)\n\n return (\n Parameters(new_q_params, new_policy_params),\n OptimizerState(q_opt_state, policy_opt_state),\n Loss(q_loss, policy_loss)\n )\n\n def update_target(self, params: Parameters) -> Parameters:\n q_target_params = hard_update(params.q.q_target, params.q.q)\n return Parameters(\n QParameters(params.q.q, q_target_params),\n params.policy\n )\n\n def get_action(self, params: Parameters, obs: jnp.ndarray, rng) -> Output:\n obs = jnp.reshape(obs, (1, -1))\n return Output(\n self.policy.apply(params.policy.policy, obs),\n None\n )\n\n def get_random_action(self, params: Parameters, obs: jnp.ndarray, rng) -> Output:\n obs = jnp.reshape(obs, (1, -1))\n action = self.policy.apply(params.policy.policy, obs)\n return Output(\n action + jax.random.normal(rng, action.shape),\n None\n )\n","repo_name":"nutorbit/rl-zoo","sub_path":"rl_zoo/off_policy/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
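The class exposes purely functional pieces (parameters in, parameters out); a sketch of the outer loop that glues them together. `sample_batch` is a hypothetical replay-buffer callable returning a batched Transition; it is not part of the record.

import jax

def train(agent: DDPG, sample_batch, steps=1000, target_every=100, seed=0):
    # Hypothetical outer loop: sample_batch(rng) is assumed to return a
    # Transition of batched (obs, action, reward, next_obs, done) arrays.
    rng = jax.random.PRNGKey(seed)
    params = agent.initial_parameters(rng)
    opt = agent.initial_optimizer(params)
    for step in range(steps):
        rng, batch_rng = jax.random.split(rng)
        data = sample_batch(batch_rng)
        params, opt, loss = agent.update_parameters(params, opt, data)
        if (step + 1) % target_every == 0:
            # Periodically hard-sync the target Q network; this implementation
            # uses hard updates rather than Polyak averaging.
            params = agent.update_target(params)
    return params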
+{"seq_id":"20196785025","text":"__author__ = [\"Amir Hossein Sorouri\"]\n__copyright__ = \"Copyright 2019, DSTea\"\n__email__ = [\"amirsorouri26@gmail.com\"]\n__license__ = \"Apache-2.0\"\n__version__ = \"2.0\"\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .explore import explore_job\n\n\ndef crawl(request):\n url = request.POST.get('url', None)\n res = explore_job(url)\n if 1 == res:\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=500)\n","repo_name":"Amirsorouri00/DSL-SE","sub_path":"crawler_django/crawler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"55"}
+{"seq_id":"13589260075","text":"\"\"\"\nhash_map.py\n\nhash functions map data of any size to a fixed and unique value for storing\n\nRefer to Jupyter Notebook HashMap.ipynb for further explanation of key\nconcepts used. Though this source duplicates the code it does not have\nthe full explanations.\n\"\"\"\n\n\n\"\"\"\nFor a given string, say abcd, a simple hash function can be sum of\ncorresponding ASCII values multiplied by a prime number and raised to a power.\n\nNote: use ord(character) to determine ASCII value of a particular character\ne.g. ord('a') will return 97\n\nSimilarly, we can treat abcde as\n𝑎∗𝑝4 + 𝑏∗𝑝3 + 𝑐∗𝑝2 + 𝑑∗𝑝1 + 𝑒∗𝑝0\n\nHere, we replace each character with its corresponding ASCII value.\n\nThis hash function is one of the most popular functions used for strings.\nWe use prime numbers because the provide a good distribution.\nThe most common prime numbers used for this function are 31 and 37.\n\nWe can get a corresponding integer value for each string key and store it in an array.\n\"\"\"\ndef hash_function(string):\n hash_code = 0\n exp = len(string) - 1\n prime = 37\n for character in string:\n hash_code += (ord(character) * (prime ** exp))\n # print(character, exp, (prime**exp))\n exp -= 1\n return hash_code\n\nhash_code_1 = hash_function(\"abcd\")\nprint(\"hash code for 'abcd' is\", hash_code_1)\nhash_code_1 = hash_function(\"dcba\")\nprint(\"hash code for 'dcba' is\", hash_code_1)\nhash_code_1 = hash_function(\"bcda\")\nprint(\"hash code for 'bcda' is\", hash_code_1)\n\n\n\"\"\"\nLinked list is used to handle collisions that might occur\n\nClosed Addressing or Separate Chaining\nUse the same bucket to store multiple objects. The bucket in this case will\nstore a linked list of key-value pairs. Every bucket has it's own separate\nchain of linked list nodes.\n\"\"\"\nclass LinkedListNode:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\n\"\"\"\nGet a corresponding integer value for each string key and store it in an array\n\nThe array used for this purpose is called a bucket array.\nIt is not a special array. We simply choose to give a special name to arrays\nfor this purpose. Each entry in this bucket array is called a bucket and the\nindex in which we store a bucket is called bucket index.\n\nAlso note the hash code method is different than the above in that the exponent\nto raise the prime number begins at one instead of ending at zero.\n\nFinally, it uses compression to create array values of reasonable size.\nIf we have an array of size 10, we can be sure that modulo of any number with 10\nwill be less than 10, allowing it to fit into our bucket array.\n\n!!! 
Using compression increases likelhood of COLLISIONS !!!\n\nhttps://www.geeksforgeeks.org/hashing-set-2-separate-chaining/\n\n\"\"\"\nclass HashMap:\n def __init__(self, initial_size=10):\n self.bucket_array = [None for _ in range(initial_size)]\n self.p = 31\n self.num_entries = 0\n self.load_factor = 0.7\n\n def put(self, key, value):\n bucket_index = self.get_bucket_index(key)\n new_node = LinkedListNode(key, value)\n head = self.bucket_array[bucket_index]\n\n # check if key is already present in the map, and update it's value\n while head is not None:\n if head.key == key:\n head.value = value\n return\n head = head.next\n\n # key not found in the chain --> create a new entry and place it at\n # the head of the chain\n head = self.bucket_array[bucket_index]\n new_node.next = head\n self.bucket_array[bucket_index] = new_node\n self.num_entries += 1\n\n # check for load factor\n current_load_factor = self.num_entries / len(self.bucket_array)\n if current_load_factor > self.load_factor:\n self.num_entries = 0\n self._rehash()\n\n def get(self, key):\n bucket_index = self.get_hash_code(key)\n head = self.bucket_array[bucket_index]\n while head is not None:\n if head.key == key:\n return head.value\n head = head.next\n return None\n\n def get_bucket_index(self, key):\n return self.get_hash_code(key)\n\n def get_hash_code(self, key):\n key = str(key)\n num_buckets = len(self.bucket_array)\n current_coefficient = 1\n hash_code = 0\n for character in key:\n hash_code += ord(character) * current_coefficient\n # compress the hash_code\n hash_code = hash_code % num_buckets\n current_coefficient *= self.p\n # compress the coefficient\n current_coefficient = current_coefficient % num_buckets\n return hash_code % num_buckets # one last compression\n\n def size(self):\n return self.num_entries\n\n # see notes in HashMap.ipynb, but the bucket array size changes\n # which results in a different compression so the index will change\n def _rehash(self):\n old_num_buckets = len(self.bucket_array)\n old_bucket_array = self.bucket_array\n num_buckets = 2 * old_num_buckets\n self.bucket_array = [None for _ in range(num_buckets)]\n\n for head in old_bucket_array:\n while head is not None:\n key = head.key\n value = head.value\n # we can use our put() method to rehash\n self.put(key, value)\n head = head.next\n\n def delete(self, key):\n bucket_index = self.get_bucket_index(key)\n head = self.bucket_array[bucket_index]\n previous = None\n while head is not None:\n if head.key == key:\n if previous is None:\n self.bucket_array[bucket_index] = head.next\n else:\n previous.next = head.next\n self.num_entries -= 1\n return\n else:\n previous = head\n head = head.next\n\n\nhash_map = HashMap()\nhash_map.put(\"one\", 1)\nhash_map.put(\"two\", 2)\nhash_map.put(\"three\", 3)\nhash_map.put(\"neo\", 11)\nhash_map.put(\"abcd\", 9)\nhash_map.put(\"dcba\", 13)\n\nprint(\"one: {}\".format(hash_map.get(\"one\")))\nprint(\"neo: {}\".format(hash_map.get(\"neo\")))\nprint(\"three: {}\".format(hash_map.get(\"three\")))\nprint(\"abcd: {}\".format(hash_map.get(\"abcd\")))\nprint(\"dcba: {}\".format(hash_map.get(\"dcba\")))\nprint(\"size: {}\".format(hash_map.size()))\n\nhash_map.delete(\"one\")\n\nprint(hash_map.get(\"one\"))\nprint(hash_map.size())\n\n","repo_name":"ssi112/data-structures-algorithms","sub_path":"practice/maps_hashing/hash_map.py","file_name":"hash_map.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"55"}
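Because of the modulo compression, collisions are routine rather than exceptional; a quick sketch that provokes one and then checks that entries survive a `_rehash`:

# Sketch: force a collision, then grow past the load factor.
hm = HashMap(initial_size=2)          # tiny bucket array -> frequent collisions
print(hm.get_bucket_index("ab"), hm.get_bucket_index("ba"))  # often equal mod 2

for i, word in enumerate(["alpha", "beta", "gamma", "delta"]):
    hm.put(word, i)                   # crossing load_factor=0.7 triggers _rehash

# All entries remain retrievable after the bucket array has doubled.
print([hm.get(w) for w in ["alpha", "beta", "gamma", "delta"]])  # [0, 1, 2, 3]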
+{"seq_id":"71623164973","text":"import os\r\nimport random\r\nimport sys\r\nimport time\r\nmazzo=[\r\n \"1♥\", \"1♦\", \"1♣\", \"1♠\",\r\n \"2♥\", \"2♦\", \"2♣\", \"2♠\",\r\n \"3♥\", \"3♦\", \"3♣\", \"3♠\",\r\n \"4♥\", \"4♦\", \"4♣\", \"4♠\",\r\n \"5♥\", \"5♦\", \"5♣\", \"5♠\",\r\n \"6♥\", \"6♦\", \"6♣\", \"6♠\",\r\n \"7♥\", \"7♦\", \"7♣\", \"7♠\",\r\n \"J♥\", \"J♦\", \"J♣\", \"J♠\",\r\n \"Q♥\", \"Q♦\", \"Q♣\", \"Q♠\",\r\n \"K♥\", \"K♦\", \"K♣\", \"K♠\"\r\n ]\r\n\r\nmazzo_reset=[\r\n \"1♥\", \"1♦\", \"1♣\", \"1♠\",\r\n \"2♥\", \"2♦\", \"2♣\", \"2♠\",\r\n \"3♥\", \"3♦\", \"3♣\", \"3♠\",\r\n \"4♥\", \"4♦\", \"4♣\", \"4♠\",\r\n \"5♥\", \"5♦\", \"5♣\", \"5♠\",\r\n \"6♥\", \"6♦\", \"6♣\", \"6♠\",\r\n \"7♥\", \"7♦\", \"7♣\", \"7♠\",\r\n \"J♥\", \"J♦\", \"J♣\", \"J♠\",\r\n \"Q♥\", \"Q♦\", \"Q♣\", \"Q♠\",\r\n \"K♥\", \"K♦\", \"K♣\", \"K♠\"\r\n ]\r\n\r\ncarte_valori={\r\n \"1\" : 1,\r\n \"2\" : 2,\r\n \"3\" : 3,\r\n \"4\" : 4,\r\n \"5\" : 5,\r\n \"6\" : 6,\r\n \"7\" : 7,\r\n \"J\" : 8,\r\n \"Q\" : 9,\r\n \"K\" : 10\r\n}\r\n\r\ntavolo, manoPlayerOne, manoPlayerTwo , mazzettoPlayerOne, mazzettoPlayerTwo = [] , [] , [] , [] , []\r\n\r\nplayer=1\r\n\r\nscopePlayerOne,scopePlayerTwo=0,0\r\n\r\nlastPickup=0\r\n\r\nultimamano=False\r\n\r\nnomeGiocatore1,nomeGiocatore2=\"\",\"\"\r\n\r\nfinePartita=0\r\n\r\ndef loading():\r\n for i in range(6):\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n print(\"\\n\")\r\n print(\"Caricamento\")\r\n print(\"° \"*(i+1))\r\n time.sleep(0.5)\r\n \r\n time.sleep(1)\r\n \r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n \r\n print(\"\\n\\n\")\r\n print(\" ______ ______ ______ _______ ______ \\n / \\ / \\ / \\| \\ / \\ \\n| ▓▓▓▓▓▓\\ ▓▓▓▓▓▓\\ ▓▓▓▓▓▓\\ ▓▓▓▓▓▓▓\\ ▓▓▓▓▓▓\\ \\n| ▓▓___\\▓▓ ▓▓ \\▓▓ ▓▓ | ▓▓ ▓▓__/ ▓▓ ▓▓__| ▓▓\\n \\▓▓ \\| ▓▓ | ▓▓ | ▓▓ ▓▓ ▓▓ ▓▓ ▓▓\\n _\\▓▓▓▓▓▓\\ ▓▓ __| ▓▓ | ▓▓ ▓▓▓▓▓▓▓| ▓▓▓▓▓▓▓▓\\n| \\__| ▓▓ ▓▓__/ \\ ▓▓__/ ▓▓ ▓▓ | ▓▓ | ▓▓\\n \\▓▓ ▓▓\\▓▓ ▓▓\\▓▓ ▓▓ ▓▓ | ▓▓ | ▓▓\\n \\▓▓▓▓▓▓ \\▓▓▓▓▓▓ \\▓▓▓▓▓▓ \\▓▓ \\▓▓ \\▓▓\")\r\n print(\"-------------------\")\r\n print(\"╔═══╗ ╔═══╗╔═══╗\\n║╔═╗║ ║╔═╗║║╔═╗║\\n║║ ╚╝╔╗╔╗║╚══╗║║ ║║\\n║║ ╔╗║║║║╚══╗║║║ ║║\\n║╚═╝║║╚╝║║╚═╝║║╚═╝║\\n╚═══╝╚══╝╚═══╝╚═══╝\")\r\n print(\"-------------------\")\r\n time.sleep(2.4)\r\n\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\ndef reset():\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n mazzo=mazzo_reset\r\n tavolo, manoPlayerOne, manoPlayerTwo , mazzettoPlayerOne, mazzettoPlayerTwo = [] , [] , [] , [] , []\r\n scopePlayerOne,scopePlayerTwo=0,0\r\n lastPickup=0\r\n ultimamano=False\r\n print(f\"Eseguo il reset: \\nmazzo={mazzo};\\ntavolo={tavolo}; \\nmanoPlayerOne={manoPlayerOne};\\nmanoPlayerTwo={manoPlayerTwo}\\nmazzettoPlayerOne={mazzettoPlayerOne};\\nmazzettoPlayerTwo={mazzettoPlayerTwo};\\nscopePlayerOne={scopePlayerOne};\\nscopePlayerTwo={scopePlayerTwo};\\nlastPickup={lastPickup};\\nultimamano={ultimamano}\")\r\n time.sleep(1.5)\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\ndef askBeforeStart(nomeGiocatore1,nomeGiocatore2,finePartita):\r\n while nomeGiocatore1==\"\":\r\n nomeGiocatore1=input(\"Giocatore1, come ti chiami?\\n\")\r\n while nomeGiocatore2==\"\":\r\n nomeGiocatore2=input(\"Giocatore2, come ti chiami?\\n\")\r\n while finePartita==0:\r\n finePartita=int(input(\"A quanti punti volete che la partita finisca? 
(solitamente si gioca a 11 o 21 punti)\\n\"))\r\n print(f\"Ottimo, benvenuti {nomeGiocatore1} e {nomeGiocatore2}, che vinca il migliore\") \r\n\r\ndef tableView():\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n print(f\"carte nel mazzo: {len(mazzo)}\")\r\n print(\"******\"*len(tavolo),end=\"\\n\")\r\n print()\r\n print(\" ---- \"*len(tavolo))\r\n for ele in tavolo:\r\n print(\"|\",end=\" \")\r\n print(ele,end=\" |\") \r\n print()\r\n print(\" ---- \"*len(tavolo))\r\n print()\r\n print(\"******\"*len(tavolo))\r\n print()\r\n print(\"la tua mano:\")\r\n\r\ndef shuffle():\r\n random.shuffle(mazzo)\r\n\r\ndef start():\r\n shuffle()\r\n distro()\r\n for _ in range(4):\r\n tavolo.append(mazzo[0])\r\n del mazzo[0]\r\n drop(player)\r\n\r\ndef distro():\r\n for i in range (3):\r\n manoPlayerOne.append(mazzo[0])\r\n del mazzo[0]\r\n manoPlayerTwo.append(mazzo[0])\r\n del mazzo[0]\r\n\r\ndef checkPickUp(player,carta):\r\n if len(tavolo)>=1:\r\n for j in range(len(tavolo)):\r\n cartaDaControllare=tavolo[j]\r\n val_cartaDaControllare=carte_valori[cartaDaControllare[0]]\r\n val_cartaGiocatore=carte_valori[carta[0]]\r\n if val_cartaGiocatore==val_cartaDaControllare:\r\n if player==1:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} con {carta}\")\r\n mazzettoPlayerOne.append(carta)\r\n mazzettoPlayerOne.append(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare)\r\n lastPickup=1\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n else:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} con {carta}\")\r\n mazzettoPlayerTwo.append(carta)\r\n mazzettoPlayerTwo.append(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare)\r\n lastPickup=2\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n#------------------------\r\n if len(tavolo)>=2:\r\n for j in range(len(tavolo)-1):\r\n cartaDaControllare=tavolo[j]\r\n val_cartaDaControllare=carte_valori[cartaDaControllare[0]]\r\n val_cartaGiocatore=carte_valori[carta[0]]\r\n for t in range(j+1, len(tavolo)):\r\n cartaDaControllare2=tavolo[t]\r\n val_cartaDaControllare2=carte_valori[cartaDaControllare2[0]]\r\n if val_cartaGiocatore==(val_cartaDaControllare+val_cartaDaControllare2):\r\n if player==1:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} con {carta}\")\r\n mazzettoPlayerOne.append(carta)\r\n mazzettoPlayerOne.append(cartaDaControllare)\r\n mazzettoPlayerOne.append(cartaDaControllare2) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n lastPickup=1\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n else:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} con {carta}\")\r\n mazzettoPlayerTwo.append(carta)\r\n mazzettoPlayerTwo.append(cartaDaControllare)\r\n mazzettoPlayerTwo.append(cartaDaControllare2) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n lastPickup=2\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n#----------------------- \r\n if len(tavolo)>=3:\r\n for j in range(len(tavolo)-2):\r\n cartaDaControllare=tavolo[j]\r\n val_cartaDaControllare=carte_valori[cartaDaControllare[0]]\r\n val_cartaGiocatore=carte_valori[carta[0]]\r\n for t in range(j+1, len(tavolo)-1):\r\n cartaDaControllare2=tavolo[t]\r\n val_cartaDaControllare2=carte_valori[cartaDaControllare2[0]]\r\n for i in range (t+1, len(tavolo)):\r\n cartaDaControllare3=tavolo[i]\r\n 
val_cartaDaControllare3=carte_valori[cartaDaControllare3[0]]\r\n if val_cartaGiocatore==(val_cartaDaControllare+val_cartaDaControllare2+val_cartaDaControllare3):\r\n if player==1:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} e {cartaDaControllare3} con {carta}\")\r\n mazzettoPlayerOne.append(carta)\r\n mazzettoPlayerOne.append(cartaDaControllare)\r\n mazzettoPlayerOne.append(cartaDaControllare2) \r\n mazzettoPlayerOne.append(cartaDaControllare3) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n tavolo.remove(cartaDaControllare3)\r\n lastPickup=1\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n else:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} e {cartaDaControllare3} con {carta}\")\r\n mazzettoPlayerTwo.append(carta)\r\n mazzettoPlayerTwo.append(cartaDaControllare)\r\n mazzettoPlayerTwo.append(cartaDaControllare2) \r\n mazzettoPlayerTwo.append(cartaDaControllare3) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n tavolo.remove(cartaDaControllare3)\r\n lastPickup=2\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n#----------------------- \r\n if len(tavolo)>=4:\r\n for j in range(len(tavolo)-3):\r\n cartaDaControllare=tavolo[j]\r\n val_cartaDaControllare=carte_valori[cartaDaControllare[0]]\r\n val_cartaGiocatore=carte_valori[carta[0]]\r\n for t in range(j+1, len(tavolo)-2):\r\n cartaDaControllare2=tavolo[t]\r\n val_cartaDaControllare2=carte_valori[cartaDaControllare2[0]]\r\n for i in range (t+1, len(tavolo)-1):\r\n cartaDaControllare3=tavolo[i]\r\n val_cartaDaControllare3=carte_valori[cartaDaControllare3[0]]\r\n for w in range(i+1, len(tavolo)):\r\n cartaDaControllare4=tavolo[w]\r\n val_cartaDaControllare4=carte_valori[cartaDaControllare4[0]]\r\n if val_cartaGiocatore==(val_cartaDaControllare+val_cartaDaControllare2+val_cartaDaControllare3+val_cartaDaControllare4):\r\n if player==1:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} e {cartaDaControllare3} e {cartaDaControllare4} con {carta}\")\r\n mazzettoPlayerOne.append(carta)\r\n mazzettoPlayerOne.append(cartaDaControllare)\r\n mazzettoPlayerOne.append(cartaDaControllare2) \r\n mazzettoPlayerOne.append(cartaDaControllare3) \r\n mazzettoPlayerOne.append(cartaDaControllare4) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n tavolo.remove(cartaDaControllare3)\r\n tavolo.remove(cartaDaControllare4)\r\n lastPickup=1\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n else:\r\n print(f\"il giocatore {player} ha preso {cartaDaControllare} e {cartaDaControllare2} e {cartaDaControllare3} e {cartaDaControllare4} con {carta}\")\r\n mazzettoPlayerTwo.append(carta)\r\n mazzettoPlayerTwo.append(cartaDaControllare)\r\n mazzettoPlayerTwo.append(cartaDaControllare2) \r\n mazzettoPlayerTwo.append(cartaDaControllare3) \r\n mazzettoPlayerTwo.append(cartaDaControllare4) \r\n tavolo.remove(cartaDaControllare)\r\n tavolo.remove(cartaDaControllare2)\r\n tavolo.remove(cartaDaControllare3)\r\n tavolo.remove(cartaDaControllare4)\r\n lastPickup=2\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n tavolo.append(carta)\r\n turnSwap(player,carta,ultimamano,scopePlayerOne,scopePlayerTwo)\r\n\r\ndef lastChance(tavolo):\r\n if lastPickup==1:\r\n for ele in tavolo:\r\n mazzettoPlayerOne.append(ele)\r\n print(f\"essendo l'ultima mano, il giocatore 1 prende 
{tavolo}\")\r\n tavolo=[]\r\n else:\r\n for ele in tavolo:\r\n mazzettoPlayerTwo.append(ele)\r\n print(f\"essendo l'ultima mano, il giocatore 2 prende {tavolo}\")\r\n tavolo=[]\r\n scoreCheck()\r\n \r\ndef turnSwap(player, carta, ultimamano,scopePlayerOne,scopePlayerTwo):\r\n if len(mazzo)==0 and len(manoPlayerOne)==0:\r\n ultimamano=True\r\n if len(tavolo)==0 and ultimamano==False:\r\n print(f\"il giocatore {player} ha fatto scopa con {carta}\")\r\n if player==1:\r\n scopePlayerOne+=1\r\n else:\r\n scopePlayerTwo+=1\r\n if player==1:\r\n player=2\r\n else:\r\n player=1\r\n if len(manoPlayerTwo)==0 and len(mazzo)>=6:\r\n distro()\r\n if len(manoPlayerTwo)==0 and ultimamano==True:\r\n lastChance(tavolo)\r\n #time.sleep(1.3)\r\n drop(player)\r\n \r\ndef drop(player):\r\n tableView()\r\n inp=0\r\n if player==1:\r\n print(manoPlayerOne,end=\"\\n\\n\")\r\n else:\r\n print(manoPlayerTwo,end=\"\\n\\n\")\r\n while not 1<=inp<=len(manoPlayerTwo) and inp!=\" \":\r\n inp=int(input(f\"Giocatore {player} che carta vuoi giocare?\"))\r\n if player==1:\r\n carta=manoPlayerOne[inp-1]\r\n del manoPlayerOne[inp-1]\r\n checkPickUp(player,carta)\r\n else:\r\n carta=manoPlayerTwo[inp-1]\r\n del manoPlayerTwo[inp-1]\r\n checkPickUp(player,carta)\r\n\r\ndef carte():\r\n if len(mazzettoPlayerOne) >20:\r\n return(1)\r\n elif len(mazzettoPlayerOne)==20:\r\n return(\"patta\")\r\n else:\r\n return(2)\r\n\r\ndef ori():\r\n ori=0\r\n for ele in mazzettoPlayerOne:\r\n if ele[1]==\"♦\":\r\n ori+=1\r\n if ori>5:\r\n return(1)\r\n elif ori==5:\r\n return(\"patta\")\r\n else:\r\n return(2)\r\n\r\ndef settebello():\r\n if \"7♦\" in mazzettoPlayerOne:\r\n return(1)\r\n else:\r\n return(2)\r\n\r\ndef primiera():\r\n q, p, c, f=False , False, False, False \r\n puntiPrimiera1=0\r\n puntiPrimiera2=0 \r\n for ele in mazzettoPlayerOne:\r\n if ele[0]==\"7\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera1+=21\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera1+=21\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera1+=21\r\n if ele[1]==\"♣\" and f==False:\r\n c=True\r\n puntiPrimiera1+=21\r\n if puntiPrimiera1<70:\r\n for ele in mazzettoPlayerOne:\r\n if ele[0]==\"6\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera1+=18\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera1+=18\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera1+=18\r\n if ele[1]==\"♣\" and f==False:\r\n c=True\r\n puntiPrimiera1+=18\r\n if puntiPrimiera1<70:\r\n for ele in mazzettoPlayerOne:\r\n if ele[0]==\"1\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera1+=16\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera1+=16\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera1+=16\r\n if ele[1]==\"♣\" and f==False:\r\n c=True\r\n puntiPrimiera1+=16\r\n \r\n q, p, c, f=False , False, False, False \r\n for ele in mazzettoPlayerTwo:\r\n if ele[0]==\"7\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera2+=21\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera2+=21\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera2+=21\r\n if ele[1]==\"♣\" and f==False:\r\n c=True\r\n puntiPrimiera2+=21\r\n if puntiPrimiera2<70:\r\n for ele in mazzettoPlayerTwo:\r\n if ele[0]==\"6\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera2+=18\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera2+=18\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera2+=18\r\n if ele[1]==\"♣\" and f==False:\r\n 
c=True\r\n puntiPrimiera2+=18\r\n if puntiPrimiera2<70:\r\n for ele in mazzettoPlayerOne:\r\n if ele[0]==\"1\":\r\n if ele[1]==\"♦\" and q==False:\r\n q=True\r\n puntiPrimiera2+=16\r\n if ele[1]==\"♠\" and p==False:\r\n p=True\r\n puntiPrimiera2+=16\r\n if ele[1]==\"♥\" and c==False:\r\n c=True\r\n puntiPrimiera2+=16\r\n if ele[1]==\"♣\" and f==False:\r\n c=True\r\n puntiPrimiera2+=16\r\n \r\n if puntiPrimiera1>puntiPrimiera2:\r\n return(1)\r\n elif puntiPrimiera1=finePartita:\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n if puntiPartitaP1>puntiPartitaP2:\r\n print(f\"Complimenti {nomeGiocatore1}, hai vinto contro {nomeGiocatore2} con un totale di {puntiPartitaP1} a {puntiPartitaP2}!\")\r\n sys.exit()\r\n else:\r\n print(f\"Il vostro punteggio è di {puntiPartitaP1} a {puntiPartitaP2} per {nomeGiocatore1}\")\r\n risp=\"\"\r\n while risp !=\"sì\"or risp!=\"no\" or risp!=\"si\":\r\n risp=input(\"Volete continuare a giocare?\\n\")\r\n risp=risp.lower()\r\n if risp==\"sì\" or risp==\"si\":\r\n print(\"Ottimo!\")\r\n replay()\r\n elif risp==\"no\":\r\n print(\"Va bene, allora dite addio ai vostri progressi :) \")\r\n destroy()\r\n \r\ndef destroy():\r\n f=open(\"cache.txt\", \"w\")\r\n f.close()\r\n\r\ndef replay():\r\n reset()\r\n start()\r\n\r\ndef main():\r\n loading()\r\n askBeforeStart(nomeGiocatore1,nomeGiocatore2,finePartita)\r\n start()\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"CuSO-the-coder/cardGame-Scopa","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":21813,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
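checkPickUp above enumerates 1-, 2-, 3- and 4-card captures with four hand-written nested-loop blocks; a sketch of the same subset-sum search with itertools.combinations would collapse them into one loop. Identifiers follow the record's Italian names; smaller subsets are tried first, matching the record's single-card-before-sums order.

from itertools import combinations

def find_capture(tavolo, carta, carte_valori):
    # Return the first subset of table cards whose values sum to the played
    # card's value, preferring smaller subsets; equivalent to the four
    # nested-loop blocks in checkPickUp.
    target = carte_valori[carta[0]]
    for size in range(1, min(4, len(tavolo)) + 1):
        for subset in combinations(tavolo, size):
            if sum(carte_valori[c[0]] for c in subset) == target:
                return list(subset)
    return None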
+{"seq_id":"38392044619","text":"import string\n# input and outputs the second largest number in the list using\n\n# conditional statements and a for loop.\ndef second_larg(numbers):\n sorting=sorted(numbers)\n second_largest=sorting[-2]\n for num in numbers:\n if(num==second_largest):\n return second_largest\n\nprint(second_larg(numbers=[2,63,6,53,13,8]))\n\n# Write a Python program that takes a year as input and\n# determines if it is a leap year.\ndef leapyear(theyear):\n if(theyear % 4==0):\n return f\"{theyear} is a leapyear\"\n else:\n return theyear\n\nprint(leapyear(theyear=2020))\n\n# Write a Python program that takes a string as input and\n# checks if it is a palindrome (reads the same forwards and backwards),\n# ignoring spaces and punctuation\ndef palindrome(mystring):\n ignorecase=mystring.casefold()\n remove_punct=ignorecase.translate(str.maketrans('','',string.punctuation))\n if(remove_punct==remove_punct[::-1]):\n print(f'{remove_punct} is a palindrome')\npalindrome(\"Ra..Da'r\")","repo_name":"Loice-KaniniMwau/PYTHON_PRACTICE","sub_path":"pip.py","file_name":"pip.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21594581590","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render, reverse, HttpResponseRedirect\nfrom .models import Message\nfrom accounts.models import MyUser\nfrom .forms import AddTextForm\n\nfrom message_notification.views import create_message_notification\n\n\nfrom message_notification.models import MessageNotification\nfrom review_notification.models import ReviewNotification\nfrom faq_notification.models import FaqNotification\n\nfrom all_notifications.views import get_notification_count\n\nfrom django.contrib import messages\n\n\ndef get_messages_count(logged_in_user):\n target_user = logged_in_user\n user_messages = Message.objects.filter(recipient=target_user)\n\n messages_count = len(user_messages)\n return messages_count\n\n\ndef MessageView(req, id):\n template = \"generic_form.html\"\n recip = MyUser.objects.get(id=id)\n signed_in_user = req.user\n user_messages = Message.objects.filter(recipient=recip)\n if req.method == \"POST\":\n form = AddTextForm(req.POST)\n if form.is_valid():\n data = form.cleaned_data\n message = Message.objects.create(\n message=data[\"message\"], author=req.user, recipient=recip\n )\n create_message_notification(message, recip)\n messages.add_message(req, message=\"Message sent.\", level=messages.SUCCESS)\n return HttpResponseRedirect(reverse(\"profile\", args=(id,)))\n form = AddTextForm()\n context = {\n \"user_messages\": user_messages,\n \"form\": form,\n \"header\": \"message\",\n \"signed_in_user\": signed_in_user,\n }\n return render(req, \"generic_form.html\")\n\n\ndef UserMessages(req, id):\n target_user = MyUser.objects.get(id=id)\n signed_in_user = MyUser.objects.get(id=req.user.id)\n\n notifications_count = get_notification_count(req.user)\n\n user_messages = Message.objects.filter(recipient=target_user)\n context = {\n \"user_messages\": user_messages,\n \"notifications_count\": notifications_count,\n \"signed_in_user\": signed_in_user,\n \"target_user\": target_user\n }\n return render(req, \"messages.html\", context)\n\n\ndef DeleteMessage(req, id):\n del_message = Message.objects.get(id=id)\n user_id = req.user.id\n del_message.delete()\n messages.add_message(req, message=\"Message deleted.\", level=messages.ERROR)\n return HttpResponseRedirect(reverse(\"usermessages\", args=(user_id,)))\n","repo_name":"Jacob-Short/Q4-Capstone","sub_path":"message/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"12478432524","text":"from django.shortcuts import render, redirect\nfrom dashboard.forms import FormBarang\nfrom dashboard.models import Barang\nfrom dashboard.forms import FormAksesoris\nfrom dashboard.models import Aksesoris\nfrom django.contrib import messages\n\n# Create your views here.\n\n\ndef produk(request):\n titelnya=\"Produk\"\n konteks = {\n 'titel':titelnya,\n }\n return render(request,'produk.html',konteks)\n\ndef Barang_View(request):\n barangs=Barang.objects.all()\n\n konteks={\n 'barangs':barangs,\n }\n return render(request, 'tampil_brg.html', konteks)\n\ndef Aksesoris_View(request):\n aksesoriss=Aksesoris.objects.all()\n\n konteks={\n 'aksesoriss':aksesoriss,\n }\n return render(request, 'aksesoris.html', konteks)\n\ndef tambah_barang(request):\n form= FormBarang(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Data Berhasil Ditambahkan\")\n form =FormBarang()\n konteks = {\n 'form' : form,\n }\n return render(request, 'tambah_barang.html', konteks)\n else:\n form=FormBarang()\n konteks ={\n 'form':form,\n }\n return render(request, 'tambah_barang.html', konteks)\n\ndef tambah_aksesoris(request):\n form= FormAksesoris(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Data Berhasil Ditambahkan\")\n form =FormAksesoris()\n konteks = {\n 'form' : form,\n }\n return render(request, 'tambah_aksesoris.html', konteks)\n else:\n form=FormAksesoris()\n konteks ={\n 'form':form,\n }\n return render(request, 'tambah_aksesoris.html', konteks)\n\ndef ubah_brg(request,id_barang):\n barangs=Barang.objects.get(id=id_barang)\n if request.POST:\n form=FormBarang(request.POST, instance=barangs)\n if form.is_valid():\n form.save()\n messages.success(request, \"Data Berhasil diubah\")\n return redirect('ubah_brg', id_barang=id_barang)\n else:\n form=FormBarang(instance=barangs)\n konteks = {\n 'form' : form,\n 'barangs' : barangs\n }\n return render(request, 'ubah_brg.html', konteks)\n\ndef ubah_aksesoris(request,id_aksesoris):\n aksesoriss=Aksesoris.objects.get(id=id_aksesoris)\n if request.POST:\n form=FormAksesoris(request.POST, instance=aksesoriss)\n if form.is_valid():\n form.save()\n messages.success(request, \"Data Berhasil diubah\")\n return redirect('ubah_aksesoris', id_aksesoris=id_aksesoris)\n else:\n form=FormAksesoris(instance=aksesoriss)\n konteks = {\n 'form' : form,\n 'aksesoriss' : aksesoriss\n }\n return render(request, 'ubah_aksesoris.html', konteks)\n\ndef hapus_brg(request, id_barang):\n barangs=Barang.objects.filter(id=id_barang)\n barangs.delete()\n messages.success(request,\"Data Terhapus\")\n return redirect('Vbrg')\n\ndef hapus_aksesoris(request, id_aksesoris):\n aksesoriss=Aksesoris.objects.filter(id=id_aksesoris)\n aksesoriss.delete()\n messages.success(request,\"Data Terhapus\")\n return redirect('aksesoris')","repo_name":"alfinawyn/FP-UAS","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"21142274359","text":"import requests\nimport pandas as pd\nfrom multiprocessing import Process\nfrom functools import partial\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\nclass Parser(Process):\n\tdef __init__(self):\n\t\tProcess.__init__(self)\n\t\tself.__result_link = 'https://search.rozetka.com.ua/search/api/v6/'\n\t\tself.__param_dict = {'front-type': 'xl', 'country': 'UA', 'lang': 'ru'}\n\t\tself.__sku_list = self.__get_sku_from_file('sku.xlsx')\n\t\tself.__thread_num = 8\n\t\tself.__class__.__parse_withot_multithreading(self.__result_link, self.__param_dict, self.__sku_list).to_csv('rez.csv')\n\t\tself.__class__.__parse_with_multithreading(self.__result_link, self.__param_dict, self.__sku_list).to_csv('rez2.csv')\n\t\tself.__class__.__parse_seller_name(self.__param_dict, '5')\n\n\n\n\t@staticmethod\n\tdef __get_json_response_with_parametr(link, param_dict):\n\t\treturn requests.get(link, params = param_dict).json()\n\n\t@classmethod\n\tdef __process_goods_json_to_df(cls, response, param_dict, sku):\n\t\tif response['data']['goods']:\n\t\t\tdf = pd.json_normalize(response['data']['goods'])\n\t\t\tdf['seller_name'] = df['seller_id'].apply(lambda x: cls.__parse_seller_name(param_dict, x))\n\t\t\tdf['sku'] = str(sku)\n\t\t\treturn df[['sku', 'title', 'price', 'seller_name', 'href', 'sell_status']]\n\n\t@classmethod\n\tdef __parse_withot_multithreading(cls, link, param_dict, sku_list):\n\t\tdf_list = []\n\t\tfor item in sku_list:\n\t\t\tupd_dict = param_dict.copy()\n\t\t\tupd_dict.update({'text':str(item)})\n\t\t\tdf_list.append(cls.__process_goods_json_to_df(cls.__get_json_response_with_parametr(link, upd_dict),param_dict, item))\n\t\treturn pd.concat(df_list)\n\n\n\t@classmethod\n\tdef __parse_seller_name(cls, param_dict, seller_id):\n\t\tlink = 'https://product-api.rozetka.com.ua/v4/sellers/get'\t\t\n\t\tupd_dict = param_dict.copy()\n\t\tupd_dict.update({'id': str(seller_id)})\n\t\tr = cls.__get_json_response_with_parametr(link, upd_dict)\n\t\treturn r['data']['title']\n\n\n\n\t@staticmethod\n\tdef __get_sku_from_file(path):\n\t\treturn pd.read_excel(path, header=None, index_col=None)[0].tolist()\n\n\t@classmethod\n\tdef __thread_wrapper(cls, link, param_dict, sku):\n\t\tupd_dict = param_dict.copy()\n\t\tupd_dict.update({'text':str(sku)})\n\t\treturn cls.__process_goods_json_to_df(cls.__get_json_response_with_parametr(link, upd_dict),param_dict, sku)\n\n\t@classmethod\n\tdef __parse_with_multithreading(self, link, param_dict, sku_list):\n\t\twith ThreadPool(8) as pool:\n\t\t\tself.__thread_wrapper = partial(self.__thread_wrapper, link, param_dict)\n\t\t\tresult = pool.map(self.__thread_wrapper, sku_list)\n\t\treturn pd.concat(result)\n\n\na = Parser()","repo_name":"demchyk/rozetka_parser","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"10492367865","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers import LeakyReLU\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nseed = 785\nnp.random.seed(seed)\n\nprint(\"Loading dataset...\")\ndataset = np.loadtxt('data/A_Z Handwritten Data.csv', delimiter=',')\nprint(\"Preparing data\")\n\n\nX = dataset[:,0:784]\nY = dataset[:,0]\n\n(X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, test_size=0.2, random_state=seed)\n\n\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')\nX_train = X_train / 255\nX_test = X_test / 255\n\nY_train = np_utils.to_categorical(Y_train)\nY_test = np_utils.to_categorical(Y_test)\n\nnum_classes = Y_test.shape[1]\n\nprint(\"Creating model\")\nmodel = Sequential()\n#model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))\n#model.add(MaxPooling2D(pool_size=(2, 2)))\n#model.add(Dropout(0.2))\n\nmodel.add(Conv2D(32, kernel_size=(5, 5), activation='linear', padding='same', input_shape=(28, 28, 1)))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D((2, 2), padding='same'))\nmodel.add(Dropout(0.25))\nmodel.add(Conv2D(64, (3, 3), activation='linear', padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel.add(Dropout(0.25))\nmodel.add(Conv2D(128, (3, 3), activation='linear', padding='same'))\nmodel.add(LeakyReLU(alpha=0.1))\nmodel.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nprint(\"Compiling and learning\")\n\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\ntrain = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=5, batch_size=200, verbose=2)\n\nscores = model.evaluate(X_test, Y_test, verbose=0)\nprint(\"CNN Error: %.2f%%\" % (100-scores[1]*100))\n\nmodel.save('weights.model')\n","repo_name":"Afohtim/university_practice","sub_path":"letter_regognition_cnn.py","file_name":"letter_regognition_cnn.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"43317846190","text":"import wiringpi\nimport time\n\n\nSENSOR_PIN = 7\nINPUT, OUTPUT = 0, 1\nLOW, HIGH = 0, 1\n\n \n# One of the following MUST be called before using IO functions:\nwiringpi.wiringPiSetup() # For sequential pin numbering\n'''\n# OR\nwiringpi.wiringPiSetupSys() # For /sys/class/gpio with GPIO pin numbering\n# OR\nwiringpi.wiringPiSetupGpio() # For GPIO pin numbering\n'''\n\nwiringpi.pinMode(SENSOR_PIN, INPUT) \t# Set pin 7 to 0 ( INPUT )\n#wiringpi.digitalWrite(SENSOR_PIN, LOW) \t# Write 1 ( HIGH ) to pin 7\n#wiringpi.digitalRead(SENSOR_PIN) \t# Read pin 7\n \ntry:\n while True:\n if wiringpi.digitalRead(SENSOR_PIN) == HIGH:\n print('Motion detected!')\n else:\n print('No Motion!')\n time.sleep(1)\nexcept KeyboardInterrupt:\n print(\"Finish...\")\n\n#wiringpi.digitalWrite(SENSOR_PIN, LOW)","repo_name":"engrjislam/pisensors","sub_path":"src/motion/pir_wiringpi.py","file_name":"pir_wiringpi.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"55"}
+{"seq_id":"28057688542","text":"import pandas as pd, re, time, os\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\nfrom config import AppConfig\n\nconfig = AppConfig()\n\nfactory = StemmerFactory()\nstemmer = factory.create_stemmer()\n\ndf_slang = pd.read_csv('data/data_set/new_kamusalay.csv', encoding='latin-1', header=None)\ndf_slang = df_slang.rename(columns={0: 'original', 1: 'default'})\n\nid_stopword_dict = pd.read_csv('data/data_set/stopwordbahasa.csv', header=None)\nid_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'})\nstopwords_new = pd.DataFrame(['sih','nya', 'iya', 'nih', 'biar', 'tau', 'kayak', 'banget'], columns=['stopword'])\nid_stopword_dict = pd.concat([id_stopword_dict,stopwords_new]).reset_index()\nid_stopword_dict = pd.DataFrame(id_stopword_dict['stopword'])\n\ndef unnecessary_char_remover(text):\n new_text = re.sub(r'pic.twitter.com.[\\w]+', '', text) \n new_text = new_text.lower()\n new_text = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+)|(http?://[^\\s]+))',' ',new_text) \n \n new_text = re.sub('gue','saya',new_text) \n new_text = re.sub('\\n',' ',new_text) \n \n to_delete = ['hypertext', 'transfer', 'protocol', 'over', 'secure', 'socket', 'layer', 'dtype', 'tweet', 'name', 'object'\n ,'twitter','com', 'pic', ' ya ']\n \n for word in to_delete:\n new_text = re.sub(word,'', new_text)\n new_text = re.sub(word.upper(),' ',new_text)\n \n retweet_user = ['rt ', ' rt ', ' user ']\n \n for word in retweet_user:\n new_text = re.sub(word,' ',new_text) \n new_text = re.sub(word.upper(),' ',new_text)\n \n new_text = re.sub(' +', ' ', new_text) \n \n result = {'original' : text, 'result' : new_text}\n return result\n\ndef remove_nonaplhanumeric(text):\n new_text = re.sub('[^0-9a-zA-Z]+', ' ', text) \n result = {'original' : text, 'result' : new_text}\n return result\n\ndf_slang_map = dict(zip(df_slang['original'], df_slang['default']))\n\ndef normalize_slang(text):\n new_text = ' '.join([df_slang_map[word] if word in df_slang_map else word for word in text.split(' ')])\n result = {'original' : text, 'result' : new_text}\n return result\n\ndef remove_stopword(text):\n new_text = ' '.join(['' if word in id_stopword_dict.stopword.values else word for word in text.split(' ')])\n new_text = re.sub(' +', ' ', new_text) # Remove extra spaces\n new_text = new_text.strip()\n result = {'original' : text, 'result' : new_text}\n return result\n\ndef stemming(text):\n new_text = stemmer.stem(text)\n \n result = {'original' : text, 'result' : new_text}\n return result\n\ndef preprocess(text):\n new_text = unnecessary_char_remover(text)['result']\n new_text = remove_nonaplhanumeric(new_text)['result']\n new_text = normalize_slang(new_text)['result']\n new_text = stemming(new_text)['result']\n new_text = remove_stopword(new_text)['result']\n result = {'original' : text, 'result' : new_text}\n return result\n\ndef csv_text(text):\n return preprocess(text)['result']\n\ndef fileremover(dir_path, limitfile=50):\n count = 0\n files = []\n for x in os.listdir(dir_path):\n if os.path.isfile(os.path.join(dir_path, x)):\n files.append(x)\n count += 1\n \n if count>=limitfile:\n for i in range(int(limitfile/2)):\n os.remove(f\"{dir_path}{files[i-1]}\")\n\ndef csv_cleaning(file, col_name, encoding='latin-1', delimiter=','):\n \n df = pd.read_csv(file, encoding=encoding, delimiter=delimiter)\n df.insert(1, f'{col_name}_Result', df[col_name].apply(csv_text))\n\n now = time.strftime(\"%H%M%S_%d%m%Y\")\n file_name = f\"file_response_{now}.csv\"\n file_path = 
f\"{config.UPLOAD_FOLDER}{file_name}\"\n df.to_csv(file_path)\n fileremover(f\"{config.UPLOAD_FOLDER}\", limitfile=10)\n return {'filename':file_name, 'fullpath':file_path}","repo_name":"Xsanjaya/BINAR-BOOTCAMP","sub_path":"app/utils/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"12484963693","text":"from ctypes import *\nfrom calibration.definevariable import definevariable\nfrom calibration.multhread import mythread\n\nVCI_USBCAN2 = 4\nSTATUS_OK = 1\n\nCanDLLName = './ControlCAN.dll' #把DLL放到对应的目录下\ncanDLL = windll.LoadLibrary(CanDLLName)\n\nclass VCI_INIT_CONFIG(Structure):\n _fields_ = [(\"AccCode\", c_uint),\n (\"AccMask\", c_uint),\n (\"Reserved\", c_uint),\n (\"Filter\", c_ubyte),\n (\"Timing0\", c_ubyte),\n (\"Timing1\", c_ubyte),\n (\"Mode\", c_ubyte)\n ]\nclass VCI_CAN_OBJ(Structure):\n _fields_ = [(\"ID\", c_uint),\n (\"TimeStamp\", c_uint),\n (\"TimeFlag\", c_ubyte),\n (\"SendType\", c_ubyte),\n (\"RemoteFlag\", c_ubyte),\n (\"ExternFlag\", c_ubyte),\n (\"DataLen\", c_ubyte),\n (\"Data\", c_ubyte*8),\n (\"Reserved\", c_ubyte*3)\n ]\n\ndef can_open(groupCan1Info, groupCan2Info):\n Can1Variable = 0\n Can2Variable = 0\n ta1 = 0\n ta2 = 0\n\n ret = canDLL.VCI_OpenDevice(VCI_USBCAN2, 0, 0)\n # if ret == STATUS_OK:\n # print('调用 VCI_OpenDevice成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_OpenDevice出错\\r\\n')\n\n # 初始通道\n vci_initconfig = VCI_INIT_CONFIG(0x80000008, 0xFFFFFFFF, 0,\n 0, 0x01, 0x1C, 0) # 波特率250k,正常模式\n if groupCan1Info != \"无\":\n ret = canDLL.VCI_InitCAN(VCI_USBCAN2, 0, 0, byref(vci_initconfig))\n # if ret == STATUS_OK:\n # print('调用 VCI_InitCAN1成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_InitCAN1出错\\r\\n')\n\n ret = canDLL.VCI_StartCAN(VCI_USBCAN2, 0, 0)\n # if ret == STATUS_OK:\n # print('调用 VCI_StartCAN1成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_StartCAN1出错\\r\\n')\n\n Can1Variable = definevariable()\n ta1 = mythread(0, Can1Variable) # 实例化线程\n ta1.start() # 开启ta线程\n\n if groupCan2Info != \"无\":\n ret = canDLL.VCI_InitCAN(VCI_USBCAN2, 0, 1, byref(vci_initconfig))\n # if ret == STATUS_OK:\n # print('调用 VCI_InitCAN2成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_InitCAN2出错\\r\\n')\n\n ret = canDLL.VCI_StartCAN(VCI_USBCAN2, 0, 1)\n # if ret == STATUS_OK:\n # print('调用 VCI_StartCAN2成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_StartCAN2出错\\r\\n')\n\n Can2Variable = definevariable()\n ta2 = mythread(1, Can2Variable) # 实例化线程\n ta2.start() # 开启ta线程\n\n return Can1Variable, Can2Variable, ta1, ta2\n\ndef can_close():\n ret = canDLL.VCI_CloseDevice(VCI_USBCAN2, 0)\n # if ret == STATUS_OK:\n # print('调用 VCI_CloseDevice成功\\r\\n')\n # if ret != STATUS_OK:\n # print('调用 VCI_CloseDevice出错\\r\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"tzhuhua/EOL_Plat","sub_path":"CanOperation/canoperation.py","file_name":"canoperation.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"70097252000","text":"from forte.data.multi_pack import MultiPack\nfrom forte.processors.base import MultiPackProcessor\n\nfrom edu.cmu import EventMention, CrossEventRelation\n\n\nclass BaseSuggestionProvider(MultiPackProcessor):\n \"\"\"\n Base class to mark example coreference relations.\n \"\"\"\n\n def _process(self, input_pack: MultiPack):\n pack_i = input_pack.get_pack_at(0)\n pack_j = input_pack.get_pack_at(1)\n\n for evm_i in pack_i.get(EventMention):\n for evm_j in pack_j.get(EventMention):\n if self.use_this_pair(evm_i, evm_j):\n link = CrossEventRelation(input_pack, evm_i, evm_j)\n link.rel_type = 'suggested'\n input_pack.add_entry(link)\n\n def use_this_pair(self, evm_i, evm_j) -> bool:\n raise NotImplementedError\n\n\nclass SameLemmaSuggestionProvider(BaseSuggestionProvider):\n \"\"\"\n Mark some example coreference relations using lemma.\n \"\"\"\n\n def use_this_pair(self, evm_i, evm_j) -> bool:\n if evm_i.text == evm_j.text:\n return True\n","repo_name":"hunterhector/event_data","sub_path":"processors/coref_propose.py","file_name":"coref_propose.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"32622284941","text":"\"\"\"Tools to construct a dataset of DGL graphs.\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport os\nfrom typing import TYPE_CHECKING, Callable\n\nimport dgl\nimport numpy as np\nimport torch\nfrom dgl.data import DGLDataset\nfrom dgl.data.utils import load_graphs, save_graphs\nfrom dgl.dataloading import GraphDataLoader\nfrom tqdm import trange\n\nfrom matgl.graph.compute import compute_pair_vector_and_distance, create_line_graph\nfrom matgl.layers import BondExpansion\n\nif TYPE_CHECKING:\n from matgl.graph.converters import GraphConverter\n\n\ndef collate_fn(batch, include_line_graph: bool = False):\n \"\"\"Merge a list of dgl graphs to form a batch.\"\"\"\n if include_line_graph:\n graphs, line_graphs, state_attr, labels = map(list, zip(*batch))\n else:\n graphs, state_attr, labels = map(list, zip(*batch))\n g = dgl.batch(graphs)\n labels = torch.tensor(labels, dtype=torch.float32)\n state_attr = torch.stack(state_attr)\n if include_line_graph:\n l_g = dgl.batch(line_graphs)\n return g, l_g, state_attr, labels\n return g, labels, state_attr\n\n\ndef collate_fn_efs(batch):\n \"\"\"Merge a list of dgl graphs to form a batch.\"\"\"\n graphs, line_graphs, state_attr, energies, forces, stresses = map(list, zip(*batch))\n g = dgl.batch(graphs)\n l_g = dgl.batch(line_graphs)\n e = torch.tensor(energies, dtype=torch.float32)\n f = torch.vstack(forces)\n s = torch.vstack(stresses)\n state_attr = torch.stack(state_attr)\n return g, l_g, state_attr, e, f, s\n\n\ndef MGLDataLoader(\n train_data: dgl.data.utils.Subset,\n val_data: dgl.data.utils.Subset,\n collate_fn: Callable,\n batch_size: int,\n num_workers: int,\n use_ddp: bool = False,\n pin_memory: bool = False,\n test_data: dgl.data.utils.Subset | None = None,\n generator: torch.Generator | None = None,\n) -> tuple[GraphDataLoader, ...]:\n \"\"\"Dataloader for MEGNet training.\n\n Args:\n train_data (dgl.data.utils.Subset): Training dataset.\n val_data (dgl.data.utils.Subset): Validation dataset.\n collate_fn (Callable): Collate function.\n batch_size (int): Batch size.\n num_workers (int): Number of workers.\n use_ddp (bool, optional): Whether to use DDP. Defaults to False.\n pin_memory (bool, optional): Whether to pin memory. Defaults to False.\n test_data (dgl.data.utils.Subset | None, optional): Test dataset. Defaults to None.\n generator (torch.Generator | None, optional): Random number generator. Defaults to None.\n\n Returns:\n tuple[GraphDataLoader, ...]: Train, validation and test data loaders. 
Test data\n loader is None if test_data is None.\n \"\"\"\n train_loader = GraphDataLoader(\n train_data,\n batch_size=batch_size,\n shuffle=True,\n collate_fn=collate_fn,\n num_workers=num_workers,\n pin_memory=pin_memory,\n use_ddp=use_ddp,\n generator=generator,\n )\n\n val_loader = GraphDataLoader(\n val_data,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=collate_fn,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n if test_data is not None:\n test_loader = GraphDataLoader(\n test_data,\n batch_size=batch_size,\n shuffle=False,\n collate_fn=collate_fn,\n num_workers=num_workers,\n pin_memory=pin_memory,\n )\n return train_loader, val_loader, test_loader\n return train_loader, val_loader\n\n\nclass MEGNetDataset(DGLDataset):\n \"\"\"Create a dataset including dgl graphs.\"\"\"\n\n def __init__(\n self,\n structures: list,\n labels: list,\n label_name: str,\n converter: GraphConverter,\n initial: float = 0.0,\n final: float = 5.0,\n num_centers: int = 100,\n width: float = 0.5,\n name: str = \"MEGNETDataset\",\n graph_labels: list | None = None,\n ):\n \"\"\"\n Args:\n structures: Pymatgen structure\n labels: property values\n label_name: label name\n converter: Transformer for converting structures to DGL graphs, e.g., Pmg2Graph.\n initial: initial distance for Gaussian expansions\n final: final distance for Gaussian expansions\n num_centers: number of Gaussian functions\n width: width of Gaussian functions\n name: Name of dataset\n graph_labels: graph attributes either integers and floating point numbers.\n \"\"\"\n self.converter = converter\n self.structures = structures\n self.labels = torch.FloatTensor(labels)\n self.label_name = label_name\n self.initial = initial\n self.final = final\n self.num_centers = num_centers\n self.width = width\n self.graph_labels = graph_labels\n\n super().__init__(name=name)\n\n def has_cache(self, filename: str = \"dgl_graph.bin\") -> bool:\n \"\"\"Check if the dgl_graph.bin exists or not\n Args:\n :filename: Name of file storing dgl graphs\n Returns: True if file exists.\n \"\"\"\n return os.path.exists(filename)\n\n def process(self) -> tuple:\n \"\"\"Convert Pymatgen structure into dgl graphs.\"\"\"\n num_graphs = self.labels.shape[0]\n graphs = []\n state_attrs = []\n bond_expansion = BondExpansion(\n rbf_type=\"Gaussian\", initial=self.initial, final=self.final, num_centers=self.num_centers, width=self.width\n )\n for idx in trange(num_graphs):\n structure = self.structures[idx]\n graph, state_attr = self.converter.get_graph(structure)\n bond_vec, bond_dist = compute_pair_vector_and_distance(graph)\n graph.edata[\"edge_attr\"] = bond_expansion(bond_dist)\n graphs.append(graph)\n state_attrs.append(state_attr)\n if self.graph_labels is not None:\n if np.array(self.graph_labels).dtype == \"int64\":\n state_attrs = torch.tensor(self.graph_labels).long() # type: ignore\n else:\n state_attrs = torch.tensor(self.graph_labels) # type: ignore\n else:\n state_attrs = torch.tensor(state_attrs) # type: ignore\n self.graphs = graphs\n self.state_attr = state_attrs\n return self.graphs, self.state_attr\n\n def save(self, filename: str = \"dgl_graph.bin\", filename_state_attr: str = \"state_attr.pt\"):\n \"\"\"Save dgl graphs\n Args:\n :filename: Name of file storing dgl graphs\n :filename_state_attr: Name of file storing graph attrs.\n \"\"\"\n labels_with_key = {self.label_name: self.labels}\n save_graphs(filename, self.graphs, labels_with_key)\n torch.save(self.state_attr, filename_state_attr)\n\n def load(self, filename: str = 
\"dgl_graph.bin\", filename_state_attr: str = \"state_attr.pt\"):\n \"\"\"Load dgl graphs\n Args:\n :filename: Name of file storing dgl graphs\n :filename: Name of file storing state attrs.\n \"\"\"\n self.graphs, label_dict = load_graphs(filename)\n self.label = torch.stack([label_dict[key] for key in self.label_keys], dim=1)\n self.state_attr = torch.load(\"state_attr.pt\")\n\n def __getitem__(self, idx: int):\n \"\"\"Get graph and label with idx.\"\"\"\n return self.graphs[idx], self.state_attr[idx], self.labels[idx]\n\n def __len__(self):\n \"\"\"Get size of dataset.\"\"\"\n return len(self.graphs)\n\n\nclass M3GNetDataset(DGLDataset):\n \"\"\"Create a dataset including dgl graphs.\"\"\"\n\n def __init__(\n self,\n converter: GraphConverter,\n threebody_cutoff: float,\n structures: list,\n energies: list | None = None,\n forces: list | None = None,\n stresses: list | None = None,\n labels: list | None = None,\n name=\"M3GNETDataset\",\n label_name: str | None = None,\n graph_labels: list | None = None,\n ):\n \"\"\"\n Args:\n converter: dgl graph converter\n threebody_cutoff: cutoff for three body\n structures: Pymatgen structure\n energies: Target energies\n forces: Target forces\n stresses: Target stresses\n labels: target properties\n name: name of dataset\n label_name: name of target properties\n graph_labels: state attributes.\n \"\"\"\n self.converter = converter\n self.structures = structures\n self.energies = energies\n self.forces = forces\n self.labels = labels\n self.label_name = label_name\n self.threebody_cutoff = threebody_cutoff\n self.stresses = np.zeros(len(self.structures)) if stresses is None else stresses\n self.graph_labels = graph_labels\n super().__init__(name=name)\n\n def has_cache(self, filename: str = \"dgl_graph.bin\") -> bool:\n \"\"\"Check if the dgl_graph.bin exists or not\n Args:\n :filename: Name of file storing dgl graphs\n Returns: True if file exists.\n \"\"\"\n return os.path.exists(filename)\n\n def process(self) -> tuple:\n \"\"\"Convert Pymatgen structure into dgl graphs.\"\"\"\n num_graphs = len(self.structures)\n graphs = []\n line_graphs = []\n state_attrs = []\n for idx in trange(num_graphs):\n structure = self.structures[idx]\n graph, state_attr = self.converter.get_graph(structure)\n graphs.append(graph)\n state_attrs.append(state_attr)\n bond_vec, bond_dist = compute_pair_vector_and_distance(graph)\n graph.edata[\"bond_vec\"] = bond_vec\n graph.edata[\"bond_dist\"] = bond_dist\n line_graph = create_line_graph(graph, self.threebody_cutoff)\n for name in [\"bond_vec\", \"bond_dist\", \"pbc_offset\"]:\n line_graph.ndata.pop(name)\n line_graphs.append(line_graph)\n if self.graph_labels is not None:\n state_attrs = torch.tensor(self.graph_labels).long() # type: ignore\n else:\n state_attrs = torch.tensor(state_attrs) # type: ignore\n\n self.graphs = graphs\n self.line_graphs = line_graphs\n self.state_attr = state_attrs\n\n return self.graphs, self.line_graphs, self.state_attr\n\n def save(\n self,\n filename: str = \"dgl_graph.bin\",\n filename_line_graph: str = \"dgl_line_graph.bin\",\n filename_state_attr: str = \"state_attr.pt\",\n ):\n \"\"\"Save dgl graphs\n Args:\n :filename: Name of file storing dgl graphs\n :filename_state_attr: Name of file storing graph attrs.\n \"\"\"\n if self.labels is None:\n labels_with_key = {\"energies\": self.energies, \"forces\": self.forces, \"stresses\": self.stresses}\n else:\n labels_with_key = {self.label_name: self.labels} # type: ignore\n save_graphs(filename, self.graphs)\n 
save_graphs(filename_line_graph, self.line_graphs)\n torch.save(self.state_attr, filename_state_attr)\n with open(\"labels.json\", \"w\") as file:\n file.write(\"\".join(str(labels_with_key).split(\"\\n\")))\n\n def load(\n self,\n filename: str = \"dgl_graph.bin\",\n filename_line_graph: str = \"dgl_line_graph.bin\",\n filename_state_attr: str = \"state_attr.pt\",\n ):\n \"\"\"\n Load dgl graphs from files.\n\n Args:\n filename: Name of file storing dgl graphs\n filename_line_graph: Name of file storing dgl line graphs\n filename_state_attr: Name of file storing state attrs.\n \"\"\"\n self.graphs = load_graphs(filename)\n self.line_graphs = load_graphs(filename_line_graph)\n with open(\"labels.json\") as file:\n labels: dict = json.load(file)\n if self.labels is None:\n self.energies = labels[\"energies\"]\n self.forces = labels[\"forces\"]\n self.stresses = labels[\"stresses\"]\n self.state_attr = torch.load(\"state_attr.pt\")\n else:\n self.labels = labels # type: ignore\n\n def __getitem__(self, idx: int):\n \"\"\"Get graph and label with idx.\"\"\"\n if self.labels is None:\n return (\n self.graphs[idx],\n self.line_graphs[idx],\n self.state_attr[idx],\n self.energies[idx], # type: ignore\n torch.tensor(self.forces[idx]).float(), # type: ignore\n torch.tensor(self.stresses[idx]).float(), # type: ignore\n )\n return (self.graphs[idx], self.line_graphs[idx], self.state_attr[idx], self.labels[idx])\n\n def __len__(self):\n \"\"\"Get size of dataset.\"\"\"\n return len(self.graphs)\n","repo_name":"chiku-parida/matgl","sub_path":"matgl/graph/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"}
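A minimal sketch of wiring the pieces above together for property training; `structures`, `labels`, and `converter` are placeholders, and the label name and split fractions are illustrative:

```python
from dgl.data.utils import split_dataset

dataset = MEGNetDataset(structures, labels, "band_gap", converter)  # assumed inputs
train_data, val_data, test_data = split_dataset(
    dataset, frac_list=[0.8, 0.1, 0.1], shuffle=True
)
train_loader, val_loader, test_loader = MGLDataLoader(
    train_data,
    val_data,
    collate_fn=collate_fn,  # default include_line_graph=False path
    batch_size=32,
    num_workers=0,
    test_data=test_data,
)
```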
+{"seq_id":"10155415699","text":"class employee:\n \n num_of_emps = 0\n raise_amt = 1.04\n \n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first + \".\" + last + \"@company.com\"\n employee.num_of_emps += 1\n \n def fullname(self):\n return \"My full name is: {0:s} {1:s}\".format(self.first, self.last)\n \n def apply_raise(self):\n self.pay = int(self.pay * self.raise_amt)\n\nclass Developer(employee):\n # raise_amt = 1.10\n\n def __init__(self, first, last, pay, prog_lang):\n super().__init__(first, last, pay)\n self.prog_lang = prog_lang\n \n \ndev1 = Developer(\"Corey\", \"schafer\", 50000, \"Python\")\n# dev2 = Developer(\"Test\", \"user\", 60000, \"Java\")\n\n# print(help(employee))\n# print(help(Developer))\n\n# dev1.raise_amt = 1.30\nprint(dev1.email)\nprint(dev1.prog_lang)\nprint(help(dev1))\nprint(dev1.__dict__)\nprint(dev1.num_of_emps)\nprint(dev1.raise_amt)","repo_name":"EbertPedro/Python_challenges","sub_path":"Python/Python_exercises/Python_OOP/Inheritance/1inheritance_of_class.py","file_name":"1inheritance_of_class.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"32667361190","text":"# -*- encoding: utf-8 -*-\nimport pytest\n\nfrom django.core.urlresolvers import reverse\n\nfrom contact.tests.factories import ContactFactory\nfrom crm.tests.factories import CrmContactFactory, NoteFactory, TicketFactory\nfrom login.tests.factories import TEST_PASSWORD\nfrom login.tests.fixture import perm_check\nfrom login.tests.scenario import (\n default_scenario_login,\n get_user_staff,\n user_contractor,\n)\n\n\n@pytest.mark.django_db\ndef test_crm_contact_create(perm_check):\n contact = ContactFactory()\n url = reverse('crm.contact.create', kwargs={'pk': contact.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_crm_contact_update(perm_check):\n contact = ContactFactory()\n crm_contact = CrmContactFactory(contact=contact)\n url = reverse('crm.contact.update', kwargs={'pk': crm_contact.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_note_create(perm_check):\n ticket = TicketFactory()\n url = reverse('crm.note.create', kwargs={'pk': ticket.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_note_update(perm_check):\n note = NoteFactory()\n url = reverse('crm.note.update', kwargs={'pk': note.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_ticket_complete(perm_check):\n ticket = TicketFactory()\n url = reverse('crm.ticket.complete', kwargs={'pk': ticket.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_ticket_create(perm_check):\n contact = ContactFactory()\n url = reverse('crm.ticket.create', kwargs={'pk': contact.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_ticket_detail(perm_check):\n ticket = TicketFactory()\n url = reverse('crm.ticket.detail', kwargs={'pk': ticket.pk})\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_ticket_home(perm_check):\n TicketFactory()\n url = reverse('crm.ticket.home')\n perm_check.staff(url)\n\n\n@pytest.mark.django_db\ndef test_ticket_update(perm_check):\n ticket = TicketFactory()\n url = reverse('crm.ticket.update', kwargs={'pk': ticket.pk})\n perm_check.staff(url)\n\n\n# def _assert_get(self, url):\n# # User must be logged in to access this URL\n# response = self.client.get(url)\n# self.assertEqual(\n# response.status_code,\n# 302,\n# 'status {}\\n{}'.format(response.status_code, response),\n# )\n# # Log the user in so they can access this URL\n# self.client.login(\n# username=self.staff.username,\n# password=TEST_PASSWORD,\n# )\n# response = self.client.get(url)\n# self.assertEqual(\n# response.status_code,\n# 200,\n# 'status {}\\n{}'.format(response.status_code, response),\n# )\n","repo_name":"pkimber/old-crm-migrated-to-gitlab","sub_path":"crm/tests/test_view_perm.py","file_name":"test_view_perm.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"}
+{"seq_id":"42690250079","text":"import pygame\n\n\n#definier la classe qui va gérer le projectile de notre joueur\nclass Projectile(pygame.sprite.Sprite):\n\n #définer le costructeur de cete classe\n def __init__(self,player):\n super().__init__()\n self.velocity =12\n self.player= player\n self.image = pygame.image.load('assets/projectile.png')\n self.image = pygame.transform.scale(self.image,(50,50))\n self.rect = self.image.get_rect()\n self.rect.x = player.rect.x + 120\n self.rect.y = player.rect.y + 80\n self.origin_image = self.image\n self.angle = 0\n def rotate(self):\n #tourner le projectile\n self.angle +=12\n self.image= pygame.transform.rotozoom(self.origin_image,self.angle,1)\n self.rect=self.image.get_rect(center=self.rect.center)\n def remove(self):\n self.player.all_projectiles.remove(self)\n def move(self):\n self.rect.x += self.velocity\n self.rotate()\n #verifier si le projectile entre en collision avc un monster\n\n for monster in self.player.game.chek_collision(self,self.player.game.all_monsters):\n #supprimer la projectile\n self.remove()\n monster.damage(self.player.attack)\n\n #verifier si notre projectile n'est plus présent sur l'ecran\n if self.rect.x>1080:\n self.remove()\n","repo_name":"MOHAMED-EL-HADDIOUI/Comet-Fall-Game","sub_path":"projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"15544088256","text":"import argparse\n\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.commands.repro import CmdRepro\nfrom dvc.commands.repro import add_arguments as add_repro_arguments\nfrom dvc.log import logger\n\nlogger = logger.getChild(__name__)\n\n\nclass CmdExperimentsRun(CmdRepro):\n def run(self):\n self.repo.experiments.run(\n name=self.args.name,\n queue=self.args.queue,\n run_all=self.args.run_all,\n jobs=self.args.jobs,\n params=self.args.set_param,\n tmp_dir=self.args.tmp_dir,\n copy_paths=self.args.copy_paths,\n message=self.args.message,\n **self._common_kwargs,\n )\n\n return 0\n\n\ndef add_parser(experiments_subparsers, parent_parser):\n EXPERIMENTS_RUN_HELP = \"Run an experiment.\"\n experiments_run_parser = experiments_subparsers.add_parser(\n \"run\",\n parents=[parent_parser],\n description=append_doc_link(EXPERIMENTS_RUN_HELP, \"exp/run\"),\n help=EXPERIMENTS_RUN_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n _add_run_common(experiments_run_parser)\n experiments_run_parser.set_defaults(func=CmdExperimentsRun)\n\n\ndef _add_run_common(parser):\n \"\"\"Add common args for 'exp run'.\"\"\"\n # inherit arguments from `dvc repro`\n add_repro_arguments(parser)\n parser.add_argument(\n \"-n\",\n \"--name\",\n default=None,\n help=(\n \"Human-readable experiment name. If not specified, a name will \"\n \"be auto-generated.\"\n ),\n metavar=\"\",\n )\n parser.add_argument(\n \"-S\",\n \"--set-param\",\n action=\"append\",\n default=[],\n help=\"Use the specified param value when reproducing pipelines.\",\n metavar=\"[:]=\",\n )\n parser.add_argument(\n \"--queue\",\n action=\"store_true\",\n default=False,\n help=\"Stage this experiment in the run queue for future execution.\",\n )\n parser.add_argument(\n \"--run-all\",\n action=\"store_true\",\n default=False,\n help=\"Execute all experiments in the run queue. Implies --temp.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n default=1,\n help=\"Run the specified number of experiments at a time in parallel.\",\n metavar=\"\",\n )\n parser.add_argument(\n \"--temp\",\n action=\"store_true\",\n dest=\"tmp_dir\",\n help=(\n \"Run this experiment in a separate temporary directory instead of \"\n \"your workspace.\"\n ),\n )\n parser.add_argument(\n \"-C\",\n \"--copy-paths\",\n action=\"append\",\n default=[],\n help=(\n \"List of ignored or untracked paths to copy into the temp directory.\"\n \" Only used if `--temp` or `--queue` is specified.\"\n ),\n )\n parser.add_argument(\n \"-m\",\n \"--message\",\n type=str,\n default=None,\n help=\"Custom commit message to use when committing the experiment.\",\n )\n parser.add_argument(\n \"-M\", # obsolete\n dest=\"message\",\n help=argparse.SUPPRESS,\n )\n","repo_name":"iterative/dvc","sub_path":"dvc/commands/experiments/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":12419,"dataset":"github-code","pt":"51"}
+{"seq_id":"74111725599","text":"\"\"\"initial2\n\nRevision ID: 9d9a746dbfd7\nRevises: a57c89b47e7b\nCreate Date: 2021-11-01 04:28:38.426261\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = '9d9a746dbfd7'\ndown_revision = 'a57c89b47e7b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('images',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_images_id'), 'images', ['id'], unique=False)\n op.create_index(op.f('ix_images_url'), 'images', ['url'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_images_url'), table_name='images')\n op.drop_index(op.f('ix_images_id'), table_name='images')\n op.drop_table('images')\n # ### end Alembic commands ###\n","repo_name":"madpin/renthub","sub_path":"old/rentcrud/alembic/versions/9d9a746dbfd7_initial2.py","file_name":"9d9a746dbfd7_initial2.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"28506262060","text":"from utils.data import *\nfrom mlp.mlp import MLP\n\nimport numpy as np\n\ndef main():\n objects, labels, ids = load_data('./data/train.csv')\n normalizer = DataNormalizer()\n normalizer.fit(objects)\n objects = normalizer.transform(objects)\n\n xTrain, yTrain, xTest, yTest = split_dataset(objects, labels, testRatio=.2, seed=1)\n\n n_epochs = 2\n batch_size = 200\n num_iters = int(float(len(xTrain)) / batch_size * n_epochs)\n print('SGD max iters: {}'.format(num_iters))\n\n clf = MLP(hidden_dims=[10], lr=0.1, bs=batch_size, momentum=0.9, verbose=True, max_iters=num_iters, eps=1e-8)\n\n print('Training...')\n clf.fit(xTrain, yTrain)\n print('Testing...')\n score = clf.score(xTest, yTest)\n print('Test score = ' + str(score))\n\n score = clf.score(xTrain, yTrain)\n print('Train score = ' + str(score))\n\n print('Writing a submission file...')\n test_objects, test_labels, test_ids = load_data('./data/test.csv')\n test_objects = normalizer.transform(test_objects)\n predictions = clf.predict(test_objects)\n save_predictions('submission.csv', test_ids, predictions)\n\nif __name__ == '__main__':\n main()\n","repo_name":"sovrasov/mlp_sample","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"27339905037","text":"#!/usr/bin/env python3\n\nimport heapq\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport jinja2\n\nabbr = 'qsbk'\n\nclass QsItem(dict):\n\n def __init__(self, url, text, img_urls, votes, comments):\n self['url'] = url\n self['text'] = text\n self['images'] = img_urls\n self['votes'] = votes\n self['comments'] = comments\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return 'Qs<{text} - {votes} 好笑, {comments} 评论>'.format(**self)\n\nclass CsbkCrawler:\n\n def __init__(self, page, qs_list):\n self.page = page\n res = requests.get(page)\n res.raise_for_status()\n html = BeautifulSoup(res.text, 'lxml')\n self.crawl_page(page, html, qs_list)\n next_ = html.find('span', class_='next')\n next_string = next_.get_text().strip()\n if next_string == '下一页':\n next_page = next_.parent['href']\n next_page = urljoin(page, next_page)\n crawler = CsbkCrawler(next_page, qs_list)\n\n def crawl_page(self, page_url, html, qs_list):\n content = html.find('div', id='content-left')\n articles = content.find_all('div', class_='article')\n for article in articles:\n a = article.find('a', recursive=False)\n link = urljoin(page_url, a['href'])\n span_text = a.find('div', class_='content').find('span')\n text = list(span_text.stripped_strings)\n thumb = article.find('div', class_='thumb', recursive=False)\n if thumb is None:\n img_urls = []\n else:\n imgs = thumb.find_all('img', class_='illustration')\n img_urls = [urljoin(page_url, img['src']) for img in imgs]\n stats = article.find('div', class_='stats', recursive=False)\n span_vote = stats.find('span', class_='stats-vote').find('i', class_='number')\n votes = int(span_vote.get_text().strip())\n span_comment = stats.find('span', class_='stats-comments').find('i', class_='number')\n comments = int(span_comment.get_text().strip())\n qs = QsItem(link, text, img_urls, votes, comments)\n qs_list.append(qs)\n\ndef collect():\n\n qss = []\n\n first_page = 'https://www.qiushibaike.com/hot/'\n crawler = CsbkCrawler(first_page, qss)\n\n selected_qss = heapq.nlargest(20, qss, lambda qs: qs[\"votes\"])\n return selected_qss\n\ndef generate_mail(qs_list, date_string):\n\n mimetype = 'html'\n subject = '糗事百科每日精选 {}'.format(date_string)\n\n template = jinja2.Template('''\n\n {% for qs in qs_list %}\n {{ loop.index }} \n {{ qs.text | join(' ') }}
\n \n {% for img in qs.images %}\n \n {% endfor %}\n
\n {{ qs.votes }} 好笑 {{ qs.comments }} 评论 查看原文
\n {% if not loop.last %}\n \n {% endif %}\n {% endfor %}\n\n ''')\n body = template.render(qs_list=qs_list)\n\n return {\n 'mimetype': mimetype,\n 'subject': subject,\n 'body': body\n }\n\n\nif __name__ == '__main__':\n\n selected_qss = collect()\n\n for qs in selected_qss:\n print(qs)\n \n \n","repo_name":"nettee/newsmailer","sub_path":"qsbk.py","file_name":"qsbk.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
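A hedged sketch of delivering the rendered digest with the standard library; the SMTP host and addresses are placeholders:

```python
import smtplib
from email.mime.text import MIMEText

mail = generate_mail(selected_qss, '2019-07-10')  # date string is illustrative
msg = MIMEText(mail['body'], mail['mimetype'], 'utf-8')  # mimetype is 'html'
msg['Subject'] = mail['subject']
msg['From'] = 'digest@example.com'
msg['To'] = 'reader@example.com'

with smtplib.SMTP('smtp.example.com') as server:
    server.send_message(msg)
```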
+{"seq_id":"72249888477","text":"from typing import Optional\n\nfrom custom_optional import CustomOptional\nfrom http import HTTPStatus\nfrom logging import Logger\n\nfrom odoo_connectors.basic_odoo_connector import BasicOdooConnector\n\n\ndef get_planet_by_name(odoo_connector, name) -> Optional[int]:\n domain = [(\"name\", \"=\", name)]\n\n planet = odoo_connector.db_models.execute_kw(\n odoo_connector.db_name,\n odoo_connector.db_uid,\n odoo_connector.db_password,\n \"res.planet\",\n \"search_read\",\n [domain],\n {\"limit\": 1},\n )\n\n return planet[0][\"id\"] if planet else None\n\n\ndef modify_planet_data(planet_data_optional: CustomOptional) -> CustomOptional:\n if planet_data_optional.status == HTTPStatus.OK:\n if planet_data_optional.data[\"name\"] != \"unknown\":\n planet_data = {\n key: val\n for key, val in planet_data_optional.data.items()\n if (\n key\n in (\n \"name\",\n \"diameter\",\n \"population\",\n \"rotation_period\",\n \"orbital_period\",\n )\n )\n and (val != \"unknown\")\n }\n planet_data_optional.data = planet_data\n return planet_data_optional\n else:\n return CustomOptional(\n HTTPStatus.NO_CONTENT, None, None, planet_data_optional.data_id\n )\n return planet_data_optional\n\n\ndef update_or_create_planet(\n odoo_connector: BasicOdooConnector,\n logger: Logger,\n planet_data_optional: CustomOptional,\n) -> Optional[int]:\n mod_planet_optional = modify_planet_data(planet_data_optional)\n if mod_planet_optional.status != HTTPStatus.OK:\n if mod_planet_optional.status != HTTPStatus.NO_CONTENT:\n logger.info(\n f\"Не удалось выгрузить из swapi данные о планете со следующими данными: \"\n f\"swapi_id: {planet_data_optional.data_id} - данных о записи нет\"\n )\n return None\n\n existing_planet = get_planet_by_name(\n odoo_connector, mod_planet_optional.data[\"name\"]\n )\n try:\n if existing_planet is not None:\n odoo_connector.db_models.execute_kw(\n odoo_connector.db_name,\n odoo_connector.db_uid,\n odoo_connector.db_password,\n \"res.planet\",\n \"write\",\n [[existing_planet], mod_planet_optional.data],\n )\n logger.info(\n f\"Успешно обновлены данные о планете {mod_planet_optional.data['name']} \"\n f\"(swapi_id: {planet_data_optional.data_id}, odoo_id: {existing_planet}, \"\n f\"пакет обновленных данных - {mod_planet_optional.data}\"\n )\n planet = existing_planet\n else:\n planet = odoo_connector.db_models.execute_kw(\n odoo_connector.db_name,\n odoo_connector.db_uid,\n odoo_connector.db_password,\n \"res.planet\",\n \"create\",\n [mod_planet_optional.data],\n )\n logger.info(\n f\"Успешно создана запись о планете {mod_planet_optional.data['name']} \"\n f\"(swapi_id: {planet_data_optional.data_id}, odoo_id: {planet}, \"\n f\"занесеенные данные - {mod_planet_optional.data}\"\n )\n\n except Exception as exc:\n planet = None\n logger.error(\n f\"При переносе данных о планете (swapi_id: {planet_data_optional.data_id}) \"\n f\"возникла непредвиденная ошибка: {exc}\"\n )\n return planet\n","repo_name":"SlivnyiArtem/OdooDataMigrator","sub_path":"services/planet_service.py","file_name":"planet_service.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"41796263549","text":"\nclass PaystackError(Exception):\n \"\"\"Base class for Paystack API errors.\"\"\"\n\n\nclass APIError(PaystackError):\n \"\"\"Exception raised for errors in the Paystack API.\n\n Attributes:\n status_code -- the HTTP status code indicating the error\n error_message -- a description of the error\n \"\"\"\n\n def __init__(self, status_code, error_message):\n self.status_code = status_code\n self.error_message = error_message\n super().__init__(self.error_message)\n","repo_name":"NUCCASJNR/PaystackPyAPI","sub_path":"errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"51"}
+{"seq_id":"38415912833","text":"'''\n LIVE - check\n 53,448 kb / 143 ms / 855 lines\n\n LIVE - check2\n 55,540 kb / 160 ms / 1,024 lines\n'''\n\nimport sys\nsys.stdin = open('input.txt')\n\n\ndef change_to_dec(num, notation):\n tmp = 0\n\n for n, val in enumerate(list(map(int, num))[::-1]):\n tmp += val * (notation**n)\n return tmp\n\ndef check(num, notation):\n change_num = change_to_dec(num, notation)\n # change_num = int(num, notation)\n\n for n, val in enumerate(list(map(int, num))[::-1]):\n for j in range(notation):\n if val == j: continue\n tmp = change_num - val * notation**n + j * notation**n\n\n # 2진수 차곡차곡 저장, 3진수도 저장하다가 똑같은 수 발견하면 else ㄱㄱ!\n if tmp not in binary:\n binary.append(tmp)\n # else: 3진수 위함\n else:\n return tmp\n\n\ndef check2():\n # 2진수 문자열 10진수로 변환\n bi_num = 0\n for x in bi:\n bi_num = bi_num*2 + int(x)\n\n # 2진수 후보들 생성\n for i in range(len(bi)):\n # exclusive OR : ^\n # => 한글자씩 바꿈 (0은 1로, 1은 0으로)\n binary.append(bi_num ^ (1 << i))\n\n # 3진수 후보 생성하며 10진수로 변환\n # i번째 자리 바꿀거임\n for i in range(len(tr)):\n num1 = 0\n num2 = 0\n # i = j 인 경우만 숫자 바꾸고, 아닌 경우는 원래 값으로 계산\n for j in range(len(tr)):\n if i != j:\n num1 = num1 * 3 + int(tr[j])\n num2 = num2 * 3 + int(tr[j])\n else:\n num1 = num1 * 3 + (int(tr[j]) + 1) % 3\n num2 = num2 * 3 + (int(tr[j]) + 2) % 3\n\n if num1 in binary:\n return num1\n if num2 in binary:\n return num2\n\n\nT = int(input())\nfor tc in range(1, T + 1):\n bi = list(input())\n tr = list(input())\n\n binary = []\n\n # check(bi, 2)\n # print('#{} {}'.format(tc, check(tr, 3)))\n\n print('#{} {}'.format(tc, check2()))\n","repo_name":"Hyojeong721/TIL","sub_path":"algorithm/SWA/date/1008/nyl2353/4366_정식이의은행업무/s2.py","file_name":"s2.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"39954076232","text":"import os\nimport json\nimport numpy as np\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\n\nimport pdb\n\nclass Featset(Data.Dataset):\n def __init__(self, sample='full'):\n\n vis_feat_name_list = ['train2014', 'val2014']\n feat_root_path = '/data03/liangyzh/bottomup_feature'\n full_data_list = []\n except_list = ['COCO_val2014_000000096923.jpg', 'COCO_train2014_000000028645.jpg']\n for folder in vis_feat_name_list:\n folder_path = os.path.join(feat_root_path, folder)\n name_list = os.listdir(folder_path)\n for name in name_list:\n if name.strip('.npz') in except_list:\n continue\n feat_path = os.path.join(folder_path, name)\n full_data_list.append(feat_path)\n\n print('full list len: ', len(full_data_list))\n\n if isinstance(sample, str):\n self.iid_to_frcn_feat_path = full_data_list\n\n if isinstance(sample, int):\n self.iid_to_frcn_feat_path = full_data_list[:sample]\n\n\n kg_vec_path = '/data03/liangyzh/numberbatch/vgdet_vector.pt'\n self.kg_vec_dict = torch.load(kg_vec_path)\n kg_vec_sim_path = '/data03/liangyzh/numberbatch/sim_dict.pt'\n self.kg_vec_sim = torch.load(kg_vec_sim_path)\n\n self.gen_iter = 0\n\n\n def proc_img_feat(self, img_feat, img_feat_pad_size):\n\n if img_feat.shape[0] > img_feat_pad_size:\n img_feat = img_feat[:img_feat_pad_size]\n\n img_feat = np.pad(\n img_feat,\n ((0, img_feat_pad_size - img_feat.shape[0]), (0, 0)),\n mode='constant',\n constant_values=0\n )\n\n return torch.from_numpy(img_feat).float()\n\n def proc_bbox_feat(self, bbox, img_shape):\n bbox_feat = np.zeros((bbox.shape[0], 5), dtype=np.float32)\n\n bbox_feat[:, 0] = bbox[:, 0] / float(img_shape[1])\n bbox_feat[:, 1] = bbox[:, 1] / float(img_shape[0])\n bbox_feat[:, 2] = bbox[:, 2] / float(img_shape[1])\n bbox_feat[:, 3] = bbox[:, 3] / float(img_shape[0])\n bbox_feat[:, 4] = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) / float(img_shape[0] * img_shape[1])\n\n return bbox_feat\n\n\n def load_img_feats(self, iid):\n frcn_feat = np.load(self.iid_to_frcn_feat_path[iid])\n frcn_feat_x = frcn_feat['x'].transpose((1, 0))\n bbox = self.proc_bbox_feat(\n frcn_feat['bbox'],\n (frcn_feat['image_h'], frcn_feat['image_w'])\n )\n\n if 'e/train' in self.iid_to_frcn_feat_path[iid]:\n file_name = self.iid_to_frcn_feat_path[iid].replace('ture/train', 'ture/vgdet_train').replace('.npz', '.npy')\n else:\n file_name = self.iid_to_frcn_feat_path[iid].replace('ture/val', 'ture/vgdet_val').replace('.npz', '.npy')\n\n cls_name = list(np.load(file_name))\n return frcn_feat_x, bbox, cls_name, len(cls_name)\n\n\n def get_sim_list(self, cur_cls, ind_len=10):\n sim_score = self.kg_vec_sim[cur_cls]['scores']\n sim_names = self.kg_vec_sim[cur_cls]['names']\n sort_ind = sorted(range(len(sim_score)), key=lambda k: sim_score[k], reverse=True)\n get_sort_ind = sort_ind[:ind_len]\n get_sort_name = [sim_names[x] for x in get_sort_ind]\n return get_sort_name\n\n\n def gen_ans_cls(self, cls_list):\n ans = []\n for sub_cls in cls_list:\n sim_list = self.get_sim_list(sub_cls)\n sample_cls = random.sample(sim_list, 1)\n ans += sample_cls\n return ans\n\n\n def gen_ins_sample(self, ins_ind, bbox, cls_name, ans_len=10):\n gt_ans = [cls_name[x] for x in ins_ind]\n gt_ans_ind = random.randint(0, ans_len-1)\n ans_list = []\n addition_iter = 0\n while ans_len != 1:\n fake_ans = self.gen_ans_cls(gt_ans)\n if fake_ans != gt_ans:\n ans_len = ans_len - 1\n ans_list.append(fake_ans)\n\n ans_list.insert(gt_ans_ind, gt_ans)\n return ans_list, gt_ans_ind, ins_ind\n\n\n 
def gen_motif_sample(self, ins_ind, bbox, cls_name, ins_size, ans_len=10):\n if 4 > ins_size and 2 < ins_size:\n motif_num = random.randint(2, ins_size)\n ans_list, ans_ind, gt_ind = self.gen_single_motif_sample(motif_num, ins_ind, bbox, cls_name)\n elif 2 > ins_size:\n ans_list, ans_ind, gt_ind = self.gen_ins_sample(ins_ind, bbox, cls_name)\n else:\n motif_num = random.randint(2, 4)\n ans_list, ans_ind, gt_ind = self.gen_single_motif_sample(motif_num, ins_ind, bbox, cls_name)\n return ans_list, ans_ind, gt_ind\n\n\n def gen_scene_sample(self, scene_num, cls_name, ins_size, ans_len=10):\n ans_list = []\n gt_ans_ind = random.randint(0, ans_len - 1)\n gt_inds = random.sample(list(range(len(cls_name))), scene_num)\n gt_ans = [cls_name[x] for x in gt_inds]\n\n while ans_len != 1:\n fake_ans = self.gen_ans_cls(gt_ans)\n if fake_ans != gt_ans:\n ans_len = ans_len - 1\n ans_list.append(fake_ans)\n ans_list.insert(gt_ans_ind, gt_ans)\n return ans_list, gt_ans_ind, gt_inds\n\n\n def gen_single_motif_sample(self, motifs_num, ins_ind, bbox, cls_name, ans_len=10):\n cen_x = (bbox[:, 0] + bbox[:, 2]) / 2\n cen_y = (bbox[:, 1] + bbox[:, 3]) / 2\n dis = (cen_x - cen_x[ins_ind])**2 + (cen_y - cen_y[ins_ind])**2\n sort_ind = dis.argsort()[1:motifs_num]\n cur_inds = dis.argsort()[:motifs_num].tolist()\n cur_clses = [cls_name[x] for x in sort_ind]\n gt_ans = [cls_name[ins_ind]] + cur_clses\n gt_ans_ind = random.randint(0, ans_len - 1)\n ans_list = []\n addition_iter = 0\n while ans_len != 1:\n fake_ans = self.gen_ans_cls(gt_ans)\n if fake_ans != gt_ans:\n ans_len = ans_len - 1\n ans_list.append(fake_ans)\n ans_list.insert(gt_ans_ind, gt_ans)\n return ans_list, gt_ans_ind, cur_inds\n\n\n def gen_sample(self, iid):\n feat, bbox, cls_name, ins_size = self.load_img_feats(iid)\n\n gen_iter = random.sample(['instance', 'motif', 'scene'], 1)[0]\n if gen_iter == 'instance':\n ins_sample_id = random.randint(0, ins_size -1)\n anses, ans_gt_ind, vis_ind = self.gen_ins_sample([ins_sample_id], bbox, cls_name)\n\n elif gen_iter == 'motif':\n ins_sample_id = random.randint(0, ins_size -1)\n anses, ans_gt_ind, vis_ind = self.gen_motif_sample(ins_sample_id, bbox, cls_name, ins_size)\n\n elif gen_iter == 'scene':\n if ins_size > 5:\n scene_num = random.randint(5, 10)\n anses, ans_gt_ind, vis_ind = self.gen_scene_sample(scene_num, cls_name, ins_size)\n else:\n ins_sample_id = random.randint(0, ins_size -1)\n anses, ans_gt_ind, vis_ind = self.gen_motif_sample(ins_sample_id, bbox, cls_name, ins_size)\n\n return anses, ans_gt_ind, vis_ind,\\\n feat, bbox, cls_name, ins_size\n\n def __getitem__(self, item):\n ans_set, gt_set, vis_ind, feat, bbox, cls_name, ins_size = self.gen_sample(item)\n vis_feat = feat[vis_ind, :]\n vis_pad_feat = self.proc_img_feat(vis_feat, 10)\n\n ans_feat_set = []\n for sub_ans in ans_set:\n sub_ans_len = len(sub_ans)\n sub_ans_feat = torch.zeros(300)\n for ans in sub_ans:\n sub_ans_feat += self.kg_vec_dict[ans]\n sub_ans_feat = sub_ans_feat / sub_ans_len\n ans_feat_set.append(sub_ans_feat)\n\n ans_feat = torch.stack(ans_feat_set)\n\n return vis_pad_feat, ans_feat, gt_set\n\n\n def __len__(self):\n return len(self.iid_to_frcn_feat_path)\n\n\n\ndef collate_func(batch):\n label = []\n vis_feat_gather = []\n kg_feat_gather = []\n for sample in batch:\n vis_feat_gather.append(sample[0])\n kg_feat_gather.append(sample[1])\n label.append(sample[2])\n return torch.stack(vis_feat_gather, 0),\\\n torch.stack(kg_feat_gather, 0),\\\n torch.LongTensor(label)\n\n\n\n\n\nif __name__ == '__main__':\n feat_set = 
Featset()\n\n    for cnt in range(10):\n        # Featset defines __getitem__, so index it directly to smoke-test samples\n        feat_set[random.randint(0, len(feat_set) - 1)]\n","repo_name":"akira-l/knowledge_bt","sub_path":"data_loader/feat_loader.py","file_name":"feat_loader.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
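A minimal sketch of batching the dataset with the collate function above; the feature paths inside `Featset` are machine-specific, so this assumes they exist:

```python
loader = Data.DataLoader(Featset(sample=100), batch_size=8,
                         shuffle=True, collate_fn=collate_func)
vis_feat, kg_feat, labels = next(iter(loader))
print(vis_feat.shape, kg_feat.shape, labels.shape)
```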
+{"seq_id":"7535288492","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nimport openpyxl\nimport datetime\nimport base64\nimport tempfile\nimport shutil\n\nclass XlsMoves(models.TransientModel):\n _name = 'nexthope_xls.xls_moves'\n\n state = fields.Char(selection=(('draft','draft'),('done','done')), default='draft')\n date_from = fields.Date('From')\n date_to = fields.Date('To')\n name = fields.Char()\n file = fields.Binary(readonly=True)\n \n @api.multi\n def do_export(self):\n tmpdir = tempfile.mkdtemp()\n tmpdir = tmpdir.rstrip('/')\n \n wb = openpyxl.Workbook()\n ws = wb.active\n\n line = 0\n am_model = self.env['account.move']\n moves = am_model.search([('date', '>=', self.date_from), ('date', '<=', self.date_to)])\n for move in moves:\n line = line + 1\n ws['A%s' % line] = move.ref\n for moveline in move.line_id:\n line = line + 1\n ws['B%s' % line] = moveline.account_id.name\n ws['C%s' % line] = moveline.debit\n ws['D%s' % line] = moveline.credit\n \n \n wb.save(\"%s/moves.xlsx\" % tmpdir)\n \n with open(\"%s/moves.xlsx\" % tmpdir, \"rb\") as xls_file:\n encoded_string = base64.b64encode(xls_file.read())\n \n self.name=\"moves.xlsx\"\n self.file = encoded_string\n self.state = 'done'\n \n shutil.rmtree(tmpdir)\n \n return {\n 'name': 'Download file',\n 'context': self._context,\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'nexthope_xls.xls_moves',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'res_id': self[0].id,\n }\n \n \n","repo_name":"nseinlet/nexthope","sub_path":"nexthope_xls/wizard/xls_moves.py","file_name":"xls_moves.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"19392184373","text":"from typing import List, Set, Dict, Tuple, Optional, Callable\r\n\r\nclass Graph:\r\n def __init__(self, vertices: List[str], edges : Dict[str, List[str]]):\r\n self.vertices = vertices\r\n self.edges = edges\r\n\r\n self.vertex_to_index = {vertex: i for i, vertex in enumerate(self.vertices)}\r\n self.index_to_vertex = {i : vertex for vertex, i in enumerate(self.vertex_to_index)}\r\n\r\n self.adjacency_matrix = self._get_adjacency_matrix()\r\n \r\n def _get_adjacency_matrix(self):\r\n n = len(self.vertices)\r\n matrix = [[0 for i in range(n)] for j in range(n)]\r\n for vertex in self.vertices:\r\n for neighbor in self.edges[vertex]:\r\n i, j = self.vertex_to_index[vertex], self.vertex_to_index[neighbor]\r\n matrix[i][j] = 1\r\n \r\n return matrix\r\n\r\n\r\n# Class handling message content and its manipulation.\r\n# A word consists of a list of strings, which we treat as \r\n# variables being XOR'ed.\r\n# Example: Word(['a', 'b']) := a \\oplus b\r\nclass Word:\r\n def __init__(self, message: List[str] = []):\r\n self.message = self.reduce(message)\r\n\r\n def __repr__(self):\r\n return ' + '.join(self.message)\r\n \r\n # Defines the sum of two messages to be their XOR\r\n def __add__(self, other):\r\n message1 = self.message\r\n message2 = other.message\r\n union = message1 + message2\r\n return Word(union)\r\n \r\n # Simplify a message (consisting of XOR'ed variables) by reducing each \r\n # variable to be included once or not based on the parity of its occurrences\r\n def reduce(self, expanded: List[str]):\r\n message = []\r\n counter = 0\r\n prev = ''\r\n for ch in sorted(expanded):\r\n if ch != prev:\r\n if counter % 2 != 0:\r\n message.append(prev)\r\n counter = 0\r\n counter += 1\r\n prev = ch\r\n if counter % 2 != 0:\r\n message.append(prev)\r\n \r\n return message\r\n\r\n# Wrapper class for a word (the content) and its sender/recipient\r\nclass Message:\r\n def __init__(self, sender: str, recipient: str, contents: Word):\r\n self.sender = sender\r\n self.recipient = recipient\r\n self.contents = contents\r\n\r\n# Class to organize all the messages sent in a round.\r\n# Contains dictionaries 'outgoing' and 'incoming' to index all \r\n# the messages being sent from or sent to a particular vertex\r\nclass MessageList:\r\n def __init__(self, graph: Graph, messages: List[Message] = []):\r\n self.outgoing = {vertex: {neighbor: Word() for neighbor in graph.edges[vertex]} for vertex in graph.vertices}\r\n self.incoming = {vertex: {neighbor: Word() for neighbor in graph.edges[vertex]} for vertex in graph.vertices}\r\n self.graph = graph\r\n self.process_messages(messages)\r\n\r\n def process_messages(self, messages: List[Message]):\r\n for message in messages:\r\n self.outgoing[message.sender][message.recipient] = message.contents\r\n self.incoming[message.recipient][message.sender] = message.contents\r\n\r\n# Class to organize all communications sent over several rounds of a protocol\r\nclass Transcript:\r\n def __init__(self, graph: Graph):\r\n self.graph = graph\r\n self.rounds: List[MessageList] = []\r\n\r\n def add_round(self, messages: MessageList):\r\n self.rounds.append(messages)\r\n\r\nclass Protocol:\r\n def __init__(self, graph: Graph, forward_func: Callable[[Graph, str, Dict[str, Word]], List[Message]]):\r\n self.graph = graph\r\n self.forward_func = forward_func\r\n\r\n # Forwards the messages for a single round using forward_func\r\n def _forward(self, messages: MessageList) -> MessageList:\r\n outgoing = MessageList(messages.graph)\r\n for 
vertex in messages.incoming:\r\n outgoing_messages = self.forward_func(self.graph, vertex, messages.incoming[vertex])\r\n outgoing.process_messages(outgoing_messages)\r\n \r\n return outgoing\r\n \r\n # Forward the messages num_rounds times\r\n def simulate(self, initial: MessageList, num_rounds: int) -> Transcript:\r\n transcript = Transcript(self.graph)\r\n transcript.add_round(initial)\r\n\r\n messages = initial\r\n for _ in range(num_rounds):\r\n messages = self._forward(messages)\r\n transcript.add_round(messages)\r\n \r\n return transcript","repo_name":"mbpereira49/thc-protocols","sub_path":"protocols.py","file_name":"protocols.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
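A small end-to-end run of the classes above: two nodes, and a forward function that simply echoes whatever a vertex received back to each neighbor:

```python
def echo_forward(graph, vertex, incoming):
    # incoming maps neighbor -> Word; echo each word back to its sender
    return [Message(vertex, nbr, word) for nbr, word in incoming.items()]

g = Graph(['u', 'v'], {'u': ['v'], 'v': ['u']})
initial = MessageList(g, [Message('u', 'v', Word(['a', 'b']))])
transcript = Protocol(g, echo_forward).simulate(initial, num_rounds=2)
print(transcript.rounds[-1].incoming['v']['u'])  # a + b, echoed back and forth
```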
+{"seq_id":"4036752503","text":"Import('env')\n\nobjects = map(env.Object, [\n 'buffer.cc',\n 'commands.cc',\n 'fbo.cc',\n 'gl.cc',\n 'pipeline.cc',\n 'program.cc',\n 'query_pool.cc',\n 'resource.cc',\n 'state.cc',\n 'texture.cc',\n 'vertex_data.cc',\n 'window.cc',\n])\n\nReturn('objects')\n","repo_name":"aejsmith/orion","sub_path":"engine/runtime/gpu/src/gl/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"}
+{"seq_id":"19983475881","text":"# Import general dependancies\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\n\n# Import SQLAlchemy dependancies\nimport sqlalchemy\nfrom sqlalchemy.ext import automap\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n# Import Flask dependancies\nfrom flask import Flask, jsonify\n\n# Set Up the Database\n# Access the SQLite database\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n# Allow query to the database file\nBase = automap_base()\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n# Create a session link from Python to the database\nsession = Session(engine)\n\n# Define the Flask app\napp = Flask(__name__)\n\n# Define the welcome route\n@app.route(\"/\")\n\n# Create to reference all other routes\ndef welcome():\n return(\n '''\n Welcome to the Climate Analysis API! \n Available Routes: \n /api/v1.0/precipitation \n /api/v1.0/stations \n /api/v1.0/tobs \n /api/v1.0/temp/start/end \n ''')\n\n# Define the precipitation route\n@app.route(\"/api/v1.0/precipitation\")\n\ndef precipitation():\n # calculates the date one year ago from the most recent date in the database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # get the date and precipitation for the previous year\n precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n # create a dictionary with the date as the key and the precipitation as the value\n precip = {date: prcp for date, prcp in precipitation}\n return jsonify(precip)\n\n# Define the stations route\n@app.route(\"/api/v1.0/stations\")\n\ndef stations():\n # get all of the stations in our database\n results = session.query(Station.station).all()\n # unravel the results into a one-dimensional array and convert that array into a list. \n stations = list(np.ravel(results))\n # Then jsonify the list and return\n return jsonify(stations=stations)\n\n# Define the temperature route\n@app.route(\"/api/v1.0/tobs\")\n\ndef temp_monthly():\n # calculate the date one year ago from the last date in the database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # query the primary station for all the temperature observations from the previous year\n results = session.query(Measurement.tobs).\\\n filter(Measurement.station == 'USC00519281').\\\n filter(Measurement.date >= prev_year).all()\n # unravel the results into a one-dimensional array and convert that array into a list. 
\n    temps = list(np.ravel(results))\n    # Then jsonify the list and return\n    return jsonify(temps)\n\n# Define the stats route; <start> and <end> are URL converters for the date range\n@app.route(\"/api/v1.0/temp/<start>\")\n@app.route(\"/api/v1.0/temp/<start>/<end>\")\n\ndef stats(start=None, end=None):\n    # create a query to select the minimum, average, and maximum temperatures from our SQLite database\n    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n    if not end:\n        results = session.query(*sel).\\\n            filter(Measurement.date >= start).all()\n        temps = list(np.ravel(results))\n        return jsonify(temps=temps)\n\n    results = session.query(*sel).\\\n        filter(Measurement.date >= start).\\\n        filter(Measurement.date <= end).all()\n    temps = list(np.ravel(results))\n    return jsonify(temps)\n\n","repo_name":"smacpherson2021/surfs_up","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
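A quick smoke test without starting a server, using Flask's built-in test client; the date range is illustrative:

```python
with app.test_client() as client:
    print(client.get("/api/v1.0/stations").get_json())
    print(client.get("/api/v1.0/temp/2017-06-01/2017-06-30").get_json())
```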
+{"seq_id":"14197248300","text":"from dagster import asset, FreshnessPolicy\nimport pandas as pd\nimport os\n\n@asset(\n metadata={\n \"source\": \"Financial Modeling Prep\",\n \"name\": \"Earnings Surprises Data\",\n \"description\": \"Retrieves data on earnings surprises, comparing actual earnings results with estimated earnings for various companies over time.\",\n \"columns\": [\n {\"name\": \"date\", \"type\": \"date\", \"description\": \"Date of the earnings report.\"},\n {\"name\": \"symbol\", \"type\": \"string\", \"description\": \"Ticker symbol of the company.\"},\n {\"name\": \"actual_earning_result\", \"type\": \"float\", \"description\": \"Actual earnings result reported by the company.\"},\n {\"name\": \"estimated_earning\", \"type\": \"float\", \"description\": \"Estimated earnings projected for the company.\"}\n ]\n },\n freshness_policy=FreshnessPolicy(maximum_lag_minutes=60 * 24 * 30, cron_schedule=\"0 0 1 * *\") # Adjust the freshness policy as needed\n)\ndef fmp_earnings_surprises():\n dfs = [handle_request(year) for year in range(1985, 2023)]\n df = pd.concat(dfs)\n df = df.dropna(how='all')\n return df\n\ndef handle_request(year):\n BASE_URL = 'https://financialmodelingprep.com/api/v4/'\n url = BASE_URL + f'earnings-surprises-bulk?year={year}&period=quarter&datatype=csv&apikey=' + os.environ['FMP_API_KEY']\n df = pd.read_csv(url)\n \n column_name_mapping = {\n\t \"date\": \"date\",\n\t \"symbol\": \"symbol\",\n\t \"actualEarningResult\": \"actual_earning_result\",\n\t \"estimatedEarning\": \"estimated_earning\"\n\t}\n df = df.rename(columns=column_name_mapping)\n df['date'] = pd.to_datetime(df['date']).dt.date\n return df\n","repo_name":"subsetsio/subsets-connectors","sub_path":"integrations/assets/fmp/earnings_suprise.py","file_name":"earnings_suprise.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"72803944479","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport mnist_inference as infer\nimport os\n\nMODEL_PATH = \"/Users/liyanan/Documents/Test/Tensorflow/models/model_ckpt/model_minst/\"\nMODEL_NAME = \"mnist_model.ckpt\"\n\nBATCH_SIZE = 200\nTRAINING_STEP = 30000\nREGULARIZATION_RATE = 0.001 \nLEARNING_RATE_BASE = 0.8\nLEARNING_RATE_DECAY = 0.9\nMOVING_AVERAGE_DECAY = 0.99\n\ndef train(mnist,num_examples):\n input_x = tf.placeholder(tf.float32,shape=[None,infer.INPUT_SIZE],name=\"input_x\")\n input_y = tf.placeholder(tf.float32,shape=[None,infer.OUTPUT_SIZE],name=\"input_y\")\n\n regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n predict = infer.construct_network(input_x,regularizer)\n\n global_step = tf.Variable(0,trainable=False)\n\n #define the loss\n cross_entroy = tf.nn.softmax_cross_entropy_with_logits(logits=predict,labels=input_y)\n loss = tf.reduce_mean(cross_entroy) + tf.add_n(tf.get_collection(\"loss\"))\n\n #define the moving average\n ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY,num_updates=global_step)\n moving_average_op = ema.apply(tf.trainable_variables()) \n\n #define the learning_rate and train_step\n learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,global_step=global_step,decay_steps=num_examples/BATCH_SIZE,decay_rate=LEARNING_RATE_DECAY,name=\"learning_rate_decay\")\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)\n\n train_op = tf.group(train_step,moving_average_op)\n\n Saver = tf.train.Saver(max_to_keep=15)\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n for i in range(TRAINING_STEP):\n batch_x,batch_y = mnist.train.next_batch(BATCH_SIZE)\n sess.run(train_op,feed_dict={input_x:batch_x,input_y:batch_y})\n\n if i % 1000 == 0:\n batch_loss = sess.run(loss,feed_dict={input_x:batch_x,input_y:batch_y})\n all_loss = sess.run(loss,feed_dict={input_x:mnist.train.images,input_y:mnist.train.labels})\n\n print(\"After training \"+str(i)+\", the batch loss is \"+str(batch_loss)+\", and all loss is \"+str(all_loss))\n Saver.save(sess,os.path.join(MODEL_PATH,MODEL_NAME),global_step=global_step)\n\ndef getmnist(mnist_path):\n mnist = input_data.read_data_sets(mnist_path,one_hot = True)\n return mnist\n\nif __name__ == \"__main__\":\n mnist_path = \"/Users/liyanan/Documents/Test/Tensorflow/data/mnist_data/\"\n mnist_data = getmnist(mnist_path)\n train_num_examples = mnist_data.train.num_examples\n train(mnist_data,train_num_examples)\n","repo_name":"belivem/Study","sub_path":"src/mnist_fully_network/mnist_best_practice/mnist_train.py","file_name":"mnist_train.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"71319805598","text":"from sbol import * # noqa\nimport ipywidgets as widgets\n\n\ntypesInteractions = ['Inhibition', 'Stimulation', 'Biochemical Reaction',\n 'Non-Covalent Binding', 'Degradation', 'Genetic Production', # noqa\n 'Control']\nmapInteractions = {'Inhibition': 'http://identifiers.org/biomodels.sbo/SBO:0000169', # noqa\n 'Stimulation': 'http://identifiers.org/biomodels.sbo/SBO:0000170', # noqa\n 'Biochemical Reaction': 'http://identifiers.org/biomodels.sbo/SBO:0000176', # noqa\n 'Non-Covalent Binding': 'http://identifiers.org/biomodels.sbo/SBO:0000177', # noqa\n 'Degradation': 'http://identifiers.org/biomodels.sbo/SBO:0000179', # noqa\n 'Genetic Production': 'http://identifiers.org/biomodels.sbo/SBO:0000589', # noqa\n 'Control': 'http://identifiers.org/biomodels.sbo/SBO:0000168'} # noqa\n\ntypesParticipants = ['Inhibitor', 'Inhibited', 'Stimulator', 'Stimulated', 'Reactant', # noqa\n 'Product', 'Promoter', 'Modifier', 'Modified', 'Template']\nmapParticipants = {'Inhibitor': 'http://identifiers.org/biomodels.sbo/SBO:0000020', # noqa\n 'Inhibited': 'http://identifiers.org/biomodels.sbo/SBO:0000642', # noqa\n 'Stimulator': 'http://identifiers.org/biomodels.sbo/SBO:0000459', # noqa\n 'Stimulated': 'http://identifiers.org/biomodels.sbo/SBO:0000643', # noqa\n 'Reactant': 'http://identifiers.org/biomodels.sbo/SBO:0000010', # noqa\n 'Product': 'http://identifiers.org/biomodels.sbo/SBO:0000011', # noqa\n 'Promoter': 'http://identifiers.org/biomodels.sbo/SBO:0000598', # noqa\n 'Modifier': 'http://identifiers.org/biomodels.sbo/SBO:0000019', # noqa\n 'Modified': 'http://identifiers.org/biomodels.sbo/SBO:0000644', # noqa\n 'Template': 'http://identifiers.org/biomodels.sbo/SBO:0000645'} # noqa\n\n\ndef findPlasmid(fc, fcDictionary):\n plasmidName = ''\n\n if fc.displayId not in fcDictionary:\n return None\n\n if fcDictionary[fc.displayId] is None:\n return None\n\n name = fcDictionary[fc.displayId]\n\n if name != fc.displayId:\n plasmidName = fc.displayId.replace(name, '')[:-1]\n else:\n plasmidName = 'Other'\n\n return plasmidName\n\n\ndef createHBoxChildren(moduleName, customNameDictionary, fcDictionary):\n module = customNameDictionary[moduleName]\n\n selectWidgetDictionary = {}\n\n for fc in module.functionalComponents:\n plasmid = findPlasmid(fc, fcDictionary)\n\n if plasmid is None:\n continue\n\n name = fcDictionary[fc.displayId]\n\n if plasmid not in selectWidgetDictionary:\n selectWidgetDictionary[plasmid] = []\n\n selectWidgetDictionary[plasmid].append(name)\n\n hboxChildren = []\n\n for plasmid in selectWidgetDictionary.keys():\n nameList = []\n\n for component in selectWidgetDictionary[plasmid]:\n nameList.append(component)\n\n selectWidget = widgets.SelectMultiple(\n options=nameList,\n description=plasmid,\n rows=len(nameList),\n style={'description_width': '125px'},\n layout=widgets.Layout(width='325px')\n )\n\n hboxChildren.append(selectWidget)\n\n return hboxChildren\n\n\nparticipantDictionary = {} # fc display id : participation\n\n\ndef createParticipationChildren(hBoxChildren):\n selectedFC = []\n participationChildren = []\n for widget in hBoxChildren:\n if len(widget.value) != 0:\n participationChildren.append(widgets.HTML('' +\n widget.description +\n ' '))\n for selected in widget.value:\n dropdown = widgets.Dropdown(\n options=typesParticipants,\n description=selected,\n value=None,\n style={'description_width': '150px'},\n layout=widgets.Layout(width='325px'))\n participationChildren.append(dropdown)\n\n if widget.description != 'Other':\n 
selectedFC.append(widget.description + '_' + selected)\n else:\n selectedFC.append(selected)\n\n participationChildren.append(widgets.HTML(' '))\n\n return (participationChildren, selectedFC)\n\n\ndef createInteraction(customNameDictionary,\n moduleName,\n interactionName,\n interactionType,\n participantDictionary,\n selectedFC):\n module = customNameDictionary[moduleName]\n try:\n interaction = module.interactions.create(interactionName)\n interaction.types = mapInteractions[interactionType]\n except: # noqa\n raise Exception(str(interactionName.value + ' already exists!'))\n\n for fc in module.functionalComponents:\n if fc.displayId in selectedFC:\n participation = interaction.participations.create(fc.displayId)\n participation.participant = fc.identity\n\n participantDictionary[fc.displayId] = participation\n\n\ndef createMapsTos(doc,\n participationWidgetsChildren,\n participantDictionary,\n plasmidPartDictionary,\n addedPlasmidParts):\n plasmid = ''\n for widget in participationWidgetsChildren:\n if widget.value != ' ':\n if '' in widget.value:\n plasmid = widget.value.replace('', '').replace(' ', '')\n else:\n if plasmid != 'Other':\n fcName = plasmid + '_' + widget.description\n else:\n fcName = widget.description\n\n participantDictionary[fcName].roles = mapParticipants[widget.value] # noqa\n\n fc = doc.find(participantDictionary[fcName].participant).cast(FunctionalComponent) # noqa\n\n if fcName not in addedPlasmidParts:\n continue\n\n mapsto = addedPlasmidParts[fcName].mapsTos.create(fc.displayId)\n mapsto.refinement = SBOL_REFINEMENT_USE_LOCAL # noqa\n mapsto.local = fc.identity\n mapsto.remote = plasmidPartDictionary[fcName].identity\n","repo_name":"bchan/SBOLAnnotations","sub_path":"src/addinginteractions.py","file_name":"addinginteractions.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"22132102622","text":"\"\"\"Custom type definitions and shortcuts for annotating ``friendly_traceback``.\"\"\"\n\nimport os\nimport sys\nfrom types import FrameType\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, TypeVar, Union\n\nif TYPE_CHECKING:\n from _typeshed import StrPath\n\n from .core import TracebackData\nelse:\n StrPath = Union[str, os.PathLike]\n\n# TODO see https://www.daan.fyi/writings/python-protocols and define\n# a more general exception that could include all possible fields\n# declared as optional\n# including new ones for SyntaxError in Python 3.10+\n_E = TypeVar(\"_E\", bound=BaseException)\n\n\nif sys.version_info >= (3, 8):\n from typing import Literal, Optional, Protocol, TypedDict\n\n InclusionChoice = Literal[\n \"message\",\n \"hint\",\n \"what\",\n \"why\",\n \"where\",\n \"friendly_tb\",\n \"python_tb\",\n \"debug_tb\",\n \"explain\",\n \"no_tb\",\n ]\n\n class Info(TypedDict, total=False):\n header: str\n message: str\n original_python_traceback: str\n simulated_python_traceback: str\n shortened_traceback: str\n suggest: str\n generic: str\n parsing_error: str\n parsing_error_source: str\n cause: str\n last_call_header: str\n last_call_source: str\n last_call_variables: str\n exception_raised_header: str\n exception_raised_source: str\n exception_raised_variables: str\n lang: str\n _exc_instance: BaseException\n _frame: Optional[FrameType]\n _tb_data: \"TracebackData\"\n\n class Formatter(Protocol):\n def __call__(self, info: Info, include: InclusionChoice = ...) -> str:\n ...\n\n class CauseInfo(TypedDict, total=False):\n cause: str\n suggest: str\n\n Site = Literal[\"friendly\", \"python\", \"bug\", \"email\", \"warnings\"]\n\n ScopeKind = Literal[\"local\", \"global\", \"nonlocal\"]\n\n ObjectsInfo = TypedDict(\n \"ObjectsInfo\",\n {\n \"locals\": List[Tuple[str, str, Any]],\n \"globals\": List[Tuple[str, str, Any]],\n \"builtins\": List[Tuple[str, str, Any]],\n \"expressions\": List[Tuple[str, Any]],\n \"name, obj\": List[Tuple[str, Any]],\n },\n )\n SimilarNamesInfo = TypedDict(\n \"SimilarNamesInfo\",\n {\"locals\": List[str], \"globals\": List[str], \"builtins\": List[str], \"best\": Any},\n )\n\nelse:\n InclusionChoice = str\n Info = Dict[str, str]\n Formatter = Callable[[Info, InclusionChoice], str]\n Site = str\n CauseInfo = Dict[str, str]\n ScopeKind = str\n ObjectsInfo = Dict[str, List[Any]]\n SimilarNamesInfo = Dict[str, List[str]]\n\n\nExplain = Callable[[_E, FrameType, \"TracebackData\"], CauseInfo]\nGenericExplain = Callable[[], str]\nParser = Union[\n Callable[[str, FrameType, \"TracebackData\"], CauseInfo],\n Callable[[_E, FrameType, \"TracebackData\"], CauseInfo],\n]\nTranslator = Callable[[str], str]\nWriter = Callable[[str], None]\n","repo_name":"alexmojaki/friendly-traceback","sub_path":"friendly_traceback/typing_info.py","file_name":"typing_info.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"}
+{"seq_id":"6756998741","text":"from fastapi import APIRouter\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic.main import BaseModel\n\n\nclass Book(BaseModel):\n\tname: str\n\tprice: float\n\n\nfake_books_db = [\n\t{ \"name\": \"Mindset\", \"price\": 189.0 },\n\t{ \"name\": \"อิคิไก ความหมายของการมีชีวิตอยู่\", \"price\": 250.0 },\n\t{ \"name\": \"เลิกเป็นคนดี แล้วชีวิตจะมีความสุข\", \"price\": 270.0 },\n]\n\nrouter = APIRouter()\n\n\n@router.get(\"/books\")\nasync def show_all_books():\n\treturn fake_books_db[::-1]\n\t\n@router.get(\"/books/{book_id}\", response_model=Book)\nasync def read_books(book_id: int):\n\treturn fake_books_db[book_id - 1]\n\n@router.post(\"/books\")\nasync def create_book(book: Book):\n\tfake_books_db.append(book)\n\treturn book\n\n@router.put(\"/books/{book_id}\", response_model=Book)\nasync def update_book(book_id: int, book: Book):\n\tbook_encoded = jsonable_encoder(book)\n\tfake_books_db[book_id] = book_encoded\n\treturn book_encoded\n\n@router.delete(\"/book/{book_id}\")\nasync def delete_book(book_id: int):\n\tbook = fake_books_db[book_id - 1]\n\tfake_books_db.pop(book_id - 1)","repo_name":"cckcoder/fast-api-basic","sub_path":"routers/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"6278544707","text":"from dash import Dash \r\nfrom dash_html_components import Div, H1, P\r\nfrom dash_core_components import Graph \r\n\r\nexternal_stylesheets = [\r\n\r\n]\r\n\r\n\r\napp = Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\n\r\napp.layout = Div (\r\n children=[\r\n H1(\"Olá mundo\"),\r\n P(\"Bem vindo ao dash\"),\r\n Graph(\r\n figure={\r\n 'data': [\r\n {'y': [1, 2, 3, 4],\r\n 'y': [1, 2, 3, 4],\r\n 'type': 'line'\r\n }\r\n ]\r\n }\r\n )\r\n ]\r\n )\r\n\r\napp.run_server(debug=True)","repo_name":"Jonielevy/RealTimePlot","sub_path":"python/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"72698694558","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\nimport sys\nimport re\n\np_phone_numbers = [line.split('\\n')[0] for line in sys.stdin.readlines()]\n\nphone_reg_ex = re.compile(r'^[7-9]\\d{9}$')\n\nfor i in range(int(p_phone_numbers[0])):\n if re.search(phone_reg_ex, p_phone_numbers[i+1]): print('YES')\n else: print('NO')\n","repo_name":"acarter881/hacker_rank_python_answers","sub_path":"validating_phone_numbers.py","file_name":"validating_phone_numbers.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"39031008232","text":"import discord, asyncio\nfrom os import system\nimport os\nfrom os import path\nimport sqlite3\n\nclient = discord.Client()\ntoken = \"\"\nwd=os.getcwd()\n\nconn=sqlite3.connect('DiscordLog.db')\nc=conn.cursor()\nc.execute(\"CREATE TABLE IF NOT EXISTS Discord(Guild TEXT,Channel TEXT,Author TEXT,Date TEXT,Message TEXT)\")\n@client.event\nasync def on_message(message):\n if not path.exists(wd+str(message.guild)):\n os.mkdir(wd+str(message.guild))\n gd=wd+str(message.guild)+'/'\n File_object = open(gd+\"log - \"+str(message.channel),\"a\")\n str1=str(message.author)\n str2=str(message.created_at)\n str3=str(message.guild)\n str4=str(message.channel)\n str5=message.content\n c.execute(\"INSERT INTO Discord(Guild,Channel,Author,Date,Message)VALUES (?,?,?,?,?)\",(str3,str4,str1,str2,str5))\n conn.commit()\n L=[str1 + \" \",str2 + \"\\n\",str3 + \" \",str4 + \"\\n\",str5 + \"\\n\",\"\\n\"]\n print(L)\n File_object.writelines(L)\n for attachment in message.attachments:\n if not path.exists(gd+\"attachments/\"):\n os.mkdir(gd+\"attachments/\")\n await attachment.save(gd+\"attachments/\"+attachment.filename)\nclient.run(token, bot=False)\n\n","repo_name":"Ryu184/Selfbot-logger","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"14448472784","text":"import argparse\nimport os\nimport timeit\nfrom functools import partial\nimport random\nfrom itertools import accumulate\nfrom sorting import BaseSort\n\n\nclass RadixSort(BaseSort):\n \"\"\"\n A class used to encapsulate the Radix Sort algorithm\n\n Attributes\n ----------\n -\n\n Methods\n -------\n sort(arr, in_place=False)\n Sorts an array using the radix sort algorithm\n\n __digit_counting_sort(arr, exp)\n Counting sort implementation that sorts the array based on the element digit specified by exp\n\n \"\"\"\n def __repr__(self):\n return \"Radix Sort\"\n\n\n def sort(self, arr: list, in_place=False) -> list:\n \"\"\"\n Sorts an array using the radix sort algorithm\n\n Parameters:\n arr (list): list to be sorted\n in_place (bool): whether the list should be sorted in place\n\n Returns:\n list: the sorted list\n \"\"\" \n if in_place:\n work_arr = arr\n else:\n work_arr = arr.copy()\n \n max_elmt = max(work_arr) # max element is used for stopping criteria\n\n # sort every digit until the largest element is reduced to a decimal (assuming array consists of integers only)\n exp = 1 \n while max_elmt/exp >= 1:\n # counting sort is used for every digit\n work_arr = self.__digit_counting_sort(work_arr, exp)\n\n # digits are represented using exp = 10^i, where i is the i'th digit\n exp *= 10\n\n return work_arr\n \n\n def __digit_counting_sort(self, arr: list, exp: int) -> list:\n \"\"\"\n Counting sort implementation that sorts the array based on the element digit specified by exp\n\n Parameters:\n arr (list): list to be sorted\n exp (int): specifies the digit to sort by (exp=10^i for digit i)\n\n Returns:\n list: the list sorted by the digit specified by exp\n \"\"\" \n n = len(arr)\n work_arr = [None]*n\n\n # initialize counting array (assuming base 10)\n count_arr = [0]*10\n\n # count occurrences of each element digit in array\n for x in arr:\n # find digit index\n digit_index = int((x / exp) % 10)\n count_arr[digit_index] += 1\n\n # do an accumulative sun of the elements digit occurences in the counting array\n count_arr = list(accumulate(count_arr))\n\n # iterate array elements in reverse and place in sorted array based on occurences in counting array\n # reverse iteration ensures that in the case of equality, the elements occuring first in the unsorted array will occur first in the sorted array\n for x in arr[::-1]:\n digit_index = int((x / exp) % 10)\n work_arr[count_arr[digit_index]-1] = x\n count_arr[digit_index] -= 1\n return work_arr\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Radix sorting algorithm')\n\n parser.add_argument('-data', help='parameters for generating random data [len, seed]', nargs=2, type=int)\n parser.add_argument('-t', help='measure the execution time', nargs=2, required=False, type=int)\n args = parser.parse_args()\n \n n = args.data[0]\n seed = args.data[1]\n t = args.t\n\n # shuffle data randomly with seed\n sorted_data = list(range(n))\n random.seed(seed)\n random_data = random.sample(sorted_data, n)\n sorting_algo = RadixSort()\n\n # verify that list is sorted correctly\n if not sorting_algo.sort(random_data) == sorted_data:\n print('Error sorting array using <{}>'.format(sorting_algo))\n exit(1)\n\n # measure execution time\n if args.t:\n times = timeit.Timer(partial(sorting_algo.sort, random_data)).repeat(t[1], t[0])\n \n # average time taken\n time_taken = min(times) / t[0]\n\n print('Timing analysis')\n print('Sorting method: {}'.format(sorting_algo))\n print('Data length: 
{}'.format(n))\n print('Executions: {}'.format(t[0]))\n print('Average time: {}s'.format(time_taken))","repo_name":"Woobs8/data_structures_and_algorithms","sub_path":"Python/Sorting/radix_sort.py","file_name":"radix_sort.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
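+The core move in `__digit_counting_sort` above is `int((x / exp) % 10)`, which isolates one decimal digit per pass; a standalone trace of that expression:
+x = 4067
+for exp in (1, 10, 100, 1000):
+    # exp = 10**i selects the i'th decimal digit of x, least significant first
+    print(exp, int((x / exp) % 10))
+# prints 7, 6, 0, 4: the digits of 4067 from least to most significant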
+{"seq_id":"29036974974","text":"#! /usr/bin/python3\n\n\"\"\"\n This file is part of LUCA.\n\n LUCA - LEGO Universe Creation (Lab) Archiver\n Created 2013 Brickever \n\n LUCA is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n LUCA is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with LUCA If not, see .\n\"\"\"\nimport os\nimport sys\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\napp = \"LUCA\"\nmajver = \"0.4\"\nminver = \"\"\n\n# Write window title\nos.system(\"title {0} v{1}\".format(app, majver))\nlocalUserName = input(\"\\nEnter your Creation Lab Username: \")\n\n# Search the localUserName on the Creation Lab\nurl = \"http://universe.lego.com/en-us/community/creationlab/displaycreationlist.aspx?SearchText={0}&order=oldest&show=12\".format(localUserName)\nr = requests.get(url).content\nsoup = BeautifulSoup(r)\ncreations = []\nfor link in soup.find_all('a'): \n if link.get('href')[0:49] == \"/en-us/Community/CreationLab/DisplayCreation.aspx\": \n creations.append('http://universe.lego.com' + link.get('href'))\n\n# Check if links were found/added for the entered username\n# If not, close LUCA \nif not creations:\n print('The username \"{0}\" does not result in any search entry on the Creation Lab.'.format(localUserName))\n input(\"Press Enter to close LUCA.\")\n raise SystemExit(0) \n \n# Check if one link contains the localUserName\n# If not, close LUCA\nr = requests.get(creations[0]).content\nsoup = BeautifulSoup(r)\nonlineUserName = soup.find(id=\"ctl00_ContentPlaceHolderUniverse_HyperLinkUsername\")\nif localUserName.lower() == onlineUserName.string.lower():\n memberid = onlineUserName.get('href')[63:99]\n print(\"\\nYour Creations are now downloading, {0}.\\n\".format(localUserName))\nelse:\n print('The username \"{0}\" does not appear to match with any usernames online.'.format(localUserName))\n input(\"Press Enter to close LUCA.\")\n raise SystemExit(0) \n\n\n\n\n\nurl = \"http://universe.lego.com/en-us/community/creationlab/displaycreationlist.aspx?memberid={0}&show=48\".format(memberid)\nr = requests.get(url).content\nsoup = BeautifulSoup(r)\n\n# Create folder to save files in,\n# unless it already exists\nif not os.path.exists(localUserName):\n os.mkdir(localUserName)\n\n\ncreations = []\nfor link in soup.find_all('a'): \n if link.get('href')[0:49] == \"/en-us/Community/CreationLab/DisplayCreation.aspx\": \n creations.append('http://universe.lego.com' + link.get('href'))\n \n\nfor creation in creations:\n\n# ------- INFORMATION GHATERING ------- #\n r = requests.get(creation).content\n soup = BeautifulSoup(r)\n\n title = soup.find_all('h1')[2] #add .string to get only the text\n titleString = title.string\n titleString = titleString.replace('/','')\n description = soup.find(id=\"creationInfoText\")\n tags = soup.find_all(class_='column-round-body')[3].contents[9]\n challenge = soup.find(id=\"CreationChallenge\").contents[1].contents[1]\n\n date = soup.find(id=\"CreationUser\")\n date.div.decompose()\n date.a.decompose()\n \n page = '''\n\n\n \n{0} \n\n\n{1}\n{2}\n{3}\n{4}\n{5}\n\n\n '''.format(titleString, title, description, 
tags, challenge, date)\n\n imgLinkList = []\n i = 1\n \n for imgLink in soup.find_all('a'):\n if imgLink.get('href')[0:13] == \"GetMedia.aspx\":\n imgLinkList.append('http://universe.lego.com/en-us/community/creationlab/'+ imgLink.get('href'))\n\n\n# ------- INFORMATION WRITING ------- #\n # List of illegal characters for filenames\n blacklist = [\"\\\\\", \"/\", \":\", \"*\", \"?\", '\"', \"<\", \">\", \"|\"]\n # Makes it easier to understand what is going on.\n filepath = os.path.join(os.getcwd(), localUserName)\n\n\n HTMLfilename = \"{0}.html\".format(titleString)\n for char in blacklist: \n if char in HTMLfilename:\n HTMLfilename = HTMLfilename.replace(char, \"-\")\n\n # Write HTML documents. \n with open(os.path.join(filepath, HTMLfilename), \"wt\") as newHTML:\n newHTML.write(page)\n\n # Display filename after it was installed, \n # part of LUCA's non-GUI progress bar.\n print(os.path.basename(HTMLfilename), end=\"\\n\")\n \n \n for imgLink in imgLinkList:\n r = requests.get(imgLink)\n img = r.content \n \n # Original filename \n filename = \"{0}{1}.jpg\".format(titleString, i)\n for char in blacklist: \n if char in filename:\n # If an illegal character is found, replace it with a dash\n filename = filename.replace(char, \"-\")\n\n # Write all non HTML files. \n with open(os.path.join(filepath, filename), 'wb') as newImg:\n newImg.write(img)\n \n # Display filename after it was installed, \n # part of LUCA's non-GUI progress bar.\n print(os.path.basename(filename), end=\"\\n\")\n i = i + 1\n\n\n# Get list of all downloaded files\nnum_of_files = os.listdir(os.path.join(os.getcwd(), localUserName))\n# Remove Thumbs.db from list\nif \"Thumbs.db\" in num_of_files:\n num_of_files.remove(\"Thumbs.db\")\n\n# Display success message containing number\n# of files downloaded and where they were saved.\nprint('\\n{0} files successfully downloaded and saved to \\n\"{1}\"'.format(len(num_of_files), filepath))\ninput(\"\\nPress Enter to close LUCA.\\n\")\nraise SystemExit(0)\n \n\n\n\n\n\n\n\n\n","repo_name":"patrickrst/LUCA","sub_path":"LUCA.py","file_name":"LUCA.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"41165442408","text":"\"\"\"A module for samplers for SNAIL training\n\"\"\"\nimport itertools\nimport random\n\nimport torch.utils.data\nimport numpy as np\n\n\nclass KShotSampler(torch.utils.data.sampler.Sampler):\n \"\"\"A sampler for K-shot training. Note that the supplied dataset object for\n the sampler must follow a few rules (see the dataset argument description).\n\n Arguments:\n dataset (Dataset): A dataset from which to sample. The dataset must\n have integer labels, which represents the class to which each item\n belongs. Each class must have the same number of examples. Also,\n examples from each class must be contiguous.\n `query_size + support_size` must be the number of examples per\n class.\n n (integer): the number of classes to sample per minibatch\n k (integer): the number of examples per class to return for each\n minibatch\n class_size (integer): the number of examples per class\n \"\"\"\n def __init__(self, dataset, n, k, class_size=20):\n self.n = n\n self.k = k\n self.class_size = class_size\n self.dataset = dataset\n\n def __iter__(self):\n indexes = list(range(len(self)))\n np.random.shuffle(indexes)\n\n for i in range(0, len(self), self.n):\n # if there aren't enough remaining classes for a batch, stop\n if len(self) - i < self.n:\n raise StopIteration\n support = []\n classes = list(enumerate(indexes[i:i+self.n]))\n # shuffle the classes, so the query class isn't always the same\n # index\n np.random.shuffle(classes)\n # initialize first class, which will be the query class\n k_offsets = np.random.choice(\n self.class_size,\n size=self.k+1,\n replace=False\n )\n # add k samples of first class to support set\n support = [(\n self.dataset[(classes[0][1]*self.class_size)+offset][0],\n classes[0][0]\n ) for offset in k_offsets[0:-1]]\n query = (self.dataset[(classes[0][1]*self.class_size)+k_offsets[-1]][0], classes[0][0])\n # populate the support set with the remaining classes\n for cls_i, cls in classes[1:]:\n k_offsets = np.random.choice(\n self.class_size,\n size=self.k,\n replace=False\n )\n for offset in k_offsets:\n support.append((\n self.dataset[(cls*self.class_size)+offset][0],\n cls_i\n ))\n # shuffle the support set, so it's not in contiguous classes\n np.random.shuffle(support)\n yield support, query\n ret = []\n\n def __len__(self):\n return len(self.dataset)//self.class_size\n\n\nclass KShotBatchToTensor(object):\n def __init__(self, num_classes):\n self.num_classes = num_classes\n\n def __call__(self, minibatch):\n imgs, labels = zip(*minibatch)\n return torch.stack(imgs), torch.FloatTensor(labels)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n","repo_name":"sagelywizard/snail","sub_path":"sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"51"}
+{"seq_id":"31940840735","text":"# coding:utf-8\nimport os\n\nimport os,shutil\n\n'''\n目的:获取目标文件夹下所有类型的指定文件\n\nfindAllFilesWithSpecifiedSuffix:寻找特定后缀的所有文件\ncopyFilesToSpecifiedPath:拷贝文件到特定路径\nreplaceFilesContent:替换文件列表里old_content为new_content\n'''\n\nclass FileUtil:\n @staticmethod\n def findAllFilesWithSpecifiedSuffix(target_dir, target_suffix=\"txt\"):\n find_res = []\n target_suffix_dot = \".\" + target_suffix\n walk_generator = os.walk(target_dir)\n for root_path, dirs, files in walk_generator:\n if len(files) < 1:\n continue\n for file in files:\n file_name, suffix_name = os.path.splitext(file)\n if suffix_name == target_suffix_dot:\n find_res.append(os.path.join(root_path, file))\n return find_res\n\n @staticmethod\n def copyFilesToSpecifiedPath(file_name_list,targetPath):\n for file_name in file_name_list:\n shutil.copy(file_name, targetPath)\n\n @staticmethod\n def findAndCopyPngFileToSpecifiedPath(target_dir,suffix,target_path):\n find_res = FileUtil.findAllFilesWithSpecifiedSuffix(target_dir, suffix)\n #print(find_res)\n FileUtil.copyFilesToSpecifiedPath(find_res,target_path)\n\n @staticmethod\n def replaceFilesContent(file_name_list, old_content, new_content):\n for file_name in file_name_list:\n with open(file_name, mode=\"r\", encoding=\"UTF-8\") as f1, \\\n open(file_name + \"_new\", mode=\"w\", encoding=\"UTF-8\") as f2:\n for line in f1:\n line = line.replace(old_content, new_content)\n f2.write(line)\n os.remove(file_name)\n os.rename(file_name + \"_new\", file_name)\n\n @staticmethod\n def replaceFilesContent(file_name_list, dict_content):\n for file_name in file_name_list:\n with open(file_name, mode=\"r\", encoding=\"UTF-8\") as f1, \\\n open(file_name + \"_new\", mode=\"w\", encoding=\"UTF-8\") as f2:\n for line in f1:\n for key in dict_content.keys():\n #print( key + \":\" + str(dict_content[key]))\n line = line.replace(\"$\"+key, str(dict_content[key]))\n f2.write(line)\n os.remove(file_name)\n os.rename(file_name + \"_new\", file_name)\n\n @staticmethod\n def findAndReplaceFilesToSpecifiedContent(target_dir,suffix,old_content,new_content):\n find_res = FileUtil.findAllFilesWithSpecifiedSuffix(target_dir, suffix)\n #print(find_res)\n FileUtil.replaceFilesContent(find_res,old_content,new_content)\n\n\nclass Util:\n\n @staticmethod\n def mkdir(path):\n \"\"\"判断文件夹是否存在,不存在就创建\"\"\"\n if not os.path.exists(path):\n os.mkdir(path)\n\n @staticmethod\n def is_file(path: str, extension: list):\n \"\"\"\n 判断路径是否是文件,且该文件格式是否存在于集合中\n :param path: 文件路径\n :param extension: 文件后缀集合\n :return: boolean\n \"\"\"\n return os.path.isfile(path) and path.rsplit('.', 1)[1] in extension\n\n @staticmethod\n def get_path():\n \"\"\"获取当前文件所处的路径\"\"\"\n path = os.getcwd()\n return path\n\n @staticmethod\n def load_setting_from_obj(obj: object):\n \"\"\"\n 读取对象中所有自定义的属性,保存在一个字典中并返回\n :param obj:对象\n :return: attrs,保存了对象所有自定义属性的字典\n \"\"\"\n attrs = {key: values for key, values in obj.__dict__.items() if not key.startswith('__')}\n return attrs\n","repo_name":"klc407073648/build_lib","sub_path":"build/pythonUtil/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"75128529759","text":"from lpython import i32, f64\n\ndef test_list_repeat():\n l_int_1: list[i32] = [1, 2]\n l_int_2: list[i32] = []\n l_int_3: list[i32]\n l_tuple_1: list[tuple[f64, i32]] = [(1.0, 2), (2.0, 4), (3.0, 6)]\n l_tuple_2: list[tuple[f64, i32]] = []\n l_tuple_3: list[tuple[f64, i32]]\n l_str_1: list[str] = ['ab', 'cd']\n l_str_2: list[str] = []\n l_str_3: list[str]\n i: i32\n\n assert len(l_int_1 * 0) == 0\n assert l_int_1 * 1 == [1, 2]\n assert l_int_1 * 2 == [1, 2, 1, 2]\n\n for i in range(10):\n l_int_3 = l_int_1 * i\n assert l_int_3 == l_int_2\n l_int_2 += l_int_1\n\n l_tuple_3 = l_tuple_1 * i\n assert l_tuple_3 == l_tuple_2\n l_tuple_2 += l_tuple_1\n\n l_str_3 = l_str_1 * i\n assert l_str_3 == l_str_2\n l_str_2 += l_str_1\n\n for i in range(5):\n assert l_int_1 * i + l_int_1 * (i + 1) == l_int_1 * (2 * i + 1)\n assert l_tuple_1 * i + l_tuple_1 * (i + 1) == l_tuple_1 * (2 * i + 1)\n assert l_str_1 * i + l_str_1 * (i + 1) == l_str_1 * (2 * i + 1)\n\n print(l_int_1)\n print(l_tuple_1)\n print(l_tuple_1)\n\ntest_list_repeat()\n","repo_name":"lcompilers/lpython","sub_path":"integration_tests/test_list_repeat.py","file_name":"test_list_repeat.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"ro","doc_type":"code","stars":1142,"dataset":"github-code","pt":"51"}
+{"seq_id":"42413731650","text":"from order.models import Order,File\nfrom master_data.models import Part,Package,RouterMaster\nfrom app.helper.order_helper.OrderUploadHelper import OrderUploadHelper\nfrom app.helper.CSV_file_management.CSVFileManagement import CSVFileManagement\nimport csv\nfrom datetime import datetime\nimport math\nfrom django.conf import settings\nfrom app.helper.config.ConfigPart import ConfigPart\nfrom app.helper.file_management.FileManagement import FileManagement\n\n\nconfigPart = ConfigPart()\nclass OrderComfirmHelper:\n\n file_list = []\n CSV_name_str = \"\"\n project_code = \"\"\n updated_by = \"\"\n\n\n def __init__(self,username_str):\n\n self.file_list = File.objects.filter(updated_by= username_str, status = 1)\n self.part_list = Part.objects.filter(is_active=True)\n self.package_list = Package.objects.filter(is_active=True)\n self.routerMaster_list = RouterMaster.objects.filter(is_active=True)\n self.order_list = Order.objects.filter()\n self.project_code = self.file_list[0].project_code\n\n self.CSV_name_str = settings.MEDIA_ROOT +'/'+ FileManagement.find_file(\n configPart.configs.get(\"UPLOAD_ORDER_PART\").data, \n str(self.file_list[0].file_no) + \"DatabaseCSV.csv\" )\n \n self.updated_by = username_str\n \n def read_order_from_CSV(self) :\n \n order_csv_list = CSVFileManagement.read_CSV_file(self.CSV_name_str,';','|')\n\n return order_csv_list\n \n def add_part(self,part_number,supplier_code,part_name,package_no) :\n \n part = Part()\n part.part_number = part_number\n part.project_code = self.project_code\n part.supplier_code = supplier_code\n part.part_name = part_name\n part.package_no = package_no\n part.updated_by = self.updated_by\n part.is_active = True\n part.status = 1 \n part.save()\n\n return part\n\n \n def add_order(self,order_obj) :\n\n order_add_obj = Order()\n order_add_obj.item_no = order_obj[0]\n order_add_obj.part_number = order_obj[1]\n order_add_obj.file_no = order_obj[2]\n order_add_obj.order_no = order_obj[3]\n order_add_obj.due_date = datetime.strptime(str(order_obj[4]), \"%d/%m/%Y %H:%M\")\n order_add_obj.order_qty = int(order_obj[5])\n order_add_obj.history_updated = order_obj[6]\n order_add_obj.supplier_code = order_obj[7]\n order_add_obj.plant_code = order_obj[8]\n order_add_obj.project_code = self.project_code\n order_add_obj.updated_by = self.updated_by\n order_add_obj.updated_date = datetime.utcnow()\n\n part_db_list = [p for p in self.part_list if p.part_number.upper() == order_obj[1] and p.status==2 ]\n \n if len(part_db_list) > 0 :\n \n order_add_obj.is_part_completed = True\n\n package_db_list = [p for p in self.package_list if p.package_no.upper() == part_db_list[0].package_no.upper() ]\n\n if len(package_db_list) > 0 :\n \n order_add_obj.package_no = package_db_list[0].package_no\n order_add_obj.package_qty = math.ceil(int(order_obj[5])/package_db_list[0].snp)\n \n else :\n \n check_part_list = Part.objects.filter(part_number=order_obj[1])\n \n order_add_obj.is_part_completed = False\n\n if len(check_part_list) == 0 :\n \n self.add_part(order_obj[1],order_obj[7],order_obj[9],None)\n \n routerMaster_db_list = [r for r in self.routerMaster_list if r.supplier_code.upper() == order_obj[7].upper() and r.plant_code.upper() == order_obj[8].upper() ]\n\n if len(routerMaster_db_list) > 0 :\n \n order_add_obj.is_route_completed = True\n order_add_obj.route_trip = routerMaster_db_list[0].route_trip\n order_add_obj.route_code = routerMaster_db_list[0].route_code\n\n else :\n\n order_add_obj.is_route_completed = False\n \n \n \n 
order_add_obj.save()\n\n return order_add_obj\n\n\n def update_order(self,order_obj) :\n\n order_update_obj= Order.objects.filter(order_no__iexact=order_obj[3])\n order_update_obj.update(\n order_qty=int(order_obj[5]),\n history_updated=order_obj[6],\n updated_date=datetime.utcnow(),\n updated_by=self.updated_by,\n is_deleted=False)\n \n return order_update_obj\n\n \n def delete_order(self,order_obj) :\n\n order_update_obj = Order.objects.filter(order_no__iexact=order_obj[3])\n order_update_obj.update(\n history_updated=order_obj[6],\n updated_date=datetime.utcnow(),\n updated_by=self.updated_by,\n is_deleted=True)\n \n return order_update_obj\n\n \n def order_comfirm_manage(self) :\n\n order_return_list = []\n with open(self.CSV_name_str, newline='') as csvfile:\n\n order_csv_list = csv.reader(csvfile, delimiter=';', quotechar='|')\n\n for order_obj in order_csv_list:\n \n if order_obj[10] == \"add\" :\n\n order_return_obj = self.add_order(order_obj)\n order_return_list.append(order_return_obj)\n\n \n if order_obj[10] == \"update\" :\n\n order_return_obj = self.update_order(order_obj)\n order_return_list.append(order_return_obj)\n \n if order_obj[10] == \"delete\" :\n\n order_return_obj = self.delete_order(order_obj)\n order_return_list.append(order_return_obj)\n \n self.file_list.update(status = 2)\n\n return order_return_list\n \n \n\n \n \n ","repo_name":"priosnmakky/adientAPI","sub_path":"app/helper/order_helper/OrderComfirmHelper.py","file_name":"OrderComfirmHelper.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"850563127","text":"# --- Day 6: Universal Orbit Map ---\n\n\ndef main():\n with open(\"input\", \"r\") as input_data:\n # load the input orbit data\n local_orbit_map = input_data.read().split(\"\\n\")[:-1]\n\n # create a dictionary from the local orbit map\n orbit_dict = {}\n for line in local_orbit_map:\n com, orbiter = line.split(\")\")\n orbit_dict[orbiter] = com\n\n # get the number of direct and indirect orbits\n n_direct_orbits = 0\n n_indirect_orbits = 0\n for orbiter in orbit_dict.keys():\n com = orbit_dict[orbiter]\n n_direct_orbits += 1\n while com != \"COM\":\n com = orbit_dict[com]\n n_indirect_orbits += 1\n\n # the answer to the puzzle is the total number of direct and indirect\n # orbits in the map data\n answer = n_direct_orbits + n_indirect_orbits\n\n print(\"Answer:\", answer)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rociomer/advent-of-code","sub_path":"2019/day-6/day-6-part-1-solution.py","file_name":"day-6-part-1-solution.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"71015838240","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import *\n\nxdata = np.linspace(-np.pi,np.pi,100) #구간을 개수만큼 나눔\nydata = []\nfor i in xdata:\n ydata.append(np.sin(i))\nplt.plot(xdata,ydata) #선그래프\nplt.show()\n# plt.bar(xdata,ydata) #막대그래프(세로)\n#\n# plt.barh(xdata,ydata) #막대그래프(가로)\n#\n# plt.hist([1,1,2,3,4,5,6,7,8,10]) #히스토그램\n#\n# plt.pie([10,20,30]) #원형\n#\n# plt.scatter(xdata,ydata) #w점\n# plt.show()\n\n# print(np.arange(1,10,2))\n# print(np.random.choice(10,5))\n#\n# print(np.random.choice(np.arange(1,46),5))\n\na = np.array([[1,1,1],[1,1,1]]) #2*3\nb = np.array([[1,1],[1,1],[1,1]]) #3*2\nc= np.dot(a,b)\nprint(c)","repo_name":"DDongYul/OpenSrcSW","sub_path":"python/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"29588707376","text":"import threading\nimport time\nimport logger\nimport urwid\nimport json\n\nfrom config import ServerConfig\nfrom utils import wrap_response\n\nlogger = logger.get_logger(__name__)\n\nclass CommandHandler(object):\n\n def __init__(self, mqs, slave2beacon, rq):\n self.message_queues = mqs\n self.slave2beacon = slave2beacon\n self.response_queue = rq\n\n def number_of_bots(self):\n return sum([1 for slave in self.slave2beacon.keys()])\n\n def number_of_beacons(self):\n return sum([1 for addr in self.message_queues.keys() if addr is not \"all\"])\n\n def active_bots(self):\n if self.number_of_bots() > 0:\n response = \"[+] Active Bots:\\n\"\n response += \"\\n\".join([\"{0}) {1}\"\n .format(idx+1, ServerConfig.UI_BOT_IDENTIFIER(slave)) for idx, slave in enumerate(self.slave2beacon.keys())])\n else:\n response = \"[+] There are not any active bots\"\n return response\n\n def handle_command(self, cmd, interface_footer):\n logger.debug(\"Command and Control: Typed command: {0}\".format(cmd))\n cmd = cmd.strip()\n prompt = interface_footer.caption.strip()\n if prompt is \">\":\n if cmd == \"bots\" or cmd == \"b\":\n self.response_queue.put(wrap_response(\"SYSTEM\", self.active_bots()))\n elif cmd == \"interact\" or cmd == \"i\":\n interface_footer.set_caption(\"Select Slave >\")\n elif cmd == \"mass\" or cmd == \"m\":\n interface_footer.set_caption(\"All >\")\n elif cmd == \"\":\n pass\n else:\n self.response_queue.put(wrap_response(\"SYSTEM\", \"Unknown command: {}\".format(cmd)))\n elif prompt is \"Select Slave >\":\n if cmd == \"back\" or cmd is \"b\":\n interface_footer.set_caption(\" >\")\n else:\n try:\n slave_num = int(cmd)\n self.recipient = list(self.slave2beacon.keys())[slave_num-1]\n bot_prompt = ServerConfig.UI_BOT_IDENTIFIER(self.recipient)\n interface_footer.set_caption(\"{0} >\".format(bot_prompt))\n except (ValueError, IndexError):\n response = \"Please select a number from the bot list:\\n\" + self.active_bots()\n self.response_queue.put(wrap_response(\"SYSTEM\", response))\n elif prompt is \"All >\":\n if cmd == \"back\" or cmd is \"b\":\n interface_footer.set_caption(\" >\")\n else:\n logger.debug('Queuing command \"{0}\" for all bots'.format(cmd))\n wrapped_command = json.dumps({\"to\": \"all\", \"cmd\": cmd})\n for i in range(self.number_of_beacons()):\n self.message_queues[\"all\"].put(wrapped_command)\n else: # This means prompt point to individual bot\n if cmd == \"back\" or cmd is \"b\":\n interface_footer.set_caption(\"Select Slave >\")\n else:\n logger.debug('Queuing command \"{0}\" for {1}'.format(cmd, self.slave2beacon[self.recipient]))\n wrapped_command = json.dumps({\"to\": self.recipient.mac_addr, \"cmd\": cmd})\n self.message_queues[self.slave2beacon[self.recipient]].put(wrapped_command)\n ","repo_name":"SkyBulk/vortus","sub_path":"core/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"23312975040","text":"import sqlite3\n\nconn = sqlite3.connect(\"user_test1.db\", isolation_level=None)\nc = conn.cursor()\n\nc.execute(\"SELECT name FROM sqlite_master WHERE type IN ('table', 'view') AND name NOT LIKE 'sqlite_%' UNION ALL SELECT name FROM sqlite_temp_master WHERE type IN ('table', 'view') ORDER BY 1;\")\nprint(c.fetchall())\n\nc.execute(\"SELECT * FROM table1;\")\nprint(c.fetchall())\nc.execute(\"SELECT * FROM table2;\")\nprint(c.fetchall())\nc.execute(\"SELECT * FROM table3;\")\nprint(c.fetchall())","repo_name":"SeungHoonJeon/Capstone-design-Kiosk","sub_path":"select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"28572890753","text":"import json\nimport sys\n\nimport calliope\n\ntags = {}\n\nfor item in calliope.playlist.read(sys.stdin):\n artist_name = item['artist']\n artist_tags = item['lastfm.tags.top']\n for tag in artist_tags:\n l = tags.get(tag, set())\n l.add(artist_name)\n tags[tag] = l\n\n\noutput = []\nfor tag, artists in tags.items():\n output.append({'text': tag, 'weight': len(artists)})\n\noutput = sorted(output, key=lambda item: item['weight'], reverse=True)\n\njson.dump(output[:100], sys.stdout)\n","repo_name":"ssssam/calliope","sub_path":"scripts/tagcloud.py","file_name":"tagcloud.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"72790036958","text":"\"\"\"\nRequests HTTP library (http://requests.readthedocs.io/) integration for the\nMDK.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom sys import maxsize\n\nfrom mdk import MDK\n\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\n\n__all__ = [\"requests_session\"]\n\n\ndef requests_session(mdk_session):\n \"\"\"\n Create a ``requests.Session`` from an MDK session.\n\n The resulting ``requests.Sesssion`` will:\n\n 1. Set timeouts based on the timeout set on the MDK session.\n 2. Send the X-MDK-CONTEXT HTTP header in all HTTP requests.\n\n IMPORTANT: Because of item #2 the resulting ``requests.Sesssion`` should\n only be used to talk to systems that use the MDK; the X-MDK-CONTEXT header\n may leak information about your system's internals.\n\n The resulting ``requests.Session`` has an adapter set. If you want to add\n your own additional adapters you will need to use something like\n https://pypi.python.org/pypi/requests-middleware.\n \"\"\"\n req_session = Session()\n adapter = _MDKAdapter(mdk_session)\n req_session.mount(\"http://\", adapter)\n req_session.mount(\"https://\", adapter)\n return req_session\n\n\nclass _MDKAdapter(HTTPAdapter):\n \"\"\"\n Set timeouts and session context header from the MDK session.\n\n See\n http://requests.readthedocs.io/en/master/api/#requests.adapters.HTTPAdapter\n for details.\n \"\"\"\n def __init__(self, mdk_session):\n self._mdk_session = mdk_session\n HTTPAdapter.__init__(self)\n\n def add_headers(self, request, **kwargs):\n \"\"\"Override base class.\"\"\"\n headers = (request.headers or {}).copy()\n headers[MDK.CONTEXT_HEADER] = self._mdk_session.externalize()\n request.headers = headers\n\n def _get_timeout(self, proposed_timeout):\n \"\"\"\n Given a proposed timeout, return a timeout that takes the MDK session\n timeout into account.\n \"\"\"\n mdk_timeout = self._mdk_session.getRemainingTime()\n if mdk_timeout is None:\n mdk_timeout = maxsize\n if proposed_timeout is None:\n proposed_timeout = maxsize\n result = min(mdk_timeout, proposed_timeout)\n if result == maxsize:\n result = None\n return result\n\n def send(self, request, stream=False, timeout=None, verify=True, cert=None,\n proxies=None):\n \"\"\"Override base class.\"\"\"\n if isinstance(timeout, tuple):\n connect_timeout, read_timeout = timeout\n timeout = (self._get_timeout(connect_timeout),\n self._get_timeout(read_timeout))\n else:\n timeout = self._get_timeout(timeout)\n return HTTPAdapter.send(self, request, stream=stream, timeout=timeout,\n verify=verify, cert=cert, proxies=proxies)\n","repo_name":"datawire/mdk","sub_path":"python/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"51"}
+{"seq_id":"12944240844","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n comments = [\n {\n 'user': 'Tom',\n 'content': 'comment'\n },\n {\n 'user': 'Jerry',\n 'content': 'Bad'\n }\n ]\n return render_template('index.html', avatar='https://yichengzhang.cn/uploads/croppedImg_6548.jpeg', comments=comments)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"zsalec/python_flask","sub_path":"prj/prj15_filter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"34679325172","text":"import os\nfrom emmet.api.routes.materials.tasks.query_operators import (\n MultipleTaskIDsQuery,\n TrajectoryQuery,\n DeprecationQuery,\n EntryQuery,\n)\nfrom emmet.api.core.settings import MAPISettings\n\nfrom monty.tempfile import ScratchDir\nfrom monty.serialization import loadfn, dumpfn\nfrom json import load\n\n\ndef test_multiple_task_ids_query():\n op = MultipleTaskIDsQuery()\n\n assert op.query(task_ids=\" mp-149, mp-13\") == {\n \"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}\n }\n\n with ScratchDir(\".\"):\n dumpfn(op, \"temp.json\")\n new_op = loadfn(\"temp.json\")\n\n assert new_op.query(task_ids=\" mp-149, mp-13\") == {\n \"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}\n }\n\n\ndef test_entries_query():\n op = EntryQuery()\n\n assert op.query(task_ids=\" mp-149, mp-13\") == {\n \"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}\n }\n\n with ScratchDir(\".\"):\n dumpfn(op, \"temp.json\")\n new_op = loadfn(\"temp.json\")\n query = {\"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}}\n\n assert new_op.query(task_ids=\" mp-149, mp-13\") == query\n\n with open(os.path.join(MAPISettings().TEST_FILES, \"tasks_Li_Fe_V.json\")) as file:\n tasks = load(file)\n docs = op.post_process(tasks, query)\n assert docs[0][\"entry\"][\"@class\"] == \"ComputedStructureEntry\"\n\n\ndef test_trajectory_query():\n op = TrajectoryQuery()\n\n assert op.query(task_ids=\" mp-149, mp-13\") == {\n \"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}\n }\n\n with ScratchDir(\".\"):\n dumpfn(op, \"temp.json\")\n new_op = loadfn(\"temp.json\")\n query = {\"criteria\": {\"task_id\": {\"$in\": [\"mp-149\", \"mp-13\"]}}}\n\n assert new_op.query(task_ids=\" mp-149, mp-13\") == query\n\n with open(os.path.join(MAPISettings().TEST_FILES, \"tasks_Li_Fe_V.json\")) as file:\n tasks = load(file)\n docs = op.post_process(tasks, query)\n assert docs[0][\"trajectories\"][0][\"@class\"] == \"Trajectory\"\n\n\ndef test_deprecation_query():\n op = DeprecationQuery()\n\n assert op.query(task_ids=\" mp-149, mp-13\") == {\n \"criteria\": {\"deprecated_tasks\": {\"$in\": [\"mp-149\", \"mp-13\"]}}\n }\n\n with ScratchDir(\".\"):\n dumpfn(op, \"temp.json\")\n new_op = loadfn(\"temp.json\")\n query = {\"criteria\": {\"deprecated_tasks\": {\"$in\": [\"mp-149\", \"mp-13\"]}}}\n\n assert new_op.query(task_ids=\" mp-149, mp-13\") == query\n\n docs = [\n {\"task_id\": \"mp-149\", \"deprecated_tasks\": [\"mp-149\"]},\n {\"task_id\": \"mp-13\", \"deprecated_tasks\": [\"mp-1234\"]},\n ]\n r = op.post_process(docs, query)\n\n assert r[0] == {\n \"task_id\": \"mp-149\",\n \"deprecated\": True,\n \"deprecation_reason\": None,\n }\n\n assert r[1] == {\n \"task_id\": \"mp-13\",\n \"deprecated\": False,\n \"deprecation_reason\": None,\n }\n","repo_name":"materialsproject/emmet","sub_path":"emmet-api/tests/materials/tasks/test_query_operators.py","file_name":"test_query_operators.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"51"}
+{"seq_id":"33367433653","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom dog_images import DogImages\nfrom image_classifier import ImageClassifier\n\ndef main():\n picsize = 128\n # Step 1: Get List of Dogs\n lst = [x[0] for x in os.walk('../Images')]\n lst_dogs = [a.replace('../Images/', '') for a in lst[1:]]\n\n dog_images = DogImages(lst_dogs, picsize)\n # dog_images.generate_img_files()\n train_imgs = dog_images.load_images('train')\n test_imgs = dog_images.load_images('test')\n Xtest = test_imgs[0]\n Ytest = test_imgs[1]\n Xtrain = train_imgs[0]\n Ytrain = train_imgs[1]\n\n for N, dogs in enumerate(lst_dogs):\n best_score = 0\n dog = dogs[dogs.index('-')+1:]\n classes = [dog, 'Not-'+dog]\n for i in range(5):\n # free up some memory...\n tf.reset_default_graph()\n Xtrain, Ytrain = shuffle_function(Xtest, Ytest)\n Xtest, Ytest = shuffle_function(Xtest, Ytest)\n\n Ytrain_1 = np.zeros((Ytrain.shape[0],2))\n Ytest_1 = np.zeros((Ytest.shape[0],2))\n\n Ytrain_1[Ytrain[:,N]==1] = [0, 1]\n Ytrain_1[Ytrain[:,N]==0] = [1, 0]\n\n Ytest_1[Ytest[:,N]==1] = [0, 1]\n Ytest_1[Ytest[:,N]==0] = [1, 0]\n\n Ytrain_a = Ytrain_1[Ytrain_1[:,0] == 1]\n Xtrain_a = Xtrain[Ytrain_1[:,0] == 1]\n Ytrain_b = Ytrain_1[Ytrain_1[:,0] == 0][:int(1.5*Ytrain_a.shape[0])]\n Xtrain_b = Xtrain[Ytrain_1[:,0] == 0][:int(1.5*Ytrain_a.shape[0])]\n\n Ytrain_run = np.concatenate((Ytrain_a, Ytrain_b))\n Xtrain_run = np.concatenate((Xtrain_a, Xtrain_b))\n\n model = ImageClassifier(picsize, classes,\n out_channels = 12,\n out_channels_2 = 24,\n hidden_units = 100,\n regularization_strength = 1.0,\n batch_size = 100,\n learning_rate = 0.001,\n convolution_size = 5,\n pool_size = 2,\n training_epochs = 100,\n loss_threshold = 0.01,\n verbose=True)\n model.fit(Xtrain_run, Ytrain_run)\n score = model.score(Xtest, Ytest_1)\n\n if score > best_score and score < 1.0:\n model.save_('models/' + dog + '.pickle')\n model.sess.close()\n tf.reset_default_graph()\n del model\n print()\n print('\\rOverall Percent Complete: {:.4f}%'\n .format((i + 5*N)/(120.0*5)))\n\ndef shuffle_function(X, y):\n Xhold = X.copy()\n Yhold = y.copy()\n new = np.array([i for i in range(Xhold.shape[0])])\n np.random.shuffle(new)\n for i, n in enumerate(new):\n X[i, :] = Xhold[n, :]\n y[i, :] = Yhold[n, :]\n return X, y\n\nif __name__ == '__main__':\n main()\n","repo_name":"NeverForged/DogClassifier","sub_path":"Source/test_run.py","file_name":"test_run.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"27245676999","text":"import json\nimport base64\nfrom vigilate_backend.models import Station, User, Plans, UserPrograms\nfrom django.utils import timezone\nfrom vigilate_backend.models import UserPrograms\nfrom vigilate_backend import alerts\nfrom vulnerability_manager import cpe_updater\n\ndef get_query(request):\n \"\"\"Parse a query\n \"\"\"\n if request.method == \"POST\" or request.method == \"PATCH\":\n if \"application/json\" in request.content_type:\n return request.data\n query = list(request.data)[0]\n if query:\n try:\n query = json.loads(query)\n except:\n return None\n else:\n return query\n try:\n query = json.loads(query)\n except:\n return None\n else:\n return query\n return None\n\ndef parse_cpe(cpe):\n \"\"\"Parse a cpe\n \"\"\"\n res = {}\n cpe = [elem.split('_')[0] for elem in cpe.split(':') if elem]\n res['devlopper'] = cpe[2]\n res['software'] = cpe[3]\n res['version'] = cpe[4]\n return res\n\ndef avoid_id_falsfication(user, request):\n if request.method in [\"POST\",\"PATCH\",\"PUT\",\"DELETE\"]:\n\n if not hasattr(request, \"data\") or \"user\" not in request.data:\n return True\n\n if user.is_superuser:\n return True\n try:\n request.data['user'] = int(request.data['user'])\n except ValueError:\n return False\n\n return request.data['user'] == user.id\n\n return True\n\ndef get_token(request):\n authheader = request.META.get('HTTP_AUTHORIZATION', '')\n if not authheader:\n return None\n\n try:\n method, token = authheader.split()\n if method != \"token\":\n return None\n except Exception:\n return None\n\n return token\n\ndef get_scanner_cred(request):\n authheader = request.META.get('HTTP_AUTHORIZATION', '')\n email = None\n token = None\n \n if not authheader:\n return (None, None)\n \n try:\n method, creds = authheader.split()\n\n if method != \"Basic\":\n return (None, None)\n (email, token) = base64.b64decode(creds).decode(\"utf8\").split(':')\n except Exception as e:\n (None, None)\n\n return (email, token)\n\ndef can_add_station(nb_station, user):\n if not user.plan:\n return True\n if nb_station >= user.plan.max_stations:\n return False\n return True\n\ndef nb_station_over_quota(nb_station, user):\n if not user.plan:\n return 0\n if nb_station >= user.plan.max_stations:\n return nb_station - user.plan.max_stations\n return 0\n\n\ndef update_contrat(user):\n stations = Station.objects.filter(user=user.id)\n over = nb_station_over_quota(stations.count(), user)\n enable_stations = [s for s in stations][:-over]\n disable_stations = [s for s in stations][-over:]\n if over:\n for s in enable_stations:\n if s.disabled:\n s.disabled = False\n s.save()\n \n for s in disable_stations:\n if not s.disabled:\n s.disabled = True\n s.save()\n else:\n for s in stations:\n if s.disabled:\n s.disabled = False\n s.save()\n\ndef check_expired_plan(user):\n if not user.plan or not user.plan.validity_time:\n return\n\n expire_in = user.plan.validity_time - (timezone.now() - user.plan_purchase_date).total_seconds()\n if expire_in <= 0:\n user.plan = Plans.objects.filter(default=True).first()\n user.save()\n update_contrat(user)\n\ndef add_progs(elem, versions, user, station, extra_field, up_to_date):\n for version in versions:\n (cpe, up_to_date) = cpe_updater.get_cpe_from_name_version(elem['program_name'], version, up_to_date)\n new_prog = UserPrograms(user=user, minimum_score=1, poste=station,\n program_name=elem['program_name'], program_version=version, cpe=cpe)\n if 'minimum_score' in elem:\n new_prog.minimum_score = int(elem['minimum_score'])\n for f in 
extra_field:\n setattr(new_prog, f, int(extra_field[f]))\n\n new_prog.save()\n alerts.check_prog(new_prog, user)\n\ndef maj_progs(progs, elem, versions, user, up_to_date):\n for (prog, version) in zip(progs, versions):\n prog_changed = False\n if prog.program_version != version:\n prog_changed = True\n prog.program_version = version\n (cpe, up_to_date) = cpe_updater.get_cpe_from_name_version(elem['program_name'], version, up_to_date)\n prog.cpe = cpe\n if 'minimum_score' in elem and prog.minimum_score != int(elem['minimum_score']):\n prog_changed = True\n prog.minimum_score = int(elem['minimum_score'])\n if prog_changed:\n prog.save()\n alerts.check_prog(prog, user)\n","repo_name":"vigilate/backend","sub_path":"vigilate_backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"37171779605","text":"class LinkedList:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\ndef middleNode(linkedList):\n count = 0\n currentNode = linkedList\n while currentNode:\n count = count + 1\n currentNode = currentNode.next\n\n middleNode = linkedList\n for _ in range(count // 2):\n middleNode = middleNode.next\n return middleNode\n\n\ndef middleNode2(linkedList):\n slowNode = linkedList\n fastNode = linkedList\n\n while fastNode and fastNode.next:\n slowNode = slowNode.next\n fastNode = fastNode.next.next\n\n return slowNode\n\n\nhead = LinkedList(1)\nhead.next = LinkedList(1)\nhead.next.next = LinkedList(3)\nhead.next.next.next = LinkedList(4)\nhead.next.next.next.next = LinkedList(4)\nhead.next.next.next.next = LinkedList(4)\nhead.next.next.next.next.next = LinkedList(4)\nmiddleNode(head)\n\n\"\"\"\nresult = middleNode(head)\nprint(result.value)\n\"\"\"\n","repo_name":"vidyadharreddy18/python","sub_path":"AlgoExpert/easy/linkedlists/middleNode.py","file_name":"middleNode.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"29938054807","text":"from django.urls import path, include\nfrom rest_framework.authtoken import views\nfrom drf_yasg.utils import swagger_auto_schema\n\nfrom .views import AddressesView, AddressDetail, AddressRegister, RegisterView\n\ndecorated_token_view = swagger_auto_schema(\n method=\"POST\",\n security=[{\"Basic\": []}],\n operation_description=\"Generate API Token\"\n )(views.obtain_auth_token)\n\n\nurlpatterns = [\n path('address//', AddressDetail.as_view()),\n path('addresses/', AddressesView.as_view()),\n path('register/', AddressRegister.as_view()),\n path('api-auth/', include('rest_framework.urls')),\n path('api_token/', views.obtain_auth_token),\n]\n","repo_name":"dethspikee/address-api-demo","sub_path":"address/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"69850101582","text":"'''\nCreated on 24 Nov 2018\n\n@author: hoanglong\n'''\n\nfrom Environment import IWorldMapModule\nimport Environment.MapGeneratorModule as gm\nimport numpy as np\nimport copy \n\nclass SimpleWorldMap(IWorldMapModule.IWorldMap):\n '''\n WorldMap object, used to abstract map implementation from WorldModule\n '''\n\n\n def __init__(self):\n '''\n Constructor\n '''\n self.PATH_VAL = 0\n self.OBSTACLE_VAL = 1\n self.GOAL_VAL = 2\n self.mapGenerator = gm.MapGenerator()\n if self.mapGenerator.checkBlankMap():\n self.generateMap()\n self.map = copy.deepcopy(self.mapGenerator.getMap())\n self.width = np.shape(self.map)[0]\n self.height = np.shape(self.map)[1]\n self.originalMap = self.mapGenerator.getMap()\n \n def generateMap(self, height=30, width=30, numOfObstacles=10, obstacleSize=10):\n self.mapGenerator.generateBlankMap()\n obstacleList = []\n for i in range(numOfObstacles):\n x1 = np.random.randint(1, width)\n y1 = np.random.randint(1, height)\n x2 = max(min(x1 + np.random.randint(-obstacleSize / 2 + 1, obstacleSize / 2), 0), width)\n y2 = max(min(y1 + np.random.randint(-obstacleSize / 2 + 1, obstacleSize / 2), 0), height)\n if x2 == 0 and y2 == 0:\n x2 = 1\n obstacleList.append((x1, y1, x2, y2))\n \n self.mapGenerator.addStraightBlocks(obstacleList, self.OBSTACLE_VAL)\n #add goal\n x_goal = width - 1\n y_goal = np.random.randint(0, height)\n self.mapGenerator.addObject([(x_goal, y_goal)], self.GOAL_VAL)\n \n self.map = self.mapGenerator.getMap()\n \n def isObstacle(self, x, y):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height) and self.map[x, y] == self.OBSTACLE_VAL:\n return True\n else:\n return False\n \n def isGoal(self, x, y):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height) and self.map[x, y] == self.GOAL_VAL:\n return True\n else:\n return False\n \n def isPath(self, x, y):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height) and (self.map[x, y] == self.PATH_VAL):\n return True\n else:\n return False\n \n def getMapSize(self):\n return self.width, self.height\n \n def markPositionTemporarily(self, x, y, val):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height):\n self.map[x, y] = val + self.GOAL_VAL + 1\n pass\n \n def unmark(self, x, y):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height):\n self.map[x, y] = self.originalMap[x, y]\n \n def isMark(self, x, y, val):\n if (x >= 0) and (x < self.width) and (y >= 0) and (y < self.height) and (val == self.map[x, y] - self.GOAL_VAL - 1):\n return True\n else:\n return False\n pass\n ","repo_name":"LeHoangLong/Exercise","sub_path":"ReinforcementLearning/WindWorld/Environment/SimpleWorldMap.py","file_name":"SimpleWorldMap.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"4028017911","text":"from programy.utils.logging.ylogger import YLogger\n\nfrom programy.extensions.base import Extension\n\n\nclass SchedulerExtension(Extension):\n\n # execute() is the interface that is called from the tag in the AIML\n def execute(self, client_context, data):\n YLogger.debug(client_context, \"Scheduler - [%s]\", data)\n\n # REDMIND IN|EVERY X SECONDS|MINUTES|HOURS|DAYS|WEEKS MESSAGE|GRAMMAR ...........\n\n words = data.split()\n if len(words)> 5:\n if words[0].upper() == 'REMIND':\n when = words[1].upper()\n if when in ['IN', 'EVERY']:\n quantity = int(words[2])\n period = words[3].upper()\n if period in ['SECONDS', 'MINUTES', 'HOURS', 'DAYS', 'WEEKS']:\n action = words[4]\n if action in ['MESSAGE', 'GRAMMAR']:\n text = \" \".join(words[5:])\n self.schedule(client_context, when, quantity, period, action, text)\n return 'OK'\n else:\n print ('MESSAGE missing')\n else:\n print (\"Invalid period %s\"% period)\n else:\n print(\"Invalid when %s\"% when)\n else:\n print (\"Invalid command, must start with REMIND\")\n\n return 'ERR'\n\n def schedule(self, client_context, when, quantity, period, action, text):\n\n if when == 'IN':\n if period == 'SECONDS':\n client_context.client.scheduler.schedule_in_n_seconds(client_context.userid, client_context.id, action, text, quantity)\n\n elif when == 'EVERY':\n if period == 'SECONDS':\n client_context.client.scheduler.schedule_every_n_seconds(client_context.userid, client_context.id, action, text, quantity)\n\n\n","repo_name":"secrecy27/chatbot","sub_path":"src/programy/extensions/scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"13828726251","text":"\n__module_name__ = \"_base_lightning_data_module.py\"\n__doc__ = \"\"\"Aux. module to organize AnnData/torch datasets into PyTorch-Lightning LightningDataModule.\"\"\"\n__author__ = \", \".join([\"Michael E. Vinyard\"])\n__email__ = \", \".join([\"vinyard@g.harvard.edu\"])\n\n\n# -- import native modules: ----------------------------------------------------\nfrom abc import ABC, abstractmethod\nimport os\n\n\n# -- import packages: ----------------------------------------------------------\nfrom lightning import LightningDataModule\nfrom torch.utils.data import DataLoader\nimport anndata\n\n\n# -- main module class: --------------------------------------------------------\nclass BaseLightningDataModule(ABC, LightningDataModule):\n def __init__(\n self,\n adata: anndata.AnnData = None,\n batch_size: int = 2000,\n num_workers: int = os.cpu_count(),\n **kwargs\n ):\n super(BaseLightningDataModule, self).__init__()\n self.__parse__(locals()) \n\n def __parse__(self, kwargs, ignore=[\"self\", \"__class__\"]):\n self._kwargs = {}\n for k, v in kwargs.items():\n if not k in ignore:\n setattr(self, k, v)\n self._kwargs[k] = v\n if k == \"kwargs\":\n for l, w in v.items():\n setattr(self, l, w)\n self._kwargs[l] = w\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dataset, batch_size=self.batch_size, num_workers=self.num_workers\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val_dataset, batch_size=self.batch_size, num_workers=self.num_workers\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.test_dataset, batch_size=self.batch_size, num_workers=self.num_workers\n )\n\n def predict_dataloader(self):\n return DataLoader(\n self.dataset, batch_size=self.batch_size, num_workers=self.num_workers\n )\n","repo_name":"mvinyard/torch-adata","sub_path":"torch_adata/_tools/_base_lightning_data_module.py","file_name":"_base_lightning_data_module.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"47"}
+{"seq_id":"28239828217","text":"import numpy as np\n\n# Convolutional Neural Networks: Step by Step\n\n\n# zero pad,4-dimensional matrix only\ndef zero_pad(X, pad):\n X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)\n return X_pad\n\n\n# conv single step\ndef conv_single_step(a_slice_prev, W, b):\n s = np.multiply(a_slice_prev, W) + b\n Z = np.sum(s)\n return Z\n\n\n# 卷积层,正向传播\ndef conv_forward(A_prev, W, b, hparameters):\n \"\"\"\n Implements the forward propagation for a convolution function\n\n Arguments:\n A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)\n b -- Biases, numpy array of shape (1, 1, 1, n_C)\n hparameters -- python dictionary containing \"stride\" and \"pad\"\n\n Returns:\n Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward() function\n \"\"\"\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n f, f, n_C_prev, n_C = W.shape\n stride = hparameters['stride']\n pad = hparameters['pad']\n\n n_H = int((n_H_prev + pad * 2 - f) / stride) + 1\n n_W = int((n_W_prev + pad * 2 - f) / stride) + 1\n Z = np.zeros((m, n_H, n_W, n_C))\n\n A_prev_pad = zero_pad(A_prev, pad) # zero pad\n\n for i in range(m):\n ith_A_prev_pad = A_prev_pad[i]\n for H_i in range(n_H):\n for W_i in range(n_W):\n for C_i in range(n_C):\n # The current \"slice\"\n vert_start = H_i * stride\n vert_end = vert_start + f\n horiz_start = W_i * stride\n horiz_end = horiz_start + f\n\n a_slice_prev = ith_A_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n Z[i, H_i, W_i, C_i] = conv_single_step(a_slice_prev, W[:, :, :, C_i], b[:, :, :, C_i])\n\n cache = (A_prev, W, b, hparameters)\n return Z, cache\n\n\n# 池化层,正向传播\ndef pool_forward(A_prev, hparameters, mode=\"max\"):\n \"\"\"\n Implements the forward pass of the pooling layer\n\n Arguments:\n A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n hparameters -- python dictionary containing \"f\" and \"stride\"\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n\n Returns:\n A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters\n \"\"\"\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n f = hparameters[\"f\"]\n stride = hparameters[\"stride\"]\n\n n_H = int((n_H_prev - f) / stride) + 1\n n_W = int((n_W_prev - f) / stride) + 1\n A = np.zeros((m, n_H, n_W, n_C_prev))\n\n for i in range(m):\n ith_A_prev = A_prev[i]\n for H_i in range(n_H):\n for W_i in range(n_W):\n for C_i in range(n_C_prev):\n vert_start = H_i * stride\n vert_end = vert_start + f\n horiz_start = W_i * stride\n horiz_end = horiz_start + f\n\n a_slice_prev = ith_A_prev[vert_start:vert_end, horiz_start:horiz_end, C_i]\n if mode == \"max\":\n A[i, H_i, W_i, C_i] = np.max(a_slice_prev)\n elif mode == \"average\":\n A[i, H_i, W_i, C_i] = np.mean(a_slice_prev)\n\n cache = (A_prev, hparameters)\n return A, cache\n\n\n# 卷积层,反向传播\ndef conv_backward(dZ, cache):\n \"\"\"\n Implement the backward propagation for a convolution function\n\n Arguments:\n dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)\n cache -- cache of values needed for the conv_backward(), output of conv_forward()\n\n Returns:\n dA_prev -- gradient of the cost with respect 
to the input of the conv layer (A_prev),\n numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)\n dW -- gradient of the cost with respect to the weights of the conv layer (W)\n numpy array of shape (f, f, n_C_prev, n_C)\n db -- gradient of the cost with respect to the biases of the conv layer (b)\n numpy array of shape (1, 1, 1, n_C)\n \"\"\"\n A_prev, W, b, hparameters = cache\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n f, f, n_C_prev, n_C = W.shape\n stride = hparameters['stride']\n pad = hparameters['pad']\n m, n_H, n_W, n_C = dZ.shape\n\n dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))\n dW = np.zeros((f, f, n_C_prev, n_C))\n db = np.zeros((1, 1, 1, n_C))\n\n A_prev_pad = zero_pad(A_prev, pad)\n dA_prev_pad = zero_pad(dA_prev, pad)\n\n for i in range(m):\n a_prev_pad = A_prev_pad[i]\n da_prev_pad = dA_prev_pad[i]\n\n for H_i in range(n_H):\n for W_i in range(n_W):\n for C_i in range(n_C):\n # The current \"slice\"\n vert_start = H_i * stride\n vert_end = vert_start + f\n horiz_start = W_i * stride\n horiz_end = horiz_start + f\n\n a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]\n\n da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:, :, :, C_i] * dZ[i, H_i, W_i, C_i]\n dW[:, :, :, C_i] += a_slice * dZ[i, H_i, W_i, C_i]\n db[:, :, :, C_i] += dZ[i, H_i, W_i, C_i]\n\n dA_prev[i, :, :, :] = dA_prev_pad[i, pad:-pad, pad:-pad, :]\n return dA_prev, dW, db\n\n\n# Creates a mask from an input matrix x, to identify the max entry of x.\ndef create_mask_from_window(x):\n mask = (x == np.max(x))\n return mask\n\n\n# Distributes the input value in the matrix of dimension shape\ndef distribute_value(dz, shape):\n n_H, n_W = shape\n average = dz / (n_H * n_W)\n a = np.ones(shape) * average\n return a\n\n\n# 池化层,反向传播\ndef pool_backward(dA, cache, mode=\"max\"):\n \"\"\"\n Implements the backward pass of the pooling layer\n\n Arguments:\n dA -- gradient of cost with respect to the output of the pooling layer, same shape as A\n cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters\n mode -- the pooling mode you would like to use, defined as a string (\"max\" or \"average\")\n\n Returns:\n dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev\n \"\"\"\n A_prev, hparameters = cache\n m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape\n m, n_H, n_W, n_C = dA.shape\n stride = hparameters['stride']\n f = hparameters['f']\n\n dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))\n\n for i in range(m):\n a_prev = A_prev[i]\n\n for H_i in range(n_H):\n for W_i in range(n_W):\n for C_i in range(n_C):\n vert_start = H_i * stride\n vert_end = vert_start + f\n horiz_start = W_i * stride\n horiz_end = horiz_start + f\n\n if mode == \"max\":\n a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, C_i]\n mask = create_mask_from_window(a_prev_slice)\n dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, C_i] +=\\\n mask * dA[i, vert_start, horiz_start, C_i]\n elif mode == 'average':\n da = dA[i, vert_start, horiz_start, C_i]\n shape = (f, f)\n dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, C_i] += distribute_value(da, shape)\n\n return dA_prev\n","repo_name":"TimePickerWang/DeepLearning","sub_path":"CourseFour-Convolutional Neural Networks/assignment1_1CNN.py","file_name":"assignment1_1CNN.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"47"}
+{"seq_id":"69821938382","text":"import socket\nimport time\nimport threading\nimport random\nimport hashlib\n\nfrom nodeconnection import NodeConnection\n\nclass Node(threading.Thread):\n def __init__(self, host, port, id=None, callback=None, max_connections=0): \n super(Node, self).__init__()\n self.terminate_flag = threading.Event()\n self.host = host\n self.port = port\n self.callback = callback\n self.nodesInbound = [] \n self.nodes_outbound = [] \n self.reconnect_to_nodes = []\n self.id = str(id) \n self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.init_server()\n self.message_count_send = 0\n self.message_count_recv = 0\n self.message_count_rerr = 0\n self.max_connections = max_connections\n self.debug = False\n\n \n def all_nodes(self):\n return self.nodesInbound + self.nodes_outbound\n\n def debug_print(self, message):\n if self.debug:\n print(\"DEBUG (\" , self.id , \"): \" , message)\n\n def init_server(self):\n print(\"Initialisation a leaf node in port: \" + str(self.port) + \" on node (\" + self.id + \")\")\n self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.soc.bind((self.host, self.port))\n self.soc.settimeout(10.0)\n self.soc.listen(1)\n\n \n\n \n\n def send_to_node(self, n, data):\n self.message_count_send = self.message_count_send + 1\n for node in self.all_nodes():\n if node.id == n.id:\n node.send(data)\n return True\n else:\n print(\"Node send to node: Could not send the data, node is not found!\")\n return False\n\n def connect_with_node(self, host, port, reconnect=False):\n if host == self.host and port == self.port:\n print(\"connect with node: Cannot connect with yourself!!\")\n return False\n\n for node in self.nodes_outbound:\n if node.host == host and node.port == port:\n print(\"connect with node: Already connected with this node (\" + node.id + \").\")\n return True\n\n try:\n soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.debug_print(\"connecting to %s port %s\" % (host, port))\n soc.connect((host, port))\n\n soc.send(self.id.encode('utf-8'))\n connected_node_id = soc.recv(4096).decode('utf-8')\n\n for node in self.nodesInbound:\n if node.host == host and node.id == connected_node_id:\n print(\"connect with node: This node (\" + node.id + \") is already connected with us.\")\n return True\n\n thread_client = self.create_new_connection(soc, connected_node_id, host, port)\n thread_client.start()\n\n self.nodes_outbound.append(thread_client)\n self.outbound_node_connected(thread_client)\n\n if reconnect:\n self.debug_print(\"connect with node: Reconnection check is enabled on node \" + host + \":\" + str(port))\n self.reconnect_to_nodes.append({\n \"host\": host, \"port\": port, \"tries\": 0\n })\n\n except Exception as e:\n self.debug_print(\"TcpServer.connect_with_node: Could not connect with node. 
(\" + str(e) + \")\")\n\n def disconnect_with_node(self, node):\n if node in self.nodes_outbound:\n self.node_disconnect_with_outbound_node(node)\n node.stop()\n else:\n self.debug_print(\"Node disconnect_with_node: cannot disconnect with a node with which we are not connected.\")\n\n def stop(self):\n self.node_request_to_stop()\n self.terminate_flag.set()\n\n def create_new_connection(self, connection, id, host, port):\n return NodeConnection(self, connection, id, host, port)\n\n def reconnect_nodes(self):\n for node_to_check in self.reconnect_to_nodes:\n found_node = False\n self.debug_print(\"reconnect_nodes: Checking node \" + node_to_check[\"host\"] + \":\" + str(node_to_check[\"port\"]))\n\n for node in self.nodes_outbound:\n if node.host == node_to_check[\"host\"] and node.port == node_to_check[\"port\"]:\n found_node = True\n node_to_check[\"trials\"] = 0 \n self.debug_print(\"reconnect_nodes: Node \" + node_to_check[\"host\"] + \":\" + str(node_to_check[\"port\"]) + \" still running!\")\n\n if not found_node: \n node_to_check[\"trials\"] += 1\n if self.node_reconnection_error(node_to_check[\"host\"], node_to_check[\"port\"], node_to_check[\"trials\"]):\n self.connect_with_node(node_to_check[\"host\"], node_to_check[\"port\"])\n\n else:\n self.debug_print(\"reconnect_nodes: Removing node (\" + node_to_check[\"host\"] + \":\" + str(node_to_check[\"port\"]) + \") from the reconnection list!\")\n self.reconnect_to_nodes.remove(node_to_check)\n\n def run(self):\n while not self.terminate_flag.is_set(): \n try:\n self.debug_print(\"Node: Wait for incoming connection\")\n connection, client_address = self.soc.accept()\n\n self.debug_print(\"Total inbound connections:\" + str(len(self.nodesInbound)))\n \n if self.max_connections == 0 or len(self.nodesInbound) < self.max_connections:\n \n \n connected_node_id = connection.recv(4096).decode('utf-8')\n connection.send(self.id.encode('utf-8')) \n\n thread_client = self.create_new_connection(connection, connected_node_id, client_address[0], client_address[1])\n thread_client.start()\n\n self.nodesInbound.append(thread_client)\n self.inbound_node_connected(thread_client)\n\n else:\n self.debug_print(\"New connection is closed. 
You have reached the maximum connection limit!\")\n connection.close()\n \n except socket.timeout:\n self.debug_print('Node: Connection timeout!')\n\n except Exception as e:\n raise e\n\n self.reconnect_nodes()\n\n time.sleep(0.01)\n\n print(\"Node stopping...\")\n for t in self.nodesInbound:\n t.stop()\n\n for t in self.nodes_outbound:\n t.stop()\n\n time.sleep(1)\n self.soc.settimeout(None) \n self.soc.close()\n print(\"Node stopped\")\n\n def outbound_node_connected(self, node):\n \n self.debug_print(\"outbound_node_connected: \" + node.id)\n if self.callback is not None:\n self.callback(\"outbound_node_connected\", self, node, {})\n\n def inbound_node_connected(self, node):\n self.debug_print(\"inbound_node_connected: \" + node.id)\n if self.callback is not None:\n self.callback(\"inbound_node_connected\", self, node, {})\n\n def node_disconnected(self, node):\n self.debug_print(\"node_disconnected: \" + node.id)\n\n if node in self.nodesInbound:\n del self.nodesInbound[self.nodesInbound.index(node)]\n self.inbound_node_disconnected(node)\n\n if node in self.nodes_outbound:\n del self.nodes_outbound[self.nodes_outbound.index(node)]\n self.outbound_node_disconnected(node)\n\n def inbound_node_disconnected(self, node):\n self.debug_print(\"inbound_node_disconnected: \" + node.id)\n if self.callback is not None:\n self.callback(\"inbound_node_disconnected\", self, node, {})\n\n def outbound_node_disconnected(self, node):\n self.debug_print(\"outbound_node_disconnected: \" + node.id)\n if self.callback is not None:\n self.callback(\"outbound_node_disconnected\", self, node, {})\n\n def node_message(self, node, data):\n self.debug_print(\"node_message: \" + node.id + \": \" + str(data))\n if self.callback is not None:\n self.callback(\"node_message\", self, node, data)\n\n def node_disconnect_with_outbound_node(self, node):\n self.debug_print(\"node wants to disconnect with oher outbound node: \" + node.id)\n if self.callback is not None:\n self.callback(\"node_disconnect_with_outbound_node\", self, node, {})\n\n def node_request_to_stop(self):\n self.debug_print(\"node is requested to stop!\")\n if self.callback is not None:\n self.callback(\"node_request_to_stop\", self, {}, {})\n\n def node_reconnection_error(self, host, port, trials):\n self.debug_print(\"node_reconnection_error: Reconnecting to node \" + host + \":\" + str(port) + \" (trials: \" + str(trials) + \")\")\n return True\n\n def __str__(self):\n return 'Node: {}:{}'.format(self.host, self.port)\n\n def __repr__(self):\n return ''.format(self.host, self.port, self.id)\n","repo_name":"Yashvardhan777/GnutellaNetwork","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":8942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"1470559679","text":"import json\nfilename = \"username2.json\"\n\ndef get_stored_name():\n\ttry:\n\t\twith open(filename) as f_obj:\n\t\t\tusername = json.load(f_obj)\n\texcept FileNotFoundError:\n\t\tusername = None\n\treturn username\ndef get_new_name():\n\tusername = input(\"What is your name:\")\n\tprint(\"Hello \" + username +\" !\")\n\twith open(filename,'w') as f_obj:\n\t\tjson.dump(username,f_obj)\ndef greet_user():\n\tusername = get_stored_name()\n\tif username:\n\t\tprint(\"Welcome \"+username+\" !\")\n\telse:\n\t\tget_new_name()\n\ngreet_user()","repo_name":"chaojimali666/core_python","sub_path":"Part1/Cha10/remember_me_2.py","file_name":"remember_me_2.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10233304533","text":"#!/usr/bin/env python3\n\nfrom asyncio import ensure_future, get_event_loop\n\nfrom stablecoin import StablecoinInteractor\n\nfrom blockchain.trustchain import TrustChain\n# from persistence.database import Database\n\nfrom bank.tikkie import Tikkie\nfrom persistence.inmemorypersistence import InMemoryPersistence\nfrom blockchain.ipv8.eurotoken.community import EuroTokenCommunity\nfrom blockchain.ipv8.trustchain.community import MyTrustChainCommunity\nfrom ui.rest import MyRESTManager\n\nfrom pyipv8.ipv8.configuration import get_default_configuration\nfrom pyipv8.ipv8_service import IPv8\n\nfrom binascii import hexlify, unhexlify\nfrom base64 import b64encode\n\nimport os\n\nGATEWAY_NAME = os.environ.get('GATEWAY_NAME', \"Demo Gateway\").strip()\nGATEWAY_HOSTNAME = os.environ.get('GATEWAY_HOSTNAME', \"develop.euro-token.nl\").strip()\nGATEWAY_IP = os.environ.get('GATEWAY_IP', \"0.0.0.0\").strip()\nRATE_E2T = float(os.environ.get('RATE_E2T', 1.00))\nRATE_T2E = float(os.environ.get('RATE_T2E', 1.00))\n\nDOCKER = bool(int(os.environ.get('DOCKER', 0)))\n\ndef resolve_user(path):\n return os.path.expanduser(path)\n\ndef get_rest_manager(interactor):\n return MyRESTManager(interactor)\n\nasync def start_communities():\n rest_port = 8000\n ipv8_port = 8090\n hostname = GATEWAY_HOSTNAME\n ip_address = GATEWAY_IP\n configuration = get_default_configuration()\n configuration['port'] = ipv8_port\n configuration['keys'] = [{\n 'alias': \"my peer\",\n 'generation': u\"curve25519\",\n 'file': (f\"/vol/keys/trustchain/ec.pem\" if DOCKER else resolve_user(\"~/.ssh/eurotoken/trustchain/ec.pem\"))\n }]\n configuration['address'] = ip_address\n configuration['logger'] = {\n 'level': \"INFO\",\n }\n configuration['overlays'] = [{\n 'class': 'MyTrustChainCommunity',\n 'key': \"my peer\",\n 'walkers': [{\n 'strategy': \"RandomWalk\",\n 'peers': 10,\n 'init': {\n 'timeout': 3.0\n }\n }],\n 'initialize': {\n 'working_directory': (f'/vol/database'if DOCKER else f'.local')\n },\n 'on_start': [('started', )]\n }, {\n 'class': 'EuroTokenCommunity',\n 'key': \"my peer\",\n 'walkers': [{\n 'strategy': \"RandomWalk\",\n 'peers': 10,\n 'init': {\n 'timeout': 3.0\n }\n }],\n 'initialize': {},\n 'on_start': [('started', )]\n }\n ]\n\n ipv8 = IPv8(configuration, extra_communities={'MyTrustChainCommunity': MyTrustChainCommunity, 'EuroTokenCommunity': EuroTokenCommunity})\n await ipv8.start()\n interactor = buildSI(ipv8, hostname, ipv8_port)\n rest_manager = get_rest_manager(interactor)\n await rest_manager.start(ip_address, rest_port)\n\ndef buildSI(ipv8, address, ipv8_port):\n prefix = ('/vol/keys/' if DOCKER else resolve_user('~/.ssh/eurotoken/'))\n bank = Tikkie(\n production=False,\n\n # abn_api_path='/vol/keys/tikkie/abn_stablecoin_key',\n # sandbox_key_path='/vol/keys/tikkie/tikkie_key_sandbox',\n # production_key_path='/vol/keys/tikkie/tikkie_key_prod',\n\n abn_api_path=f'{prefix}/tikkie/abn_stablecoin_key',\n sandbox_key_path=f'{prefix}/tikkie/tikkie_key_sandbox',\n production_key_path=f'{prefix}/tikkie/tikkie_key_prod',\n\n global_url=\"http://bagn.blokzijl.family\",\n url=\"/api/exchange/e2t/tikkie_callback\")\n\n blockchain = TrustChain(identity=\"pubkey0123456789abcdef\", ipv8=ipv8, address=(address, ipv8_port) )\n persistence = InMemoryPersistence()\n\n s = StablecoinInteractor(\n name = GATEWAY_NAME,\n bank = bank,\n blockchain = blockchain,\n persistence = persistence,\n rateE2T = RATE_E2T,\n rateT2E = RATE_T2E,\n )\n\n return s\n\ndef main():\n 
ensure_future(start_communities())\n get_event_loop().run_forever()\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"rwblokzijl/stablecoin-exchange","sub_path":"backend/stablecoin/run_coin.py","file_name":"run_coin.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"8283770075","text":"import sys\n\nfrom ProductionRules import *\nfrom DiffProductionRules import *\nfrom IntProductionRules import *\nfrom FunctionTree import *\n\n\nclass DiffFunctionTree(FunctionTree):\n\n\tdef applyProduction( self, production, complexity ):\n\t\tleaf = self.getRandomLeaf()\n\n\t\t# if this leaf's value has already been set to a constant, do nothing\n\t\tif leaf.getValue() is not None:\n\t\t\treturn\n\n\t\tparent = leaf.getParent()\n\n\t\t# avoid (f*g)^k to prevent large coefficients\n\t\tif parent is not None:\n\t\t\tif parent.getValue() == powerConst and production == times:\n\t\t\t\treturn\n\t\t# else, replace it with a combo of Inner Node - Left Child, Right Child\n\n\t\t# create new inner node holding a production rule\n\t\tnewNode = Node( production, complexity )\n\t\t# create new leaf\n\t\tnewLeaf = Node()\n\t\tnewNode.setLeftChild( leaf )\n\t\tnewNode.setRightChild( newLeaf )\n\t\tself.replaceNode( leaf, newNode, parent )\n\t\t# if an inner node has rule \"powerConst\", its right child must be a const\n\t\tif production == powerConst:\n\t\t\tnewLeaf.setValue( const() )\n\n\n\t# Assign an elementary function to each leaf whose value has not been set\n\tdef assignFunctionsToLeaves( self ):\n\t\tleaves = self.getAllLeaves( self.root )\n\t\tfor leaf in leaves:\n\t\t\tif leaf.getValue() is None:\n\t\t\t\tfunc = DiffProductionRules.getRandomElemFunction()\n\t\t\t\t# Move coefficient if (a*x)^b\n\t\t\t\tif func == linear and leaf.getParent() is not None and leaf.getParent().getValue() == powerConst:\n\t\t\t\t leaf.setValue( buildFunction( \"x&\", \"1\", True, True ) )\n\t\t\t\t parent = leaf.getParent()\n\t\t\t\t grandparent = parent.getParent()\n\t\t\t\t # create new inner node for coefficient multiplication\n\t\t\t\t newNode = Node( times )\n\t\t\t\t # create new leaf\n\t\t\t\t newLeaf = Node( const() )\n\t\t\t\t newNode.setLeftChild( newLeaf )\n\t\t\t\t newNode.setRightChild( parent )\n\t\t\t\t self.replaceNode( parent, newNode, grandparent )\n\t\t\t\telse:\n\t\t\t\t leaf.setValue( func() )\n\n\n\t# Evaluate the subtree rooted at node to get the output function\n\tdef getFunctionAtSubtree( self, node ):\n\t\tif node.isLeaf():\n\t\t\treturn node.getValue()\n\t\telse:\n\t\t\t# get the function\n\t\t\tproduction = node.getValue()\n\t\t\tleftFunction = self.getFunctionAtSubtree( node.getLeftChild() )\n\t\t\trightFunction = self.getFunctionAtSubtree( node.getRightChild() )\n\t\t\tresult = production( leftFunction, rightFunction )\n\n\t\t\t# get the derivative\n\t\t\tderivative = DiffProductionRules.getDerivative( production.__name__, leftFunction, rightFunction )\n\t\t\tresult.setDerivative( derivative )\n\t\t\treturn result\n\n\n\t# Build a function tree with the input complexity bound\n\t@classmethod\n\tdef buildTreeWithMaxComplexity(self, complexity ):\n\t\ttree = DiffFunctionTree( complexity )\n\t\twhile tree.getComplexity() < complexity:\n\t\t\tproductionRule = DiffProductionRules.getRandomProductionRule()\n\t\t\tcomplexity = DiffProductionRules.complexityMap[productionRule]\n\t\t\ttree.applyProduction( productionRule, complexity )\n\t\ttree.assignFunctionsToLeaves()\n\t\treturn tree","repo_name":"wassgha/CalcTutor","sub_path":"main/question_factory/experiment/DiffFunctionTree.py","file_name":"DiffFunctionTree.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"47"}
+{"seq_id":"2064920412","text":"import argparse\nfrom pathlib import Path\n\nimport diff_match_patch\nimport lxml.html\n\n\nclass PyDiff:\n def __init__(self, from_file: str, to_file: str) -> None:\n from_path = Path(from_file)\n to_path = Path(to_file)\n from_content = from_path.read_text(\"utf-8\")\n to_content = to_path.read_text(\"utf-8\")\n\n dmp = diff_match_patch.diff_match_patch()\n dmp.Diff_Timeout = 25 # slowly, take it easy...\n diffs = dmp.diff_main(from_content, to_content)\n dmp.diff_cleanupSemantic(diffs)\n self._heading = \"'{}'→'{}' \".format(\n from_path.name, to_path.name\n )\n\n diff_html = dmp.diff_prettyHtml(diffs)\n diff_tree = lxml.html.fromstring(diff_html)\n diff_tree.classes.add(\"diff-container\")\n for elem in list(diff_tree):\n if elem.tag != \"span\":\n elem.attrib.pop(\"style\", None)\n if elem.tag == \"del\":\n elem.attrib[\"inert\"] = \"true\"\n\n self._diff_tree = diff_tree\n\n def _compress_markup(self) -> lxml.html.Element:\n root = lxml.html.Element(\"div\")\n root.classes.add(\"diff-container\")\n for elem in list(self._diff_tree):\n if elem.tag != \"span\":\n root.append(elem)\n else:\n text_list = [t for t in elem.xpath(\"text()\")]\n if len(text_list) < 3:\n root.append(elem)\n else:\n compressed = lxml.html.Element(\"span\")\n compressed.classes.add(\"compressed-lines\")\n compressed.text = text_list[0]\n filler = lxml.html.Element(\"span\")\n filler.classes.add(\"filler\")\n filler.tail = text_list[-1]\n compressed.append(filler)\n if text_list[-1].endswith(\"\\u00B6\"):\n br_tag = lxml.html.Element(\"br\")\n n_children = len(list(compressed.xpath(\"*\")))\n compressed.insert(n_children, br_tag)\n root.append(compressed)\n return root\n\n def get_markup(self, compress: bool = False) -> str:\n if compress:\n elem = self._compress_markup()\n else:\n elem = self._diff_tree\n return self._heading + lxml.html.tostring(elem, encoding=\"unicode\")\n\n\ndef main(\n from_file: str,\n to_file: str,\n out_path: str,\n css_path: str,\n js_path: str,\n compress: bool,\n) -> None:\n css = \"\".format(Path(css_path).read_text(\"utf-8\"))\n js = \"\".format(Path(js_path).read_text(\"utf-8\"))\n pd = PyDiff(from_file, to_file)\n title = Path(out_path).stem\n page_markup = \"\\n\".join(\n [\n \"\",\n '',\n \"\",\n ' ',\n ' ',\n \"{} \".format(title),\n css,\n \"\",\n \"\",\n '{}
'.format(pd.get_markup(compress)),\n js,\n \"\",\n \"\",\n ]\n )\n Path(out_path).write_text(page_markup, \"utf-8\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"fromFile\", type=str)\n parser.add_argument(\"toFile\", type=str)\n parser.add_argument(\"outFile\", type=str)\n parser.add_argument(\"--compress\", action=\"store_true\")\n args = parser.parse_args()\n\n css_path = Path(__file__).with_name(\"additional.css\")\n js_path = Path(__file__).with_name(\"event.js\")\n main(\n args.fromFile,\n args.toFile,\n args.outFile,\n css_path,\n js_path,\n args.compress,\n )\n","repo_name":"AWtnb/PowerShell","sub_path":"cmdlets/python/diff_as_html/inline/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"21913591192","text":"import sys\nfrom turtle import screensize\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtWidgets import QDesktopWidget # 用于获取电脑屏幕参数\nfrom PyQt5.QtWidgets import QHBoxLayout, QWidget, QPushButton # 用于设置控件\nfrom PyQt5.QtGui import QIcon\nfrom cv2 import QT_PUSH_BUTTON\n\n\nclass FirstMainWin(QMainWindow):\n def __init__(self, parent=None):\n super(FirstMainWin, self).__init__(parent)\n\n #设置主窗口标题\n self.setWindowTitle('第一个主窗口应用')\n #设置窗口尺寸\n self.resize(400, 300)\n\n #获得当前状态栏\n self.status = self.statusBar()\n #显示消息\n self.status.showMessage('只存在5秒的消息', 5000)\n\n #设置窗口初始位置\n self.move(100, 100)\n\n #添加Button\n self.button1 = QPushButton('退出')\n #将信号槽关联\n self.button1.clicked.connect(self.onClick_Button)\n\n #布局为水平布局\n layout = QHBoxLayout()\n layout.addWidget(self.button1)\n\n #添加主框架\n mainFrame = QWidget()\n #将上面创建的水平布局放置到主框架中\n mainFrame.setLayout(layout)\n #将主框架放置到中心窗口 #每个创建应用程序的语法都是这样\n self.setCentralWidget(mainFrame)\n\n #设置居中\n def center(self):\n #获取屏幕坐标\n screen = QDesktopWidget().screenGeometry()\n #获取窗口坐标系\n size = self.geometry()\n newLeft = (screen.width()-size.width())/2\n newTop = (screen.height()-size.height())/2\n self.move(newLeft, newTop)\n\n #按钮单击事件的方法(Slot)\n def onClick_Button(self):\n sender = self.sender()\n print(sender.text()+' 按钮被按下')\n #获取当前应用程序\n app = QApplication.instance()\n #退出应用程序\n app.quit()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n app.setWindowIcon(QIcon(\n 'E:\\\\BaiduNetdiskDownload\\\\Icon1800\\\\常用图标库\\\\PNG@2_black_icons\\\\ spiral [#29].png'))\n main = FirstMainWin()\n main.center()\n main.show()\n\n sys.exit(app.exec_())\n","repo_name":"KalutSirocco/PyQT5_Practice","sub_path":"FirstMainWin.py","file_name":"FirstMainWin.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"24114559958","text":"__author__ = 'biziuraa'\n\nimport asyncio\nimport socket\nfrom config import Config\nfrom structpu import *\nfrom datagram2 import Datagram2\nimport json\n\n# from exception_decor import exception\n#\n# from exception_logger import logger\n\n\nsetting = Config.inst()\n__all__ = ['main', 'Endpoint']\n\n\nclass DatagramEndpointProtocol(asyncio.DatagramProtocol):\n\n def __init__(self, endpoint, loop):\n self._endpoint = endpoint\n self.loop = loop\n\n def connection_made(self, transport):\n self._endpoint._transport = transport\n\n def datagram_received(self, data, addr):\n self._endpoint.add_datagram(data, addr)\n\n def connection_lost(self, exc):\n print(\"Socket closed, stop the event loop\")\n self.loop.stop()\n\n def error_received(self, exc):\n print('Error received:', exc)\n\n\nclass Endpoint:\n def __init__(self, queue_size=None):\n\n self.MCAST_GRP, self.MCAST_PORT, self.MCAST_HOST, _ = setting.version\n self.ADDRESS = (self.MCAST_HOST, self.MCAST_PORT)\n\n ttl = struct.pack('@i', 1)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\n\n self.sock.bind(self.ADDRESS)\n mreq = struct.pack(\"4sl\", socket.inet_aton(self.MCAST_GRP), socket.INADDR_ANY)\n\n self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n if queue_size is None:\n queue_size = 0\n self._queue = asyncio.Queue(queue_size)\n\n self._transport = None\n\n def add_datagram(self, data, addr):\n try:\n self._queue.put_nowait((data, addr))\n except asyncio.QueueFull:\n print('asyncio.QueueFull')\n\n def send(self, data):\n self._transport.sendto(data, (self.MCAST_GRP, self.MCAST_PORT))\n return data\n\n async def receive(self):\n\n data, addr = await self._queue.get()\n self._queue.task_done()\n return data, addr\n\n\n# Dgram2=Datagram2()\n\n# @exception(logger)\ntask_channel_name = []\n\n\nasync def recv_json(local, loop, Dgram2, path):\n with open(path, encoding='cp1251') as f:\n json_tu = json.load(f)\n\n for channel_name in Dgram2.parsing_js(json_tu, js=True):\n if channel_name:\n local.send(Dgram2.packed_TDatagram2[channel_name])\n if not channel_name in task_channel_name:\n print(Dgram2.ts_array)\n task_channel_name.append(channel_name)\n loop.create_task(send(local, channel_name, Dgram2))\n\n local.send(Dgram2.packed_TDatagram2[channel_name])\n\n\nasync def recv(local, loop, Dgram2):\n while True:\n data, address = await local.receive()\n # print('recived {}'.format(data))\n\n channel_name = Dgram2.parsing_data(data, js=False)\n\n if channel_name:\n local.send(Dgram2.packed_TDatagram2[channel_name])\n if not channel_name in task_channel_name:\n print(Dgram2.ts_array)\n task_channel_name.append(channel_name)\n loop.create_task(send(local, channel_name, Dgram2))\n\n # await asyncio.sleep(0.01)\n\n\n# @exception(logger)\nasync def send(local, channel_name, Dgram2):\n while True:\n local.send(Dgram2.packed_TDatagram2[channel_name])\n await asyncio.sleep(1)\n\n\ndef main(Dgram2, path):\n endpoint = Endpoint()\n loop = asyncio.get_event_loop()\n connect = loop.create_datagram_endpoint(lambda: DatagramEndpointProtocol(endpoint, loop), sock=endpoint.sock)\n transport, protocol = loop.run_until_complete(connect)\n task = []\n loop.create_task(recv(endpoint, loop, Dgram2))\n loop.create_task(recv_json(endpoint, loop, Dgram2, path))\n loop.run_forever()\n\n transport.close()\n 
loop.close()\n","repo_name":"4ndriesh/mcast","sub_path":"server/server_udp.py","file_name":"server_udp.py","file_ext":"py","file_size_in_byte":3854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13443703859","text":"#1\r\nclass MhsTIF(object):\r\n \r\n def __init__(self,nama,NIM,asal,saku):\r\n self.nama = nama\r\n self.NIM = NIM\r\n self.asal = asal\r\n self.saku = saku\r\n \r\nc0 = MhsTIF ('Ika','L200180001','Sukoharjo', 240000)\r\nc1 = MhsTIF ('Budi','L200180010','Sragen', 230000)\r\nc2 = MhsTIF ('Ahmad','L200180002','Surakarta', 250000)\r\nc3 = MhsTIF ('Chandra','L200180004','Surakarta', 230000)\r\nc4 = MhsTIF ('Eka','L200180005','Boyolali', 240000)\r\nc5 = MhsTIF ('Fandi','L20018006','Salatiga', 250000)\r\nc6 = MhsTIF ('Deni','L200180007','Klaten', 245000)\r\nc7 = MhsTIF ('Galuh','L20018008','Wonogiri', 245000)\r\nc8 = MhsTIF ('Janto','L200180009','Klaten', 245000)\r\nc9 = MhsTIF ('Hasan','L2001800011','Karanganyar', 270000)\r\nc10 = MhsTIF ('Khalid','L200180012','Purwodadi', 265000)\r\nMhs = [c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10]\r\n\r\ndef urutkan(A):\r\n baru = {}\r\n for i in range(len(A)):\r\n baru[A[i].nama] = A[i].NIM\r\n listofTuples = sorted(baru.items(), key = lambda x: x[1])\r\n for elemen in listofTuples :\r\n print(elemen[0], \":\", elemen[1])\r\n\r\n#2\r\ndef bubbleSort(arr):\r\n n = len(arr)\r\n for i in range (n):\r\n for j in range(0, n-i-1):\r\n if arr[j] > arr[j+1] :\r\n arr[j], arr[j+1] = arr[j+1], arr[j]\r\n\r\n return arr\r\ndef gabung(a,b):\r\n c = []\r\n c = a+b\r\n n = len(c)\r\n for i in range(n):\r\n for j in range(0, n-i-1):\r\n if c[j] > c[j+1] :\r\n c[j], c[j+1] = c[j+1], c[j]\r\n return c\r\n\r\n#3\r\nfrom time import time as detak\r\nfrom random import shuffle as kocok\r\nk = [i for i in range(1,6001)]\r\nkocok(k)\r\ndef u_bub(arr):\r\n n = len (arr)\r\n for i in range (n):\r\n for j in range(0, n-i-1):\r\n if arr[j] > arr[j+1] :\r\n arr[j], arr[j+1] = arr[j+1], arr[j]\r\ndef u_sel(A):\r\n for i in range(len(A)):\r\n min_in = i\r\n for j in range(i+1, len (A)):\r\n if A[min_in] > A[j]:\r\n min_in = j\r\n A[i], A[min_in] = A[min_in], A[i]\r\ndef u_ins(arr):\r\n for i in range(1, len(arr)):\r\n key = arr[i]\r\n j = i-1\r\n while j >= 0 and key < arr[j]:\r\n arr[j+1] = arr[j]\r\n j -= 1\r\n arr[j+1] = key\r\n","repo_name":"L200180009/algostruk","sub_path":"MODUL 5/Tugas.py","file_name":"Tugas.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"2522893054","text":"from django.shortcuts import render, redirect\r\nimport datetime\r\n\r\n# Create your views here.\r\ndef index(request):\r\n\r\n if 'words' not in request.session:\r\n request.session['words'] = []\r\n \r\n data = {\r\n 'words' : request.session['words']\r\n }\r\n\r\n print(\"DATATAAAAA ZIZZ\", data)\r\n\r\n return render(request, 'index.html', data)\r\n\r\ndef add(request):\r\n newWord = {}\r\n\r\n print\r\n\r\n if request.method == \"POST\":\r\n if len(request.POST['word']) < 2:\r\n return redirect('/')\r\n if 'checker' not in request.POST:\r\n newWord['checker'] = False\r\n else:\r\n newWord['checker'] = True\r\n newWord['word'] = request.POST['word']\r\n newWord['color'] = request.POST['color']\r\n newWord['timestamp'] = datetime.datetime.now().strftime(\"%b %d, %Y at %H:%M %p\")\r\n\r\n words = request.session['words']\r\n words.insert(0,newWord)\r\n request.session['words'] = words\r\n\r\n # print(words)\r\n print(request.session['words'])\r\n \r\n return redirect('/')\r\n\r\ndef clear(request):\r\n request.session.clear()\r\n\r\n return redirect('/')","repo_name":"gbhoot/djangoBasics","sub_path":"05-Session-Words/main/apps/session_words/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"19123966982","text":"import cv2\nimport os\nfrom collections import defaultdict\nimport numpy as np\nimport os.path as osp\ndef plot_boxes(cur_frame, head_map, points, ids,body_map={}, text=True):\n plotting_im = cur_frame.copy()\n for index, t_dim in enumerate(head_map):\n (startX, startY, endX, endY) = [int(i) for i in t_dim]\n cv2.rectangle(plotting_im, (startX, startY), (endX, endY),\n (0, 255, 0), 2)\n cur_centroid = tuple([(startX+endX)//2,\n (startY+endY)//2])\n\n # cv2.circle(plotting_im, cur_centroid, 2,\n # (255, 0, 0), 2)\n\n if text:\n cv2.putText(plotting_im, str(ids[index]), cur_centroid,\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)\n for index, t_dim in enumerate(points):\n X, Y, = [int(i) for i in t_dim]\n cv2.circle(plotting_im, (X, Y), 2,\n (0, 0, 255), 2)\n\n for index, (t_id, t_dim) in enumerate(body_map.items()):\n (startX, startY, endX, endY) = [int(i) for i in t_dim]\n cv2.rectangle(plotting_im, (startX, startY), (endX, endY),\n (0, 255, 0), 2)\n return plotting_im\n\ndef CroHead():\n root = '../../dataset/HT21/train'\n sub_scenes = os.listdir(root)\n print(sub_scenes)\n\n for sub_scene in sub_scenes[2:]:\n imgs_path = os.path.join(root, sub_scene, 'img1')\n imgs_id = os.listdir(imgs_path)\n det_path = os.path.join(imgs_path.replace('img1', 'det'), 'det.txt')\n\n bboxes = defaultdict(list)\n with open(det_path, 'r') as f:\n lines = f.readlines()\n # imgs_path = [i.rstrip().strip(\"#\").lstrip()\n # for i in lines if i.startswith('#')]\n for lin in lines:\n lin_list = [float(i) for i in lin.rstrip().split(',')]\n ind = int(lin_list[0])\n bboxes[ind].append(lin_list)\n f.close()\n gts = defaultdict(list)\n with open(os.path.join(imgs_path.replace('img1','gt'), 'gt.txt'), 'r') as f:\n lines = f.readlines()\n for lin in lines:\n lin_list = [float(i) for i in lin.rstrip().split(',')]\n ind = int(lin_list[0])\n gts[ind].append(lin_list)\n f.close()\n # print(gts)\n # print(imgs_id)\n\n for img_id in imgs_id:\n img_path=os.path.join(imgs_path,img_id)\n labels = bboxes[int(img_id.split('.')[0])]\n labels_point = gts[int(img_id.split('.')[0])]\n annotations = np.zeros((0, 4))\n points = np.zeros((0, 2))\n if len(labels) == 0:\n label = [[0, 0, 0, 0, 0]]\n ignore_ar = []\n for idx, label in enumerate(labels):\n annotation = np.zeros((1, 4))\n # bbox\n annotation[0, 0] = label[2] # x1\n annotation[0, 1] = label[3] # y1\n annotation[0, 2] = label[4] +label[2] # x2\n annotation[0, 3] = label[5] +label[3]# y2\n annotations = np.append(annotations, annotation, axis=0)\n for idx, label in enumerate(labels_point):\n point = np.zeros((1, 2))\n # bbox\n point[0, 0] = label[2] + label[4]/2# x1\n point[0, 1] = label[3] + label[5]/2 # y1\n points = np.append(points, point, axis=0)\n # print(annotations)\n print(len(points))\n img = cv2.imread(img_path)\n img = plot_boxes(img,{},points)\n # cv2.imshow(img_id, img)\n save_path = img_path.replace('img1','vis')\n cv2.imwrite(save_path,img)\n # cv2.waitKey()\n\nvideo_path = 'E:/netdisk\\SenseCrowd/video_ori'\nlabel_path = 'E:/netdisk\\SenseCrowd/label_list_all_rmInvalid'\nimport json\nimport os\nfrom numpy import array\nimport numpy as np\nimport pylab as pl\ndef SensorCrowd():\n Info_dict={}\n time = 0\n for scene in sorted(os.listdir(video_path)[51:]):\n print(scene)\n gts = defaultdict(list)\n with open(os.path.join(label_path,scene+'.txt')) as f:\n lines = f.readlines()\n for line in lines:\n lin_list = [i for i in line.rstrip().split(' ')]\n ind = lin_list[0]\n lin_list = [float(i) for i in lin_list[3:] if i != '']\n 
assert len(lin_list)%7==0\n gts[ind]=lin_list\n\n root = osp.join(video_path, scene)\n img_ids = os.listdir(root)\n print(img_ids)\n id_list = []\n for img_id in img_ids:\n if not img_id.endswith(\"jpg\"):\n continue\n time+=1/5\n img_path=osp.join(root, img_id)\n label = gts[img_id]\n box_and_point = np.array(label).reshape(-1,7)\n boxes = box_and_point[:,0:4]\n points = box_and_point[:,4:6]\n ids = box_and_point[:,6].astype(np.int)\n\n id_list.append(ids)\n\n img = cv2.imread(img_path)\n print(img_path)\n plot_img = plot_boxes(img, boxes, points, ids)\n cv2.imshow(img_id, plot_img)\n cv2.waitKey()\n all_id = np.concatenate(id_list)\n Info_dict.update({scene:len(set(all_id))})\n\n\n print(time)\n with open('info.json','w') as f:\n json.dump(Info_dict,f)\n\n # print(Info_dict)\n\ndef SENSE_train_val_test():\n import random\n random.seed(0)\n scenarios = ['1_cut', '']\n all_scenarios = []\n with open('./info.json','r') as f:\n a = json.load(f)\n for k, v in a.items():\n all_scenarios.append(k)\n print(len(all_scenarios))\n train_val = random.sample(all_scenarios, int(len(all_scenarios)*0.6))\n # print(train_val)\n test = list(set(all_scenarios)-set(train_val))\n\n val = random.sample(train_val, int(0.1*len(all_scenarios)))\n # print(val)\n train = list(set(train_val)-set(val))\n data = ''\n with open('./train.txt', 'w') as f:\n for i in train: data += i+'\\n'\n f.write(data)\n data = ''\n with open('./val.txt', 'w') as f:\n for i in val: data += i+'\\n'\n f.write(data)\n data = ''\n with open('./test.txt', 'w') as f:\n for i in test: data += i+'\\n'\n f.write(data)\n\n\n print(len(train) +len(val)+len(test))\n\ndef Infor_statistics():\n with open('./info.json','r') as f:\n a = json.load(f)\n data = []\n number = np.zeros(5)\n cat = ['0~50', '50~100', '100~150', '150~200', '200~400']\n for k, v in a.items():\n data.append(v)\n if v in range(0,50):\n number[0]+=1\n elif v in range(50,100):\n number[1]+=1\n elif v in range(100,150):\n number[2]+=1\n elif v in range(150, 200):\n number[3] += 1\n elif v in range(200, 400):\n number[4] += 1\n data = np.array(data)\n import pdb\n pdb.set_trace()\n\n print(data, data.sum())\n draw_hist(data)\n\n\n\ndef draw_hist(lenths):\n data = lenths\n\n bins = np.linspace(min(data), 400, 10)\n bins = [0,100, 200, 400]\n pl.hist(data, bins)\n\n pl.xlabel('Number of people')\n\n pl.ylabel('Number of occurences')\n\n pl.title('Frequency distribution of number of people in SensorCrowd (634 Seq)')\n\n pl.show()\n\n\n\nif __name__ =='__main__':\n SensorCrowd()\n Infor_statistics()\n # SENSE_train_val_test()","repo_name":"taohan10200/DRNet","sub_path":"datasets/dataset_prepare/video_vis.py","file_name":"video_vis.py","file_ext":"py","file_size_in_byte":7383,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"47"}
+{"seq_id":"2605944037","text":"from itertools import combinations\n\ndef solution(number, k) :\n\tanswer = []\n\tindex = -1\n\tfor num in number :\n\t\tif not answer :\n\t\t\tanswer.append(num)\n\t\t\tcontinue\n\t\tif k > 0 :\n\t\t\twhile answer[-1] < num :\n\t\t\t\tanswer.pop()\n\t\t\t\tk -= 1\n\t\t\t\tif not answer or k <= 0 :\n\t\t\t\t\tbreak\n\t\tanswer.append(num)\n\tanswer = answer[:-k] if k > 0 else answer\n\treturn ''.join(answer)\n\ndef main() :\n\tnumber, k = \"1924\", 2\t\t\t# 94\n\t#number, k = \"1231234\", 3 \t\t# 3234\n\tnumber, k = \"4177252841\", 4 \t# 775841\n\tprint(\"solution : \", solution(number, k))\n\nmain()","repo_name":"SY97P/Daily1PB","sub_path":"그리디/큰수만들기.py","file_name":"큰수만들기.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"18263159052","text":"class Lavadora:\n \n def __init__(self, preciobase,consumo,carga):\n self.__preciobase=preciobase\n self.__consumo=consumo\n self.__carga=carga\n \n def precioFinal(self):\n precioconIVA=self.__preciobase*1.21\n if self.__consumo==\"A\":\n return precioconIVA+100\n elif self.__consumo==\"B\":\n return precioconIVA+80\n elif self.__consumo==\"C\":\n return precioconIVA+60\n elif self.__consumo==\"D\":\n return precioconIVA+50\n elif self.__consumo==\"E\":\n return precioconIVA+30\n elif self.__consumo==\"F\":\n return precioconIVA+10\n else:\n return 0\n \n def ver_datos(self):\n print(self.__preciobase,self.__consumo,self.__carga,self.precioFinal())","repo_name":"lidiXD/ejercicios-python-1","sub_path":"SolucionExamenGestion/src/ejercicio02/Lavadora.py","file_name":"Lavadora.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"12576200322","text":"# vim: set syntax=python\nBASE_DIR = '/opt/nfvacc'\n\n# OVS-DPDK switch compatibility table\n# (http://docs.openvswitch.org/en/latest/faq/releases/)\n# OVS DPDK\n# 2.6.x\t16.07.2\n# 2.7.x\t16.11.2\n# 2.10.x 17.11.3\n# 2.11.x 18.11.1\n\nDPDK_VERSION = '16.07.2'\nDPDK_VERSION = '16.11.2'\nDPDK_VERSION = '17.11.3'\nDPDK_VERSION = '18.11.1'\nOVS_VERSION = '2.6.0'\nOVS_VERSION = '2.7.0'\nOVS_VERSION = '2.10.0'\nOVS_VERSION = '2.11.0'\nQEMU_VERSION = '2.7.0'\nQEMU_VERSION = '2.12.0'\nQEMU_VERSION = '3.1.0'\n\nTARBALLS_DIR = BASE_DIR + '/tarballs/'\n\nDPDK_TARBALL_FILE = 'dpdk-' + DPDK_VERSION + '.tar.xz'\nDPDK_TARBALL_URL = 'http://fast.dpdk.org/rel/' + DPDK_TARBALL_FILE\nDPDK_DIR = BASE_DIR + '/dpdk-stable-' + DPDK_VERSION\nNR_HUGEPAGES = 16384\nDPDK_TARGET = 'x86_64-native-linuxapp-gcc'\nDPDK_BUILD = DPDK_DIR + '/' + DPDK_TARGET\n\nOVS_TARBALL_FILE = 'openvswitch-' + OVS_VERSION + '.tar.gz'\nOVS_TARBALL_URL = 'http://openvswitch.org/releases/' + OVS_TARBALL_FILE\nOVS_DIR = BASE_DIR + '/openvswitch-' + OVS_VERSION\nOVSDB_SCHEMA = OVS_DIR + '/vswitchd/vswitch.ovsschema'\n#OVSDB_RUN_DIR=$BASE_DIR/openvswitch-${OVS_VERSION}-runtime\nOVSDB_RUN_DIR = '/usr/local/'\nOVS_RUN_DIR = '/usr/local/var/run/openvswitch/'\nOVS_ETC_DIR = '/usr/local/etc/openvswitch/'\nOVS_LOG_DIR = '/usr/local/var/log/openvswitch/'\nOVSDB_SOCK = OVS_RUN_DIR + '/db.sock'\nOVSDB_CONF = OVS_ETC_DIR + '/conf.db'\nOVSDB_LOG_FILE = OVS_LOG_DIR + '/ovsdb-server.log'\nOVSDB_PID_FILE = OVS_RUN_DIR + '/ovsdb-server.pid'\nOVS_VHOST_SOCKETS_DIR = OVS_RUN_DIR\n\nSLEEP_SECS = '1'\n\nQEMU_TARBALL_FILE = 'qemu-' + QEMU_VERSION + '.tar.bz2'\nQEMU_TARBALL_URL = 'http://wiki.qemu-project.org/download/' + QEMU_TARBALL_FILE\nQEMU_DIR = BASE_DIR + '/qemu-' + QEMU_VERSION + '/'\nQEMU_BIN = QEMU_DIR + '/x86_64-softmmu/qemu-system-x86_64'\n\n","repo_name":"nfvri/dputils","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"23147362322","text":"def binary_search(x,array):\n answer = 0\n left = 0 \n right = 0\n reversed_array = array.reverse()\n if x > max(array):\n answer = -1\n return answer\n while left != right:\n for i in len(array):\n if array[i] == x:\n left = i\n for j in len(array):\n if reversed_array[j] == x:\n right = j\n answer = len(array) - (i+j+2)\n return answer\n \n\n","repo_name":"HUFcus-focus/algo-gago","sub_path":"01_이것이_취업을_위한_코딩_테스트다/5주차_이진_탐색/01_스터디내용/강현우/03.알고리즘 유형별 기출문제/27.정렬된 배열에서 특정 수의 개수 구하기.py","file_name":"27.정렬된 배열에서 특정 수의 개수 구하기.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38755309906","text":"\n# Compare CSVs: https://stackoverflow.com/a/49444878/10267529\n\nimport csv\nfrom pathlib import Path\n\nbefore_file = Path('./data/ALA_before.csv')\nafter_file = Path('./data/ALA_after.csv')\noutfile = Path('./data/ALA_diff.csv')\n\n# Create a set object by mapping the \"before\" file onto a tuple object\n# (an immutable list), passing teh result of `open()` to `csv.reader()`\nbefore = set(\n map(\n tuple,\n csv.reader(open(before_file))\n )\n)\n\n# Repeat for the second (\"after\") file\nafter = set(map(tuple, csv.reader(open(after_file))))\n\n# Use the caret (^) operator to get the symmetric difference between the sets\n# https://www.linuxtopia.org/online_books/programming_books/python_programming/python_ch16s03.html\n\ndifferences = before ^ after\n\noutput = csv.writer(open(outfile, mode='w'))\n\ncompared = sorted(differences, key=lambda x: x[0], reverse=True)\n\nfor count, row in enumerate(compared):\n output.writerow(row)\n\nprint(f\"Wrote {count} rows to output file.\")\n","repo_name":"hughlilly/learn_python","sub_path":"07-files/files7.py","file_name":"files7.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8439373205","text":"\"\"\"Remove posts_comments junction table\n\nRevision ID: 46771d436ed7\nRevises: 31a1e2ea098c\nCreate Date: 2023-05-01 19:58:10.709868\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '46771d436ed7'\ndown_revision = '31a1e2ea098c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('posts_comments')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('posts_comments',\n sa.Column('post_id', sa.UUID(), autoincrement=False, nullable=False),\n sa.Column('comment_id', sa.UUID(), autoincrement=False, nullable=False),\n sa.ForeignKeyConstraint(['comment_id'], ['posts.id'], name='posts_comments_comment_id_fkey'),\n sa.ForeignKeyConstraint(['post_id'], ['posts.id'], name='posts_comments_post_id_fkey'),\n sa.PrimaryKeyConstraint('post_id', 'comment_id', name='posts_comments_pkey')\n )\n # ### end Alembic commands ###\n","repo_name":"AdamRa0/posts","sub_path":"backend/migrations/versions/46771d436ed7_remove_posts_comments_junction_table.py","file_name":"46771d436ed7_remove_posts_comments_junction_table.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"43212104598","text":"#\n# @lc app=leetcode.cn id=1122 lang=python3\n#\n# [1122] 数组的相对排序\n#\n\n# @lc code=start\nclass Solution:\n def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:\n hash1 = {}\n for i in arr1:\n hash1[i] = hash1.get(i, 0)+1\n res = []\n for i in arr2:\n res.extend([i]*hash1[i])\n del hash1[i]\n for i in sorted(hash1.keys()):\n res.extend([i]*hash1[i])\n return res\n\n\n\n #############对元组排序\n# @lc code=end\n\n","repo_name":"westqzy/leetcodes","sub_path":"1122.数组的相对排序.py","file_name":"1122.数组的相对排序.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"6071691552","text":"\n\ndef run_prog(listing):\n listing = listing.strip();\n prog = listing.split(\",\");\n\n def get_command(ic):\n nonlocal prog\n return prog[ic].zfill(5);\n\n def get_opcode(cmd):\n return cmd[3:]\n\n def get_params_and_modes(cmd):\n nonlocal prog\n nonlocal ic\n a_mode = int(cmd[2])\n b_mode = int(cmd[1])\n c_mode = int(cmd[0])\n a = get_addr(ic+1) if ic+1 < len(prog) else 0\n b = get_addr(ic+2) if ic+2 < len(prog) else 0\n c = get_addr(ic+3) if ic+3 < len(prog) else 0\n print(f\"IC = {ic} -- {cmd}, {a}, {b}, {c}\")\n return a, b, c, a_mode, b_mode, c_mode\n\n def get_input():\n val = input(\"#: \")\n return int(val)\n\n def get_addr(addr,mode=0):\n nonlocal prog\n return addr if mode else int(prog[addr])\n\n\n def set_addr(addr,val):\n nonlocal prog\n prog[addr]=f\"{val}\"\n return\n\n def output(val):\n print(f\"{val}\")\n return\n\n\n done = False\n ic = 0;\n while not done:\n cmd = get_command(ic)\n op = get_opcode(cmd);\n a, b, c, a_mode, b_mode, c_mode = get_params_and_modes(cmd);\n if op == \"01\": #ADD\n a = get_addr(a,a_mode)\n b = get_addr(b,b_mode)\n set_addr(c,a+b)\n ic += 4\n elif op == \"02\": #MULTIPLY\n a = get_addr(a,a_mode)\n b = get_addr(b,b_mode)\n set_addr(c,a*b)\n ic += 4\n elif op == \"03\": #INPUT\n set_addr(a,get_input())\n ic += 2\n elif op == \"04\": #OUTPUT\n output(get_addr(a,a_mode))\n ic += 2\n elif op == \"05\": #JMP if TRUE\n a = get_addr(a,a_mode)\n b = get_addr(b,b_mode)\n ic = b if a else ic+3\n elif op == \"06\": #JMP if FALSE\n a = get_addr(a,a_mode)\n b = get_addr(b,b_mode)\n ic = b if a==0 else ic+3\n elif op == \"07\": #LT\n a = get_addr(a,a_mode)\n b = get_addr(b,b_mode)\n set_addr(c, 1 if a= len(prog):\n done = True\n\n return prog\n\n\ndef parse_prog(line):\n #build a Program\n return line.split(\",\")\n\nwith open(\"data/diagnostics\") as f:\n for line in f:\n print(run_prog(line))\n\n\n","repo_name":"agarithm/2019_AOC","sub_path":"05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"42560823467","text":"import struct\nimport random\nfrom fancy.python_simulations.crc import Crc\nimport sys\n\ncrc32_polinomials = [\n 0x04C11DB7, 0xEDB88320, 0xDB710641, 0x82608EDB, 0x741B8CD7, 0xEB31D82E,\n 0xD663B05, 0xBA0DC66B, 0x32583499, 0x992C1A4C, 0x32583499, 0x992C1A4C]\n\n\ndef counter():\n i = 0\n\n def count():\n nonlocal i\n i += 1\n return i\n return count\n\n\ndef generate_prefixes(num, batch=10000):\n\n prefixes = set()\n\n while len(prefixes) < num:\n for _ in range(batch):\n prefixes.add(random.randint(0, 1000000000000))\n\n reminder = len(prefixes) - num\n for _ in range(reminder):\n prefixes.pop()\n\n return prefixes\n\n\ndef generate_paths(num_prefixes, failed_prefixes, width, levels, debug=False):\n\n prefixes = generate_prefixes(num_prefixes)\n\n #width = int(sys.argv[2])\n #levels = int(sys.argv[3])\n\n hashes = []\n for i in range(levels):\n hashes.append(\n Crc(32, crc32_polinomials[i], True, 0xffffffff, True, 0xffffffff))\n\n prefixes_paths = {}\n\n count = counter()\n\n r = 0\n count_failed_prefixes = 0\n for prefix in prefixes:\n r += 1\n s = ''\n for hash in hashes:\n\n hash_out = hash.bit_by_bit_fast(struct.pack(\"Q\", prefix)) % width\n s += str(hash_out) + \"-\"\n\n fail_status = 0\n\n if count_failed_prefixes < failed_prefixes:\n count_failed_prefixes += 1\n fail_status = 1\n\n path = s[:-1]\n\n if path not in prefixes_paths:\n prefixes_paths[path] = [fail_status]\n else:\n prefixes_paths[path].append(fail_status)\n\n if debug:\n print(path)\n\n if r % 100000 == 0:\n print('{}'.format(count()))\n\n return prefixes_paths\n\n\ndef fast_generate_paths(\n num_prefixes, failed_prefixes, width, levels, debug=False):\n\n #width = int(sys.argv[2])\n #levels = int(sys.argv[3])\n\n prefixes_paths = {}\n\n count = counter()\n\n bucket_size = width**levels\n\n r = 0\n count_failed_prefixes = 0\n for _ in range(num_prefixes):\n r += 1\n\n path = random.randint(0, bucket_size)\n fail_status = 0\n\n if count_failed_prefixes < failed_prefixes:\n count_failed_prefixes += 1\n fail_status = 1\n\n if path not in prefixes_paths:\n prefixes_paths[path] = [fail_status]\n else:\n prefixes_paths[path].append(fail_status)\n\n if debug:\n print(path)\n\n if r % 100000 == 0:\n print('{}'.format(count()))\n\n return prefixes_paths\n\n\ndef find_collisions(prefixes_paths):\n \"\"\"\n returns the numner of non failed prefixes that will be triggered\n Args:\n prefixes_paths:\n\n Returns:\n\n \"\"\"\n\n count = 0\n for path, prefix_type in prefixes_paths.items():\n if 1 in prefix_type and 0 in prefix_type:\n count += prefix_type.count(0)\n elif 1 in prefix_type:\n print(prefix_type)\n\n return count\n\n\ndef count_start_with(prefixes, start_width):\n\n count = 0\n for prefix in prefixes:\n if prefix.startswith(start_width):\n print(prefix)\n count += 1\n\n return count\n\n\n#index = hashes[0].bit_by_bit_fast((self.flow_to_bytestream(flow))) % mod\n","repo_name":"nsg-ethz/FANcY","sub_path":"experiments/fancy/python_simulations/hashing_test.py","file_name":"hashing_test.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"47"}
+{"seq_id":"260680988","text":"import logging\n\n\nlogger = logging.getLogger('filelogger')\n\n\ndef handle_services_exceptions(func):\n \"\"\"Decorator that handle exceptions in services functions\"\"\"\n\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n logger.error(\n f\"An error `{e.__class__.__name__}` occurred in the function \"\n f\"`{func.__name__}` with arguments: {args} and keyword \"\n f\"arguments: {kwargs}\"\n )\n raise\n\n return wrapper\n","repo_name":"artemowkin/FatalErrorGames","sub_path":"utils/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"29641000861","text":"# -*- coding: latin-1 -*-\n\n__author__ = 'mouton'\n\nfrom PyQt4.QtGui import QVBoxLayout, QHBoxLayout, QTextEdit, QLineEdit, QWidget, QLabel, QComboBox, QPushButton\n\n\nclass PropertyWidget(QWidget):\n def __init__(self, parent=None):\n super(PropertyWidget, self).__init__(parent)\n self.layout = QVBoxLayout()\n\n self.noItem = QLabel('Property of the selected element.')\n\n self.arcParamEditor = ArcParamEditorWidget(self)\n self.nodeParamEditor = NodeParamEditorWidget(self)\n self.connectedComponentParamEditor = ConnectedComponentParamEditorWidget(self)\n\n self.layout.addWidget(self.noItem)\n self.layout.addWidget(self.arcParamEditor)\n self.arcParamEditor.hide()\n self.layout.addWidget(self.nodeParamEditor)\n self.nodeParamEditor.hide()\n self.layout.addWidget(self.connectedComponentParamEditor)\n self.connectedComponentParamEditor.hide()\n\n self.propertyItem = self.noItem\n\n self.setLayout(self.layout)\n\n self.setMinimumHeight(250)\n self.setMaximumHeight(250)\n\n def setItem(self, item):\n self.propertyItem.hide()\n self.propertyItem = item\n self.propertyItem.show()\n return item\n\n def setNoItem(self):\n return self.setItem(self.noItem)\n\n def setArcItem(self):\n return self.setItem(self.arcParamEditor)\n\n def setNodeItem(self):\n return self.setItem(self.nodeParamEditor)\n\n def setConnectedComponentItem(self):\n return self.setItem(self.connectedComponentParamEditor)\n\n\nclass ArcParamEditorWidget(QWidget):\n def __init__(self, parent=None):\n super(ArcParamEditorWidget, self).__init__(parent)\n\n hbox = QHBoxLayout()\n vbox = QVBoxLayout()\n\n hbox2 = QHBoxLayout()\n self._lb1 = QLabel('Arc index : ', self)\n self._indexQCB = QComboBox(self)\n self._indexQCB.setMaxVisibleItems(5)\n hbox2.addWidget(self._lb1)\n hbox2.addWidget(self._indexQCB)\n\n self._labelTE = QTextEdit(self)\n self._labelTE.setUndoRedoEnabled(True)\n vbox.addLayout(hbox2)\n vbox.addWidget(self._labelTE)\n\n self._formulaTE = QTextEdit(self)\n self._formulaTE.setUndoRedoEnabled(True)\n self._consequencesTE = QTextEdit(self)\n self._consequencesTE.setUndoRedoEnabled(True)\n hbox.addLayout(vbox)\n hbox.addWidget(self._formulaTE)\n hbox.addWidget(self._consequencesTE)\n self.setLayout(hbox)\n\n self._selectedArc = None\n\n self._labelTE.textChanged.connect(self.labelChanged)\n self._formulaTE.textChanged.connect(self.formulaChanged)\n self._consequencesTE.textChanged.connect(self.consequencesChanged)\n\n self._indexQCB.currentIndexChanged.connect(self.window().setModified)\n self._labelTE.textChanged.connect(self.window().setModified)\n self._formulaTE.textChanged.connect(self.window().setModified)\n self._consequencesTE.textChanged.connect(self.window().setModified)\n\n def init(self):\n self._indexQCB.clear()\n self.setLabel('Arc label.')\n self.setFormula('Arc boolean formula.')\n self.setConsequences('Arc consequeneces.')\n\n def setIndexes(self, maxIndex, index):\n self._indexQCB.clear()\n for i in xrange(maxIndex):\n self._indexQCB.addItem(str(i))\n self._indexQCB.setCurrentIndex(index)\n\n def setLabel(self, label):\n self._labelTE.setText(label)\n\n def setFormula(self, formula):\n self._formulaTE.setText(formula)\n\n def setConsequences(self, consequences):\n self._consequencesTE.setText(consequences)\n\n def labelChanged(self):\n try:\n self._selectedArc.setLabel(str(self._labelTE.toPlainText()))\n except AttributeError:\n pass\n\n def formulaChanged(self):\n try:\n self._selectedArc.setFormula(str(self._formulaTE.toPlainText()))\n except AttributeError:\n 
pass\n\n def consequencesChanged(self):\n try:\n self._selectedArc.setConsequences(str(self._consequencesTE.toPlainText()))\n except AttributeError:\n pass\n\n def setSelectedArc(self, a):\n try:\n self._indexQCB.currentIndexChanged.disconnect(self._selectedArc.setIndex)\n except (AttributeError, TypeError):\n pass\n\n self._selectedArc = a\n try:\n self.setIndexes(a.getMaxIndex(), a.getIndex())\n self.setLabel(a.getLabel())\n self.setFormula(a.getFormula())\n self.setConsequences(a.getConsequencesStr())\n self._indexQCB.currentIndexChanged.connect(a.setIndex)\n except AttributeError:\n self.init()\n\n\nclass NodeParamEditorWidget(QWidget):\n def __init__(self, parent=None):\n super(NodeParamEditorWidget, self).__init__(parent)\n\n self._tokenWidgets = []\n self._showTokenWidgetIndex = 0\n\n vbox = QVBoxLayout()\n\n hboxTitle = QHBoxLayout()\n self._lb1 = QLabel('Node index : ', self)\n self._lb2 = QLabel('', self)\n self._lb2.setMinimumWidth(50)\n self._lb2.setMaximumWidth(50)\n hboxTitle.addWidget(self._lb1)\n hboxTitle.addWidget(self._lb2)\n\n self._labelTE = QLineEdit(self)\n hboxTitle.addWidget(self._labelTE)\n\n vbox.addLayout(hboxTitle)\n\n self._vboxToken = QVBoxLayout()\n\n self._upToken = QPushButton('Up')\n self._upToken.setMaximumHeight(20)\n # self._upToken.setMaximumWidth(150)\n self._upToken.setEnabled(False)\n\n self._plusToken = QPushButton('+')\n # self._plusToken.setMaximumWidth(150)\n\n self._downToken = QPushButton('Down')\n self._downToken.setMaximumHeight(20)\n # self._downToken.setMaximumWidth(150)\n self._downToken.setEnabled(False)\n\n self._vboxToken.addWidget(self._upToken)\n self._vboxToken.addWidget(self._plusToken)\n self._vboxToken.addWidget(self._downToken)\n\n vbox.addLayout(self._vboxToken)\n\n self._upToken.clicked.connect(self.upToken)\n self._downToken.clicked.connect(self.downToken)\n self._plusToken.clicked.connect(self.addToken)\n\n self.init()\n\n self._selectedNode = None\n self._labelTE.textChanged.connect(self.labelChanged)\n self._labelTE.textChanged.connect(self.window().setModified)\n\n self.setLayout(vbox)\n\n def addToken(self):\n self._selectedNode.addToken()\n token = TokenWidget(len(self._tokenWidgets), parent=self)\n self._tokenWidgets.append(token)\n self._vboxToken.insertWidget(self._vboxToken.count() - 2, token)\n\n if len(self._tokenWidgets) >= 3:\n self._tokenWidgets[-3].hide()\n self._upToken.setEnabled(True)\n self._showTokenWidgetIndex += 1\n\n def removeToken(self, tokenWidget):\n tokenWidget.hide()\n self._tokenWidgets.remove(tokenWidget)\n self._selectedNode.removeToken(tokenWidget.index)\n for tW in self._tokenWidgets[tokenWidget.index:]:\n tW.index -= 1\n self._vboxToken.removeWidget(tokenWidget)\n\n self.updateTokenWidgetIndex()\n\n def updateTokenWidgetIndex(self):\n if len(self._tokenWidgets) <= 2:\n self._upToken.setEnabled(False)\n self._downToken.setEnabled(False)\n for tokW in self._tokenWidgets:\n tokW.show()\n self._plusToken.show()\n self._showTokenWidgetIndex = 0\n return\n\n maxIndex = len(self._tokenWidgets) - 1\n if maxIndex == self._showTokenWidgetIndex:\n self._showTokenWidgetIndex -= 1\n\n self._tokenWidgets[self._showTokenWidgetIndex].show()\n self._tokenWidgets[self._showTokenWidgetIndex + 1].show()\n if maxIndex - self._showTokenWidgetIndex == 1:\n self._plusToken.show()\n else:\n self._tokenWidgets[self._showTokenWidgetIndex + 2].show()\n\n def upToken(self):\n self._downToken.setEnabled(True)\n\n maxIndex = len(self._tokenWidgets) - 1\n if self._showTokenWidgetIndex == maxIndex - 1:\n 
self._plusToken.hide()\n else:\n self._tokenWidgets[self._showTokenWidgetIndex + 2].hide()\n\n self._showTokenWidgetIndex -= 1\n self._tokenWidgets[self._showTokenWidgetIndex].show()\n if self._showTokenWidgetIndex == 0:\n self._upToken.setEnabled(False)\n\n def downToken(self):\n self._upToken.setEnabled(True)\n maxIndex = len(self._tokenWidgets) - 1\n if self._showTokenWidgetIndex == maxIndex - 2:\n self._plusToken.show()\n else:\n self._tokenWidgets[self._showTokenWidgetIndex + 3].show()\n\n self._tokenWidgets[self._showTokenWidgetIndex].hide()\n self._showTokenWidgetIndex += 1\n if self._showTokenWidgetIndex == maxIndex - 1:\n self._downToken.setEnabled(False)\n\n def init(self):\n self.setLabel('Node label.')\n\n def setIndex(self, num):\n self._lb2.setText(str(num))\n\n def setLabel(self, label):\n self._labelTE.setText(label)\n\n def setTokens(self, tokens):\n for tokenWidget in self._tokenWidgets:\n tokenWidget.hide()\n self._upToken.setEnabled(False)\n self._downToken.setEnabled(False)\n del self._tokenWidgets[:]\n\n for token in tokens:\n tokenWidget = TokenWidget(len(self._tokenWidgets), parent=self)\n tokenWidget.setText(token)\n self._tokenWidgets.append(tokenWidget)\n self._vboxToken.insertWidget(self._vboxToken.count() - 2, tokenWidget)\n tokenWidget.hide()\n\n self._plusToken.show()\n if len(self._tokenWidgets) >= 3:\n self._upToken.setEnabled(True)\n self._tokenWidgets[-2].show()\n self._tokenWidgets[-1].show()\n self._showTokenWidgetIndex = len(tokens) - 2\n else:\n for tokenWidget in self._tokenWidgets:\n tokenWidget.show()\n self._showTokenWidgetIndex = 0\n\n def labelChanged(self):\n try:\n self._selectedNode.setLabel(str(self._labelTE.text()))\n except AttributeError:\n pass\n\n def tokenChanged(self, token):\n index = token.index\n text = token.text\n self._selectedNode.setToken(index, text)\n\n def setSelectedNode(self, n):\n self._selectedNode = n\n try:\n self.setIndex(n.num)\n self.setLabel(n.getLabel())\n self.setTokens(n.getTokens())\n except AttributeError:\n self.init()\n\n\nclass TokenWidget(QWidget):\n def __init__(self, index, parent=None):\n super(TokenWidget, self).__init__(parent)\n\n self._index = index\n hboxToken = QHBoxLayout()\n hboxToken.addWidget(QLabel('Token'))\n self._indexLabel = QLabel(str(index))\n hboxToken.addWidget(self._indexLabel)\n hboxToken.addWidget(QLabel('('))\n self._qte = QLineEdit()\n self._qte.setMinimumHeight(30)\n self._qte.setMaximumHeight(30)\n hboxToken.addWidget(self._qte)\n hboxToken.addWidget(QLabel(')'))\n removeButton = QPushButton('-')\n hboxToken.addWidget(removeButton)\n\n self.setLayout(hboxToken)\n self.setContentsMargins(0, 0, 0, 0)\n self.layout().setContentsMargins(0, 0, 0, 0)\n\n removeButton.clicked.connect(self.remove)\n removeButton.clicked.connect(self.window().setModified)\n\n self._qte.textChanged.connect(self.update)\n self._qte.textChanged.connect(self.window().setModified)\n\n @property\n def index(self):\n return self._index\n\n @index.setter\n def index(self, index):\n self._index = index\n self._indexLabel.setText(str(index))\n\n @property\n def text(self):\n return self._qte.text()\n\n def setText(self, text):\n return self._qte.setText(text)\n\n def remove(self):\n self.parent().removeToken(self)\n\n def update(self):\n self.parent().tokenChanged(self)\n\n\nclass ConnectedComponentParamEditorWidget(QWidget):\n def __init__(self, parent=None):\n super(ConnectedComponentParamEditorWidget, self).__init__(parent)\n hbox = QHBoxLayout()\n hbox.addWidget(QLabel('Scene : '))\n\n self._sceneQCB = 
QComboBox(self)\n self._sceneQCB.setMaxVisibleItems(5)\n hbox.addWidget(self._sceneQCB)\n\n self._sceneQCB.clear()\n self.setLayout(hbox)\n\n def initSceneQCB(self):\n try:\n self._sceneQCB.currentIndexChanged.disconnect(self.changeScene)\n except TypeError:\n pass\n sceneOfCC = self._selectedConnectedComponent.scene()\n self._sceneQCB.clear()\n i = 0\n for scene in self.parent().parent().scenes():\n self._sceneQCB.addItem(scene.getName())\n if scene == sceneOfCC:\n self._sceneQCB.setCurrentIndex(i)\n i += 1\n\n self._sceneQCB.currentIndexChanged.connect(self.changeScene)\n\n def setSelectedConnectedComponent(self, cc):\n self._selectedConnectedComponent = cc\n self.initSceneQCB()\n\n def changeScene(self, sceneIndex):\n self._sceneQCB.clear()\n self._selectedConnectedComponent.scene().changeConnectedComponentSceneByIndex(\n self._selectedConnectedComponent, sceneIndex)","repo_name":"mouton5000/DiscreteEventApplicationEditor","sub_path":"gui/PropertiesItems.py","file_name":"PropertiesItems.py","file_ext":"py","file_size_in_byte":13351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33242394321","text":"\"\"\"Draytek Web Admin - BasePage.\"\"\"\nfrom toolium.pageobjects.page_object import PageObject\n\n\nclass BasePageObject(PageObject):\n \"\"\"Selenium Page Object Model from Toolium. BasePage class.\"\"\"\n\n @staticmethod\n def read_element_value(element):\n \"\"\"Read element value from various properties based on element type.\n\n :param element: Web Element\n :returns: element value\n \"\"\"\n if element.web_element.is_enabled():\n if (type(element).__name__ == \"InputText\") or (\n type(element).__name__ == \"Text\"\n ):\n return element.text.strip()\n if (type(element).__name__ == \"Checkbox\") or (\n type(element).__name__ == \"InputRadio\"\n ):\n return element.is_selected()\n if type(element).__name__ == \"Select\":\n return str(element.option)\n raise TypeError(\n f\"read_element_value: Unhandled element type: {type(element).__name__}\"\n )\n return None\n\n @staticmethod\n def set_element_value(element, value):\n \"\"\"Set element to specified value based on element type.\n\n :param element: Web Element\n :param value: Value to be set against element\n :return: true if setting applied\n \"\"\"\n if value is None:\n return False\n if element.web_element.is_enabled():\n if type(element).__name__ == \"InputText\":\n element.clear()\n element.text = value\n return True\n if type(element).__name__ == \"Checkbox\":\n if value:\n element.check()\n elif not None:\n element.uncheck()\n return True\n if type(element).__name__ == \"InputRadio\":\n if value:\n element.check()\n return True\n if type(element).__name__ == \"Select\":\n element.option = value\n return True\n raise TypeError(\n f\"write_element_value: Unhandled element type: {type(element).__name__}\"\n )\n return False\n","repo_name":"highlight-slm/Draytek-Web-Auto-Configuration","sub_path":"draytekwebadmin/pages/basepageobject.py","file_name":"basepageobject.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"8582933123","text":"def fact(x) :\n f = 1\n for i in range(1,x+1) :\n f *= i\n return f\n\np = 0.12\ntemp = 0.0\nfor i in range(0,2+1) :\n temp += fact(10)/(fact(i) * fact(10-i)) * pow(p,i) * pow(1-p,10-i)\nprint(round(temp,3))\nprint(round(1 - temp + fact(10)/(fact(2) * fact(10-2)) * pow(p,2) * pow(1-p,10-2),3))\n","repo_name":"ojus1/10DStat","sub_path":"binodist2.py","file_name":"binodist2.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"19306502371","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport torch\nimport tushare as ts\nfrom transformerquant.featurizers.default_featurizer import DefaultFeaturizer\nfrom transformerquant.dataset.sampler import Sampler\nfrom transformerquant.trainer.agent import Agent\nfrom transformerquant.utils.datetime_converter import convert_str_to_dt\nfrom transformerquant.configs.bert_config import BertConfig\nfrom transformerquant.models.residual_bert import BertForPreTraining, BertForSequenceClassification\n\ndef create_feature_container(dropna=False):\n featurizer = DefaultFeaturizer(fwd_returns_window=1, task='regression')\n order_book_ids = ['000001','000002']#'000003','000008','000009','000010','000011','000012','000016','000017','000300','000905','399001','399002','399003','399004','399005','399006','399008','399100','399101','399106','399107','399108','399333','399606']\n feature_container = {}\n for order_book_id in order_book_ids:\n try:\n print(\"process {}\".format(order_book_id))\n data = ts.get_k_data(order_book_id, start='1990-01-01', end='2018-05-14', index=True)\n open_ts = torch.tensor(data['open'].values, dtype=torch.float32)\n high_ts = torch.tensor(data['high'].values, dtype=torch.float32)\n low_ts = torch.tensor(data['low'].values, dtype=torch.float32)\n close_ts =torch.tensor(data['close'].values, dtype=torch.float32) \n volume_ts = torch.tensor(data['volume'].values, dtype=torch.float32)\n #pdb.set_trace()\n output = featurizer.forward(open_ts,high_ts,low_ts,close_ts,volume_ts)\n data['datetime'] = data['date'].apply(lambda x:convert_str_to_dt(x, format_=\"%Y-%m-%d\"))\n output_np_list = [feature.cpu().detach().numpy() for feature in output]\n #pdb.set_trace()\n output_np = np.asarray(output_np_list).transpose(1,0)\n feature_df = pd.DataFrame(output_np, index=data['datetime'])\n except Exception as e:\n print(\"{} fialed\".format(order_book_id))\n else:\n print(\"{} successfully\".format(order_book_id))\n if dropna:\n feature_df = feature_df.dropna()\n #pdb.set_trace()\n feature_container[order_book_id] = feature_df\n return feature_container\n\n\ndef create_sample_container(feature_container, task='regression'):\n sequence_window = 30\n use_normalize = False\n frequency_x = '1d'\n batch_size = 32\n sampler = Sampler(sequence_window=sequence_window,\n frequency_x=frequency_x,\n interval_depart=False,\n process_nan=True,\n use_normalize = use_normalize,\n saved_nomalizer_dir='/tmp/',\n batch_size=batch_size,\n train_ratio=0.7,\n val_ratio = 0.1,\n test_ratio = 0.2,\n task = task)\n sample_container = sampler.generate_sample(feature_container)\n return sample_container\n\n\ndef create_model():\n config = BertConfig()\n config.d_model = 72\n model = BertForSequenceClassification(config)\n return model\n\n\ndef create_agent(model):\n use_cuda=True\n loss_func = torch.nn.MSELoss()\n n_epochs = 300\n lr = 0.001\n early_stop_patience = 80\n #optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n optimizer = torch.optim.RMSprop(model.parameters(), lr=lr)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)\n #optimizer = torch.optim.RMSprop(model.parameters(), lr=lr)\n to_save_dir = \"/media/allen/c54da21a-a3bc-4c5e-a36c-0a41b6108e59/production/Prophet_contribute/train_scripts/classical_deep_position/1d/model\"\n checkpoint = 
None#\"/media/allen/c54da21a-a3bc-4c5e-a36c-0a41b6108e59/quant/Quant_core/AI_Trader/asset/training_PositionModel_20180509_1814/model_PositionModel_282_val_loss=0.3601593.pth\"\n #agent\n agent = Agent(model,\n use_cuda=use_cuda,\n loss_func=loss_func,\n optimizer=optimizer,\n lr_scheduler = lr_scheduler,\n n_epochs=n_epochs,\n early_stop_patience=early_stop_patience,\n to_save_dir=to_save_dir,\n checkpoint=checkpoint)\n return agent\n\n\ndef main(load=False):\n feature_container = create_feature_container(dropna=True)\n sample_container = create_sample_container(feature_container)\n# return sample_container\n model = create_model()\n agent = create_agent(model)\n state = agent.fit(sample_container['dataloader_train'], sample_container['dataloader_val'])\n #agent.predict(sample_container['dataloader_test'])\n return state\n\nif __name__ == \"__main__\":\n #feature_container = create_feature_container()\n #sampler_container = create_sample_container(feature_container)\n main()","repo_name":"StateOfTheArt-quant/transformerquant","sub_path":"examples/train_workflow.py","file_name":"train_workflow.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"47"}
+{"seq_id":"19347202157","text":"from terminaltables import AsciiTable\nfrom .problem_reporter import ProblemReporter\n\n\nclass Statistics(object):\n \"\"\" Class to deal with statistics \"\"\"\n\n def __init__(self, kind):\n self.kind = kind\n self.problems = ProblemReporter.get_problems()\n\n def stat(self):\n \"\"\" Run statistics feature based on the statistic kind \"\"\"\n if not self.problems:\n print(\"\\nNo statistics to display since no problem was found.\")\n return\n\n if self.kind == \"project\":\n self._stat_project()\n elif self.kind == \"file\":\n self._stat_file()\n\n def _stat_project(self):\n \"\"\" Run statistics based on overral project \"\"\"\n title = \"Statistics - Project\"\n self._print_logo(title)\n aux_dict = {}\n\n # calculate the amount of problems per check\n for kind, problems in self.problems.items():\n aux_dict[kind] = len(problems)\n\n # sort the amount of problems\n table_data = [[\"Problem\", \"Amount\"]]\n for kind, value in sorted(aux_dict.items(), key=lambda k, v: (v, k)):\n table_data.append([kind, str(value)])\n stat_table = AsciiTable(table_data)\n stat_table.justify_columns = {0: 'left', 1: 'center'}\n print(stat_table.table)\n\n def _stat_file(self):\n \"\"\" Run statistics per file \"\"\"\n title = \"Statistics - Per File\"\n self._print_logo(title)\n\n # Get all files that have problems\n files = []\n for problems in self.problems.values():\n for problem in problems:\n file_name = problem.file_name\n if file_name not in files:\n files.append(problem.file_name)\n\n # Calculate the amount of problems in each file\n data_dict = {}\n for _file in files:\n problem_dict = {}\n for kind, problems in self.problems.items():\n for problem in problems:\n if problem.file_name == _file:\n problem_dict[kind] = problem_dict.get(kind, 0) + 1\n data_dict[_file] = problem_dict\n\n # Create table data\n table_data = [[\"File\", \"Total Amount\", \"Problems\"]]\n for file_name, problems_dict in data_dict.items():\n total_ammount = 0\n problem = \"\"\n for kind, ammount in problems_dict.items():\n total_ammount += ammount\n problem += str(ammount) + \" \" + kind + \"\\n\"\n else:\n table_data.append([file_name, total_ammount, problem.strip()])\n\n stat_table = AsciiTable(table_data)\n stat_table.inner_row_border = True\n stat_table.justify_columns = {0: 'left', 1: 'center', 2: 'left'}\n print(stat_table.table)\n\n @staticmethod\n def _print_logo(title):\n \"\"\" Print the statistics logo \"\"\"\n border = \"=\" * len(title)\n print(\"\")\n print(border)\n print(title)\n print(border)\n","repo_name":"open-power-sdk/migration-advisor","sub_path":"ma/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"47"}
+{"seq_id":"34736646426","text":"import numpy as np\nfrom sklearn.datasets import make_blobs\nimport pandas as pd\nimport matplotlib.pylab as plt\n\ndef Decision_Surface(data, target, model, surface=True, probabilities=False, cell_size=.01, size=20):\n # Get bounds\n x_min, x_max = data[data.columns[0]].min(), data[data.columns[0]].max()\n y_min, y_max = data[data.columns[1]].min(), data[data.columns[1]].max()\n \n # Create a mesh\n xx, yy = np.meshgrid(np.arange(x_min, x_max, cell_size), np.arange(y_min, y_max, cell_size))\n meshed_data = pd.DataFrame(np.c_[xx.ravel(), yy.ravel()])\n \n # Add interactions\n for i in range(data.shape[1]):\n if i <= 1:\n continue\n \n meshed_data = np.c_[meshed_data, np.power(xx.ravel(), i)]\n\n if model != None:\n # Predict on the mesh\n if probabilities:\n Z = model.predict_proba(meshed_data)[:, 1].reshape(xx.shape)\n else:\n Z = model.predict(meshed_data).reshape(xx.shape)\n \n # Plot mesh and data\n if data.shape[1] > 2:\n plt.title(\"humor^(\" + str(range(1,data.shape[1])) + \") and number_pets\")\n else:\n plt.title(\"humor and number_pets\")\n plt.xlabel(\"humor\")\n plt.ylabel(\"number_pets\")\n if surface and model != None:\n if probabilities:\n cs = plt.contourf(xx, yy, Z,cmap=plt.cm.coolwarm, alpha=0.4)\n else:\n cs = plt.contourf(xx, yy, Z, levels=[-1,0,1],cmap=plt.cm.coolwarm, alpha=0.4)\n color = [\"blue\" if t == 0 else \"red\" for t in target]\n plt.scatter(data[data.columns[0]], data[data.columns[1]], color=color, s=size)\n\ndef create_data():\n # Set the randomness\n np.random.seed(36)\n\n # Number of users\n n_users = 600\n\n # Relationships\n variable_names = [\"humor\", \"number_pets\"]\n target_name = \"success\"\n\n # Generate data\n a = np.random.normal(5, 5, 600)\n b = np.random.normal(10, 5, 600)\n c = np.random.normal(20, 5, 600)\n\n x1 = list(a+10) + list(c+10) + list(b+10)\n x2 = list((b+10)/10) + list((b+10)/10) + list((c+10)/10)\n target = list(np.ones(len(b))) + list(np.ones(len(b))) + list(np.zeros(len(b)))\n\n data = pd.DataFrame(np.c_[x1, x2], columns=variable_names)\n\n # Add interactions\n data['humor^2'] = np.power(data['humor'], 2)\n data['humor^3'] = np.power(data['humor'], 3)\n data['humor^4'] = np.power(data['humor'], 4)\n\n data[target_name] = target\n\n Y = data[target_name]\n return target_name, variable_names, data, Y\n\ndef handson_data():\n np.random.seed(26)\n X, Y = make_blobs(n_samples=4000, n_features=3, cluster_std=4, centers=3, shuffle=False, random_state=42)\n colors = [\"red\"] * 3800 + [\"blue\"] * 200\n Y = np.array([0] * 3800 + [1] * 200)\n\n order = np.random.choice(range(4000), 4000, False)\n\n X = X[order]\n Y = Y[order]\n\n X = pd.DataFrame(X, columns=['earning', 'geographic', 'experience'])\n\n return X, Y\n\ndef X(complexity=1):\n drops = [\"success\"]\n \n for i in [2, 3, 4]:\n if i > complexity:\n drops.append(\"humor^\" + str(i))\n \n return data.drop(drops, 1)\n\ntarget_name, variable_names, data, Y = create_data()","repo_name":"mariazm/Spring2017_ProfFosterProvost","sub_path":"Module4_Regularization/dstools/data_tools.py","file_name":"data_tools.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"47"}
+{"seq_id":"32833629988","text":"from User.models import User, Group\nfrom Contest.models import Contest, ContestProblem\nfrom Problem.models import Problem\nfrom Statistic.models import Board\nfrom Submission.models import Submission\nfrom Api.models import *\nfrom common.utils import make_key\nfrom django.core.cache import cache\nfrom django.http import HttpResponse\nfrom rest_framework.renderers import JSONRenderer\nimport zlib\nimport logging\nlogger = logging.getLogger('django')\n\nclass JSONResponse(HttpResponse):\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\ndef userDetail(request, uid):\n try:\n u = User.getById(uid)\n except:\n return HttpResponse(status=404)\n if request.method == 'GET':\n serializer = UserSerializer(u)\n return JSONResponse(serializer.data)\n\ndef groupDetail(request, gid):\n try:\n g = Group.getById(gid)\n except:\n return HttpResponse(status=404)\n if request.method == 'GET':\n serializer = GroupSerializer(g)\n return JSONResponse(serializer.data)\n\ndef boardDetail(request, cid):\n try:\n cdata = cache.get(make_key(cid, 'board'))\n if cdata:\n return JSONResponse(eval(zlib.decompress(cdata)))\n c = Contest.getById(cid)\n b = Board()\n b.contest = c\n if request.method == 'GET':\n serializer = BoardSerializer(b)\n data = serializer.data\n cache.set(make_key(cid, 'board'), zlib.compress(str(data)), 60)\n return JSONResponse(data)\n except Exception as e:\n return HttpResponse(e)\n\ndef userLogin(request):\n username, password = '', ''\n if request.META.has_key('HTTP_AUTHORIZATION'):\n authmeth, auth = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\n if authmeth.lower() == 'basic':\n auth = auth.strip().decode('base64')\n username, password = auth.split(':', 1)\n u = User.getUserByRawUsername(username)\n if u!=None and u!=False and u.checkPasswd(password):\n serializer = UserSerializer(u)\n return JSONResponse(serializer.data)\n return HttpResponse(status=404)\n","repo_name":"Mr-Phoebe/BOJ-V3","sub_path":"kari/Api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"8756112635","text":"class Solution(object):\n\tdef setZeroes(self, matrix):\n\t\tm=len(matrix)\n\t\tn=len(matrix[0])\n\t\trow_info=[-1]*m\n\t\tcol_info=[-1]*n\n\t\tfor i in range(m):\n\t\t\tfor j in range(n):\n\t\t\t\tif matrix[i][j]==0:\n\t\t\t\t\trow_info[i]=0\n\t\t\t\t\tcol_info[j]=0\n\t\tfor i in range(m):\n\t\t\tif row_info[i]==0:\n\t\t\t\tfor j in range(n):\n\t\t\t\t\tmatrix[i][j]=0\n\t\tfor j in range(n):\n\t\t\tif col_info[j]==0:\n\t\t\t\tfor i in range(m):\n\t\t\t\t\tmatrix[i][j]=0\n\t\t\ns=Solution()\ns.setZeroes([[0,0,0,5],[4,3,1,4],[0,1,1,4],[1,2,1,3],[0,0,1,1]])\t\t\t\t\t\t","repo_name":"KevinStigma/Leetcode","sub_path":"Set Matrix Zeroes.py","file_name":"Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"25404457927","text":"# coding=utf-8\n\n\"\"\"\nTests for an md_utils program\n\"\"\"\nimport os\nimport unittest\nfrom md_utils.converge_evb_par import main, PAR_FILE_NAME, TRIAL_NAME\nfrom md_utils.md_common import capture_stdout, capture_stderr, diff_lines, silent_remove\nimport logging\n\n# logging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nDISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)\n\n__author__ = 'hmayes'\n\nMAIN_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)\nDATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')\nSUB_DATA_DIR = os.path.join(DATA_DIR, 'converge_evb_par')\nFILL_DIR = os.path.join(DATA_DIR, 'fill_tpl')\n\nCONV_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par.ini')\nCONV_ALT_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_alt.ini')\nPAR_INI = os.path.join(SUB_DATA_DIR, 'evb_par.ini')\nCONV_MAX_ITER_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_max_iters.ini')\nCONV_MAX_STEP_SIZE_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_max_step_size.ini')\nCOPY_OUTPUT_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par.ini')\nMAX_MIN_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_min_val.ini')\nDIRS_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_initial_dirs.ini')\nREPEAT_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_repeat_min.ini')\nTRIANGLE_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_triangle.ini')\n\nCONV_NM_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_so.ini')\nCONV_NM_MULTI_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_so_multi.ini')\nCONV_NOT_TESTED_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_not_tested_method.ini')\nCONV_HOP_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_basin_hop.ini')\nCONV_HOP_MIN_MAX_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_basin_hop_min_max.ini')\nBIN_HOP_RESULT_SUM = os.path.join(SUB_DATA_DIR, 'bin_hop_results.csv')\nGOOD_BIN_HOP_RESULT_SUM = os.path.join(SUB_DATA_DIR, 'bin_hop_results_good.csv')\nBEST_PARAMS = os.path.join(SUB_DATA_DIR, 'best_params.txt')\nGOOD_BEST_PARAMS = os.path.join(SUB_DATA_DIR, 'best_params_good.txt')\n\nPAR_OUT = os.path.join(SUB_DATA_DIR, 'evb_hm_maupin_gauss_3.5.par')\nCOPY_PAR = os.path.join(DATA_DIR, 'evb_viib0.0_viilb1.0.par')\nGOOD_PAR_OUT = os.path.join(SUB_DATA_DIR, 'evb_hm_maupin_gauss_3.5_good.par')\nGOOD_PAR_OUT2 = os.path.join(SUB_DATA_DIR, 'evb_hm_maupin_gauss_3.5_good2.par')\nGOOD_MAX_MIN_PAR_OUT = os.path.join(SUB_DATA_DIR, 'evb_hm_maupin_gauss_3.5_min_max_good.par')\nALT_PAR_FNAME = os.path.join(SUB_DATA_DIR, 'evb.par')\nSCRIPT_OUT = os.path.join(MAIN_DIR, 'script_output.txt')\nSCRIPT_COPY_OUT = os.path.join(DATA_DIR, 'script_viib0.0_viilb1.0.txt')\nGOOD_SCRIPT_OUT = os.path.join(SUB_DATA_DIR, 'script_out_good.txt')\nRESULT_SUM = os.path.join(MAIN_DIR, 'vii0_vij0_gamma.csv')\nGOOD_RESULT_SUM = os.path.join(SUB_DATA_DIR, 'result_sum_good.csv')\nRESID_PAR_OUT = os.path.join(DATA_DIR, 'evb_resid8.0_viilb1.0.par')\nTEST_OUT = os.path.join(SUB_DATA_DIR, 'test_out.csv')\nOTHER_RESID_NAMES = ['evb_resid8.145898_viilb1.0.par',\n 'evb_resid8.381966_viilb1.0.par',\n 'evb_resid9.0_viilb1.0.par',\n 'evb_resid10.618034_viilb1.0.par',\n 'evb_resid10212.284784_viilb1.0.par',\n 'evb_resid26723.164692_viilb1.0.par',\n 'evb_resid76948.755062_viilb1.0.par',\n 'evb_resid77849.0_viilb1.0.par',\n 'evb_resid78408.0_viilb1.0.par']\n\n# for testing to fail well\nMISSING_TRIAL_NAME_KEY_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_missing_key_in_trial_name.ini')\nMISSING_TRIAL_NAME_INI = os.path.join(SUB_DATA_DIR, 
'conv_evb_par_no_trial_name.ini')\nMISSING_TPL_KEY_INI = os.path.join(SUB_DATA_DIR, 'missing_tpl_key.ini')\nMISSING_TPL_INI = os.path.join(SUB_DATA_DIR, 'missing_tpl.ini')\nMISSING_PAR_NAME = os.path.join(SUB_DATA_DIR, 'missing_new_par_name.ini')\nTPL_KEY_IN_MAIN_INI = os.path.join(FILL_DIR, 'tpl_key_in_main.ini')\nVAL_BEFORE_SECTION_INI = os.path.join(FILL_DIR, 'no_initial_section.ini')\nMISSING_MAIN_INI = os.path.join(FILL_DIR, 'missing_main.ini')\nEXTRA_SEC_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_extra_section.ini')\nWRONG_MAX_ITER_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_wrong_max_iter.ini')\nMISSING_EQ_KEY_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_missing_eq_key.ini')\nWRONG_EQ_ORDER_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_wrong_eq_order.ini')\nTOO_MANY_PARAM_VALS_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_vals.ini')\nMISSING_BASH_SCRIPT_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_missing_bash.ini')\nMISSING_RESULT_FNAME_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_missing_output_fname.ini')\nNON_FLOAT_MIN_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_nonfloat_min.ini')\nTOO_MANY_MAX_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_too_many_max.ini')\nNON_FLOAT_DIR_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_multi_par_bad_dirs.ini')\nCONV_HOP_MAX_MIN_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_basin_hop_switch_min_max.ini')\nCONV_HOP_NONFLOAT_MAX_INI = os.path.join(SUB_DATA_DIR, 'conv_evb_par_basin_hop_nonfloat_max.ini')\n\n\nclass TestMainFailWell(unittest.TestCase):\n # These tests only check for (hopefully) helpful messages\n def testHelp(self):\n test_input = ['-h']\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertFalse(output)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"optional arguments\" in output)\n\n def testMissingConfig(self):\n test_input = [\"-c\", 'ghost']\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Could not read file\" in output)\n\n def testMissingDefaultTpl(self):\n test_input = [\"-c\", MISSING_TPL_KEY_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"default template file\" in output)\n\n def testMissingTpl(self):\n test_input = [\"-c\", MISSING_TPL_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"template file specified\" in output)\n\n def testMissingFilledTplName(self):\n # new file name not specified by either config file or command line\n test_input = [\"-c\", MISSING_PAR_NAME]\n main(test_input)\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(PAR_FILE_NAME in output)\n\n def testTplKeyInMain(self):\n # aim for a helpful message if the template key is in main\n test_input = [\"-c\", TPL_KEY_IN_MAIN_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue('Unexpected key' in output)\n\n def testValBeforeSection(self):\n # make sure it gracefully fails when a template key is missing\n test_input = [\"-c\", VAL_BEFORE_SECTION_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue('must start with' in output)\n\n def testMissingMain(self):\n # make 
sure it gracefully fails when a template key is missing\n test_input = [\"-c\", MISSING_MAIN_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"missing the required 'main' section\" in output)\n\n def testExtraSection(self):\n # catch an error if the program finds an unexpected section\n test_input = [\"-c\", EXTRA_SEC_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue('not one of the valid section names' in output)\n\n def testNoTrialName(self):\n # catch an error if the program finds an unexpected section\n test_input = [\"-c\", MISSING_TRIAL_NAME_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Missing key name 'trial_name'\" in output)\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testMissingTrialNameKey(self):\n test_input = [\"-c\", MISSING_TRIAL_NAME_KEY_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(TRIAL_NAME in output)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n\n def testNonIntMaxIter(self):\n test_input = [\"-c\", WRONG_MAX_ITER_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"invalid literal for int()\" in output)\n\n def testMissingEqKey(self):\n test_input = [\"-c\", MISSING_EQ_KEY_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"needed to evaluate\" in output)\n\n def testWrongEqOrder(self):\n test_input = [\"-c\", WRONG_EQ_ORDER_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Could not evaluate\" in output)\n\n def testTwoParamVals(self):\n test_input = [\"-c\", TOO_MANY_PARAM_VALS_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"3 values were found\" in output)\n\n def testMissingBashScript(self):\n test_input = [\"-c\", MISSING_BASH_SCRIPT_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Missing file\" in output)\n\n def testMissingResultFileName(self):\n test_input = [\"-c\", MISSING_RESULT_FNAME_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"bash driver output\" in output)\n\n def testNonfloatMin(self):\n test_input = [\"-c\", NON_FLOAT_MIN_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"convert string\" in output)\n\n def testTooManyMax(self):\n test_input = [\"-c\", TOO_MANY_MAX_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Expected\" in output)\n\n def testNonFloatDir(self):\n test_input = [\"-c\", NON_FLOAT_DIR_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"float\" in output)\n\n def testBasinHopMaxMin(self):\n # test catching min 
greater than max\n test_input = [\"-c\", CONV_HOP_MAX_MIN_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"is not less than\" in output)\n\n def testBasinHopNonFloatMax(self):\n # test catching min greater than max\n test_input = [\"-c\", CONV_HOP_NONFLOAT_MAX_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"could not convert string\" in output)\n\n\nclass TestMain(unittest.TestCase):\n def testMakeParStartLow(self):\n # For this test, there is exactly one value provided for each parameter, and x_0 is too low\n silent_remove(PAR_OUT)\n silent_remove(COPY_PAR)\n try:\n main([\"-c\", CONV_INI])\n self.assertFalse(diff_lines(PAR_OUT, GOOD_PAR_OUT2))\n self.assertFalse(diff_lines(COPY_PAR, GOOD_PAR_OUT2))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(COPY_PAR, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testMakeParStartHigh(self):\n # Testing that starting from an x_0 too high still ends at the same answer\n # Also check specifying par file name in command line; should overwrite what is in the config file\n try:\n silent_remove(ALT_PAR_FNAME)\n main([\"-c\", CONV_ALT_INI, \"-f\", ALT_PAR_FNAME])\n self.assertFalse(diff_lines(ALT_PAR_FNAME, GOOD_PAR_OUT2))\n self.assertFalse(diff_lines(RESID_PAR_OUT, GOOD_PAR_OUT2))\n finally:\n silent_remove(ALT_PAR_FNAME, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n silent_remove(RESID_PAR_OUT, disable=DISABLE_REMOVE)\n for file_name in OTHER_RESID_NAMES:\n silent_remove(os.path.join(DATA_DIR, file_name), disable=DISABLE_REMOVE)\n\n def testNoOpt(self):\n # Testing that will run without any params specified to be optimized\n silent_remove(PAR_OUT)\n test_input = [\"-c\", PAR_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n try:\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"No parameters will be optimized\" in output)\n self.assertFalse(diff_lines(PAR_OUT, GOOD_PAR_OUT2))\n self.assertFalse(diff_lines(COPY_PAR, GOOD_PAR_OUT2))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(COPY_PAR, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testMaxIterNum(self):\n # Specified a small number of iterations\n test_input = [\"-c\", CONV_MAX_ITER_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n try:\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"Maximum number of function evaluations has been exceeded\" in output)\n diffs = diff_lines(PAR_OUT, GOOD_PAR_OUT2)\n self.assertEqual(len(diffs), 2)\n self.assertEqual('- -0.000000 : constant Vii', diffs[0])\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testCopyOutput(self):\n # Stop based on step size; multiple variables\n try:\n # since we backup RESULT_SUM, start by removing it so we don't accidentally make a copy\n silent_remove(RESULT_SUM)\n test_input = [\"-c\", COPY_OUTPUT_INI]\n main(test_input)\n self.assertFalse(diff_lines(SCRIPT_OUT, GOOD_SCRIPT_OUT))\n self.assertFalse(diff_lines(SCRIPT_COPY_OUT, GOOD_SCRIPT_OUT))\n self.assertFalse(diff_lines(RESULT_SUM, GOOD_RESULT_SUM))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(COPY_PAR, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, 
disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_COPY_OUT, disable=DISABLE_REMOVE)\n silent_remove(RESULT_SUM, disable=DISABLE_REMOVE)\n\n def testMaxMin(self):\n # Stop based on step size\n try:\n test_input = [\"-c\", MAX_MIN_INI]\n main(test_input)\n self.assertFalse(diff_lines(PAR_OUT, GOOD_MAX_MIN_PAR_OUT))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testInitialDirections(self):\n # Start multi-variable\n test_input = [\"-c\", DIRS_INI]\n try:\n silent_remove(BEST_PARAMS)\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n with capture_stdout(main, test_input) as output:\n # this option reduced the function calls by 1 (19 to 18)\n self.assertTrue(\"Function evaluations: 223\" in output)\n self.assertFalse(diff_lines(BEST_PARAMS, GOOD_BEST_PARAMS))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n silent_remove(BEST_PARAMS, disable=DISABLE_REMOVE)\n\n def testNonTestedMethod(self):\n # Try alternate minimization method\n try:\n test_input = [\"-c\", CONV_NOT_TESTED_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(SCRIPT_OUT)\n with capture_stderr(main, test_input) as output:\n self.assertTrue(\"Only the following optimization methods\" in output)\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testNelderMead(self):\n # Try alternate minimization method\n try:\n silent_remove(SCRIPT_OUT)\n test_input = [\"-c\", CONV_NM_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(SCRIPT_OUT)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"Function evaluations: 24\" in output)\n self.assertTrue(\"vii_0: 0.000000\" in output)\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testNelderMeadMultiVar(self):\n # Try alternate minimization method for multiple variable. 
Did worse than Powell for multiple functions\n # Results from Powell are:\n # Current function value: 0.000000\n # Iterations: 10\n # Function evaluations: 242\n # Optimized parameters:\n # vii_0 = 2.000000\n # vij_0 = 0.000000\n # gamma = -2.000000\n try:\n test_input = [\"-c\", CONV_NM_MULTI_INI]\n silent_remove(SCRIPT_OUT)\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(SCRIPT_OUT)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"Current function value: 6.192328\" in output)\n self.assertTrue(\"Iterations: 29\" in output)\n self.assertTrue(\"Function evaluations: 56\" in output)\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testBasinHop(self):\n # Try hopping + minimization\n try:\n test_input = [\"-c\", CONV_HOP_INI]\n silent_remove(BIN_HOP_RESULT_SUM)\n silent_remove(SCRIPT_OUT)\n main(test_input)\n self.assertFalse(diff_lines(BIN_HOP_RESULT_SUM, GOOD_BIN_HOP_RESULT_SUM))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n silent_remove(BIN_HOP_RESULT_SUM, disable=DISABLE_REMOVE)\n\n def testBasinHopBounds(self):\n # Try hopping + minimization\n try:\n silent_remove(SCRIPT_OUT)\n test_input = [\"-c\", CONV_HOP_MIN_MAX_INI]\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(SCRIPT_OUT)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"success condition satisfied. Number of function calls: 83\" in output)\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n\n def testRepeatMin(self):\n # Test repeating minimization and removing duplicate opt_params\n test_input = [\"-c\", REPEAT_INI]\n try:\n silent_remove(BEST_PARAMS)\n silent_remove(SCRIPT_OUT)\n silent_remove(TEST_OUT)\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(BEST_PARAMS)\n silent_remove(SCRIPT_OUT)\n silent_remove(TEST_OUT)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"Function evaluations: 25\\nOptimization terminated successfully. \"\n \"Completed 2 of 3 minimization cycles\" in output)\n self.assertFalse(diff_lines(BEST_PARAMS, GOOD_BEST_PARAMS))\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n silent_remove(BEST_PARAMS, disable=DISABLE_REMOVE)\n silent_remove(TEST_OUT, disable=DISABLE_REMOVE)\n\n def testTriangleMin(self):\n # Test stepwise minimization with multiple minimization steps\n test_input = [\"-c\", TRIANGLE_INI]\n try:\n silent_remove(SCRIPT_OUT)\n if logger.isEnabledFor(logging.DEBUG):\n main(test_input)\n silent_remove(SCRIPT_OUT)\n with capture_stdout(main, test_input) as output:\n self.assertTrue(\"Resid: 34.416667 for parameters: 0.500000, 1.833333\\n\" in output)\n self.assertTrue(\"Resid: 4.496540 for parameters: 0.853871, 1.414121, -3.133996, \"\n \" 4.085879, 2.381966\\n\" in output)\n self.assertTrue(\"Function evaluations: 35\\nOptimization terminated successfully. \"\n \"Completed 2 of 2 minimization cycles\" in output)\n finally:\n silent_remove(PAR_OUT, disable=DISABLE_REMOVE)\n silent_remove(SCRIPT_OUT, disable=DISABLE_REMOVE)\n","repo_name":"team-mayes/md_utils","sub_path":"tests/test_converge_evb_par.py","file_name":"test_converge_evb_par.py","file_ext":"py","file_size_in_byte":22284,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"72902803343","text":"from past.utils import old_div\nfrom builtins import range\nfrom EMAN2 import *\n\ndef main():\n\tprogname = os.path.basename(sys.argv[0])\n\tusage = \"\"\"e2ddd_ptclaligner.py [options]\n\n\tDetermines the optimal per-particle alignment of boxed particles from DDD movie frames.\n\t\n\tExample: e2ddd_ptclaligner.py --dddmovie 20140926_43522_raw.hdf --average 20140926_43522_raw_avg.hdf --project .\n\t\"\"\"\n\t\n\tparser = EMArgumentParser(usage=usage,version=EMANVERSION)\n\t\n\tparser.add_argument(\"--dddmovie\",type=str,default=None,help=\"The DDD movie whose particles you wish to align\",required=True)\n\tparser.add_argument(\"--average\",type=str,default=None,help=\"The averaged DDD movie frames you used for boxing\",required=True)\n\tparser.add_argument(\"--project\",type=str,default=None,help=\"Location of eman2 'project' containing e2boxercache and info directories\", required=True)\n\tparser.add_argument(\"--maxiters\",type=int,default=5,help=\"How many times you wish to iteratively improve the alignment\")\n\t\n\t(options, args) = parser.parse_args()\n\t\n\tpid=E2init(sys.argv)\n\t\n\tnfs = EMUtil.get_image_count(options.dddmovie)\n\tcoords = js_open_dict(options.project + '/info/' + options.average[:-4] + '_info.json')\n\tboxes = [b[0:2] for b in coords['boxes']]\n\tbase = js_open_dict(options.project + '/e2boxercache/base.json')\n\tboxsize = base['box_size']\n\tptcls = options.project+'/particles/'+options.average[:-4] + '_ptcls.hdf'\n\tprepost = []\n\tfor b,box in enumerate(boxes[0:5]):\n\t\tprint(('box {}/{}'.format(b+1,len(boxes))))\n\t\tptcl = EMData(ptcls,b)\n\t\tptcl.process_inplace('normalize.edgemean')\n\t\t# iteratively align particle frames to the avg of all of the particle's frames\n\t\tfor iter in range(options.maxiters):\n\t\t\tif iter == 0: bavg=Averagers.get('mean')\n\t\t\taavg=Averagers.get('mean')\n\t\t\tfor i in range(nfs):\n\t\t\t\tr = Region(box[0]-old_div(boxsize,2),box[1]-old_div(boxsize,2),boxsize,boxsize)\n\t\t\t\td = EMData(options.dddmovie,i,False,r)\n\t\t\t\td.process_inplace('normalize.edgemean')\n\t\t\t\tif iter == 0: bavg.add_image(d)\n\t\t\t\td2 = d.align('translational',ptcl,{'intonly':0, 'masked':0, 'maxshift':5, 'nozero':0, 'useflcf':1})\n\t\t\t\tt = d2.get_attr('xform.align2d')\n\t\t\t\td2.transform(t)\n\t\t\t\taavg.add_image(d2)\n\t\t\tif iter == 0: before = bavg.finish()\n\t\t\tptcl = aavg.finish()\n\t\t\n\t\tprepost.append(before)\n\t\tprepost.append(ptcl)\n\t\n\tdisplay(prepost)\n\t\n\t# optionally(?) use interpolation to shift pixels in regions not containing particles (weighted by particles whos shifts have been calculated)\n\t\n\tE2end(pid)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"cryoem/eman2","sub_path":"examples/e2ddd_ptclaligner.py","file_name":"e2ddd_ptclaligner.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"47"}
+{"seq_id":"37311452476","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.core.management.base import BaseCommand\n\nfrom home.models import HomePage\nfrom questionnaires.models import PollIndexPage, SurveyIndexPage, QuizIndexPage, PollFormField, SurveyFormField, \\\n QuizFormField\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n poll_form_fields = PollFormField.objects.all()\n poll_form_field_list = []\n for poll_form_field in poll_form_fields:\n poll_form_field.choices = '|'.join(poll_form_field.choices.split(','))\n poll_form_field_list.append(poll_form_field)\n\n PollFormField.objects.bulk_update(poll_form_field_list, ['choices'], batch_size=1000)\n\n survey_form_fields = SurveyFormField.objects.all()\n survey_form_field_list = []\n for survey_form_field in survey_form_fields:\n survey_form_field.choices = '|'.join(survey_form_field.choices.split(','))\n survey_form_field_list.append(survey_form_field)\n\n SurveyFormField.objects.bulk_update(survey_form_field_list, ['choices'], batch_size=1000)\n\n quiz_form_fields = QuizFormField.objects.all()\n quiz_form_field_list = []\n for quiz_form_field in quiz_form_fields:\n quiz_form_field.choices = '|'.join(quiz_form_field.choices.split(','))\n quiz_form_field.correct_answer = '|'.join(quiz_form_field.correct_answer.split(','))\n quiz_form_field_list.append(quiz_form_field)\n\n QuizFormField.objects.bulk_update(quiz_form_field_list, ['choices', 'correct_answer'], batch_size=1000)\n\n self.stdout.write(self.style.SUCCESS('Fixed questionnaires choices.'))\n","repo_name":"unicef/iogt","sub_path":"questionnaires/management/commands/fix_questionnaires_choices.py","file_name":"fix_questionnaires_choices.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"47"}
+{"seq_id":"72384700622","text":"#!/usr/bin/env python3\n\nimport pyexcel\n\n# Request data from user\ndef get_ip_data():\n input_ip = input(\"\\nWhat is the IP address? \")\n input_driver = input(\"What is the driver associated with this device? \")\n input_device = input(\"Which device IP is assigned to (Switch, server or Router)? \")\n input_iptype = input(\"Input IP type (public or private): \")\n d = {\"IP\": input_ip, \"driver\": input_driver, \"Device\": input_device, \"IP Type\": input_iptype}\n return d\n\n\nmylistdict = [] \n\nprint(\"Hello! This program will make you a *.xls file\")\n\nwhile(True):\n try:\n mylistdict.append(get_ip_data())\n keep_going = input(\"\\nWould you like to add another value? Enter to continue, or enter 'q' to quit: \")\n if (keep_going.lower() == 'q'):\n break\n except:\n continue\n\nfilename = input(\"\\nWhat is the name of the *.xls file? \")\n\npyexcel.save_as(records=mylistdict, dest_file_name=f'{filename}.xls')\n\nprint(\"The file \" + filename + \".xls should be in your local directory\")\n","repo_name":"njpatel04/mycode","sub_path":"excelout/excelwork.py","file_name":"excelwork.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26884170589","text":"# wheel.py\n# cs1 class example\n# based on a cs2 example by Fabio Pellacini\n# python version October, 2011, Devin Balkcom\n# Animation by THC.\n\nfrom cs1lib import *\nfrom math import pi, sin, cos\n\nOUTER_RADIUS = .37 # the relative radius of circles around the outer rim of the wheel\n # a scaling of .37 causes the circles to just barely touch\n \nOUTER_DISTANCE = 1 - OUTER_RADIUS\n\nFRAME_RATE = 30\nTIMESTEP = 1.0 / FRAME_RATE\n\nWINDOW_WIDTH = 400\nWINDOW_HEIGHT = 400\n\nMIN_SIZE = 20 # draw no smaller wheel than this\nWHEEL_COUNT = 5 # draw this many wheels in each recursive step\nANGLE_INCREMENT = 360 // WHEEL_COUNT\n\ndef radians(degrees):\n return degrees * pi / 180.0\n\n# Compute the x coordinate of a location that is\n# a distance 'distance' from a point cx, cy, with angle\n# 'angle' from the horizontal\n\ndef compute_polar_x(cx, angle, distance):\n return cx + cos(radians(angle)) * distance\n \ndef compute_polar_y(cy, angle, distance):\n return cy + sin(radians(angle)) * distance \n \n# Recursively draw the wheels.\ndef draw_wheel(x, y, r, angle, increment):\n # Draw this wheel.\n set_fill_color(0, 0, 1, .2)\n draw_circle(x, y, r)\n set_fill_color(0, 1, 0, .2)\n draw_circle(x, y, r * (OUTER_DISTANCE - OUTER_RADIUS)) \n\n # If this wheel is small enough, we're done.\n if r < MIN_SIZE:\n return\n\n # Not too small. Draw the five outer wheels\n for i in range(WHEEL_COUNT):\n new_x = compute_polar_x(x, angle, r * OUTER_DISTANCE)\n new_y = compute_polar_y(y, angle, r * OUTER_DISTANCE)\n \n angle += ANGLE_INCREMENT\n \n draw_wheel(new_x, new_y, r * OUTER_RADIUS, angle + increment, increment)\n draw_line(x, y, new_x, new_y) # draw the spokes connecting to the outer wheels\n \ndef main():\n global increment\n\n set_clear_color(1, 1, 1)\n clear()\n\n draw_wheel(WINDOW_WIDTH // 2, WINDOW_WIDTH // 2,\n WINDOW_HEIGHT // 2, increment, increment)\n increment += 1\n\nincrement = 0\n\nstart_graphics(main, title = \"The Wheel\", width = WINDOW_WIDTH, height = WINDOW_HEIGHT)","repo_name":"electronsandbits/python-learning","sub_path":"CS1/Fall_2019/week_5/wheel.py","file_name":"wheel.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"4339046407","text":"from sage.all import *\nfrom sage.misc.prandom import randrange\n\n#Fermat primality test\ndef fermat(n, t):\n for i in range(t):\n a = randint(2, n-2)\n r = pow(a, n-1, n)\n\n if r != 1:\n return \"composite\"\n return \"prime\"\n\n#Prime generator\ndef prime_gen(k):\n while True:\n\n #Generate a random number between 2^(k-1) and (2^k) - 1\n p = randint(2**(k-1), 2**k-1)\n\n #Check if the prime is a prime\n if fermat(p, k) == \"prime\":\n return p\n\n#Key Generator\ndef key_gen(k):\n p = prime_gen(k//2)\n q = prime_gen(k//2)\n \n if p != q:\n n = p * q\n phi = (p-1)*(q-1)\n \n e = -1\n while e < 0:\n #Generate random integers between 1 and phi \n x = randint(1, phi)\n if gcd(x, phi) == 1:\n e = x\n #Compute d using the extended Eucliden algorithm\n d = inverse_mod(e, phi)\n \n return (n, e, d)\n \n#Encrypt the message\ndef encryption(m, n, e):\n return pow(m, e, n)\n\n#Decrypt the message\ndef decryption(c, n, d):\n return pow(c, d, n)\n\nif __name__ == \"__main__\":\n #generating the keys\n n, e, d = key_gen(100)\n print('n = '+str(n),'e = '+str(e), 'd = '+str(d))\n \n #message\n m = 12345\n print('message = '+str(m))\n \n #encrypted message\n e = encryption(m, n, e)\n print('Encrypted message = '+str(e))\n\n #decrypted encrypted message\n d = decryption(e, n, d)\n print('Decrypted message = '+str(d)) \n","repo_name":"jambrr/assignments","sub_path":"security/assignment_3.py","file_name":"assignment_3.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"27246298235","text":"from Node import Node\nimport math\n\nclass BalancedTree(object):\n\t\"\"\"docstring for BalancedTree\"\"\"\n\tdef __init__(self):\n\t\tself.rootNode = None\n\n\tdef insert(self, data):\n\t\tif self.rootNode is None:\n\t\t\tself.rootNode = Node(data, None)\n\t\t\tparentNode = self.rootNode\n\t\telse:\n\t\t\tparentNode = self.rootNode.insert(data)\n\n\t\tself.rebalanceTree(parentNode)\n\n\tdef rebalanceTree(self, node):\n\t\tself.setBalance(node)\n\n\t\tif node.parentNode is not None:\n\t\t\tself.rebalanceTree(node.parentNode)\n\n\tdef setBalance(self, node):\n\t\tnode.balance = self.height(node.rightChild) - self.height(node.leftChild)\n\n\t\tif node.balance > 1:\n\t\t\tif node.rightChild.balance < 0:\n\t\t\t\tself.rotateright(node.rightChild.leftChild)\n\t\t\tself.rotateleft(node.rightChild)\n\n\t\tif node.balance < -1:\n\t\t\tif node.leftChild.balance > 0:\n\t\t\t\tself.rotateleft(node.leftChild.rightChild)\n\t\t\tself.rotateright(node.leftChild)\n\n\tdef rotateleft(self, node):\n\t\tson = node.leftChild\n\t\tparent = node.parentNode\n\t\tgrand_parent = node.parentNode.parentNode\n\t\tnode.leftChild = parent\n\t\tnode.parentNode = grand_parent\n\t\tparent.rightChild = son\n\t\tparent.parentNode = node\n\t\tif son:\n\t\t\tson.parentNode = parent\n\t\tif grand_parent:\n\t\t\tif parent == grand_parent.rightChild:\n\t\t\t\tgrand_parent.rightChild = node\n\t\t\telse:\n\t\t\t\tgrand_parent.leftChild = node\n\t\telse:\n\t\t\tself.rootNode = node\n\n\tdef rotateright(self, node):\n\t\tson = node.rightChild\n\t\tparent = node.parentNode\n\t\tgrand_parent = node.parentNode.parentNode\n\t\tnode.rightChild = parent\n\t\tnode.parentNode = grand_parent\n\t\tparent.leftChild = son\n\t\tparent.parentNode = node\n\t\tif son:\n\t\t\tson.parentNode = parent\n\t\tif grand_parent:\n\t\t\tif parent == grand_parent.rightChild:\n\t\t\t\tgrand_parent.rightChild = node\n\t\t\telse:\n\t\t\t\tgrand_parent.leftChild = node\n\t\telse:\n\t\t\tself.rootNode = node\n\n\tdef height(self, node):\n\t\tif node == None:\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn 1 + max(self.height(node.leftChild), self.height(node.rightChild))\n\n\tdef traverseInOrder(self):\n\t\tif self.rootNode is not None:\n\t\t\tself.rootNode.traverseInOrder()","repo_name":"asaaditya8/Python-DSAlgo","sub_path":"AVL/BalancedTree.py","file_name":"BalancedTree.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"14984191539","text":"from flask import request, render_template, redirect, url_for, flash, get_flashed_messages\nfrom flask_wtf import Form\nfrom wtforms.ext.sqlalchemy.orm import model_form\nfrom functools import wraps\nimport json\n\nfrom msimb import app, db\nfrom msimb.models import Note\n\n\n\"\"\"\n Decorator to return SPF formatted responses with just the body\n part of the content, or the whole content for regular requests\n\"\"\"\ndef handle_spf(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n spf = wants_spf()\n content, status = f(*args, spf=spf, **kwargs)\n if spf:\n flashes = render_template('flashes.html', messages=get_flashed_messages())\n\n # put the page content into a format spf will handle\n content = json.dumps({\n 'body': {\n 'content': content,\n 'flashes': flashes\n }\n })\n return content, status\n return decorated\n\n# spfjs sends ?spf=navigation for its requests, not application/json?\ndef wants_spf():\n return request.args.get('spf', False)\n\n@app.route('/')\n@handle_spf\ndef home(spf=False):\n status = 200\n return render_template('home.html', spf=spf), status\n\n# @app.route('/notes', methods=['GET', 'POST'])\n@handle_spf\ndef notes(spf=False):\n status = 200\n NoteForm = model_form(Note, Form, only=['text', 'image'])\n form = NoteForm(request.form)\n\n if request.method == 'POST' and form.validate():\n err = Note.is_spam(request.remote_addr)\n if err:\n flash(err)\n else:\n data = request.get_json()\n db.session.add(Note(form.text.data, form.image.data, request.remote_addr))\n db.session.commit()\n form = NoteForm()\n status = 201\n notes = Note.query.all()\n\n return render_template('notes.html', form=form, notes=notes, spf=spf), status\n\n# @app.route('/notes/')\n@handle_spf\ndef note(note_id, spf=False):\n note = Note.query.get(note_id)\n return str(note), 200\n\n# @app.route('/about')\n@handle_spf\ndef about(spf=False):\n return render_template('about.html', spf=spf), 200\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return redirect(url_for('home'))\n\n","repo_name":"prototo/MSIMB","sub_path":"msimb/views/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"71058672784","text":"class Solution:\n def findLength(self, nums1: List[int], nums2: List[int]) -> int:\n # dp[i][j] means max length of common subarray which ended with nums1[i] & nums2[j]\n # in nums1[i] and nums2[j]\n # dp[i][j] = dp[i-1][j-1] + 1 if nums1[i] == nums2[j] else 0\n\n m , n = len(nums1) , len(nums2)\n dp = [[0 for _ in range(n)] for _ in range(m)]\n \n maxLen = 0\n for i in range(m):\n for j in range(n):\n if nums1[i] == nums2[j]:\n dp[i][j] = dp[i-1][j-1] + 1 if i != 0 and j != 0 else 1\n maxLen = max(maxLen,dp[i][j])\n \n return maxLen","repo_name":"Cannizza-zzk/python_review","sub_path":"leetcode/718.py","file_name":"718.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"32485030199","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.forms import ModelForm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom django.http import HttpResponse, StreamingHttpResponse\nfrom collections import namedtuple\nfrom wsgiref.util import FileWrapper\n\n\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom matplotlib.dates import DateFormatter\n\nfrom groups.models import Group\n\ndef analysis(request, template_name='analysis/display.html'):\n meeting_days_analysis()\n meeting_times_analysis()\n popular_courses_analysis()\n return render(request, template_name)\n\n\ndef meeting_times_analysis():\n times = []\n\n for i in Group.objects.raw('SELECT * FROM groups_group'):\n times.append(i.meeting_time)\n\n fig, ax = plt.subplots()\n\n labels = []\n\n for i in range(min(times), max(times)+1):\n labels.append(i)\n\n sizeOfCount = max(times) + 1 - min(times)\n\n countTimes = [0] * sizeOfCount\n \n for i in range(len(times)):\n for j in range(len(labels)):\n if times[i] == labels[j]:\n countTimes[j] = countTimes[j] + 1\n \n plt.bar(labels, countTimes, align='center', alpha=1, edgecolor='black')\n plt.ylabel(\"Amount of Groups Studying at that Time\")\n plt.title(\"Times of the Day by Amount of Groups Studying\") \n plt.xlabel(\"Times - 24 hours clock\")\n plt.savefig(\"static/images/meeting_times_graph.png\")\n\ndef meeting_days_analysis():\n days = []\n\n figurine, axxx = plt.subplots()\n\n for i in Group.objects.raw('SELECT * FROM groups_group'):\n days.append(i.meeting_day)\n\n count = [0,0,0,0,0,0,0]\n\n for i in range(len(days)):\n if days[i] == 1:\n count[0] = count[0] + 1\n elif days[i] == 2:\n count[1] = count[1] + 1\n elif days[i] == 3:\n count[2] = count[2] + 1\n elif days[i] == 4:\n count[3] = count[3] + 1\n elif days[i] == 5:\n count[4] = count[4] + 1\n elif days[i] == 6:\n count[5] = count[5] + 1\n elif days[i] == 7:\n count[6] = count[6] + 1\n\n DAYS_OF_WEEK = (\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\")\n n = np.arange(len(DAYS_OF_WEEK))\n\n axxx.bar(DAYS_OF_WEEK, count, align='center', alpha=1, edgecolor='black')\n plt.ylabel(\"Amount of Groups Studying on the Day\")\n plt.title('Days of the Week by Number Groups Studying')\n plt.savefig(\"static/images/meeting_days_graph.png\")\n\ndef popular_courses_analysis():\n courses = []\n\n figure, axx = plt.subplots()\n\n for i in Group.objects.raw('SELECT * FROM groups_group'):\n courses.append(i.group_course)\n\n myset = set(courses)\n mylist = list(myset)\n\n countCourses = [0] * len(myset)\n\n \n for i in range(len(courses)):\n for j in range(len(mylist)):\n if courses[i] == mylist[j]:\n countCourses[j] = countCourses[j] + 1\n\n axx.bar(mylist, countCourses, align='center', alpha=1, edgecolor='black')\n plt.xlabel(\"Courses Of Groups\")\n plt.xticks(rotation=90)\n plt.gcf().subplots_adjust(bottom=0.25, left=0.15)\n plt.ylabel(\"Number of Groups Studying Course\")\n plt.title(\"Courses of Groups by Amount of Groups Studying\") \n plt.savefig(\"static/images/popular_courses_graph.png\")\n\n\ndef meeting_times(request):\n return StreamingHttpResponse(FileWrapper(open('static/images/meeting_times_graph.png', 'rb')), content_type='image/png')\n\ndef meeting_days(request):\n return StreamingHttpResponse(FileWrapper(open('static/images/meeting_days_graph.png', 'rb')), content_type='image/png')\n\ndef 
popular_courses(request):\n    return StreamingHttpResponse(FileWrapper(open('static/images/popular_courses_graph.png', 'rb')), content_type='image/png')\n","repo_name":"WriteAfterReed/StudySync_proj","sub_path":"analysis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"19167297646","text":"from waflib.extras import msvs\nfrom waflib import Build\nfrom waflib.TaskGen import feature, before\n\n### Common Flags setup.\n\n# For each tool that should have a set of common flags there should be a set\n# of nested dictionaries such that looking up _flags[][]\n# gives the set of flags to append. A None key can be used for things that should\n# be common across all variants.\n\ncxx_flags_msvc = {\n None : ['/volatile:iso', '/Zi', '/FS', '/nologo', '/W4', '/WX',\n '/utf-8'],\n 'release' : ['/O2', '/EHsc', '/GL', '/Gw', '/Gy', '/fp:fast'],\n 'debug' : ['/Od', '/EHscr', '/GF', '/RTC1', '/Za'],\n}\n\ncxx_flags = {\n 'msvc' : cxx_flags_msvc,\n}\n\nlink_flags_msvc = {\n None : ['/nologo', '/wx', '/debug', '/incremental:no'],\n 'release' : ['/ltcg', '/opt:ref,icf'],\n 'debug' : [],\n}\n\nlink_flags = {\n 'msvc' : link_flags_msvc,\n}\n\n@feature('common_flags')\n@before('process_source', 'apply_link')\ndef apply_common_flags(self):\n def add_flags(target_var, tool_var, flags_dict):\n for tool in None, self.env[tool_var]:\n for variant in None, self.bld.variant:\n try:\n self.env.append_unique(target_var, flags_dict[tool][variant])\n except KeyError:\n pass\n if 'cxx' in self.features:\n add_flags('CXXFLAGS', 'COMPILER_CXX', cxx_flags)\n if 'cxxprogram' in self.features:\n add_flags('LINKFLAGS', 'COMPILER_CXX', link_flags)\n if 'cprogram' in self.features:\n add_flags('LINKFLAGS', 'COMPILER_C', link_flags)\n\n### Standard commands\n\ndef options(ctx):\n ctx.load('msvs')\n ctx.load('compiler_cxx')\n\ndef configure(ctx):\n ctx.load('msvs')\n ctx.load('compiler_cxx')\n\ndef build(ctx):\n defines = []\n \n if ctx.variant == 'release':\n defines += ['NDEBUG']\n\n ctx.program(\n source = ctx.path.ant_glob('src/**/*.cpp'),\n target = 'gb-emu',\n features = 'common_flags',\n includes = ['src'],\n defines = defines,\n )\n\nclass ReleaseBuild(Build.BuildContext):\n cmd = 'build'\n variant = 'release'\n\nclass DebugBuild(Build.BuildContext):\n '''executes the build with debug variant'''\n cmd = 'dbuild'\n fun = 'build'\n variant = 'debug'\n\n### MSVS customization\n\ndef wrap_node_class(cls):\n class Wrapper(cls):\n def get_build_command(self, props):\n return super(Wrapper, self).get_build_command(props).replace('build', 'dbuild')\n def get_rebuild_command(self, props):\n return super(Wrapper, self).get_rebuild_command(props).replace('build', 'dbuild')\n return Wrapper\n\nnode_wrap_dir = {\n 'vsnode_target' : msvs.vsnode_target,\n 'vsnode_build_all' : msvs.vsnode_build_all,\n 'vsnode_install_all' : msvs.vsnode_install_all,\n}\n\nclass MsvsGenerator(msvs.msvs_generator):\n variant = 'debug'\n numver = '12.00'\n vsver = '14'\n platform_toolset_ver = 'v140'\n default_project = 'gb-emu'\n\n def init(self):\n self.projects_dir = self.srcnode.make_node('.vs/projects')\n self.projects_dir.mkdir()\n self.solution_name = '.vs/gb-emu.sln'\n for (prop, cls) in node_wrap_dir.items():\n setattr(self, prop, wrap_node_class(cls))\n super(MsvsGenerator, self).init()\n","repo_name":"jbootsma/gb-emu","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"25439045951","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n\"\"\"\n@author: PANYUNSONG\n@file: load_new_data.py\n@time: 2017/10/7 20:32\n@desc: python3.6\n\"\"\"\n\nfrom sklearn.datasets import load_files\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_new_data(corpus_dir):\n news = load_files(corpus_dir, encoding='utf-8')\n tfidf_vect = TfidfVectorizer()\n X = tfidf_vect.fit_transform(news.data)\n X_train, X_test, y_train, y_test = train_test_split(X, news.target, test_size=0.3, stratify=news.target)\n return X_train, X_test, y_train, y_test\n\n\nif __name__ == '__main__':\n corpus_dir = 'D:/UbunutWin/corpus/news_data/BQ20_seg'\n X_train, X_test, y_train, y_test = load_new_data(corpus_dir)\n print(X_train.shape)\n print(X_test.shape)\n print(y_train.shape)\n print(y_test.shape)\n","repo_name":"zhengzishang/MingText","sub_path":"load_new_data.py","file_name":"load_new_data.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43754497435","text":"from bsedata.bse import BSE\r\nimport pickle\r\nimport pandas as pd\r\nimport time\r\n\r\n\r\n# Store all data(dictionary) to .pkl file\r\ndef store_data_to_file(file_name):\r\n b = BSE()\r\n b = BSE(update_codes = True)\r\n data = b.getScripCodes() # Get all the data from bseIndia\r\n file_name = file_name + \".pkl\" \r\n f = open(file_name,\"wb\")\r\n pickle.dump(data,f)\r\n f.close()\r\n\r\n# store_data_to_file(\"file\") # Run this only once to store data in .pkl file\r\n\r\n# This method returns price by taking the \"key provided by user\"\r\ndef find_price(key_provided_by_user):\r\n file_name = \"file\"\r\n c_data = {}\r\n b = BSE()\r\n file_name = file_name + \".pkl\" \r\n try:\r\n unpickled_df = pd.read_pickle(file_name)\r\n except:\r\n store_data_to_file(\"file\")\r\n unpickled_df = pd.read_pickle(file_name)\r\n \r\n print(len(unpickled_df))\r\n for key, val in unpickled_df.items():\r\n if key_provided_by_user.lower() in val.lower() and \"fund\" not in val.lower():\r\n try:\r\n quote = b.getQuote(key)\r\n c_data[val] = quote[\"currentValue\"]\r\n except Exception:\r\n pass\r\n # print(\"Not available\")\r\n print(c_data)\r\n return(c_data)\r\n\r\n# Testing of above code\r\n# file_name = \"file\"\r\n# codelist = find_price(\"IDFC\")\r\n# codelist = find_price(\"IDFC First Bank Ltd\")\r\n\r\n\r\n","repo_name":"gramo37/StockTracker-Python","sub_path":"stock_tracker.py","file_name":"stock_tracker.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"26228606742","text":"# Lab 7 Q4\n\nnumbers = []\nfor i in range(0, 5):\n number = int(input(\"Please enter a number: \"))\n numbers.append(number)\n\n\ndef average_list(lst):\n avg = sum(lst) / len(lst)\n print(f\"\\nAverage = {avg:.2f}\")\n\n\ndef max_list(lst):\n maximum = max(lst)\n print(f\"Maximum = {maximum}\")\n\n\ndef min_list(lst):\n minimum = min(lst)\n print(f\"Minimum = {minimum}\")\n\n\ndef sort_list(lst):\n print(f\"Original = {lst}\")\n sorty = sorted(lst)\n print(f\"Sorted = {sorty}\")\n\n\ndef sum_list(lst):\n summ = sum(lst)\n print(f\"Sum = {summ}\")\n\n\naverage_list(numbers)\nmax_list(numbers)\nmin_list(numbers)\nsort_list(numbers)\nsum_list(numbers)\n","repo_name":"coreylynch00/CollegeCodeRepo","sub_path":"Python/ThirdYear/Lab7/q4_num_functions.py","file_name":"q4_num_functions.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"12918618486","text":"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import init\n\nfrom yaonlp.layers import BiLSTM\nfrom yaonlp.utils import to_cuda, seq_mask_by_lens\n\nfrom syntax_enhance.syntax_encoder import SyntaxReprs\n\n\nclass PointerGenerator(nn.Module):\n def __init__(self,\n config,\n vocab_size,\n mode = \"baseline\",\n model_file = None) -> None:\n super(PointerGenerator, self).__init__()\n self.hidden_size = config.hidden_size\n self.vocab_size = vocab_size\n self.emb_dim = config.emb_dim\n self.dropout = config.dropout\n self.use_cuda = config.use_cuda\n\n self.coverage_loss_weight = config.coverage_loss_weight # lambda, equation 13\n\n self.use_coverage = config.use_coverage\n self.use_pgen = config.use_pgen\n\n self.mode = mode\n \n if mode == \"baseline\":\n self.encoder = EncoderBase(self.vocab_size, self.emb_dim, self.hidden_size, self.dropout)\n elif mode == \"syntax_enhanced\":\n self.encoder = EncoderSyntaxEnhanced()\n elif mode == \"bert_enhanced\":\n self.encoder = EncoderBertEnhanced()\n elif mode == \"joint_enhanced\":\n self.encoder = EncoderJointEnhanced()\n\n self.reduce_state = ReduceState(self.hidden_size)\n self.decoder = Decoder(self.vocab_size, self.emb_dim, self.hidden_size, self.dropout, self.use_cuda)\n\n if model_file is not None:\n state = torch.load(model_file, map_location= lambda storage, location: storage) # TODO CPU storage?\n self.encoder.load_state_dict(state['encoder_state_dict'])\n self.decoder.load_state_dict(state['decoder_state_dict'], strict=False)\n self.reduce_state.load_state_dict(state['reduce_state_dict'])\n\n def forward(self, \n enc_inputs,\n enc_lens, \n enc_inputs_extend,\n oov_nums,\n dec_inputs, \n dec_tags,\n dec_lens,\n max_len=150):\n batch_size, seq_len = enc_inputs.size()\n\n # enc_states: (batch_size, seq_len, hidden_size * 2)\n enc_states, enc_hidden = self.encoder(inputs=enc_inputs, seq_lens=enc_lens)\n\n # it is needed, because encoder's lstm is bidirectional, but decoder isn't\n dec_state = self.reduce_state(enc_hidden)\n\n # initial\n context_vector = torch.zeros(batch_size, self.hidden_size * 2)\n coverage_vector = torch.zeros(enc_states.size()[:2]) # (batch_size, seq_len)\n\n if self.use_cuda and torch.cuda.is_available():\n context_vector, coverage_vector = to_cuda(data=(context_vector, coverage_vector))\n\n step_losses = []\n for step in range(min(max(dec_lens), max_len)):\n dec_prev = dec_inputs[:, step]\n\n final_dist, dec_state, h_star_t, attn_dist, p_gen, next_coverage = \\\n self.decoder(prev_target=dec_prev, \n prev_dec_state=dec_state, \n enc_states=enc_states, \n enc_input_extend=enc_inputs_extend,\n oov_nums=oov_nums,\n dec_lens = dec_lens,\n enc_lens = enc_lens,\n prev_context_vector=context_vector,\n coverage=coverage_vector)\n \n tag = dec_tags[:, step]\n if self.use_cuda and torch.cuda.is_available():\n tag = tag.cuda()\n\n gold_probs = final_dist.gather(1, tag.unsqueeze(1)).squeeze() # (batch_size, )\n step_loss = -torch.log(gold_probs)\n\n if self.use_coverage:\n step_coveraged_loss = torch.sum(torch.min(attn_dist, coverage_vector), dim=1) # equation 12\n step_loss = step_loss + self.coverage_loss_weight * step_coveraged_loss # equation 13\n\n coverage_vector = next_coverage\n \n step_losses.append(step_loss)\n\n # mask\n dec_masks = seq_mask_by_lens(dec_lens)\n if self.use_cuda and torch.cuda.is_available():\n dec_masks = dec_masks.cuda()\n\n losses = torch.stack(step_losses, dim=1)\n losses *= dec_masks\n\n sum_losses = torch.sum(losses, 
dim=1)\n if self.use_cuda and torch.cuda.is_available():\n dec_lens = dec_lens.cuda()\n batch_avg_loss = sum_losses / dec_lens\n loss = torch.mean(batch_avg_loss)\n \n return loss\n\n\nclass EncoderBase(nn.Module):\n def __init__(self, \n vocab_size, \n emb_dim,\n hidden_size, \n dropout) -> None:\n super(EncoderBase, self).__init__()\n self.embedding = nn.Embedding(num_embeddings=vocab_size, \n embedding_dim=emb_dim)\n\n self.bilstm = BiLSTM(input_size=emb_dim, \n hidden_size=hidden_size,\n num_layers=1, \n batch_first=True, \n dropout=dropout)\n\n # self.reset_parameters()\n\n def reset_parameters(self):\n init.normal_(self.embedding.weight)\n\n def forward(self, inputs, seq_lens):\n embedding = self.embedding(inputs)\n\n enc_states, hidden = self.bilstm(inputs=embedding, seq_lens=seq_lens)\n enc_states = enc_states.contiguous()\n\n return enc_states, hidden\n\n\nclass EncoderSyntaxEnhanced(nn.Module):\n def __init__(self, \n vocab_size, \n emb_dim,\n hidden_size, \n dropout) -> None:\n super(EncoderSyntaxEnhanced, self).__init__()\n\n self.syntax_encoder = SyntaxReprs(parser_file=\"/home/jgy/YaoNLP/pretrained_model/dependency_parser/parser.pt\")\n\n self.embedding = nn.Embedding(num_embeddings=vocab_size, \n embedding_dim=emb_dim)\n\n self.bilstm = BiLSTM(input_size=emb_dim, # TODO emb_dim + parser_emb_dim\n hidden_size=hidden_size,\n num_layers=1, \n batch_first=True, \n dropout=dropout)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n\n return\n\n def forward(self, inputs, syntax_tokens, seq_lens, syntax_tokens_lens):\n x_syn = self.encoder(syntax_tokens, syntax_tokens_lens)\n\n embed_x = self.embedding(inputs)\n # TODO syntax enhanced\n if self.training:\n embed_x, x_syn = drop_bi_input_independent(embed_x, x_syn, self.config.dropout_emb)\n\n x_lexical = torch.cat((embed_x, x_syn), dim=2)\n\n\n enc_states, hidden = self.bilstm(inputs=x_lexical, seq_lens=seq_lens)\n enc_states = enc_states.contiguous()\n\n return enc_states, hidden\n\n\nclass EncoderBertEnhanced(nn.Module):\n def __init__(self) -> None:\n super(EncoderBertEnhanced, self).__init__()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n\n return\n\n def forward(self):\n\n return\n\n\nclass EncoderJointEnhanced(nn.Module):\n def __init__(self) -> None:\n super(EncoderJointEnhanced, self).__init__()\n \n \n self.reset_parameters()\n\n def reset_parameters(self):\n\n return\n\n def forward(self):\n\n return\n\n\n# Add to the graph a linear layer to reduce the encoder's final hiddent and cell state into a single initial state for the decoder\nclass ReduceState(nn.Module):\n def __init__(self, hidden_size) -> None:\n super(ReduceState, self).__init__()\n self.hidden_size = hidden_size\n\n self.reduce_h = nn.Linear(hidden_size * 2, hidden_size)\n self.reduce_c = nn.Linear(hidden_size * 2, hidden_size)\n\n # self.reset_parameters()\n\n def reset_parameters(self):\n init.normal_(self.reduce_h.weight)\n init.normal_(self.reduce_c.weight)\n\n def forward(self, enc_hidden):\n h, c = enc_hidden # from bilstm, both with shape (2, batch_size, hidden_size)\n\n # (batch_size, hidden_size * 2)\n h_in = h.transpose(0, 1).contiguous().view(-1, self.hidden_size * 2)\n hidden_reduced_h = F.relu(self.reduce_h(h_in))\n\n c_in = c.transpose(0, 1).contiguous().view(-1, self.hidden_size * 2)\n hidden_reduced_c = F.relu(self.reduce_c(c_in))\n\n # h, c dim = (1, b, hidden_size)\n return (hidden_reduced_h.unsqueeze(0), hidden_reduced_c.unsqueeze(0)) \n\n\nclass Attention(nn.Module):\n def __init__(self, hidden_size) -> None:\n 
super(Attention, self).__init__()\n self.use_coverage = True\n self.use_cuda = True\n\n self.W_h = nn.Linear(hidden_size * 2, hidden_size * 2) # encoder projection\n self.W_s = nn.Linear(hidden_size * 2, hidden_size * 2) # decoder projection\n self.W_c = nn.Linear(1, hidden_size * 2, bias=False) # coverage projection\n\n self.v = nn.Linear(hidden_size * 2, 1, bias=False)\n\n # self.reset_parameters()\n\n def reset_parameters(self):\n init.normal_(self.W_h.weight)\n init.normal_(self.W_s.weight)\n init.normal_(self.W_c.weight)\n init.normal_(self.v.weight)\n\n def forward(self, dec_in_state, enc_states, enc_lens, coverage_vector):\n batch_size, seq_len, n = enc_states.size() # (batch_size, seq_len, hidden_size * 2)\n\n enc_feats = self.W_h(enc_states) # (batch_size, seq_len, hidden_size * 2)\n enc_feats = enc_feats.view(-1, n) # (batch_size * seq_len, hidden_size * 2)\n\n dec_feats = self.W_s(dec_in_state) # (batch_size, hidden_size * 2)\n\n # expand decoder features, (batch_size, seq_len, hidden_size * 2)\n dec_feats_expanded = dec_feats.unsqueeze(1).expand(enc_states.size()).contiguous() \n dec_feats_expanded = dec_feats_expanded.view(-1, n) # (batch_size * seq_len, hidden_size * 2)\n\n attn_feats = enc_feats + dec_feats_expanded # (batch_size * seq_len, hidden_size * 2)\n\n # calculate e, equation 11\n if self.use_coverage:\n coverage_vector = coverage_vector.view(-1, 1) # (batch_size * seq_len, 1)\n coverage_feats = self.W_c(coverage_vector) # (batch_size * seq_len, hidden_size * 2)\n attn_feats += coverage_feats # (batch_size * seq_len, hidden_size * 2)\n \n e = F.tanh(attn_feats)\n e = self.v(e) # (batch_size * seq_len, 1)\n e = e.view(-1, seq_len) # (batch_size, seq_len)\n\n enc_pad_mask = seq_mask_by_lens(enc_lens)\n if self.use_cuda and torch.cuda.is_available():\n enc_pad_mask = enc_pad_mask.cuda()\n\n # calculate attention distribution 'a', equation 2\n attn_dist_ = F.softmax(e, dim=1) * enc_pad_mask # (batch_size, seq_len) \n normalization_factor = attn_dist_.sum(1, keepdim=True)\n attn_dist = attn_dist_ / normalization_factor # (batch_size, seq_len)\n\n attn_dist = attn_dist.unsqueeze(1) # (batch_size, 1, seq_len)\n # equation 3, calculate context vectors 'h_star_t\n context_vector = torch.bmm(attn_dist, enc_states) \n context_vector = context_vector.squeeze(1) # (batch_size, hidden_size * 2)\n\n # squeeze, (batch_size, seq_len)\n attn_dist = attn_dist.squeeze(1) # (batch_size, seq_len)\n\n # c_t, equation 10\n if self.use_coverage:\n coverage_vector = coverage_vector.view(-1, seq_len) # (batch_size, seq_len)\n coverage_vector = coverage_vector + attn_dist\n\n return context_vector, attn_dist, coverage_vector\n\n\nclass Decoder(nn.Module):\n def __init__(self, \n vocab_size, \n emb_dim,\n hidden_size,\n dropout,\n use_cuda) -> None:\n super(Decoder, self).__init__()\n self.hidden_size = hidden_size\n\n self.use_coverage = True\n self.use_pgen = True\n \n self.use_cuda = use_cuda\n\n self.embedding = nn.Embedding(num_embeddings=vocab_size, \n embedding_dim=emb_dim)\n\n self.x_context = nn.Linear(hidden_size * 2 + emb_dim, emb_dim)\n\n self.lstm = nn.LSTM(input_size=emb_dim, \n hidden_size=hidden_size,\n num_layers=1, \n batch_first=True, \n bidirectional=False, \n dropout=dropout)\n \n self.attention = Attention(hidden_size)\n\n self.p_vocab1 = nn.Linear(hidden_size * 3, hidden_size)\n self.p_vocab2 = nn.Linear(hidden_size, vocab_size)\n\n self.p_gen = nn.Linear(hidden_size * 4 + emb_dim, 1)\n\n # self.reset_parameters()\n self.sign = 0 # mark the step is 0 or not\n \n def 
reset_parameters(self):\n        init.normal_(self.embedding.weight)\n        init.normal_(self.x_context.weight)\n        init.normal_(self.p_vocab1.weight)\n        init.normal_(self.p_vocab2.weight)\n        init.normal_(self.p_gen.weight)\n\n    def forward(self, \n                prev_target,\n                prev_dec_state, \n                enc_states, \n                enc_input_extend, \n                dec_lens,\n                enc_lens,\n                oov_nums, \n                prev_context_vector, \n                coverage):\n        if (not self.training) and (self.sign == 0):\n            h_decoder, c_decoder = prev_dec_state\n            dec_state_hat = torch.cat((h_decoder.view(-1, self.hidden_size),\n                                    c_decoder.view(-1, self.hidden_size)), 1) # (batch_size, hidden_size * 2)\n            _, _, coverage_next = self.attention(dec_state_hat, enc_states, enc_lens, coverage)\n            coverage = coverage_next\n\n            self.sign = 1\n\n        batch_size, seq_len, _ = enc_states.size()\n        # decoder state 's_t' \n        target_emb = self.embedding(prev_target)\n        # project target embedding and context vectors into a new embedding space\n        x_context = self.x_context(torch.cat((prev_context_vector, target_emb), dim=1)) # (batch, emb_dim)\n\n        lstm_out, dec_state = self.lstm(x_context.unsqueeze(1), prev_dec_state) # lstm_out: (batch_size, 1, hidden_size) \n        dec_h, dec_c = dec_state\n        # concatenate lstm hidden state and cell state, (batch_size, hidden_size * 2)\n        dec_state_hat = torch.cat((dec_h.view(-1, self.hidden_size),\n                                   dec_c.view(-1, self.hidden_size)), dim=1) \n\n        # attention distribution\n        # (batch_size, hidden_size * 2); (batch_size, seq_len); (batch_size, seq_len);\n        h_star_t, attn_dist, coverage_next = self.attention(dec_state_hat, enc_states, enc_lens, coverage)\n\n        if self.training or (self.sign == 1):\n            coverage_vector = coverage_next\n\n        # calculate vocab distribution P_vocab, equation 4\n        combine_pv = torch.cat((lstm_out.squeeze(1), h_star_t), dim=1)\n        vocab_dist = F.softmax(self.p_vocab2(self.p_vocab1(combine_pv)), dim=1) # (batch_size, vocab_size)\n\n        if self.use_pgen:\n            # calculate P_gen, equation 8\n            combine_pg = torch.cat((h_star_t, dec_state_hat, x_context), dim=1)\n            p_gen = torch.sigmoid(self.p_gen(combine_pg))\n\n            # calculate final distribution P_w, equation 9\n            vocab_dist_ = p_gen * vocab_dist\n            attn_dist_ = (1 - p_gen) * attn_dist\n\n            # extend vocab\n            max_oov_nums = torch.max(oov_nums)\n            extra_zeros = torch.zeros(batch_size, max_oov_nums)\n            if self.use_cuda and torch.cuda.is_available():\n                extra_zeros = extra_zeros.cuda()\n            vocab_dist_ = torch.cat((vocab_dist_, extra_zeros), dim=1)\n            # final_dist = vocab_dist_ + attn_dist_\n            final_dist = vocab_dist_.scatter_add(dim=1, index=enc_input_extend, src=attn_dist_)\n        else:\n            # p_gen must still be bound here, otherwise the return below raises UnboundLocalError\n            p_gen = None\n            final_dist = vocab_dist\n\n        return final_dist, dec_state, h_star_t, attn_dist, p_gen, coverage_vector\n","repo_name":"Zzoay/YaoNLP","sub_path":"examples/text_summarization/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"35438779316","text":"# Wiktor Dybalski\n# Program na początku rekurencyjnie przeszukuje ilość ropy w danym miejscu a następnie zapisuję ją do tablicy stations.\n# Następnie program przechodzi przez pierwszy wiersz tablicy T oraz przy pomocy tablicy missed sprawdza on najmniejszą\n# możliwą ilość zatrzymań w danym przypadku idąc po kolei od punktu do punktu w którym jest ropa.\n# Złożoność tego algorytmu to około: O(n * m + m + m log m)\nimport heapq\n\nfrom zad8testy import runtests\n\n\ndef find(T, i, j, visited):\n m = len(T[0])\n n = len(T)\n if n - 1 >= i >= 0 and m - 1 >= j >= 0:\n if T[i][j] == 0 or visited[i][j]:\n return 0\n else:\n visited[i][j] = True\n return T[i][j] + find(T, i + 1, j, visited) + find(T, i, j + 1, visited) + find(T, i - 1, j, visited) \\\n + find(T, i, j - 1, visited)\n else:\n return 0\n\n\ndef plan(T):\n m = len(T[0])\n n = len(T)\n i = 0\n stations = []\n missed = []\n visited = [[False for _ in range(m)] for _ in range(n)]\n for j in range(m):\n if T[0][j] != 0:\n stations.append([j, find(T, i, j, visited)])\n if not T[0][m - 1]:\n stations.append([m - 1, 0])\n fuel = stations[0][1]\n stations.pop(0)\n cnt = 1\n prev = 0\n for station, oil in stations:\n dis = station - prev\n heapq.heappush(missed, -oil)\n if fuel >= dis:\n prev = station\n fuel -= dis\n continue\n while fuel < dis and missed:\n fuel += -heapq.heappop(missed)\n cnt += 1\n return cnt\n\n\n# zmien all_tests na True zeby uruchomic wszystkie testy\nruntests(plan, all_tests=True)\n","repo_name":"WiktorDybalski/Python_projects-term_2-ASD","sub_path":"Offline_Tasks/zad8/zad8.py","file_name":"zad8.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"18032693226","text":"import random\n\n'''\n 1-100 arasında rastgele üretilecek bir sayıyı aşağı yukarı ifadeleri ile\n buldurmaya çalışın. (hak = 5)\n ** \"random modülü\" için \"python random\" şeklinde arama yapın.\n ** 100 üzerinden puanlama yapın. Her soru 20 puan.\n ** Hak bilgisini kullanıcıdan alın ve her soru belirtilen can sayısı\n üzerinden hesaplansın.\n'''\nsayi = random.randint(1,100)\ncan = int(input(\"can sayisini girin: \"))\nhak = can\nsayac = 0\n\nwhile hak > 0:\n hak -= 1\n sayac += 1\n tahmin = int(input(\"bir sayi giriniz: \"))\n\n if tahmin == sayi:\n print(f\"tebrikler bildiniz. Toplam puaniniz: {100 - (100 / can) * (sayac - 1)}\" )\n break\n elif sayi > tahmin:\n print(\"Yukari\")\n else:\n print(\"Asagi\")\n\n if hak == 0:\n print(f\"Hakkiniz bitti. Tutulan sayi: {sayi} \")\n\n","repo_name":"Mutluaydin/python-week-4","sub_path":"SayiTahminOyunu.py","file_name":"SayiTahminOyunu.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"23104144923","text":"from typing import List, Optional\n\nfrom sqlalchemy import or_, func, outerjoin\n\nfrom .. import models, schemas, oauth2\nfrom fastapi import FastAPI, HTTPException, Depends, APIRouter\nfrom sqlalchemy.orm import Session\nfrom starlette import status\nfrom starlette.responses import Response\nfrom ..database import engine, SessionLocal, get_db\n\nrouter = APIRouter(\n prefix='/posts',\n tags=['Posts']\n)\n\n\n@router.get(\"/\", response_model=List[schemas.PostOut])\ndef get_all_posts(\n db: Session = Depends(get_db),\n current_user: int = Depends(oauth2.get_current_user),\n limit: int = 10,\n skip: int = 0,\n search: Optional[str] = None\n):\n # Base query to get all posts by the current user\n query = db.query(models.Post).filter(models.Post.owner_id == current_user.id)\n\n # Filter the posts based on the search string\n if search is not None:\n search_str = f\"%{search}%\"\n query = query.filter(\n or_(\n models.Post.title.ilike(search_str),\n models.Post.content.ilike(search_str)\n )\n )\n\n # Apply limit and offset to the query\n posts = query.limit(limit).offset(skip).all()\n\n results = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(\n models.Vote,models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).all()\n\n\n print(results)\n return results\n\n\n\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED, response_model=schemas.Post)\ndef create_post(post: schemas.PostCreate, db: Session = Depends(get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n new_post = models.Post(**post.dict(), owner_id=current_user.id)\n db.add(new_post)\n db.commit()\n db.refresh(new_post)\n return new_post\n\n\n@router.get(\"/{post_id}\", response_model=schemas.PostOut)\ndef get_post_by_id(post_id: int, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n post = db.query(models.Post, func.count(models.Vote.post_id).label(\"votes\")).join(\n models.Vote,models.Vote.post_id == models.Post.id, isouter=True).group_by(models.Post.id).filter(models.Post.id == post_id).first()\n if post:\n return post\n else:\n raise HTTPException(status_code=404, detail=\"Post not found\")\n\n\n@router.delete(\"/{id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete_post(id: int, db: Session = Depends(get_db), current_user: int = Depends(oauth2.get_current_user)):\n deleted_post = db.query(models.Post).filter(models.Post.id == id).first()\n\n if deleted_post is None:\n raise HTTPException(status_code=404, detail=\"Post not found\")\n\n if deleted_post.owner_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Not authorized to perform request action\")\n\n db.delete(deleted_post)\n db.commit()\n\n return Response(status_code=status.HTTP_204_NO_CONTENT)\n\n\n@router.put(\"/{id}\", response_model=schemas.Post)\ndef update_post(id: int, post: schemas.PostCreate, db: Session = Depends(get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n existing_post = db.query(models.Post).filter(models.Post.id == id).first()\n\n if existing_post is None:\n raise HTTPException(status_code=404, detail=\"Post not found\")\n if existing_post.owner_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=\"Not authorized to perform request action\")\n\n existing_post.title = post.title\n existing_post.content = post.content\n existing_post.published = post.published\n db.commit()\n\n return 
existing_post\n","repo_name":"vkquang1810/fastapi-quangvo","sub_path":"app/routers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"8265082580","text":"# put in the same file 2 log files (txt format) and the script will create a new txt file with all the different between the files\n# The script ignores the time on each line and display\nimport glob, os\n\n# Find 2 logs in this folder\ncounter_files = 0\nnameFile1 = None # File 1\nnameFile2 = None # File 2\nfor file in glob.glob(\"*.txt\"):\n if \"result\" not in file and \"lines_only_in_\" not in file:\n counter_files += 1\n if counter_files == 1:\n nameFile1 = file\n elif counter_files == 2:\n nameFile2 = file\n print(file)\nprint('Total txt files: ' + str(counter_files))\nif counter_files != 2:\n print('[-] Please put exactly 2 txt files in order to compare between them (without the \"result\" word)')\n exit()\n\n# Read the files and check if some sentences existing in 1 file and not in the other one\nprint(\"---\")\n\n# Split the content of file 1 to groups of 4 words and check if it exists in the other file\nlines_1 = None\nlines_2 = None\n\nwith open(nameFile1, encoding=\"utf8\", errors='ignore') as f:\n lines_1 = f.readlines()\n # print(lines_1)\n\nwith open(nameFile2, encoding=\"utf8\", errors='ignore') as f:\n lines_2 = f.readlines()\n # print(lines_2)\n\n\n\ndef checkDiff(content_1, content_2, fileName):\n counter_diff = 0\n offset = 32 # Use 32 for android logs (to ignore the date and the time)\n offset = 0\n f = open(fileName, \"a\")\n print(\"*******\")\n for line in content_1: # Check if line exist in lines_2\n line_without_time = line[offset:] # Starting from character 30 to avoid the data and the time\n is_exist = False\n try:\n for element in content_2:\n if line_without_time in element:\n is_exist = True\n if is_exist == False:\n is_ok = False\n if white_keywords != None:\n for white in white_keywords:\n if white in line:\n is_ok = True\n\n if black_keywords != None:\n for black in black_keywords:\n if black in line:\n is_ok = False\n\n if white_keywords == None and black_keywords == None:\n is_ok = True\n\n if is_ok:\n print(\"--- \")\n counter_diff += 1\n print(\"Different: \" + str(counter_diff))\n print(line)\n f.write(line)\n except KeyboardInterrupt as ex:\n print(\"Exception occur: \" + str(ex))\n\n f.close()\n\n\n# Use black and white keywords for filtering\n# black_keywords = [\"wifi\"]\n# white_keywords = [\"error\", \"exception\"]\nblack_keywords = None\nwhite_keywords = None\n\nprint(\"\\n[+] These lines do not exist in txt file \" + nameFile2 + \" and exist in \" + nameFile1 + \"\\n\")\ncheckDiff(content_1=lines_1, content_2=lines_2, fileName=\"lines_only_in_\" + nameFile1)\nprint(\"\\n[+] These lines do not exist in txt file \" + nameFile1 + \" and exist in \" + nameFile2 + \"\\n\")\ncheckDiff(content_1=lines_2, content_2=lines_1, fileName=\"lines_only_in_\" + nameFile2)\n\n# print all of that inside a new txt file..\n","repo_name":"winosli/Difflog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"9406960826","text":"#!/usr/bin/env python3\n\n# Author: Scott H. Hawley \n\nfrom prefigure.prefigure import get_all_args, push_wandb_config\nimport math\nimport json\nimport subprocess\nimport os, sys\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pathlib import Path\n\nimport torch\nimport torchaudio\nfrom torch import optim, nn, utils, Tensor\nfrom torch.nn import functional as F\n\nfrom tqdm.auto import tqdm, trange\nfrom einops import rearrange, repeat\n\nimport wandb\n\nfrom aeiou.viz import *\nfrom aeiou.hpc import freeze\nfrom audio_algebra.datasets import DualEffectsDataset\nfrom audio_algebra.aa_effects import *\nfrom audio_algebra.given_models import SpectrogramAE, MagSpectrogramAE, MelSpectrogramAE, DVAEWrapper\nfrom audio_algebra.DiffusionDVAE import DiffusionDVAE, sample\n\n\nfrom audiomentations import * # list of effects\n \n\n# Lightning: 1. import Lightning\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nfrom pytorch_lightning.utilities import rank_zero_only\n\ndef vicreg_var_loss_l2(z, gamma=1, eps=1e-4):\n std_z = torch.sqrt(z.var(dim=0) + eps)\n return torch.mean(F.relu( (gamma - std_z))**2 ) # the relu gets us the max(0, ...)\n\nvicreg_var_loss = vicreg_var_loss_l2\n\n\n# Lightning: 2. define LightningModule\nclass AAEffectsModule(pl.LightningModule):\n def __init__(self, given_model, aa_model, train_dl, debug=False):\n super().__init__()\n self.given_model, self.aa_model, self.train_dl, self.debug = given_model, aa_model, train_dl, debug\n self.train_iter = iter(train_dl)\n self.batch_shape = None \n \n\n def training_step(self, batch, batch_idx): \n if self.debug: print(\"\\nStarting AAEffectsModule.training_step\")\n\n mseloss = nn.MSELoss()\n \n # vicreg: 1. invariance\n if self.debug: print(\" calling do_mixing\")\n with torch.cuda.amp.autocast():\n archive = do_mixing(batch, self.given_model, self.aa_model, self.device)\n # zs are the projections from aa_model, archive[\"yz\"] are reps from given_model\n [za1, zb1, za2, zb2] = [x.float() for x in archive[\"zs\"]] # a & b are two audio clips, 1 and 2 are effects\n \n za2_guess = zb2 - zb1 + za1 # try to enforce enfoce algebraic property \n zb2_guess = za2 - za1 + zb1\n mix_loss = (mseloss(za2_guess, za2) + mseloss(zb2_guess, zb2))/2\n \n var_loss = (vicreg_var_loss(za1) + vicreg_var_loss(za2) + vicreg_var_loss(zb1) + vicreg_var_loss(zb2))/4 # vicreg: 2. variance\n cov_loss = (vicreg_cov_loss(za1) + vicreg_cov_loss(za2) + vicreg_cov_loss(zb1) + vicreg_cov_loss(zb2))/4 # vicreg: 3. covariance\n\n # reconstruction loss: inversion of aa map h^{-1}(z): z -> y, i.e. train the aa decoder\n aa_recon_loss = mseloss(archive[\"yrecons\"][0].float(), archive[\"ys\"][0].float())\n for i in range(1,4):\n aa_recon_loss += mseloss(archive[\"yrecons\"][i].float(), archive[\"ys\"][i].float()) \n\n loss = mix_loss + var_loss + cov_loss + aa_recon_loss # --- full loss function\n\n if self.debug: print(\" full loss calculated. 
setting log_dict...\")\n # logging during training\n log_dict = {\n 'tloss': loss.detach(),\n 'mix_loss': mix_loss.detach(),\n 'var_loss': var_loss.detach(),\n 'cov_loss': cov_loss.detach(),\n 'aa_recon_loss': aa_recon_loss.detach()\n }\n #log_dict['learning_rate'] = self.opt.param_groups[0]['lr']\n self.log_dict(log_dict, prog_bar=True, on_step=True)\n\n if self.debug: print(\" training_step: returning loss\\n\")\n\n return loss\n\n def configure_optimizers(self):\n optimizer = optim.Adam([*self.aa_model.parameters()], lr=5e-4) # Adam optimizer\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-3, total_steps=self.trainer.estimated_stepping_batches)\n return [optimizer], [scheduler]\n\n\nclass ExceptionCallback(pl.Callback):\n def on_exception(self, trainer, module, err):\n print(f'{type(err).__name__}: {err}', file=sys.stderr)\n\n\nclass DemoCallback(pl.Callback):\n def __init__(self, val_dl, given_model, aa_model, device, global_args, wandb_logger):\n super().__init__()\n self.given_model, self.aa_model, self.device = given_model, aa_model, device\n self.demo_every = global_args.demo_every\n self.demo_samples = global_args.sample_size\n self.demo_steps = global_args.demo_steps\n self.demo_dl = iter(val_dl)\n self.sample_rate = global_args.sample_rate\n self.debug = True\n self.wandb_logger = wandb_logger\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_end(self, trainer, module, outputs, batch, batch_idx):\n last_demo_step = -1\n if (trainer.global_step - 1) % self.demo_every != 0 or last_demo_step == trainer.global_step:\n #if trainer.current_epoch % self.demo_every != 0:\n return\n if self.debug: print(\"\\nin DemoCallback.on_train_batch_end\")\n last_demo_step = trainer.global_step\n\n batch = next(self.demo_dl)\n with torch.no_grad():\n archive = do_mixing(batch, self.given_model, self.aa_model, self.device)\n # zs are the projections from aa_model, archive[\"yz\"] are reps from given_model\n za1, zb1, za2, zb2 = archive[\"zs\"] # a & b are two audio clips, 1 and 2 are effects\n za2_guess = zb2 - zb1 + za1 # try to enforce enfoce algebraic property \n zb2_guess = za2 - za1 + zb1\n\n if self.debug: print(\"trying to log to wandb\")\n #try: # don't crash the whole run just because logging fails\n if True:\n log_dict = {}\n e1names, e2names = batch[\"e1\"], batch[\"e2\"] # these are batches of names of audio effects \n if self.debug: print(\"effects: [1,2]: \",list(zip(e1names, e2names)))\n\n audios, melspecs, tokenspecs, pcs = [],[],[],[]\n for var, name in zip([za1, za2, zb1, zb2],[\"za1\", \"za2\", \"zb1\", \"zb2\"]):\n if self.debug: print(\" logging: name =\",name, \", var.shape =\",var.shape) \n log_dict[f'{name}_embeddings'] = embeddings_table(var)\n log_dict[f'{name}_3dpca'] = pca_point_cloud(var, output_type='wandbobj', mode='lines+markers')\n log_dict[f'{name}_spec'] = wandb.Image(tokens_spectrogram_image(var))\n pcs.append(log_dict[f'{name}_3dpca'])\n tokenspecs.append(log_dict[f'{name}_spec'])\n\n for key in [\"a1\",\"a2\", \"b1\",\"b2\"]: # audio inputs a & b, with effects 1 and 2 applied\n if self.debug: print(\"Logging: key =\",key)\n audio = batch[key]\n audio = rearrange(audio,'b d n -> d (b n)') # pack batches as successive groups of time-domain samples\n if self.debug: print(\" new audio.shape = \",audio.shape)\n log_dict[f'{key}_melspec_left'] = wandb.Image(audio_spectrogram_image(audio))\n filename = f'{key}_{trainer.global_step:08}.wav'\n audio = audio.clamp(-1, 1).mul(32767).to(torch.int16).cpu()\n torchaudio.save(filename, audio, 
self.sample_rate)\n log_dict[f'{key}_audio'] = wandb.Audio(filename,\n sample_rate=self.sample_rate,\n caption=f'Inputs')\n audios.append(log_dict[f'{key}_audio'])\n melspecs.append(log_dict[f'{key}_melspec_left'])\n\n columns = ['soundfile', 'melspec', 'tokenspec', '3dpca']\n data = [[s,m,t,p] for s, m, t, p in zip(audios, melspecs, tokenspecs,pcs)]\n log_dict[\"global_step\"] = trainer.global_step\n trainer.logger.experiment.log(log_dict)\n\n #columns = ['soundfile', 'ground truth', 'prediction']\n #grount_truth = [ 'yes' for y_true in range(50)]\n #predicted = ['yes' if y_pred%2 ==0 else 'no' for y_pred in range(50)]\n #audio_paths = filter(lambda x: 'wav' in x.name, Path('./').iterdir())\n #n = 10\n #data = [[wandb.Audio(str(x_sound)), y_true, y_pred] for x_sound, y_true, y_pred in zip(audio_paths, grount_truth[:n], predicted[:n])]\n #self.wandb_logger.log_table(key='sample_table', columns=columns, data=data, step=trainer.global_step)\n #trainer.logger.log_table(key='sample_table', columns=columns, data=data)\n if self.debug: print(\"trainer logger set\")\n\n #except Exception as e:\n # print(f'{type(e).__name__}: {e}', file=sys.stderr)\n\n\n### MAIN ### \ndef main(): \n args = get_all_args()\n print(\"args = \",args)\n\n device = torch.device('cuda') # this code runs on clusters only\n print('Using device:', device)\n torch.manual_seed(args.seed)\n\n\n # Lightning: 3. define a dataset\n print(\"Setting up dataset\")\n torch.manual_seed(args.seed)\n effects_list = [Gain, BandPassFilter, BandStopFilter, HighPassFilter, LowPassFilter]\n\n train_set = DualEffectsDataset([args.training_dir], load_frac=args.load_frac, effects_list=effects_list) \n train_dl = utils.data.DataLoader(train_set, args.batch_size, shuffle=True,\n num_workers=args.num_workers, persistent_workers=True, pin_memory=True)\n\n # TODO: make val unique! for now just repeat train & hope for no repeats (train is shuffled, val is not)\n val_set = DualEffectsDataset([args.training_dir], load_frac=args.load_frac/4, effects_list=effects_list)\n val_dl = utils.data.DataLoader(train_set, args.num_demos, shuffle=False,\n num_workers=args.num_workers, persistent_workers=True, pin_memory=True)\n\n torch.manual_seed(args.seed) # one more seed init for ordering of iterator\n val_iter, train_iter = iter(val_dl), iter(train_dl)\n print(\"Dataset ready to go! \")\n\n # Finishing up Lighting, 2: define the Lightning module\n # init the given autoencoder\n #given_model = SpectrogramAE()\n given_model = DiffusionDVAE.load_from_checkpoint(args.dvae_ckpt_file, global_args=args)\n #given_model = DVAEWrapper.load_from_checkpoint(args.dvae_ckpt_file, global_args=args)\n given_model.demo_samples, given_model.quantized = args.sample_size, args.num_quantizers > 0\n given_model.eval() # disable randomness, dropout, etc...\n freeze(given_model) # freeze the weights for inference\n print(\"Given Autoencoder is ready to go!\")\n\n # init the aa model\n aa_use_bn = False # batch norm?\n aa_use_resid = True # use residual connections? (doesn't make much difference tbh)\n emb_dims = args.latent_dim # input size to aa model\n hidden_dims = emb_dims # number of hidden dimensions in aa model. 
usually was 64\n trivial = False # aa_model is a no-op when this is true\n debug = True\n aa_model = AudioAlgebra(dims=emb_dims, hidden_dims=hidden_dims, use_bn=aa_use_bn, resid=aa_use_resid, trivial=trivial).to(device)\n\n aa_effects = AAEffectsModule(given_model, aa_model, train_dl) # the lightning module\n print(\"aa_effects LightningModule ready to go!\")\n\n # Lightning: 4: Train the model --- add more options\n wandb_logger = pl.loggers.WandbLogger(project=args.name)\n wandb_logger.watch(aa_effects)\n push_wandb_config(wandb_logger, args)\n\n ckpt_callback = pl.callbacks.ModelCheckpoint(every_n_train_steps=args.checkpoint_every, save_top_k=-1)\n demo_callback = DemoCallback(val_dl, given_model, aa_model, device, args, wandb_logger)\n exc_callback = ExceptionCallback()\n\n lr_monitor = LearningRateMonitor(logging_interval='step')\n\n trainer = pl.Trainer(\n gpus=args.num_gpus,\n accelerator=\"gpu\",\n num_nodes = args.num_nodes,\n #strategy='ddp',\n strategy=\"ddp_find_unused_parameters_false\",\n precision=16,\n accumulate_grad_batches=args.accum_batches, \n callbacks=[ckpt_callback, lr_monitor, demo_callback, lr_monitor],\n logger=wandb_logger,\n log_every_n_steps=1,\n max_epochs=40000,\n )\n trainer.fit(model=aa_effects, train_dataloaders=train_dl, ckpt_path=args.ckpt_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"drscotthawley/audio-algebra","sub_path":"train_aa_effects.py","file_name":"train_aa_effects.py","file_ext":"py","file_size_in_byte":12202,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"47"}
+{"seq_id":"12765600528","text":"from accounting.models import DivisionCode\nfrom accounting.serializers import DivisionCodeSerializer\nfrom customer.models import Customer\nfrom customer.serializers import CustomerSerializer\nfrom order.models import Order\nfrom order.serializers import OrderSerializer\n\n\n# Displays\ndef order_display(order: Order) -> str:\n return f\"Order {order.pro_number}, status: {order.get_status_display()}, from {order.origin_address} to {order.destination_address}\"\n\n\ndef division_code_display(division_code: DivisionCode) -> str:\n return f\"Division Code {division_code.code}, status: {division_code.get_status_display()}\"\n\n\ndef customer_display(customer: Customer) -> str:\n return f\"Customer {customer.code}, status: {customer.get_status_display()}\"\n\n\n# Links\ndef customer_link(customer: Customer) -> str:\n return f\"/billing/customers/view/{customer.id}\"\n\n\nsearchable_models = {\n \"Order\": {\n \"app\": \"order\",\n \"serializer\": OrderSerializer,\n \"search_fields\": [\n \"pro_number\",\n \"origin_address\",\n \"destination_address\",\n \"bol_number\",\n \"status\",\n ],\n \"display\": order_display,\n },\n \"DivisionCode\": {\n \"app\": \"accounting\",\n \"serializer\": DivisionCodeSerializer,\n \"search_fields\": [\n \"code\",\n \"status\",\n \"description\",\n ],\n \"display\": division_code_display,\n },\n \"Customer\": {\n \"app\": \"customer\",\n \"serializer\": CustomerSerializer,\n \"search_fields\": [\n \"name\",\n \"code\",\n \"status\",\n ],\n \"display\": customer_display,\n \"path\": customer_link,\n },\n}\n","repo_name":"Monta-Application/Monta","sub_path":"server/core/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"}
+{"seq_id":"1898995973","text":"# pip install neo4j\n# pip install kubernetes\nfrom kubernetes import client, config\nimport json\nimport logging\nfrom neo4j import GraphDatabase\nfrom neo4j.exceptions import ServiceUnavailable\n \nclass App:\n \n def __init__(self, uri, user, password):\n self.driver = GraphDatabase.driver(uri, auth=(user, password))\n \n def close(self):\n # Don't forget to close the driver connection when you are finished with it\n self.driver.close()\n \n def create_node(self, object_name, node_name):\n with self.driver.session() as session:\n session.write_transaction(self._create_node, object_name, node_name)\n \n @staticmethod\n def _create_node(tx, object_name, node_name):\n if object_name == \"service\":\n query = (\"MERGE (:service { name: $node_name }) \")\n elif object_name == \"deployment\":\n query = (\"MERGE (:deployment { name: $node_name }) \")\n elif object_name == \"replicaset\":\n query = (\"MERGE (:replicaret { name: $node_name }) \")\n elif object_name == \"pod\":\n query = (\"MERGE (:pod { name: $node_name }) \")\n if object_name != \"\":\n tx.run(query, object_name=object_name, node_name=node_name)\n \n def create_relation(self, first_node, second_node):\n with self.driver.session() as session:\n session.write_transaction(self._create_relation, first_node, second_node)\n \n @staticmethod\n def _create_relation(tx, first_node, second_node):\n query = (\n \"MATCH (a), (b) \"\n \"WHERE a.name = $first_node and b.name = $second_node \"\n \"MERGE (a)-[:relation_to]->(b) \"\n )\n tx.run(query, first_node=first_node, second_node=second_node)\n \n \ndef main(app):\n # kubectl describe secret default-token-xxxxx -n kube-public\n # kubectl describe secret neo4j-sa-token-xxxxx\n # aToken = \"(token from 'kubectl describe secret neo4j-sa-token-xxxxx')\"\n aConfiguration = client.Configuration()\n # K8s\n # aConfiguration.host = \"https://127.0.0.1:6443\"\n # MiniKube with proxy\n aConfiguration.host = \"http://127.0.0.1:8080\"\n\n aConfiguration.verify_ssl = False\n aConfiguration.api_key = {\"authorization\": \"Bearer \" + aToken}\n aApiClient = client.ApiClient(aConfiguration)\n \n v1 = client.CoreV1Api(aApiClient)\n v2 = client.AppsV1Api(aApiClient)\n v4 = v1.list_service_for_all_namespaces(watch=False)\n v5 = v2.list_deployment_for_all_namespaces(watch=False)\n v6 = v2.list_replica_set_for_all_namespaces(watch=False)\n \n service_dict = {}\n deployment_dict = {}\n replica_set_dict = {}\n \n for i in v4.items:\n selector = i.spec.selector\n if selector == None:\n continue\n for key, value in selector.items():\n temp = '%s=%s' % (key, value)\n service_dict[temp] = i\n for i in v5.items:\n selector = i.spec.selector.match_labels\n if selector == None:\n continue\n for key, value in selector.items():\n temp = '%s=%s' % (key, value)\n deployment_dict[temp] = i\n for i in v6.items:\n selector = i.spec.selector.match_labels\n if selector == None:\n continue\n for key, value in selector.items():\n temp = '%s=%s' % (key, value)\n replica_set_dict[temp] = i\n \n v7 = v1.list_namespaced_pod(\"default\")\n \n for i in v7.items:\n labels = \"\"\n for key, value in i.metadata.labels.items():\n labels = '%s=%s' % (key, value)\n break\n app.create_node('pod', i.metadata.name)\n if labels in service_dict.keys():\n app.create_node('service', service_dict[labels].metadata.name)\n app.create_relation(service_dict[labels].metadata.name, i.metadata.name)\n if labels in deployment_dict.keys():\n app.create_node('deployment', deployment_dict[labels].metadata.name)\n 
app.create_node('replicaset', replica_set_dict[labels].metadata.name)\n app.create_relation(deployment_dict[labels].metadata.name, replica_set_dict[labels].metadata.name)\n app.create_relation(replica_set_dict[labels].metadata.name, i.metadata.name)\n\n \nif __name__ == '__main__':\n scheme = \"neo4j\"\n host_name = \"127.0.0.1\"\n port = 7687\n url = \"{scheme}://{host_name}:{port}\".format(scheme=scheme, host_name=host_name, port=port)\n user = \"neo4j\"\n password = \"\"\n app = App(url, user, password)\n main(app)\n app.close()\n","repo_name":"lovehyun/tutorial-kubernetes","sub_path":"10.devel/neo4j/get_all.py","file_name":"get_all.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"47"}
+{"seq_id":"40880388387","text":"def solution(N,A):\r\n counter = [0]*N\r\n for i in A:\r\n if N >= i >= 1:\r\n temp_val = counter[i-1]\r\n counter[i-1] = temp_val+1\r\n elif i == N+1:\r\n max_val = max(counter)\r\n counter = [max_val]*N\r\n\r\n return counter\r\n\r\nsolution(5,[3,4,4,6,1,4,4])\r\n\r\n","repo_name":"akash29/Practice_problems","sub_path":"Others/Max_Counters.py","file_name":"Max_Counters.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33677370012","text":"\"\"\"This Script is to update average size of a column data in SQL table\"\"\"\r\nimport json\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom dam_orchestrator import mysql_connection as sql\r\nfrom dam_cloudant import data_downloader as dd\r\n\r\n\r\ndef size_estimate(veh_list, config, col_list, packet_name):\r\n \"\"\"\r\n This Method have the below functions in sequence:\r\n download data file for one day\r\n calculate mean size of the each column data in bytes from a given set of vins\r\n :param veh_list: List of vehicle considered for average\r\n :param config: Configuration file string\r\n :param col_list: List of columns\r\n :param packet_name: packet name\r\n :return: List of tuples of column and the average size\r\n \"\"\"\r\n size_dict = {}\r\n for veh in veh_list:\r\n dd(config, veh, packet_name, 20221201000000, 20221201235959, 'csv', ['ALL'])\r\n data_df = pd.read_csv(f'{packet_name}_sorted.csv')\r\n size_list = []\r\n for col in col_list:\r\n filename = ''.join(e for e in f'{col}.csv.gz' if e.isalnum())\r\n data_df[col].to_csv(filename, index=False, compression=\"gzip\")\r\n file_size = os.path.getsize(filename)\r\n size_list.append(file_size)\r\n os.remove(filename)\r\n size_dict[veh] = size_list\r\n\r\n average_list = [np.mean(k) for k in zip(*size_dict.values())]\r\n return list(zip(col_list, average_list))\r\n\r\n\r\ndef update_col_size(config, col_size_list, pac):\r\n \"\"\"\r\n This method is used to update the SQL table with the column size\r\n :param config: Configuration file string\r\n :param col_size_list: List of tuple of column size\r\n :param pac: packet name\r\n :return: Status\r\n \"\"\"\r\n table = config['field_lkp_table']\r\n conn = sql(config)\r\n for data in col_size_list:\r\n query = f\"UPDATE {table} SET size_in_bytes = '{data[1]}' where field_name='{data[1]}' and packet_name='{pac}'\"\r\n # conn.execute(query)\r\n print(query)\r\n conn.close()\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Main method to orchestrate all the functionality\r\n :return: status\r\n \"\"\"\r\n with open('../../configuration.json', encoding='utf-8') as config_file:\r\n config = json.load(config_file)\r\n config_file.close()\r\n\r\n with open('../../field_name.json', encoding='utf-8') as field_file:\r\n field_config = json.load(field_file)\r\n field_file.close()\r\n\r\n packet_list = ['volvo:fuel', 'canbs4:wcanbs46', 'can2bs6:wcanbs6', 'can3bs6:wcan3bs6']\r\n volvo_veh_list = ['MC2BAHRC0LJ065928', 'MC2BBMRC0MA068464']\r\n wabco_veh_list = ['359218066341780', '352467110465742']\r\n\r\n for packet in packet_list:\r\n pac = packet.split(':')\r\n if pac[0] == 'volvo':\r\n col_list = field_config[pac[0]].replace(\"'\", '').replace(\", \", \",\").split(',')\r\n col_size_list = size_estimate(volvo_veh_list, config, col_list, pac[1])\r\n update_col_size(config, col_size_list, pac[1])\r\n else:\r\n col_list = field_config[pac[0]].replace(\"'\", '').replace(\", \", \",\").split(',')\r\n col_size_list = size_estimate(wabco_veh_list, config, col_list, pac[1])\r\n update_col_size(config, col_size_list, pac[1])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"spanditUst/DAM","sub_path":"scripts/dam_field_size_estimator.py","file_name":"dam_field_size_estimator.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"29915017644","text":"# Ranger deep learning optimizer - RAdam + Lookahead + Gradient Centralization, combined into one optimizer.\n\n# https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer\n# and/or\n# https://github.com/lessw2020/Best-Deep-Learning-Optimizers\n\n# Ranger has now been used to capture 12 records on the FastAI leaderboard.\n\n# This version = 20.4.11\n\n# Credits:\n# Gradient Centralization --> https://arxiv.org/abs/2004.01461v2 (a new optimization technique for DNNs), github: https://github.com/Yonghongwei/Gradient-Centralization\n# RAdam --> https://github.com/LiyuanLucasLiu/RAdam\n# Lookahead --> rewritten by lessw2020, but big thanks to Github @LonePatient and @RWightman for ideas from their code.\n# Lookahead paper --> MZhang,G Hinton https://arxiv.org/abs/1907.08610\n\n# summary of changes:\n# 4/11/20 - add gradient centralization option. Set new testing benchmark for accuracy with it, toggle with use_gc flag at init.\n# full code integration with all updates at param level instead of group, moves slow weights into state dict (from generic weights),\n# supports group learning rates (thanks @SHolderbach), fixes sporadic load from saved model issues.\n# changes 8/31/19 - fix references to *self*.N_sma_threshold;\n# changed eps to 1e-5 as better default than 1e-8.\n\nimport math\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\nclass Ranger(Optimizer):\n\n\tdef __init__(self, params, lr=1e-3, # lr\n\t\t\t\t alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options\n\t\t\t\t betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options\n\t\t\t\t use_gc=True, gc_conv_only=False\n\t\t\t\t # Gradient centralization on or off, applied to conv layers only or conv + fc layers\n\t\t\t\t ):\n\n\t\t# parameter checks\n\t\tif not 0.0 <= alpha <= 1.0:\n\t\t\traise ValueError(f'Invalid slow update rate: {alpha}')\n\t\tif not 1 <= k:\n\t\t\traise ValueError(f'Invalid lookahead steps: {k}')\n\t\tif not lr > 0:\n\t\t\traise ValueError(f'Invalid Learning Rate: {lr}')\n\t\tif not eps > 0:\n\t\t\traise ValueError(f'Invalid eps: {eps}')\n\n\t\t# parameter comments:\n\t\t# beta1 (momentum) of .95 seems to work better than .90...\n\t\t# N_sma_threshold of 5 seems better in testing than 4.\n\t\t# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.\n\n\t\t# prep defaults and init torch.optim base\n\t\tdefaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,\n\t\t\t\t\t\teps=eps, weight_decay=weight_decay)\n\t\tsuper().__init__(params, defaults)\n\n\t\t# adjustable threshold\n\t\tself.N_sma_threshhold = N_sma_threshhold\n\n\t\t# look ahead params\n\n\t\tself.alpha = alpha\n\t\tself.k = k\n\n\t\t# radam buffer for state\n\t\tself.radam_buffer = [[None, None, None] for ind in range(10)]\n\n\t\t# gc on or off\n\t\tself.use_gc = use_gc\n\n\t\t# level of gradient centralization\n\t\tself.gc_gradient_threshold = 3 if gc_conv_only else 1\n\n\tdef __setstate__(self, state):\n\t\tsuper(Ranger, self).__setstate__(state)\n\n\tdef step(self, closure=None):\n\t\tloss = None\n\n\t\t# Evaluate averages and grad, update param tensors\n\t\tfor group in self.param_groups:\n\n\t\t\tfor p in group['params']:\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\tgrad = p.grad.data.float()\n\n\t\t\t\tif grad.is_sparse:\n\t\t\t\t\traise RuntimeError('Ranger optimizer does not support sparse gradients')\n\n\t\t\t\tp_data_fp32 = p.data.float()\n\n\t\t\t\tstate = self.state[p] # get state dict 
for this param\n\n\t\t\t\tif len(state) == 0: # if first time to run...init dictionary with our desired entries\n\t\t\t\t\t# if self.first_run_check==0:\n\t\t\t\t\t# self.first_run_check=1\n\t\t\t\t\t# print(\"Initializing slow buffer...should not see this at load from saved model!\")\n\t\t\t\t\tstate['step'] = 0\n\t\t\t\t\tstate['exp_avg'] = torch.zeros_like(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n\n\t\t\t\t\t# look ahead weight storage now in state dict\n\t\t\t\t\tstate['slow_buffer'] = torch.empty_like(p.data)\n\t\t\t\t\tstate['slow_buffer'].copy_(p.data)\n\n\t\t\t\telse:\n\t\t\t\t\tstate['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n\t\t\t\t# begin computations\n\t\t\t\texp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\t\t\t\tbeta1, beta2 = group['betas']\n\n\t\t\t\t# GC operation for Conv layers and FC layers\n\t\t\t\tif grad.dim() > self.gc_gradient_threshold:\n\t\t\t\t\tgrad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))\n\n\t\t\t\tstate['step'] += 1\n\n\t\t\t\t# compute variance mov avg\n\t\t\t\texp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\t\t\t\t# compute mean moving avg\n\t\t\t\texp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n\t\t\t\tbuffered = self.radam_buffer[int(state['step'] % 10)]\n\n\t\t\t\tif state['step'] == buffered[0]:\n\t\t\t\t\tN_sma, step_size = buffered[1], buffered[2]\n\t\t\t\telse:\n\t\t\t\t\tbuffered[0] = state['step']\n\t\t\t\t\tbeta2_t = beta2 ** state['step']\n\t\t\t\t\tN_sma_max = 2 / (1 - beta2) - 1\n\t\t\t\t\tN_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\t\t\t\t\tbuffered[1] = N_sma\n\t\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\t\tstep_size = math.sqrt(\n\t\t\t\t\t\t\t(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n\t\t\t\t\t\t\t\t\t\tN_sma_max - 2)) / (1 - beta1 ** state['step'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tstep_size = 1.0 / (1 - beta1 ** state['step'])\n\t\t\t\t\tbuffered[2] = step_size\n\n\t\t\t\tif group['weight_decay'] != 0:\n\t\t\t\t\tp_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n\t\t\t\t# apply lr\n\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\tdenom = exp_avg_sq.sqrt().add_(group['eps'])\n\t\t\t\t\tp_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n\t\t\t\telse:\n\t\t\t\t\tp_data_fp32.add_(-step_size * group['lr'], exp_avg)\n\n\t\t\t\tp.data.copy_(p_data_fp32)\n\n\t\t\t\t# integrated look ahead...\n\t\t\t\t# we do it at the param level instead of group level\n\t\t\t\tif state['step'] % group['k'] == 0:\n\t\t\t\t\tslow_p = state['slow_buffer'] # get access to slow param tensor\n\t\t\t\t\tslow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha\n\t\t\t\t\tp.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor\n\n\t\treturn loss","repo_name":"orpatashnik/StyleCLIP","sub_path":"mapper/training/ranger.py","file_name":"ranger.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":3757,"dataset":"github-code","pt":"47"}
+{"seq_id":"71800902222","text":"from collections import defaultdict\nfrom typing import Any, List, Dict\nimport ast\nfrom ast import AST, dump, NodeTransformer, copy_location, Call, Name, Load, With, FunctionDef, NameConstant, Index, Subscript, arg, Expression, If, Suite, Attribute\nfrom functools import partial\nfrom dataclasses import dataclass, field\n\nfrom astpretty import pprint\n\nimport quoll.config\n\n@dataclass\nclass TranslationContext:\n\n boilerplate_alias: str = 'bp'\n\n allocation_found: bool = False\n\n measurement_hoisting_table: List[list] = field(default_factory=lambda: [])\n\n allocation_context: List[list] = field(default_factory=lambda: [])\n\n inside_class_body: bool = False\n\n _UNIQUE_NAME_COUNTER: Dict[str, int] = field(default_factory=lambda: defaultdict(lambda: 0))\n\n # TODO: Improve to be unique per scope. Now it is globally unique per compilation.\n def newname(self, name):\n count = self._UNIQUE_NAME_COUNTER[name]\n self._UNIQUE_NAME_COUNTER[name] += 1\n\n if count == 0:\n return name\n\n return f'{name}_{count}'\n\n\nclass Translator(NodeTransformer):\n\n _context: TranslationContext\n\n def __init__(self, context: TranslationContext):\n self._context = context\n\n\nclass ControlledComputer(Translator):\n\n def __init__(self, *args, control_param_name, **kwargs):\n super().__init__(*args, **kwargs)\n self._control_param_name = control_param_name\n\n def visit_Call(self, node: Call):\n if _is_operation_call(node):\n node.func = copy_location(_wrap_in_controlled(node.func), node)\n node.args.insert(0, copy_location(\n Name(self._control_param_name, ctx=Load()), node.args[0]))\n return node\n\n if _is_variant_call(node):\n functor_application = node.func\n if _identify_signature(functor_application) == 'Id':\n node.func = copy_location(_wrap_in_controlled(node.func), node)\n node.args.insert(0, copy_location(\n Name(self._control_param_name, ctx=Load()), node.args[0]))\n\n else:\n node.args[0] = copy_location(_extend_control_data(\n node.args[0], self._control_param_name), node.args[0])\n\n self.generic_visit(node)\n return node\n\n\nclass AdjointComputer(Translator):\n\n def visit_Call(self, node):\n if _is_operation_call(node):\n node.func = copy_location(_wrap_in_adjoint(node.func), node)\n return node\n\n self.generic_visit(node)\n return node\n\n def visit_Subscript(self, node):\n if _is_functor_application(node):\n return copy_location(_wrap_in_adjoint(node), node)\n\n self.generic_visit(node)\n return node\n\n\ndef _wrap_in_adjoint(node):\n return _wrap_in_functor(node, 'Adjoint')\n\ndef _wrap_in_controlled(node):\n return _wrap_in_functor(node, 'Controlled')\n\ndef _wrap_in_functor(node, name):\n wrapper = ast.parse(f'{name}[_]', mode='single').body[0].value\n wrapper.slice.value = node\n return wrapper\n\ndef _extend_control_data(original_control_data_node, extension_name):\n extended = ast.parse(f'(_) & {extension_name}').body[0].value\n extended.left = original_control_data_node\n return extended\n\ndef _is_operation_call(call):\n return isinstance(call.func, Name)\n\n\n# TODO: A variant is the result of applying a functor to an operation.\ndef _is_variant_call(call):\n return isinstance(call.func, Subscript)\\\n and _is_functor_application(call.func)\n\ndef _is_functor_application(subscript):\n return isinstance(subscript.value, Name)\\\n and subscript.value.id in ['Adjoint', 'Controlled']\\\n and isinstance(subscript.slice, Index)\\\n and isinstance(subscript.slice.value, (Name, Attribute))\n\ndef _identify_signature(functor_application):\n if 
not isinstance(functor_application, Subscript):\n return 'Id'\n\n if isinstance(functor_application.value, Name)\\\n and functor_application.value.id == 'Controlled':\n return 'Controlled'\n\n return _identify_signature(functor_application.slice)\n\n\nclass BodyTranslator(Translator):\n\n def visit_ClassDef(self, node):\n self._context.inside_class_body = True\n self.generic_visit(node)\n self._context.inside_class_body = False\n return node\n\n def visit_Module(self, node):\n if node.body:\n first_node = node.body[0]\n import_boilerplate = copy_location(\n _import_boilerplate(self._context.boilerplate_alias), first_node)\n node.body.insert(0, import_boilerplate)\n\n self.generic_visit(node)\n return node\n\n def visit_With(self, node):\n if _is_allocation(node):\n self._context.measurement_hoisting_table.append([])\n self._context.allocation_context.append([])\n self._context.allocation_context[-1] = [node.body, -1]\n # Replace measurement calls with variables\n for index, old_node in enumerate(node.body):\n self._context.allocation_context[-1][1] = index\n node.body[index] = self.visit(old_node)\n\n # Create measurement proxies, execute and return measurements\n new_nodes = []\n proxy_to_measure_names = {}\n for index, (_, m_node, m_name) in enumerate(self._context.measurement_hoisting_table[-1]):\n proxy_name = f'_mp{index + 1}'\n proxy_to_measure_names[proxy_name] = m_name\n new_nodes.append(self._assign_proxy(m_node, proxy_name, node))\n\n new_nodes.append(self._assign_measurements(proxy_to_measure_names, node))\n\n # Insert at the proper point in the current allocation\n hoisting_table = self._context.measurement_hoisting_table[-1]\n\n # If there are measurements to hoist\n if len(hoisting_table):\n insertion_point = hoisting_table[0][0]\n node.body[insertion_point:insertion_point] = new_nodes\n\n self._context.allocation_context.pop()\n self._context.measurement_hoisting_table.pop()\n return node\n\n self.generic_visit(node)\n return node\n\n def visit_If(self, node: If):\n if _is_control(node):\n control_name = self._context.newname('__control')\n # TODO: Add support for elif/else clauses\n assert len(node.orelse) == 0, 'Still no support for elif/else clauses'\n context_node = _control_context_node(node, control_name)\n # TODO: Add support for a more general combination of things that happen\n # inside a Quoll statement or control what can appear in these structures\n # and fail when needed.\n controlled_body = ControlledComputer(self._context, control_param_name=control_name).visit(Suite(node.body)).body\n context_node.body = controlled_body\n return context_node\n\n return self.generic_visit(node)\n\n def visit_Call(self, node: Call):\n self.generic_visit(node)\n if _is_measurement(node):\n # XXX: If the measurement does not appear inside an allocation context's\n # suite, forget about it.\n if len(self._context.measurement_hoisting_table):\n m_name = f'_m{len(self._context.measurement_hoisting_table[-1]) + 1}'\n self._context.measurement_hoisting_table[-1].append((self._context.allocation_context[-1][1], node, m_name))\n return copy_location(_replace_measurement(m_name), node)\n\n return node\n\n def visit_FunctionDef(self, node: FunctionDef):\n if _is_qdef(node):\n fix_location = partial(copy_location, old_node=node)\n node.body = self.generic_visit(Suite(node.body)).body\n new_nodes = [node]\n if _auto_adjoint(node):\n adjoint_implementation = fix_location(self._compute_adjoint(node))\n adjoint_implementation.body.reverse()\n new_nodes.append(adjoint_implementation)\n 
new_nodes.extend(\n map(fix_location, _wire_adjoints(node, adjoint_implementation))\n )\n\n if _auto_controlled(node):\n controlled_implementation = fix_location(self._compute_controlled(node))\n new_nodes.append(controlled_implementation)\n new_nodes.extend(\n map(fix_location, _wire_controlled(node, controlled_implementation))\n )\n\n return new_nodes\n\n self.generic_visit(node)\n return node\n\n def _compute_adjoint(self, node: FunctionDef):\n import copy\n adjoint = copy.deepcopy(node)\n adjoint.name = f'{node.name}_adj'\n if adjoint.name[0] != '_':\n adjoint.name = f'_{adjoint.name}'\n adjoint.decorator_list = []\n AdjointComputer(self._context).visit(adjoint)\n return adjoint\n\n def _compute_controlled(self, node: FunctionDef):\n import copy\n adjoint = copy.deepcopy(node)\n adjoint.name = f'{node.name}_ctl'\n if adjoint.name[0] != '_':\n adjoint.name = f'_{adjoint.name}'\n control_param_name = self._context.newname('__control')\n parameter_position = 0\n if self._context.inside_class_body:\n parameter_position = 1\n adjoint.args.args.insert(\n parameter_position, copy_location(arg(control_param_name, annotation=None), node))\n adjoint.decorator_list = []\n ControlledComputer(\n self._context, control_param_name=control_param_name).visit(adjoint)\n return adjoint\n\n\n def _assign_proxy(self, m_node: Call, proxy_name: str, node: Call):\n return copy_location(_assign_to_proxy(m_node, proxy_name), node)\n\n def _assign_measurements(self, proxy_to_measure_names, node):\n proxy_names = tuple(proxy_to_measure_names.keys())\n measure_names = tuple(proxy_to_measure_names.values())\n return copy_location(_assign_to_measurements(self._context.boilerplate_alias, proxy_names, measure_names), node)\n\n\ndef _is_qdef(node: FunctionDef):\n return len(node.decorator_list) > 0\\\n and isinstance(node.decorator_list[-1], Call)\\\n and isinstance(node.decorator_list[-1].func, Name)\\\n and node.decorator_list[-1].func.id == 'qdef'\n\n\ndef _wire_adjoints(node: FunctionDef, adjoint_node: FunctionDef):\n node_name = node.name\n adjoint_node_name = adjoint_node.name\n return [\n ast.parse(f'setattr({node_name}, \\'__adj__\\', {adjoint_node_name})').body[0],\n ast.parse(f'setattr({adjoint_node_name}, \\'__adj__\\', {node_name})').body[0]\n ]\n\ndef _wire_controlled(node: FunctionDef, controlled_node: FunctionDef):\n node_name = node.name\n controlled_node_name = controlled_node.name\n return [\n ast.parse(f'setattr({node_name}, \\'__ctl__\\', {controlled_node_name})').body[0],\n ast.parse(f'setattr({controlled_node_name}, \\'__ctl__\\', {controlled_node_name})').body[0]\n ]\n\n\n\ndef _auto_adjoint(node: FunctionDef):\n return _some_kw_match('adj', True, node.decorator_list[-1].keywords)\n\ndef _auto_controlled(node: FunctionDef):\n return _some_kw_match('ctl', True, node.decorator_list[-1].keywords)\n\ndef _some_kw_match(name, value, kwargs):\n def id_is_adj(kw):\n return kw.arg == name and isinstance(kw.value, NameConstant) and kw.value.value == value\n\n return any(map(id_is_adj, kwargs))\n\n\ndef _is_measurement(node: Call):\n return isinstance(node.func, Name) and node.func.id == 'measure'\n\n\ndef _is_allocation(node: With):\n return isinstance(node.items[0].context_expr, Call) and isinstance(node.items[0].context_expr.func, Name) and node.items[0].context_expr.func.id == 'allocation'\n\n\ndef _is_control(node: If):\n return isinstance(node.test, Call) and isinstance(node.test.func, Name) and node.test.func.id == 'superposition'\n\n\ndef _import_boilerplate(alias: str ='bp'):\n return 
ast.parse(f'import quoll.boilerplate as {alias}', mode='single').body[0]\n\n\ndef _control_context_node(node: If, control_param_name: str) -> With:\n context_node = ast.parse(f'with superposition(_) as {control_param_name}: ...', mode='exec').body[0]\n context_node.items[0].context_expr.args[0] = node.test.args[0]\n return context_node\n\ndef _replace_measurement(name: str):\n return ast.parse(name, mode='single').body[0].value\n\n\ndef _assign_to_proxy(m_node, proxy_name):\n assign_node = ast.parse(f'{proxy_name} = _').body[0]\n assign_node.value = m_node\n return assign_node\n\n\ndef _assign_to_measurements(bp_alias, proxy_names, measure_names):\n last_comma = ',' if len(measure_names) == 1 else ''\n return ast.parse(f'({\", \".join(measure_names) + last_comma}) = {bp_alias}.execute({\", \".join(proxy_names)})').body[0]\n\n@quoll.config.show_python\ndef translate(source: str, path: str) -> AST:\n module = ast.parse(source)\n context = TranslationContext()\n BodyTranslator(context).visit(module)\n return module","repo_name":"delapuente/quoll","sub_path":"quoll/transpiler.py","file_name":"transpiler.py","file_ext":"py","file_size_in_byte":12019,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"38705725206","text":"\"\"\"Snake Game file\"\"\"\nimport time\nimport game_screen\nfrom food import Food\nfrom snake import Snake\nfrom scoreboard import ScoreBoard\nfrom wall import Wall\nfrom turtle import onkey\n# new Snake instance\n\n\nclass SnakeGame:\n def __init__(self):\n self.wall = Wall()\n self.snake = Snake()\n self.scoreboard = ScoreBoard()\n self.food = Food()\n self.screen = game_screen.screen\n self.game_is_on = True\n self.start_game()\n\n# starting snake\n def start_game(self):\n self.game_is_on = True\n while self.game_is_on:\n self.screen.update()\n time.sleep(0.1)\n self.snake.start_snake()\n # Detect collision with food\n if self.snake.head.distance(self.food) < 15:\n self.snake.extend()\n self.food.refresh()\n self.scoreboard.add_score()\n # Detect collision with wall\n x_cor = self.snake.head.xcor()\n y_cor = self.snake.head.ycor()\n if x_cor > 250 or x_cor < -250 or y_cor > 250 or y_cor < -250:\n self.game_is_on = False\n self.scoreboard.game_over()\n # Collision with tail\n for segment in self.snake.snake_body[1:]:\n if self.snake.head.distance(segment) < 10:\n self.game_is_on = False\n self.scoreboard.game_over()\n\n","repo_name":"SamroodAli/python-pro-day-20and21-snake-game","sub_path":"snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33409357304","text":"# -*- coding: utf-8 -*-\n\nfrom .entry import EntryFrame\nfrom .enneagram import Enneagram\nfrom .constants import HOUSE_SYSTEMS\nfrom .selection import MultipleSelection\nfrom .treeview import TreeviewToplevel\nfrom .modules import (\n dt, np, pd, td, tk, plt, ttk, ConfigParser,\n FigureCanvasTkAgg, NavigationToolbar2Tk\n)\n\n\nclass Plot(tk.Toplevel):\n def __init__(self, info, jd, hsys, icons, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.menu = tk.Menu(master=self)\n self.configure(menu=self.menu)\n self.select_menu = tk.Menu(master=self.menu, tearoff=False)\n self.menu.add_cascade(\n label=\"Select\",\n menu=self.select_menu\n )\n self.select_menu.add_command(\n label=\"Enneagram Scores\",\n command=lambda: MultipleSelection(\n title=\"Enneagram Scores\",\n catalogue=[f\"Type-{i + 1}\" for i in range(9)]\n )\n )\n self.color = [\n \"red\",\n \"green\",\n \"blue\",\n \"yellow\",\n \"pink\",\n \"cyan\",\n \"brown\",\n \"purple\",\n \"orange\"\n ]\n self.date = pd.to_datetime(jd, unit=\"D\", origin=\"julian\")\n self.resizable(width=False, height=False)\n self.title(info[\"Name\"])\n self.left_frame = tk.Frame(master=self)\n self.left_frame.pack(side=\"left\", fill=\"both\")\n self.right_frame = tk.Frame(master=self)\n self.right_frame.pack(side=\"left\")\n self.figure = plt.Figure()\n self.canvas = FigureCanvasTkAgg(\n figure=self.figure,\n master=self.right_frame\n )\n self.canvas.get_tk_widget().pack(fill=\"both\", expand=True)\n self.navbar = NavigationToolbar2Tk(\n canvas=self.canvas,\n window=self.right_frame\n )\n self.date_frame = tk.Frame(master=self.left_frame)\n self.date_frame.pack()\n self.create_label(\n text=\"Local\",\n date=f\"{info['Date']} {info['Time']}\",\n row=0\n )\n self.create_label(\n text=\"UTC\",\n date=self.date.strftime(\"%d.%m.%Y %H:%M\"),\n row=1\n )\n self.backward = EntryFrame(\n master=self.left_frame,\n texts=[\"Time Interval (min)\", \"Number of Intervals\"],\n title=\"Backward (From UTC)\",\n position=\"vertical\",\n color=\"black\"\n )\n self.backward.pack()\n self.forward = EntryFrame(\n master=self.left_frame,\n texts=[\"Time Interval (min)\", \"Number of Intervals\"],\n title=\"Forward (From UTC)\",\n position=\"vertical\",\n color=\"black\"\n )\n self.forward.pack()\n self.plot_button = tk.Button(\n master=self.left_frame,\n text=\"Plot\",\n command=lambda: self.command(\n info=info,\n hsys=hsys,\n title=\"\",\n icons=icons\n )\n )\n self.combobox_frame = tk.Frame(master=self.left_frame)\n self.plot_button.pack()\n\n def create_combobox(self, values, hsys, info, icons):\n label = tk.Label(\n master=self.combobox_frame,\n text=\"Select Dates\",\n fg=\"red\"\n )\n label.pack()\n style = ttk.Style()\n style.map(\n \"TCombobox\",\n fieldbackground=[(\"readonly\", \"white\")]\n )\n combobox = ttk.Combobox(\n master=self.combobox_frame,\n values=values,\n state=\"readonly\",\n style=\"TCombobox\"\n )\n combobox.pack()\n button = tk.Button(\n master=self.combobox_frame,\n text=\"Open Enneagram Scores\",\n command=lambda: self.open_enneagram(\n hsys=hsys,\n info=info,\n icons=icons,\n combobox=combobox\n )\n )\n button.pack()\n\n @staticmethod\n def open_enneagram(hsys, info, icons, combobox):\n value = combobox.get()\n if value:\n date = dt.strptime(value, \"%d.%m.%Y %H:%M:%S\")\n info[\"Date\"] = date.strftime(\"%d.%m.%Y\")\n info[\"Time\"] = date.strftime(\"%H:%M\")\n config = ConfigParser()\n config.read(\"defaults.ini\")\n algorithm = config[\"ALGORITHM\"][\"selected\"]\n user = Enneagram(\n 
year=date.year,\n month=date.month,\n day=date.day,\n hour=date.hour,\n minute=date.minute,\n second=date.second,\n lat=float(info[\"Latitude\"]),\n lon=float(info[\"Longitude\"]),\n hsys=HOUSE_SYSTEMS[hsys],\n icons=icons,\n utc=True\n )\n scores = user.get_all_scores()\n TreeviewToplevel(\n values=scores,\n info=info,\n jd=user.chart.jd,\n hsys=HOUSE_SYSTEMS[hsys],\n icons=icons,\n patterns=user.patterns,\n algorithm=algorithm,\n plot=Plot,\n wide=True\n )\n\n def create_label(self, text, date, row):\n label = tk.Label(master=self.date_frame, text=text, fg=\"red\")\n label.grid(row=row, column=0, sticky=\"w\")\n value = tk.Label(master=self.date_frame, text=date)\n value.grid(row=row, column=1, sticky=\"w\")\n\n def plot(self, x, y, title):\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n start = 0\n config = ConfigParser()\n config.read(\"defaults.ini\")\n enneagram_scores = config[\"ENNEAGRAM SCORES\"]\n for i, j in zip(x, y):\n for index, k in enumerate(j):\n if enneagram_scores[f\"type-{index + 1}\"] == \"false\":\n continue\n if start == 0:\n ax.scatter(\n i,\n k,\n color=self.color[index],\n label=f\"Type-{index + 1}\"\n )\n else:\n ax.scatter(i, k, color=self.color[index])\n start += 1\n self.figure.legend(*ax.get_legend_handles_labels())\n ax.set_xlabel(\"Hour\")\n _x = []\n step = len(x) // 10 if len(x) > 20 else 1\n count = 0\n for i in x:\n if 0 < count < step:\n _x += [\"\"]\n count += 1\n else:\n count = 0\n _x += [i.strftime(\"%H:%M\")]\n count += 1\n ax.set_xticks(x)\n ax.set_xticklabels(_x)\n ax.set_ylabel(\"Enneagram Scores\")\n for label, date in zip(ax.xaxis.get_ticklabels(), x):\n label.set_rotation(45)\n label.set_fontsize(8)\n if (\n label.get_text() == self.date.strftime(\"%H:%M\")\n and\n date.strftime(\"%Y.%m.%d %H:%M\") ==\n self.date.strftime(\"%Y.%m.%d %H:%M\")\n ):\n label.set_color(\"red\")\n ax.set_title(title)\n self.figure.subplots_adjust(\n left=0.2,\n bottom=0.4,\n right=0.9,\n top=0.9,\n wspace=0.2,\n hspace=0\n )\n self.canvas.draw()\n\n def command(self, info, hsys, title, icons):\n dates = self.get_dates(widget=self.backward, multiply=-1)\n dates += [self.date]\n dates += self.get_dates(widget=self.forward, multiply=1)\n x = []\n y = []\n for date in dates:\n result = Enneagram(\n year=date.year,\n month=date.month,\n day=date.day,\n hour=date.hour,\n minute=date.minute,\n second=date.second,\n lat=float(info[\"Latitude\"]),\n lon=float(info[\"Longitude\"]),\n hsys=HOUSE_SYSTEMS[hsys],\n utc=True\n )\n result = result.get_all_scores()\n total = []\n for i in [\"sign\", \"house\"]:\n for k, v in result[i].items():\n if k in [\"Dayscores\", \"Effect of Houses\"]:\n total.append(np.array([*v.values()][:-1]))\n total = [round(float(i), 2) for i in total[0] * total[1]]\n x += [date]\n y += [total]\n self.combobox_frame.destroy()\n self.combobox_frame = tk.Frame(master=self.left_frame)\n self.combobox_frame.pack()\n self.create_combobox(\n values=[i.strftime(\"%d.%m.%Y %H:%M:%S\") for i in x],\n hsys=hsys,\n icons=icons,\n info=info\n )\n self.plot(x=x, y=y, title=title)\n\n def get_dates(self, widget, multiply):\n time_interval = widget.widgets[\"Time Interval (min)\"].get()\n number_of_intervals = widget.widgets[\"Number of Intervals\"].get()\n if all([time_interval, number_of_intervals]):\n time_interval = int(time_interval) * multiply\n number_of_intervals = int(number_of_intervals)\n result = [\n self.date + td(minutes=time_interval * (i + 1))\n for i in range(number_of_intervals)\n ]\n if multiply == -1:\n return sorted(result)\n return 
result\n else:\n return []\n","repo_name":"dildeolupbiten/TkEnneagram","sub_path":"Scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":9465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"47"}
+{"seq_id":"2772293572","text":"import datetime\n\nfrom itou.asp.models import Commune\nfrom tests.users.factories import ItouStaffFactory\nfrom tests.utils.test import TestCase\n\n\nclass CommunesFixtureTest(TestCase):\n # INSEE commune with a single entry (1 history entry)\n _CODES_WITHOUT_HISTORY = [\"97108\", \"13200\"]\n ## Total number of entries in the file\n _NUMBER_OF_ENTRIES = 50\n # No commune registered before this date (end_date)\n _PERIOD_MIN_DATE = datetime.date(1900, 1, 1)\n\n def test_small_test_fixture_structure(self):\n commune_set = Commune.objects.all()\n\n # Smoke tests, sort of\n # Will enforce checking structure if any update of test fixtures occurs\n # Reminder: these are referential, read-only, *external* data supplied by ASP\n assert commune_set.count() == self._NUMBER_OF_ENTRIES\n\n def test_communes_with_history(self):\n codes_with_history = Commune.objects.exclude(code__in=self._CODES_WITHOUT_HISTORY).values_list(\n \"code\", flat=True\n )\n\n for code in codes_with_history:\n with self.subTest(code=code, msg=\"INSEE code without history\"):\n # 2 entries for a code with history:\n communes = Commune.objects.filter(code=code)\n\n assert 2 == communes.count()\n\n def test_communes_without_history(self):\n for code in self._CODES_WITHOUT_HISTORY:\n with self.subTest(code=code):\n # Will error if many entries\n commune = Commune.objects.get(code=code)\n\n assert commune.end_date is None\n\n def test_current_entries(self):\n communes = Commune.objects.filter(end_date__isnull=True)\n\n assert 26 == communes.count()\n\n for commune in communes:\n with self.subTest():\n assert commune.end_date is None\n\n def test_lowest_period_date(self):\n communes = Commune.objects.filter(start_date__lt=self._PERIOD_MIN_DATE)\n\n assert 0 == communes.count()\n\n\nclass CommuneModelTest(TestCase):\n def test_by_insee_code(self):\n old_commune = Commune(\n code=99999,\n name=\"ENNUI-SUR-BLASÉ\",\n start_date=datetime.datetime(1940, 1, 1),\n end_date=datetime.datetime(2021, 12, 31),\n )\n new_commune = Commune(code=99999, name=\"ENNUI-SUR-BLASÉ\", start_date=datetime.datetime(2022, 1, 1))\n Commune.objects.bulk_create([old_commune, new_commune])\n\n result = Commune.by_insee_code(99999)\n assert new_commune == result\n\n def test_by_insee_code_ignore_manually_created(self):\n user = ItouStaffFactory()\n commune = Commune.objects.current().first()\n # Manually add a Commune as we did to duplicate an existing Commune\n # SAINT-DENIS/STE-CLOTILDE code=97411\n Commune.objects.create(\n code=commune.code,\n name=\"Autre nom\",\n start_date=commune.start_date,\n created_by=user,\n )\n result = Commune.by_insee_code(commune.code)\n assert result == commune\n\n def test_by_insee_code_and_period(self):\n old_commune = Commune(\n code=99999,\n name=\"ENNUI-SUR-BLASÉ\",\n start_date=datetime.datetime(1940, 1, 1),\n end_date=datetime.datetime(2021, 12, 31),\n )\n new_commune = Commune(code=99999, name=\"ENNUI-SUR-BLASÉ\", start_date=datetime.datetime(2022, 1, 1))\n Commune.objects.bulk_create([old_commune, new_commune])\n\n result = Commune.by_insee_code_and_period(99999, datetime.datetime(1988, 4, 28))\n assert old_commune == result\n\n result = Commune.by_insee_code_and_period(99999, datetime.datetime(2022, 11, 28))\n assert new_commune == result\n\n def test_by_insee_code_and_period_ignore_manually_created(self):\n user = ItouStaffFactory()\n commune = Commune.objects.first()\n\n # Manually add a Commune as we did to duplicate an existing Commune\n # SAINT-DENIS/STE-CLOTILDE code=97411\n 
Commune.objects.create(\n code=commune.code,\n name=\"Autre nom\",\n start_date=commune.start_date,\n created_by=user,\n )\n # Look for the same commune (one day after commune.start_date should still be in the commune period)\n # We should not raise a Commune.MultipleObjectsReturned because we exclude mannually created objects\n result = Commune.by_insee_code_and_period(commune.code, commune.start_date + datetime.timedelta(days=1))\n assert result == commune\n","repo_name":"FuzzyParrabellum/itou-betagouv","sub_path":"tests/asp/test_communes.py","file_name":"test_communes.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"47"}
+{"seq_id":"7514263379","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"calendarapp\"\n\nurlpatterns = [\n path(\"calendar/\", views.CalendarViewNew.as_view(), name=\"calendar\"),\n path(\"all-event-list/\", views.AllEventsListView.as_view(), name=\"all_events\"),\n path(\n \"running-event-list/\",\n views.RunningEventsListView.as_view(),\n name=\"running_events\",\n ),\n]","repo_name":"Adilkhanweb/diploma","sub_path":"backend/calendarapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"16178476305","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 20 10:35:58 2019\n\n@author: mdsamad\n\"\"\"\n\n\nimport pandas as pd\n\nimport numpy as np\n\nimport os\n\ndef mylistdir(directory):\n \"\"\"A specialized version of os.listdir() that ignores files that\n start with a leading period.\"\"\"\n filelist = os.listdir(directory)\n return [x for x in filelist\n if not (x.startswith('.'))]\n\n\n\ndef SummaryData (df):\n \n df1= df.loc[df.Type=='runs']\n df2= df.loc[df.Type=='wickets']\n \n print('Total match', df.shape[0])\n \n print ('Per bat first win', (df1.shape[0]*100)/df.shape[0])\n \n print ('Per bat second win', (df2.shape[0]*100)/df.shape[0])\n\n print ('Bat f win', np.round (df1.fInn.mean()),\n 'Bat s lose', np.round(df1.sInn.mean()),\n 'Bat s win', np.round(df2.sInn.mean()),\n 'Bat f lose', np.round(df2.fInn.mean()))\n \n\ndef dataExtract (filename):\n \n df = pd.read_excel(filename, index_col=0)\n df = df.iloc[:,:-2]\n df.Result = df.Result.shift(-2)\n df= df.dropna()\n df[['Margin','Type','D/L']]= df.Result.str.split(\" \",expand=True)\n\n df.fInn = df.fInn.str.split(\"-\",expand=True,)\n df.sInn = df.sInn.str.split(\"-\",expand=True,)\n\n df.fInn = df.fInn.str.extract('(\\d+)', expand=False).astype(int)\n df.sInn = df.sInn.str.extract('(\\d+)', expand=False).astype(int)\n \n return df","repo_name":"mdsamad001/CricketStudy","sub_path":"Source_Codes/ImportAnaData.py","file_name":"ImportAnaData.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"33465379242","text":"from rest_framework import serializers\n\nfrom apps.food.models import Ingredients, UserIngredient\nfrom apps.food.utils import get_ingredient_source_types\nfrom apps.users.serializers import UserSerializer\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField()\n name = serializers.CharField(max_length=255, required=True, allow_blank=False, allow_null=False)\n\n protein = serializers.DecimalField(max_digits=14, decimal_places=2, allow_null=False)\n fat = serializers.DecimalField(max_digits=14, decimal_places=2, allow_null=False)\n carbohydrate = serializers.DecimalField(max_digits=14, decimal_places=2, allow_null=False)\n fiber = serializers.DecimalField(max_digits=14, decimal_places=2, allow_null=False)\n\n energy = serializers.ReadOnlyField()\n source_type = serializers.ReadOnlyField()\n sub_source_type = serializers.ReadOnlyField()\n\n def save(self, **kwargs):\n data = get_ingredient_source_types(self.validated_data)\n return self.create(data)\n\n class Meta:\n model = Ingredients\n fields = [\n 'id',\n 'name',\n 'protein',\n 'fat',\n 'carbohydrate',\n 'fiber',\n 'energy',\n 'source_type',\n 'sub_source_type',\n\n ]\n\n\nclass UserIngredientSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField()\n\n user = UserSerializer(read_only=True)\n ingredient = serializers.PrimaryKeyRelatedField(\n queryset=Ingredients.objects.all(),\n allow_null=False,\n allow_empty=False,\n )\n\n def validate(self, attrs):\n user_ingredient_exists = UserIngredient.objects.filter(user=self.context['request'].user,\n ingredient=attrs['ingredient']).exists()\n if user_ingredient_exists:\n raise serializers.ValidationError(\n \"Duplicate ingredient user violates unique constraint.\")\n\n return attrs\n\n def save(self, **kwargs):\n self.validated_data['user'] = self.context['request'].user\n return self.create(self.validated_data)\n\n class Meta:\n model = UserIngredient\n fields = [\n 'id',\n 'user',\n 'ingredient',\n ]\n\n\nclass CheckIngredientSerializer(serializers.Serializer):\n def validate(self, attrs):\n self.check_valid_ingredients(self.context['request'].user)\n\n return attrs\n\n def check_valid_ingredients(self, user):\n user_ingredients = UserIngredient.objects.filter(user=user)\n fat_protein = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.fat_protein).count()\n carbohydrate_protein = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.carbohydrate_protein).count()\n fat_fiber = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.fat_fiber).count()\n carbohydrate_fiber = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.carbohydrate_fiber).count()\n pure_fat = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.pure_fat).count()\n pure_carbohydrate = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.pure_carbohydrate).count()\n vegetable_count = user_ingredients.filter(\n ingredient__sub_source_type=Ingredients.SUB_SOURCE_TYPE_CHOICES.vegetable).count()\n\n fat_count = fat_protein + fat_fiber + pure_fat\n fiber_count = fat_fiber + carbohydrate_fiber\n carbohydrate_count = carbohydrate_protein + carbohydrate_fiber + pure_carbohydrate\n protein_count = fat_protein + carbohydrate_protein\n\n if protein_count < 3:\n raise serializers.ValidationError('Protein sources count 
is invalid.')\n if fat_count < 3:\n raise serializers.ValidationError('Fat sources count is invalid.')\n if fiber_count < 2:\n raise serializers.ValidationError('Fiber sources count is invalid.')\n if carbohydrate_count < 3:\n raise serializers.ValidationError('Carbohydrate sources count is invalid.')\n if vegetable_count < 4:\n raise serializers.ValidationError('Vegetable sources count is invalid.')\n if user_ingredients.count() < 15:\n raise serializers.ValidationError('Please select minimum 15 ingredients.')\n return True\n","repo_name":"drmohanned/CalorieAid-Backend","sub_path":"apps/food/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"72743092621","text":"class FindUnion(object):\n def __init__(self):\n self._weights = {}\n self._parents = {}\n\n def __iter__(self):\n return self._parents.keys().__iter__()\n\n def AddIfNotExists(self, *objs):\n for obj in objs:\n if obj not in self._parents:\n self._parents[obj] = obj\n self._weights[obj] = 1\n\n def Find(self, obj):\n path = [obj]\n root = self._parents[obj]\n while root != path[-1]:\n path.append(root)\n root = self._parents[root]\n\n for ancestor in path:\n self._parents[ancestor] = root\n return root\n\n def Union(self, *objs):\n roots = [self.Find(x) for x in objs]\n heaviest = max([(self._weights[r],r) for r in roots])[1]\n for r in roots:\n if r != heaviest:\n self._weights[heaviest] += self._weights[r]\n self._parents[r] = heaviest\n","repo_name":"kazet/ATT","sub_path":"att/find_union.py","file_name":"find_union.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"13798867256","text":"import os\nimport pickle\nimport json\nfrom tqdm import tqdm\n\nimport eft.cores.jointorders as jointorders\n\ndef pklToJson(pklDir, outputPath, metainfo):\n\n eft_fileList = os.listdir(pklDir) #Check all fitting files\n print(\">> Found {} files in the fitting folder {}\".format(len(eft_fileList), pklDir))\n totalCnt =0\n erroneousCnt =0\n\n essentialdata = []\n for f in tqdm(sorted(eft_fileList)):\n \n #Load EFT data\n fileFullPath = os.path.join(pklDir, f)\n with open(fileFullPath,'rb') as f:\n eft_data = pickle.load(f)\n\n ########################\n if True:\n #Compute 2D reprojection error\n # if not (data['loss_keypoints_2d']<0.0001 or data['loss_keypoints_2d']>0.001 :\n # continue\n maxBeta = abs(eft_data['pred_shape']).max()\n if eft_data['loss_keypoints_2d']>0.0005 or maxBeta>3:\n erroneousCnt +=1\n print(\">>> Rejected: loss2d: {}, maxBeta: {}\".format( eft_data['loss_keypoints_2d'],maxBeta) )\n continue\n\n \"\"\"\n Useful data\n pose\n shape\n camera\n bbox\n\n imageName\n scale\n center\n annotId\n keypoint2d\n keypoint2d_cropped\n smpltype\n\n _sampleIdx\n \"\"\"\n\n data ={}\n data['parm_pose'] = eft_data['pred_pose_rotmat'][0].tolist() #(10,)\n data['parm_shape'] = eft_data['pred_shape'][0].tolist() #(24,3,3)\n data['parm_cam'] = eft_data['pred_camera'][0].tolist() #(3)\n data['bbox_scale'] = eft_data['scale'][0].tolist() \n data['bbox_center'] = eft_data['center'][0].tolist() \n\n # data['pred_keypoint_2d'] = \n # data['pred_keypoint_validity'] = \n data['gt_keypoint_2d'] = eft_data['keypoint2d'][0].tolist() #GT keypoint 2d in SPIN format. In image space. 49,3\n spin24_joint_validity = eft_data['keypoint2d'][0][25:,2]\n data['joint_validity_openpose18'] = spin24_joint_validity[jointorders.JOINT_MAP_SPIN24_TO_OPENPOSE18].tolist()\n\n if 'smpltype' not in eft_data.keys():\n data['smpltype'] = 'smpl'#eft_data['smpltype']\n else:\n data['smpltype'] = eft_data['smpltype']\n\n\n if 'annotId' not in eft_data.keys():\n data['annotId'] = 0\n data['imageName'] = os.path.basename(eft_data['imageName'][0]) #Only save basename\n\n essentialdata.append(data)\n\n ##DEBUG TODO\n # if len(essentialdata)==50:\n # break\n\n print(\">>> Rejection Summary: {}/{}. 
Valid:{}\".format( erroneousCnt, len(eft_fileList) , len(essentialdata)) )\n\n\n with open(outputPath,'w') as f:\n json.dump({\"ver\":0.1, \"data\":essentialdata, \"meta\": metainfo},f)\n print(f\"saved: {outputPath}\")\n\nif __name__ == '__main__':\n \n #Load PKL files\n # pklFileDir = '/run/media/hjoo/disk/data/cvpr2020_eft_researchoutput/0_SPIN/0_exemplarOutput/04-14_cocoall_with8143_annotId'\n # metainfo = {\"dbname\": \"COCO2014-All\", \"rawname\": '04-14_cocoall_with8143_annotId'}\n # outputPath = '04-14_cocoall_with8143_annotId.json'\n\n # pklFileDir = '/run/media/hjoo/disk/data/cvpr2020_eft_researchoutput/0_SPIN/0_exemplarOutput/04-14_coco_with8143_annotId'\n # metainfo = {\"dbname\": \"COCO2014-Part\", \"rawname\": '04-14_coco_with8143_annotId'}\n # outputPath = 'COCO2014-Part-04-14_coco_with8143_annotId.json'\n\n # pklFileDir = '/run/media/hjoo/disk/data/cvpr2020_eft_researchoutput/0_SPIN/0_exemplarOutput/11-08_lspet_with8143'\n # metainfo = {\"dbname\": \"LSPet\", \"rawname\": '11-08_lspet_with8143'}\n # outputPath = '11-08_lspet_with8143.json'\n\n pklFileDir = '/run/media/hjoo/disk/data/cvpr2020_eft_researchoutput/0_SPIN/0_exemplarOutput/11-08_mpii_with8143'\n metainfo = {\"dbname\": \"MPII\", \"rawname\": '11-08_mpii_with8143'}\n outputPath = '11-08_mpii_with8143.json'\n\n\n\n pklToJson(pklFileDir, outputPath, metainfo)\n\n\n\n","repo_name":"facebookresearch/eft","sub_path":"eft/utils/exportDB.py","file_name":"exportDB.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":351,"dataset":"github-code","pt":"47"}
+{"seq_id":"23880239371","text":"import cv2\nimport os\nimport sys\nfrom string import Template\nfrom PIL import Image\n\n\ndef doDetect(filename):\n dirname = '/home/dark_knight/hackerrank/demo'\n\n # first argument is the haarcascades path\n\n\n face_cascade_path = \"haarcascade_frontalface_default.xml\"\n face_cascade = cv2.CascadeClassifier(os.path.expanduser(face_cascade_path))\n\n scale_factor = 1.1\n min_neighbors = 3\n min_size = (30, 30)\n flags = cv2.cv.CV_HAAR_SCALE_IMAGE\n\n count = 1\n #for infname in sys.argv[1:]:\n for infname in filename:\n image_path = os.path.expanduser(infname)\n image = cv2.imread(image_path)\n\n faces = face_cascade.detectMultiScale(image, scaleFactor = scale_factor, minNeighbors = min_neighbors,\n minSize = min_size, flags = flags)\n\n for( x, y, w, h ) in faces:\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)\n outfname = \"%s.faces.jpg\" % os.path.basename(infname)\n cv2.imwrite(os.path.join(dirname,outfname), image)\n\n img = Image.open(image_path)\n x = img.crop ((x, y, x+w, y+h))\n x.save ('crop/subject' + str (count) + '.jpg')\n #img.close()\n count += 1\n","repo_name":"duaraghav8/Corque","sub_path":"detect.py","file_name":"detect.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"}
+{"seq_id":"42573202694","text":"print(\"Program Started!\")\ncomplete = False\nwhile not complete:\n character = input(\"Please enter a standard character.\\n\")\n if len(character) == 1:\n ascii_char = ord(character)\n print(f\"The ASCII code for {character} is {ascii_char}\")\n complete = True\n else:\n print(\"Error: character not detected\")\nprint(\"Program Ended!\")","repo_name":"GDoesCode/com411","sub_path":"Basics/Functions/ascii_code.py","file_name":"ascii_code.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"13429463088","text":"#!/usr/bin/python\n# --------------------------------------------------------\n#\n# PYTHON PROGRAM DEFINITION\n#\n# The knowledge a computer has of Python can be specified in 3 levels:\n# (1) Prelude knowledge --> The computer has it by default.\n# (2) Borrowed knowledge --> The computer gets this knowledge from 3rd party libraries defined by others\n# (but imported by us in this program).\n# (3) Generated knowledge --> The computer gets this knowledge from the new functions defined by us in this program.\n#\n# When launching in a terminal the command:\n# user:~$ python3 this_file.py\n# our computer first processes this PYTHON PROGRAM DEFINITION section of the file.\n# On it, our computer enhances its Python knowledge from levels (2) and (3) with the imports and new functions\n# defined in the program. However, it still does not execute anything.\n#\n# --------------------------------------------------------\n\n# ------------------------------------------\n# IMPORTS\n# ------------------------------------------\nimport sys\nimport codecs\nimport bisect\n\n# ------------------------------------------\n# FUNCTION process_line\n# ------------------------------------------\ndef process_line(line):\n # 1. We create the output variable\n res = ()\n\n # 2. We get the parameter list from the line\n params_list = line.strip().split(\",\")\n\n #(00) start_time => A String representing the time the trip started at <%d/%m/%Y %H:%M:%S>. Example: “2019/05/02 10:05:00”\n #(01) stop_time => A String representing the time the trip finished at <%d/%m/%Y %H:%M:%S>. Example: “2019/05/02 10:10:00”\n #(02) trip_duration => An Integer representing the duration of the trip. Example: 300\n #(03) start_station_id => An Integer representing the ID of the CityBike station the trip started from. Example: 150\n #(04) start_station_name => A String representing the name of the CitiBike station the trip started from. Example: “E 2 St &; Avenue C”.\n #(05) start_station_latitude => A Float representing the latitude of the CitiBike station the trip started from. Example: 40.7208736\n #(06) start_station_longitude => A Float representing the longitude of the CitiBike station the trip started from. Example: -73.98085795\n #(07) stop_station_id => An Integer representing the ID of the CityBike station the trip stopped at. Example: 150\n #(08) stop_station_name => A String representing the name of the CitiBike station the trip stopped at. Example: “E 2 St &; Avenue C”.\n #(09) stop_station_latitude => A Float representing the latitude of the CitiBike station the trip stopped at. Example: 40.7208736\n #(10) stop_station_longitude => A Float representing the longitude of the CitiBike station the trip stopped at. Example: -73.98085795\n #(11) bike_id => An Integer representing the id of the bike used in the trip. Example: 33882\n #(12) user_type => A String representing the type of user using the bike (it can be either “Subscriber” or “Customer”). Example: “Subscriber”.\n #(13) birth_year => An Integer representing the birth year of the user using the bike. Example: 1990\n #(14) gender => An Integer representing the gender of the user using the bike (it can be either 0 => Unknown; 1 => male; 2 => female). Example: 2.\n #(15) trip_id => An Integer representing the id of the trip. Example: 190\n\n # 3. If the list contains the right amount of parameters\n if (len(params_list) == 16):\n # 3.1. 
We set the right type for the parameters\n params_list[2] = int(params_list[2])\n params_list[3] = int(params_list[3])\n params_list[5] = float(params_list[5])\n params_list[6] = float(params_list[6])\n params_list[7] = int(params_list[7])\n params_list[9] = float(params_list[9])\n params_list[10] = float(params_list[10])\n params_list[11] = int(params_list[11])\n params_list[13] = int(params_list[13])\n params_list[14] = int(params_list[14])\n params_list[15] = int(params_list[15])\n\n # 3.2. We assign res\n res = tuple(params_list)\n\n # 4. We return res\n return res\n\n# ------------------------------------------\n# FUNCTION my_map\n# ------------------------------------------\ndef my_map(my_input_stream, my_output_stream, my_mapper_input_parameters):\n\n data_list = []\n\n #for lines in my_input_stream: # Change this to while loop? Because we dont know how many lines there are?\n while True:\n line = my_input_stream.readline().rstrip().split(\",\")\n # Here I check to see when end of file happens. Since variable \"line\" turns into a list when using split,\n # I check the first element\n if line[0] == '':\n break\n\n if line[11] == str(my_mapper_input_parameters[0]):\n data_list.append(line[0])\n data_list.append(line[1])\n data_list.append(line[4])\n data_list.append(line[8])\n\n if len(data_list) != 0:\n i = 0\n my_output_stream.write(\"universal\" + \"\\t\" + \"(\")\n while i != (len(data_list)):\n my_output_stream.write(data_list[i] + \" @ \" + data_list[i+1] + \" @ \" + data_list[i+2] + \" @ \"\n + data_list[i+3])\n i += 4\n if i != len(data_list):\n my_output_stream.write(\" @ \")\n my_output_stream.write(\")\\n\")\n #print(str(i), len(data_list), data_list[0] + \"\\n\")\n\n\n\n\n\n pass\n\n# ---------------------------------------------------------------\n# PYTHON EXECUTION\n# This is the main entry point to the execution of our program.\n# It provides a call to the 'main function' defined in our\n# Python program, making the Python interpreter to trigger\n# its execution.\n# ---------------------------------------------------------------\nif __name__ == '__main__':\n # 1. We collect the input values\n my_input_stream = sys.stdin\n my_output_stream = sys.stdout\n my_mapper_input_parameters = [bike_id]\n\n # 2. We call to my_map\n my_map(my_input_stream,\n my_output_stream,\n my_mapper_input_parameters\n )\n","repo_name":"aFellowCoder/BigData","sub_path":"A01_Part4/my_mapper.py","file_name":"my_mapper.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"3062167972","text":"# -*- coding: utf-8 -*-\n# UTF-8 encoding when using korean\ndef c(num,cnt):\n\twhile(num!=1):\n\t\tif(num%2!=0):\n\t\t\tnum=num*3+1\n\t\t\tcnt+=1\n\t\twhile(num%2==0):\n\t\t\tnum//=2\n\t\t\tcnt+=1\n\treturn cnt\t\t\n\nwhile(True):\n\ttry:\n\t\ta,b = map(int,input().split())\n\t\tres=[]\n\t\ttemp = a\n\t\ttemp_2 = b\n\t\t\n\t\tif(a>b):\n\t\t\ta=b\n\t\t\tb=temp\n\n\t\tfor i in range(a,b+1):\n\t\t\tcnt=1\n\t\t\tres.append(c(i,cnt))\n\n\t\tans=max(res)\n\t\tprint (temp,temp_2,ans)\n\t\t\n\texcept EOFError: break","repo_name":"SoleMin/Algorithmic_Problems","sub_path":"110101/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"22491424793","text":"\nclass Node:\n\n def __init__(self, data, level):\n\n self.left = None\n self.right = None\n self.data = data\n self.level = level\n self.maxlevel = 1\n\n def insert(self, data):\n# Compare the new value with the parent node\n if self.data:\n if data < self.data:\n if self.left is None:\n self.left = Node(data, self.level + 1)\n if self.maxlevel < self.level + 1:\n self.maxlevel = self.level + 1\n \n else:\n templevel = self.left.insert(data)\n if self.maxlevel < templevel:\n self.maxlevel = templevel\n \n elif data > self.data:\n if self.right is None:\n self.right = Node(data, self.level+1)\n if self.maxlevel < self.level+1:\n self.maxlevel = self.level+1\n \n else:\n self.right.insert(data)\n if self.maxlevel < templevel:\n self.maxlevel = templevel\n \n else:\n self.data = data\n \n return self.maxlevel\n\n\n# Print the tree\n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print(self.data),\n if self.right:\n self.right.PrintTree()\n \n\n\n\n\nnumLevels = 1\ntempLevel = 1\n\n\ndef maxOfTwo(tempLevel):\n global numLevels\n if numLevels < tempLevel:\n numLevels = tempLevel\n\n\n# Use the insert method to add nodes\nroot = Node(12, 1)\ntempLevel = root.insert(6)\n\n\nmaxOfTwo(tempLevel)\n\ntempLevel = root.insert(14)\nmaxOfTwo(tempLevel)\n\ntempLevel = root.insert(3)\nmaxOfTwo(tempLevel)\n\ntempLevel = root.insert(1)\nmaxOfTwo(tempLevel)\n\n\nroot.PrintTree()\n\nprint (\"Num Level = \" + str(numLevels))\n\nprint (str(2**numLevels))\n\nprint (str(root.maxlevel))","repo_name":"yuraboruk/MyLabRepo","sub_path":"binarytree/binarytree.py","file_name":"binarytree.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"17864815708","text":"\"\"\"Fill docstrings to avoid redundant docstrings in multiple files.\n\nInspired from mne: https://mne.tools/stable/index.html\nInspired from mne.utils.docs.py by Eric Larson \n\"\"\"\n\nimport sys\nfrom typing import Callable, List\n\n# ------------------------- Documentation dictionary -------------------------\ndocdict = dict()\n\n# ---------------------------------- verbose ---------------------------------\ndocdict[\n \"verbose\"\n] = \"\"\"\nverbose : int | str | bool | None\n Sets the verbosity level. The verbosity increases gradually between\n ``\"CRITICAL\"``, ``\"ERROR\"``, ``\"WARNING\"``, ``\"INFO\"`` and ``\"DEBUG\"``.\n If None is provided, the verbosity is set to ``\"WARNING\"``.\n If a bool is provided, the verbosity is set to ``\"WARNING\"`` for ``False``\n and to ``\"INFO\"`` for ``True``.\"\"\"\n\n# ----------------------------------- audio ----------------------------------\ndocdict[\n \"audio_volume\"\n] = \"\"\"\nvolume : float | tuple\n If an int or a float is provided, the sound will use only one channel\n (mono). If a 2-length tuple is provided, the sound will use 2\n channels (stereo). The volume of each channel is given between 0 and 100.\n For stereo, the volume is given as (L, R).\"\"\"\ndocdict[\n \"audio_sample_rate\"\n] = \"\"\"\nsample_rate : float\n Sampling frequency of the sound. The default is 44100 Hz.\"\"\"\ndocdict[\n \"audio_duration\"\n] = \"\"\"\nduration : float\n Duration of the sound. The default is 1 second.\"\"\"\n\n# ----------------------------------- visual ----------------------------------\ndocdict[\n \"visual_window_name\"\n] = \"\"\"\nwindow_name : str\n Name of the window in which the visual is displayed.\"\"\"\ndocdict[\n \"visual_window_size\"\n] = \"\"\"\nwindow_size : tuple | None\n Either ``None`` to automatically select a window size based on the\n available monitors, or a 2-length of positive integer sequence as\n ``(width, height)`` in pixels.\"\"\"\ndocdict[\n \"visual_color\"\n] = \"\"\"A color is provided as matplotlib string or as ``(B, G, R)`` tuple of\nint8 set between 0 and 255.\"\"\"\ndocdict[\n \"visual_position\"\n] = \"\"\"\nThe position of the object can be either defined as the string 'center' or\n'centered' to position the object in the center of the window; or as a 2-length\ntuple of positive integer. 
The position is defined in pixels in opencv\ncoordinates, with (0, 0) being the top left corner of the window.\"\"\"\ndocdict[\n    \"visual_length\"\n] = \"\"\"\nlength : int\n    Number of pixels used to draw the length of the bar.\"\"\"\ndocdict[\n    \"visual_width\"\n] = \"\"\"\nwidth : int\n    Number of pixels used to draw the width of the bar.\"\"\"\n\n# ------------------------- Documentation functions --------------------------\ndocdict_indented = dict()\n\n\ndef fill_doc(f: Callable) -> Callable:\n    \"\"\"Fill a docstring with docdict entries.\n\n    Parameters\n    ----------\n    f : callable\n        The function to fill the docstring of (modified in place).\n\n    Returns\n    -------\n    f : callable\n        The function, potentially with an updated __doc__.\n    \"\"\"\n    docstring = f.__doc__\n    if not docstring:\n        return f\n\n    lines = docstring.splitlines()\n    indent_count = _indentcount_lines(lines)\n\n    try:\n        indented = docdict_indented[indent_count]\n    except KeyError:\n        indent = \" \" * indent_count\n        docdict_indented[indent_count] = indented = dict()\n\n        for name, docstr in docdict.items():\n            lines = [\n                indent + line if k != 0 else line\n                for k, line in enumerate(docstr.strip().splitlines())\n            ]\n            indented[name] = \"\\n\".join(lines)\n\n    try:\n        f.__doc__ = docstring % indented\n    except (TypeError, ValueError, KeyError) as exp:\n        funcname = f.__name__\n        funcname = docstring.split(\"\\n\")[0] if funcname is None else funcname\n        raise RuntimeError(f\"Error documenting {funcname}:\\n{str(exp)}\")\n\n    return f\n\n\ndef _indentcount_lines(lines: List[str]) -> int:\n    \"\"\"Minimum indent for all lines in line list, ignoring the first line.\n\n    >>> lines = [' one', ' two', ' three']\n    >>> _indentcount_lines(lines)\n    1\n    >>> lines = []\n    >>> _indentcount_lines(lines)\n    0\n    >>> lines = [' one']\n    >>> _indentcount_lines(lines)\n    0\n    >>> _indentcount_lines([' '])\n    0\n    \"\"\"\n    indent = sys.maxsize\n    for k, line in enumerate(lines):\n        if k == 0:\n            continue\n        line_stripped = line.lstrip()\n        if line_stripped:\n            indent = min(indent, len(line) - len(line_stripped))\n    if indent == sys.maxsize:\n        # empty or single-line input: no indented line was found\n        return 0\n    return indent\n\n\ndef copy_doc(source: Callable) -> Callable:\n    \"\"\"Copy the docstring from another function (decorator).\n\n    The docstring of the source function is prepended to the docstring of the\n    function wrapped by this decorator.\n\n    This is useful when inheriting from a class and overloading a method. This\n    decorator can be used to copy the docstring of the original method.\n\n    Parameters\n    ----------\n    source : callable\n        The function to copy the docstring from.\n\n    Returns\n    -------\n    wrapper : callable\n        The decorated function.\n\n    Examples\n    --------\n    >>> class A:\n    ...     def m1():\n    ...         '''Docstring for m1'''\n    ...         pass\n    >>> class B(A):\n    ...     @copy_doc(A.m1)\n    ...     def m1():\n    ...         ''' this gets appended'''\n    ...         pass\n    >>> print(B.m1.__doc__)\n    Docstring for m1 this gets appended\n    \"\"\"\n\n    def wrapper(func):\n        if source.__doc__ is None or len(source.__doc__) == 0:\n            raise RuntimeError(\n                f\"The docstring from {source.__name__} could not be copied \"\n                \"because it was empty.\"\n            )\n        doc = source.__doc__\n        if func.__doc__ is not None:\n            doc += func.__doc__\n        func.__doc__ = doc\n        return func\n\n    return wrapper\n","repo_name":"mscheltienne/simple-stimuli","sub_path":"stimuli/utils/_docs.py","file_name":"_docs.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
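+A quick usage sketch for the record above (make_tone is hypothetical; the import path is inferred from the record's sub_path): fill_doc expands %(name)s placeholders using the docdict entries.
+
+from stimuli.utils._docs import fill_doc
+
+@fill_doc
+def make_tone(sample_rate=44100):
+    """Generate a test tone.
+
+    Parameters
+    ----------
+    %(audio_sample_rate)s
+    """
+
+print(make_tone.__doc__)  # placeholder replaced by the 'audio_sample_rate' entry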
+{"seq_id":"32585544885","text":"#tSm=toplam satış miktarı\r\n#hM=hammadde maliyeti\r\n#bOg=bakım onarım giderleri\r\n#sG=sevkiyat giderleri\r\n#sAhg=satın alınan hizmet giderleri\r\n\r\n\r\ndef katmadegerciro(tSm,hM,bOg,sG,sAhg):\r\n #Global yapmamın nedeni başka fonksiyona girdi olacak\r\n global katmadegerciro\r\n katmadegerciro=(tSm-(hM+bOg+sG+sAhg))\r\n if(katmadegerciro>1000):\r\n print(\"İşletme karda katma değer cirosu yüksek\")\r\n elif(500).\n \"\"\"\n valores = []\n\n while cantidad > 0:\n valor = input('\\nIngresá un valor: ')\n\n if not valor:\n print('El valor no puede ser nulo. Intentá de nuevo.')\n continue\n\n if valor in valores:\n print('No podés ingresar valores repetidos.')\n continue\n\n valores.append(valor)\n cantidad -= 1\n\n return valores\n\n\ndef adivinar(valores, intentos):\n \"\"\"\n Crea una copia de valores y mezcla sus contenidos.\n Pregunta por el orden original de la nueva lista y\n se pierde cuando == 0.\n \"\"\"\n copia = valores[:]\n copia.sort(key=lambda x: random())\n\n for valor in copia:\n posicion = valores.index(valor) + 1\n\n while intentos > 0:\n respuesta = pedir_entero(f'\\n¿En qué posición ingresaste el valor [ {valor} ]? ')\n\n if respuesta != posicion:\n print('Respuesta incorrecta.')\n intentos -= 1\n continue\n\n print('¡Correcto!')\n break\n\n else:\n print('\\nPerdiste. Más suerte la próxima.')\n return\n \n print('\\n¡Ganaste!')\n\n\ndef pedir_entero(msj):\n \"\"\"\n Ejecuta input(msj) hasta que se ingrese un entero.\n \"\"\"\n while True:\n try:\n entero = int(input(msj))\n except ValueError:\n print('El valor ingresado no es válido.')\n else:\n return entero\n\n\ndef jugar_de_nuevo():\n \"\"\"\n Pregunta si se quiere jugar de nuevo y ejecuta main()\n si la respuesta es 'y' o termina el programa si 'n'.\n \"\"\"\n while True:\n try:\n respuesta = input('\\n¿Querés jugar de nuevo? (y/n) ')\n assert respuesta.lower() in {'y', 'n'}\n except AssertionError:\n print('Respuesta inválida. Por favor ingresá \"y\" o \"n\".')\n else:\n if respuesta.lower() == 'y':\n borrar_pantalla()\n main()\n else:\n print()\n exit()\n\n\ndef main():\n try:\n # pedimos el total de valores a ingresar\n cantidad = pedir_entero('\\n¿Cuántos valores querés ingresar? ')\n\n # pedimos el número de intentos para adivinar\n intentos = pedir_entero('\\n¿Cuántos intentos querés? ')\n\n # ejecutamos pedir_valores() y guardamos la lista\n lista = pedir_valores(cantidad)\n\n # borramos la pantalla\n borrar_pantalla()\n\n # ejecutamos adivinar() y pasamos la lista y el nº de intentos\n adivinar(lista, intentos)\n\n # preguntamos si se quiere jugar de nuevo\n jugar_de_nuevo()\n\n # si el usuario ejecuta Ctrl + C\n except KeyboardInterrupt:\n print('\\n\\nGracias por jugar.\\n')\n exit()\n\n\n# bienvenida\nprint('\\nJuego de la memoria.')\nprint('\\nConsigna: Ingresá tantos valores como quieras y poné a prueba tu memoria.')\n\nmain()\n","repo_name":"Python-Academy-Argentina/Fundamentals","sub_path":"Clase 2/Ejemplos/03 - excepciones/juego de memoria/memoria.py","file_name":"memoria.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"es","doc_type":"code","stars":11,"dataset":"github-code","pt":"47"}
+{"seq_id":"72870108303","text":"from typing import Iterable, List, Optional\nfrom time import sleep\n\nfrom canvasapi import Canvas\nfrom canvasapi.assignment import Assignment\nfrom canvasapi.course import Course\nfrom canvasapi.exceptions import ResourceDoesNotExist\nfrom canvasapi.submission import Submission\nfrom canvasapi.user import UserDisplay\n\n\nclass CanvasAPI:\n def __init__(self, base_url: str, access_token: str) -> None:\n self.api = Canvas(base_url=base_url, access_token=access_token)\n\n def get_courses(self, enrollment_type=\"teacher\") -> Iterable[Course]:\n return self.api.get_courses(enrollment_type=enrollment_type)\n\n def create_assignment(\n self, course: Course, name: str, description: str = \"\"\n ) -> Assignment:\n return course.create_assignment(\n {\n \"name\": name,\n \"submission_types\": [\"online_upload\"],\n \"allowed_extensions\": [\"pdf\"],\n \"description\": description,\n \"published\": True,\n }\n )\n\n def get_gradeable_students(self, assignment: Assignment) -> Iterable[UserDisplay]:\n students = list(assignment.get_gradeable_students())\n for _ in range(5):\n print(\"Getting gradeable students...\")\n if len(students):\n return students\n sleep(1)\n students = list(assignment.get_gradeable_students())\n print(\"Couldn't find gradeable students.\")\n return []\n\n def upload_file(self, assignment: Assignment, file_path: str, user_id: int):\n return assignment.upload_to_submission(file_path, user=user_id)\n\n def create_submission(\n self, assignment: Assignment, user_id: int, file_id: int = None\n ) -> Submission:\n return assignment.submit(\n submission={\n \"submission_type\": \"online_upload\",\n \"user_id\": user_id,\n \"file_ids\": [file_id],\n },\n )\n\n def get_courses_by_id(self, courses_ids: List[int]) -> List[Course]:\n courses = [self._get_course_by_id(course_id) for course_id in courses_ids]\n return [course for course in courses if course]\n\n def _get_course_by_id(self, course_id: int) -> Optional[Course]:\n try:\n return self.api.get_course(course_id)\n except (TypeError, ResourceDoesNotExist):\n return None\n\n def get_or_create_assignments(self, course: Course, assignments_names: List[str]):\n all_assignments = course.get_assignments()\n assignments: List[Assignment] = []\n for name in assignments_names:\n assignment = self.get_assignment_by_name(all_assignments, name)\n if not assignment:\n assignment = self.create_assignment(course, name)\n assignments.append(assignment)\n return assignments\n\n def get_assignment_by_name(\n self, assignments: Iterable[Assignment], name: str\n ) -> Optional[Assignment]:\n for assignment in assignments:\n if assignment.name == name:\n return assignment\n","repo_name":"KnowYourselves/CanvasSynch","sub_path":"CanvasSynch/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"18298427568","text":"import webbrowser\r\nimport requests\r\nimport json\r\n\r\n\r\ndef getaccesstoken():\r\n url = 'https://sandbox.handelsbanken.com/openbanking/oauth2/token/1.0'\r\n\r\n body = \"client_id=c01c36a1-59f1-44c3-9589-7870c0201846&grant_type=client_credentials&scope=AIS\"\r\n\r\n headers = {\r\n 'Accept': \"application/json\",\r\n 'Content-Type': \"application/x-www-form-urlencoded\",\r\n }\r\n\r\n response = requests.post(url, data=body, headers=headers,)\r\n\r\n access_token = (json.loads(response.text)['access_token'])\r\n\r\n return access_token\r\n\r\n\r\ndef getconsent():\r\n url = 'https://sandbox.handelsbanken.com/openbanking/psd2/v1/consents'\r\n\r\n body = \"{\\\"access\\\":\\\"ALL_ACCOUNTS\\\"}\"\r\n\r\n headers = {\r\n 'X-IBM-Client-Id': \"c01c36a1-59f1-44c3-9589-7870c0201846\",\r\n 'Authorization': \"Bearer \" + getaccesstoken(),\r\n 'Country': \"SE\",\r\n 'PSU-IP-Address': \"192.102.28.2\",\r\n 'TPP-Transaction-ID': \"6b24ce42-237f-4303-a917-cf778e5013d6\",\r\n 'TPP-Request-ID': \"c8271b81-4229-5a1f-bf9c-758f11c1f5b1\",\r\n 'content-type': \"application/json\",\r\n 'accept': \"application/json\"\r\n }\r\n\r\n response = requests.post(url, data=body, headers=headers,)\r\n\r\n consent = (json.loads(response.text)['consentId'])\r\n return consent\r\n\r\n\r\ndef getinitauthorization():\r\n url = f'https://sandbox.handelsbanken.com/openbanking/redirect/oauth2/authorize/1.0?response_type=code&scope=AIS:{getconsent()}&client_id=c01c36a1-59f1-44c3-9589-7870c0201846&state=bc4b933c-bfc2-44c8-b858-eba90f559f91&redirect_uri=http://localapp.me/redirect/result'\r\n\r\n headers = {'accept': \"application/json\"}\r\n\r\n response = requests.get(url, headers=headers)\r\n webbrowser.open(response.url)\r\n\r\n\r\n\r\n\r\ndef requestgranttoken():\r\n url = 'https://sandbox.handelsbanken.com/openbanking/redirect/oauth2/token/1.0'\r\n\r\n body = {\r\n 'grant_type': \"authorization_code\",\r\n 'scope': \"AIS:\" + getconsent(),\r\n 'client_id': \"c01c36a1-59f1-44c3-9589-7870c0201846\",\r\n 'code': \"360ad5ce-ecfe-4ad4-83d1-9254e89a3ccc\",\r\n 'redirect_uri': \"http://localapp.me/redirect/result\"\r\n }\r\n\r\n headers = {\r\n 'Accept': \"application/json\",\r\n 'Content-Type': \"application/x-www-form-urlencoded\",\r\n }\r\n\r\n response = requests.post(url, data=body, headers=headers,)\r\n\r\n account_access_token = (json.loads(response.text)['access_token'])\r\n account_refresh_token = (json.loads(response.text)['refresh_token'])\r\n\r\n return account_access_token, account_refresh_token\r\n\r\n\r\ndef getaccounts():\r\n url = 'https://sandbox.handelsbanken.com/openbanking/psd2/v2/accounts'\r\n\r\n headers = {\r\n 'X-IBM-Client-Id': \"c01c36a1-59f1-44c3-9589-7870c0201846\",\r\n 'Authorization': \"Bearer \" + requestgranttoken()[0],\r\n 'PSU-IP-Address': \"192.102.28.2\",\r\n 'TPP-Transaction-ID': \"c8271b81-4229-5a1f-bf9c-758f11c1f5b1\",\r\n 'TPP-Request-ID': \"6b24ce42-237f-4303-a917-cf778e5013d6\",\r\n 'accept': \"application/json\",\r\n }\r\n response = requests.get(url, headers=headers,)\r\n return response.text\r\n\r\n\r\ndef gettransactions():\r\n\r\n url = 'https://sandbox.handelsbanken.com/openbanking/psd2/v2/accounts/ae57e780-6cf3-11e9-9c41-e957ce7d7d69/transactions'\r\n\r\n headers = {\r\n 'X-IBM-Client-Id': \"c01c36a1-59f1-44c3-9589-7870c0201846\",\r\n 'Authorization': \"Bearer \" + requestgranttoken()[0],\r\n 'PSU-IP-Address': \"192.102.28.2\",\r\n 'TPP-Transaction-ID': \"c8271b81-4229-5a1f-bf9c-758f11c1f5b1\",\r\n 'TPP-Request-ID': 
\"6b24ce42-237f-4303-a917-cf778e5013d6\",\r\n 'accept': \"application/json\",\r\n }\r\n response = requests.get(url, headers=headers)\r\n\r\n print(response.text)\r\n return response.text\r\n\r\ngettransactions()\r\ngetinitauthorization()","repo_name":"JanisRudzitis/nordigen-test","sub_path":"get_transactions.py","file_name":"get_transactions.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10283370813","text":"from django.db import models\nfrom django.core.exceptions import ValidationError\nimport datetime\nimport pytz\n\n\nclass Timezone(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n return self.name\n\n\nclass Organization(models.Model):\n name = models.CharField(max_length=50)\n timezone = models.ForeignKey(Timezone,\n on_delete=models.CASCADE,\n default=195)\n\n # only allow one instance\n def save(self, *args, **kwargs):\n if not self.pk and Organization.objects.exists():\n raise ValidationError(\"There can only be one Organization instance\")\n return super(Organization, self).save(*args, **kwargs)\n\n @staticmethod\n def convert_to_utc(time):\n \"\"\"time is datetime.date without timezone info\"\"\"\n timezone = Organization.objects.all()[0].timezone # only one can exist\n timezone = pytz.timezone(str(timezone))\n time = datetime.datetime(time.year, time.month, time.day).astimezone(timezone)\n time = time.astimezone(pytz.utc) # register as UTC time\n return time\n\n @staticmethod\n def get_today_start_utc():\n today = datetime.datetime.now().date()\n return Organization.convert_to_utc(today)\n\n","repo_name":"chestnutcone/Hangry","sub_path":"organization/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10225737559","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://ktmsklep.pl/oryginalne-czesci-zamienne/208649/500-exc-f/419659/przod-amortyzatory-polki-zawieszenia#part-title'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.text, 'html.parser')\n\nbox_scheme = soup.find_all('div', class_='box box-scheme')\nid = 1\n\nwith open('result.txt', 'w') as f:\n for box in box_scheme:\n title = box.find('h3', class_='title').get_text()\n link = box.find('a')['href']\n image_url = box.find('img')['src']\n link = 'https://ktmsklep.pl/' +link\n\n response = requests.get(link)\n soup = BeautifulSoup(response.text, 'lxml')\n name_list = [name.text for name in soup.find_all('p', class_='t-name')]\n number_list = [number.text for number in soup.find_all('p', class_='t-number')]\n lp = [number.text for number in soup.find_all('td', class_='t-lp ref')]\n parts_list = []\n parts_image_url = []\n for i in range(len(name_list)):\n name_list[i] = name_list[i].replace(\"'\", '\"')\n part = '{\\n\\t\\t\\t\\t\\t\"id\": '+lp[i]+',\\n\\t\\t\\t\\t\\t\"namePart\": '+ '\"'+name_list[i]+'\"'+',\\n\\t\\t\\t\\t\\t\"partNumber\": '+'\"'+number_list[i]+'\"\\n\\t\\t\\t\\t}'\n parts_list.append(part)\n main_image = soup.find('div', class_='main-image')\n parts_image_url = main_image.find('img')['src']\n\n\n result = '{\\n\\t\"id\": '+str(id)+',\\n\\t\"nameTypePart\": '+'\"'+title+'\"'+',\\n\\t\"imageUrl\": '+'\"'+image_url+'\"'+',\\n\\t\"partsImageUrl\": '+'\"'+parts_image_url+'\"'+',\\n\\t\"parts\": [\\n\\t\\t\\t'+\",\\n\\t\\t\\t\".join(parts_list)+'\\n\\t]\\n},'\n f.write(result + '\\n')\n id += 1","repo_name":"amWilq/Motor_scheme","sub_path":"scrap-data-from-ktmsklep.py","file_name":"scrap-data-from-ktmsklep.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"43191087776","text":"import os\nimport re\nimport time\nimport uuid\nimport random\nimport logging\nimport errno\n\nfrom netaddr import IPNetwork\nimport xml.etree.ElementTree as ET\n\nfrom yardstick import ssh\nfrom yardstick.common import constants\nfrom yardstick.common import exceptions\nfrom yardstick.common import utils as common_utils\nfrom yardstick.common import yaml_loader\nfrom yardstick.network_services.utils import PciAddress\nfrom yardstick.network_services.helpers.cpu import CpuSysCores\n\n\nLOG = logging.getLogger(__name__)\n\nVM_TEMPLATE = \"\"\"\n\n {vm_name} \n {random_uuid} \n {memory} \n {memory} \n \n \n \n {vcpu} \n {cputune}\n \n hvm \n \n \n \n \n \n \n \n \n \n \n | \n \n \n \n \n \n \n \n destroy \n restart \n restart \n \n /usr/bin/kvm-spice \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\"\"\"\n\nUSER_DATA_TEMPLATE = \"\"\"\ncat > {user_file} < {network_file} < \n\n Refence: https://software.intel.com/en-us/articles/\n configure-sr-iov-network-virtual-functions-in-linux-kvm\n \"\"\"\n vm_pci = ET.SubElement(interface, 'address')\n vm_pci.set('type', 'pci')\n vm_pci.set('domain', '0x{}'.format(pci_address.domain))\n vm_pci.set('bus', '0x{}'.format(pci_address.bus))\n vm_pci.set('slot', '0x{}'.format(pci_address.slot))\n vm_pci.set('function', '0x{}'.format(pci_address.function))\n return vm_pci\n\n @classmethod\n def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str,\n queues):\n \"\"\"Add a DPDK OVS 'interface' XML node in 'devices' node\n\n \n \n \n \n \n \n \n \n \n \n ...\n \n\n Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/\n vhost-user/\n \"\"\"\n\n vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.\n format(vpath, port_num))\n root = ET.fromstring(xml_str)\n pci_address = PciAddress(vpci.strip())\n device = root.find('devices')\n\n interface = ET.SubElement(device, 'interface')\n interface.set('type', 'vhostuser')\n mac = ET.SubElement(interface, 'mac')\n mac.set('address', vports_mac)\n\n source = ET.SubElement(interface, 'source')\n source.set('type', 'unix')\n source.set('path', vhost_path)\n source.set('mode', 'client')\n\n model = ET.SubElement(interface, 'model')\n model.set('type', 'virtio')\n\n driver = ET.SubElement(interface, 'driver')\n driver.set('queues', str(queues))\n\n host = ET.SubElement(driver, 'host')\n host.set('mrg_rxbuf', 'off')\n\n cls._add_interface_address(interface, pci_address)\n\n return ET.tostring(root)\n\n @classmethod\n def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml_str):\n \"\"\"Add a SR-IOV 'interface' XML node in 'devices' node\n\n \n \n \n \n \n \n \n \n ...\n \n\n Reference: https://access.redhat.com/documentation/en-us/\n red_hat_enterprise_linux/6/html/\n virtualization_host_configuration_and_guest_installation_guide/\n sect-virtualization_host_configuration_and_guest_installation_guide\n -sr_iov-how_sr_iov_libvirt_works\n \"\"\"\n\n root = ET.fromstring(xml_str)\n device = root.find('devices')\n\n interface = ET.SubElement(device, 'interface')\n interface.set('managed', 'yes')\n interface.set('type', 'hostdev')\n\n mac = ET.SubElement(interface, 'mac')\n mac.set('address', vf_mac)\n\n source = ET.SubElement(interface, 'source')\n pci_address = PciAddress(vf_pci.strip())\n cls._add_interface_address(source, pci_address)\n\n pci_vm_address = PciAddress(vm_pci.strip())\n cls._add_interface_address(interface, pci_vm_address)\n\n return ET.tostring(root)\n\n @staticmethod\n def create_snapshot_qemu(connection, index, base_image):\n \"\"\"Create the 
snapshot image for a VM using a base image\n\n :param connection: SSH connection to the remote host\n :param index: index of the VM to be spawn\n :param base_image: path of the VM base image in the remote host\n :return: snapshot image path\n \"\"\"\n vm_image = '/var/lib/libvirt/images/%s.qcow2' % index\n connection.execute('rm -- \"%s\"' % vm_image)\n status, _, _ = connection.execute('test -r %s' % base_image)\n if status:\n if not os.access(base_image, os.R_OK):\n raise exceptions.LibvirtQemuImageBaseImageNotPresent(\n vm_image=vm_image, base_image=base_image)\n # NOTE(ralonsoh): done in two steps to avoid root permission\n # issues.\n LOG.info('Copy %s from execution host to remote host', base_image)\n file_name = os.path.basename(os.path.normpath(base_image))\n connection.put_file(base_image, '/tmp/%s' % file_name)\n status, _, error = connection.execute(\n 'mv -- \"/tmp/%s\" \"%s\"' % (file_name, base_image))\n if status:\n raise exceptions.LibvirtQemuImageCreateError(\n vm_image=vm_image, base_image=base_image, error=error)\n\n LOG.info('Convert image %s to %s', base_image, vm_image)\n qemu_cmd = ('qemu-img create -f qcow2 -o backing_file=%s %s' %\n (base_image, vm_image))\n status, _, error = connection.execute(qemu_cmd)\n if status:\n raise exceptions.LibvirtQemuImageCreateError(\n vm_image=vm_image, base_image=base_image, error=error)\n return vm_image\n\n @classmethod\n def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img):\n \"\"\"Build the XML from the configuration parameters\"\"\"\n memory = flavor.get('ram', '4096')\n extra_spec = flavor.get('extra_specs', {})\n cpu = extra_spec.get('hw:cpu_cores', '2')\n socket = extra_spec.get('hw:cpu_sockets', '1')\n threads = extra_spec.get('hw:cpu_threads', '2')\n vcpu = int(cpu) * int(threads)\n numa_cpus = '0-%s' % (vcpu - 1)\n hw_socket = flavor.get('hw_socket', '0')\n cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)\n\n cputune = extra_spec.get('cputune', '')\n machine = extra_spec.get('machine_type', 'pc-i440fx-xenial')\n mac = StandaloneContextHelper.get_mac_address(0x00)\n image = cls.create_snapshot_qemu(connection, index,\n flavor.get(\"images\", None))\n vm_xml = VM_TEMPLATE.format(\n vm_name=vm_name,\n random_uuid=uuid.uuid4(),\n mac_addr=mac,\n memory=memory, vcpu=vcpu, cpu=cpu,\n numa_cpus=numa_cpus,\n socket=socket, threads=threads,\n vm_image=image, cpuset=cpuset,\n machine=machine, cputune=cputune)\n\n # Add CD-ROM device\n vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)\n\n return vm_xml, mac\n\n @staticmethod\n def update_interrupts_hugepages_perf(connection):\n connection.execute(\"echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts\")\n connection.execute(\"echo never > /sys/kernel/mm/transparent_hugepage/enabled\")\n\n @classmethod\n def pin_vcpu_for_perf(cls, connection, socket='0'):\n threads = \"\"\n sys_obj = CpuSysCores(connection)\n soc_cpu = sys_obj.get_core_socket()\n sys_cpu = int(soc_cpu[\"cores_per_socket\"])\n socket = str(socket)\n cores = \"%s-%s\" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])\n if int(soc_cpu[\"thread_per_core\"]) > 1:\n threads = \"%s-%s\" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])\n cpuset = \"%s,%s\" % (cores, threads)\n return cpuset\n\n @classmethod\n def write_file(cls, file_name, xml_str):\n \"\"\"Dump a XML string to a file\"\"\"\n root = ET.fromstring(xml_str)\n et = ET.ElementTree(element=root)\n et.write(file_name, encoding='utf-8', method='xml')\n\n @classmethod\n def add_cdrom(cls, file_path, xml_str):\n \"\"\"Add a 
CD-ROM disk XML node in 'devices' node\n\n \n \n \n \n \n \n \n ...\n \n \"\"\"\n\n root = ET.fromstring(xml_str)\n device = root.find('devices')\n\n disk = ET.SubElement(device, 'disk')\n disk.set('type', 'file')\n disk.set('device', 'cdrom')\n\n driver = ET.SubElement(disk, 'driver')\n driver.set('name', 'qemu')\n driver.set('type', 'raw')\n\n source = ET.SubElement(disk, 'source')\n source.set('file', file_path)\n\n target = ET.SubElement(disk, 'target')\n target.set('dev', 'hdb')\n\n ET.SubElement(disk, 'readonly')\n return ET.tostring(root)\n\n @staticmethod\n def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename, mac, ip):\n \"\"\"Generate ISO image for CD-ROM \"\"\"\n\n user_config = [\" - name: {user_name}\",\n \" ssh_authorized_keys:\",\n \" - {pub_key_str}\"]\n if vm_user != \"root\":\n user_config.append(\" sudo: ALL=(ALL) NOPASSWD:ALL\")\n\n meta_data = \"/tmp/meta-data\"\n user_data = \"/tmp/user-data\"\n network_data = \"/tmp/network-config\"\n with open(\".\".join([key_filename, \"pub\"]), \"r\") as pub_key_file:\n pub_key_str = pub_key_file.read().rstrip()\n user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user)\n\n cmd_lst = [\n \"touch %s\" % meta_data,\n USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf),\n NETWORK_DATA_TEMPLATE.format(network_file=network_data, mac_address=mac,\n ip_address=ip),\n \"genisoimage -output {0} -volid cidata -joliet -r {1} {2} {3}\".format(file_path,\n meta_data,\n user_data,\n network_data),\n \"rm {0} {1} {2}\".format(meta_data, user_data, network_data),\n ]\n for cmd in cmd_lst:\n LOG.info(cmd)\n status, _, error = connection.execute(cmd)\n if status:\n raise exceptions.LibvirtQemuImageCreateError(error=error)\n\n\nclass StandaloneContextHelper(object):\n \"\"\" This class handles all the common code for standalone\n \"\"\"\n def __init__(self):\n self.file_path = None\n super(StandaloneContextHelper, self).__init__()\n\n @staticmethod\n def install_req_libs(connection, extra_pkgs=None):\n extra_pkgs = extra_pkgs or []\n pkgs = [\"qemu-kvm\", \"libvirt-bin\", \"bridge-utils\", \"numactl\", \"fping\", \"genisoimage\"]\n pkgs.extend(extra_pkgs)\n cmd_template = \"dpkg-query -W --showformat='${Status}\\\\n' \\\"%s\\\"|grep 'ok installed'\"\n for pkg in pkgs:\n if connection.execute(cmd_template % pkg)[0]:\n connection.execute(\"apt-get update\")\n connection.execute(\"apt-get -y install %s\" % pkg)\n\n @staticmethod\n def get_kernel_module(connection, pci, driver):\n if not driver:\n out = connection.execute(\"lspci -k -s %s\" % pci)[1]\n driver = out.split(\"Kernel modules:\").pop().strip()\n return driver\n\n @classmethod\n def get_nic_details(cls, connection, networks, dpdk_devbind):\n for key, ports in networks.items():\n if key == \"mgmt\":\n continue\n\n phy_ports = ports['phy_port']\n phy_driver = ports.get('phy_driver', None)\n driver = cls.get_kernel_module(connection, phy_ports, phy_driver)\n\n # Make sure that ports are bound to kernel drivers e.g. 
i40e/ixgbe\n bind_cmd = \"{dpdk_devbind} --force -b {driver} {port}\"\n lshw_cmd = \"lshw -c network -businfo | grep '{port}'\"\n link_show_cmd = \"ip -s link show {interface}\"\n\n cmd = bind_cmd.format(dpdk_devbind=dpdk_devbind,\n driver=driver, port=ports['phy_port'])\n connection.execute(cmd)\n\n out = connection.execute(lshw_cmd.format(port=phy_ports))[1]\n interface = out.split()[1]\n\n connection.execute(link_show_cmd.format(interface=interface))\n\n ports.update({\n 'interface': str(interface),\n 'driver': driver\n })\n LOG.info(networks)\n\n return networks\n\n @staticmethod\n def get_virtual_devices(connection, pci):\n cmd = \"cat /sys/bus/pci/devices/{0}/virtfn0/uevent\"\n output = connection.execute(cmd.format(pci))[1]\n\n pattern = \"PCI_SLOT_NAME=({})\".format(PciAddress.PCI_PATTERN_STR)\n m = re.search(pattern, output, re.MULTILINE)\n\n pf_vfs = {}\n if m:\n pf_vfs = {pci: m.group(1).rstrip()}\n\n LOG.info(\"pf_vfs:\\n%s\", pf_vfs)\n\n return pf_vfs\n\n def parse_pod_file(self, file_path, nfvi_role='Sriov'):\n self.file_path = file_path\n nodes = []\n nfvi_host = []\n try:\n cfg = yaml_loader.read_yaml_file(self.file_path)\n except IOError as io_error:\n if io_error.errno != errno.ENOENT:\n raise\n self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,\n file_path)\n cfg = yaml_loader.read_yaml_file(self.file_path)\n\n nodes.extend([node for node in cfg[\"nodes\"] if str(node[\"role\"]) != nfvi_role])\n nfvi_host.extend([node for node in cfg[\"nodes\"] if str(node[\"role\"]) == nfvi_role])\n if not nfvi_host:\n raise(\"Node role is other than SRIOV\")\n\n host_mgmt = {'user': nfvi_host[0]['user'],\n 'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),\n 'password': nfvi_host[0]['password'],\n 'ssh_port': nfvi_host[0].get('ssh_port', 22),\n 'key_filename': nfvi_host[0].get('key_filename')}\n\n return [nodes, nfvi_host, host_mgmt]\n\n @staticmethod\n def get_mac_address(end=0x7f):\n mac = [0x52, 0x54, 0x00,\n random.randint(0x00, end),\n random.randint(0x00, 0xff),\n random.randint(0x00, 0xff)]\n mac_address = ':'.join('%02x' % x for x in mac)\n return mac_address\n\n @staticmethod\n def get_mgmt_ip(connection, mac, cidr, node):\n mgmtip = None\n times = 10\n while not mgmtip and times:\n connection.execute(\"fping -c 1 -g %s > /dev/null 2>&1\" % cidr)\n out = connection.execute(\"ip neighbor | grep '%s'\" % mac)[1]\n LOG.info(\"fping -c 1 -g %s > /dev/null 2>&1\", cidr)\n if out.strip():\n mgmtip = str(out.split(\" \")[0]).strip()\n client = ssh.SSH.from_node(node, overrides={\"ip\": mgmtip})\n client.wait()\n break\n\n time.sleep(WAIT_FOR_BOOT) # FixMe: How to find if VM is booted?\n times = times - 1\n return mgmtip\n\n @classmethod\n def wait_for_vnfs_to_start(cls, connection, servers, nodes):\n for node in nodes:\n vnf = servers[node[\"name\"]]\n mgmtip = vnf[\"network_ports\"][\"mgmt\"][\"cidr\"]\n ip = cls.get_mgmt_ip(connection, node[\"mac\"], mgmtip, node)\n if ip:\n node[\"ip\"] = ip\n client = ssh.SSH.from_node(node)\n LOG.debug(\"OS version: %s\",\n common_utils.get_os_version(client))\n LOG.debug(\"Kernel version: %s\",\n common_utils.get_kernel_version(client))\n vnfs_data = common_utils.get_sample_vnf_info(client)\n for vnf_name, vnf_data in vnfs_data.items():\n LOG.debug(\"VNF name: '%s', commit ID/branch: '%s'\",\n vnf_name, vnf_data[\"branch_commit\"])\n LOG.debug(\"%s\", vnf_data[\"md5_result\"])\n return nodes\n\n @classmethod\n def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img, mac):\n # Generate public/private keys if private key file 
is not provided\n user_name = node.get('user')\n if not user_name:\n node['user'] = 'root'\n user_name = node.get('user')\n if not node.get('key_filename'):\n key_filename = ''.join(\n [constants.YARDSTICK_ROOT_PATH,\n 'yardstick/resources/files/yardstick_key-',\n id_name, '-', vm_name])\n ssh.SSH.gen_keys(key_filename)\n node['key_filename'] = key_filename\n # Update image with public key\n key_filename = node.get('key_filename')\n ip_netmask = \"{0}/{1}\".format(node.get('ip'), node.get('netmask'))\n ip_netmask = \"{0}/{1}\".format(node.get('ip'),\n IPNetwork(ip_netmask).prefixlen)\n Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename, mac,\n ip_netmask)\n return node\n\n\nclass Server(object):\n \"\"\" This class handles geting vnf nodes\n \"\"\"\n\n @staticmethod\n def build_vnf_interfaces(vnf, ports):\n interfaces = {}\n index = 0\n\n for key, vfs in vnf[\"network_ports\"].items():\n if key == \"mgmt\":\n mgmt_cidr = IPNetwork(vfs['cidr'])\n continue\n\n vf = ports[vfs[0]]\n ip = IPNetwork(vf['cidr'])\n interfaces.update({\n key: {\n 'vpci': vf['vpci'],\n 'driver': \"%svf\" % vf['driver'],\n 'local_mac': vf['mac'],\n 'dpdk_port_num': index,\n 'local_ip': str(ip.ip),\n 'netmask': str(ip.netmask)\n },\n })\n index = index + 1\n\n return mgmt_cidr, interfaces\n\n @classmethod\n def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):\n mgmt_cidr, interfaces = cls.build_vnf_interfaces(vnf, ports)\n\n result = {\n \"ip\": str(mgmt_cidr.ip),\n \"netmask\": str(mgmt_cidr.netmask),\n \"mac\": mac,\n \"host\": ip,\n \"user\": flavor.get('user', 'root'),\n \"interfaces\": interfaces,\n \"routing_table\": [],\n # empty IPv6 routing table\n \"nd_route_tbl\": [],\n \"name\": key, \"role\": key\n }\n\n try:\n result['key_filename'] = flavor['key_filename']\n except KeyError:\n pass\n\n try:\n result['password'] = flavor['password']\n except KeyError:\n pass\n LOG.info(result)\n return result\n\n\nclass OvsDeploy(object):\n \"\"\" This class handles deploy of ovs dpdk\n Configuration: ovs_dpdk\n \"\"\"\n\n OVS_DEPLOY_SCRIPT = \"ovs_deploy.bash\"\n\n def __init__(self, connection, bin_path, ovs_properties):\n self.connection = connection\n self.bin_path = bin_path\n self.ovs_properties = ovs_properties\n\n def prerequisite(self):\n pkgs = [\"git\", \"build-essential\", \"pkg-config\", \"automake\",\n \"autotools-dev\", \"libltdl-dev\", \"cmake\", \"libnuma-dev\",\n \"libpcap-dev\"]\n StandaloneContextHelper.install_req_libs(self.connection, pkgs)\n\n def ovs_deploy(self):\n ovs_deploy = os.path.join(constants.YARDSTICK_ROOT_PATH,\n \"yardstick/resources/scripts/install/\",\n self.OVS_DEPLOY_SCRIPT)\n if os.path.isfile(ovs_deploy):\n self.prerequisite()\n remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)\n LOG.info(remote_ovs_deploy)\n self.connection.put(ovs_deploy, remote_ovs_deploy)\n\n http_proxy = os.environ.get('http_proxy', '')\n ovs_details = self.ovs_properties.get(\"version\", {})\n ovs = ovs_details.get(\"ovs\", \"2.6.0\")\n dpdk = ovs_details.get(\"dpdk\", \"16.11.1\")\n\n cmd = \"sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'\" % (remote_ovs_deploy,\n ovs, dpdk, http_proxy)\n exit_status, _, stderr = self.connection.execute(cmd)\n if exit_status:\n raise 
exceptions.OVSDeployError(stderr=stderr)\n","repo_name":"opnfv/yardstick","sub_path":"yardstick/benchmark/contexts/standalone/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":24327,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"47"}
+{"seq_id":"458334048","text":"import os\nimport hashlib\nimport boto3\nfrom concurrent.futures import ThreadPoolExecutor\nfrom upload_files import batch_upload_files\n\ndef get_md5(file_path):\n hasher = hashlib.md5()\n with open(file_path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hasher.update(chunk)\n return hasher.hexdigest()\n\ndef get_s3_objects(s3_client, bucket_name):\n response = s3_client.list_objects_v2(Bucket=bucket_name)\n return {obj['Key']: obj['ETag'].strip('\"') for obj in response.get('Contents', [])}\n\ndef sync_folder_to_s3(s3_client, bucket_name, folder_path, max_threads=5):\n local_files = {\n os.path.join(root, file): get_md5(os.path.join(root, file))\n for root, _, files in os.walk(folder_path)\n for file in files\n }\n s3_objects = get_s3_objects(s3_client, bucket_name)\n\n files_to_upload = [\n file_path\n for file_path, md5 in local_files.items()\n if os.path.basename(file_path) not in s3_objects or md5 != s3_objects[os.path.basename(file_path)]\n ]\n\n batch_upload_files(s3_client, bucket_name, files_to_upload, max_threads=max_threads)\n\nif __name__ == \"__main__\":\n s3 = boto3.client('s3')\n bucket_name = \"my-bucket\"\n folder_path = \"path/to/your/folder\"\n\n sync_folder_to_s3(s3, bucket_name, folder_path, max_threads=5)","repo_name":"hands-on-cloud/python-boto3-course","sub_path":"s3-lessons/batch-operations/sync_folder_upload.py","file_name":"sync_folder_upload.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"72498390222","text":"#--------Importamos las librerías pil y matplotlib para poder usarlas para meter imágenes y graficas respectivamente\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt# le decimos que la librería se llame plt para acortarlo\r\ndef sacar_datosdevacunados():#Función que nos permite extraer los datos de un archivo en este caso txt\r\n matriz=[]#matriz a guardar\r\n f=open(\"vacunados.txt\",\"r\")#abrimos el archivo en modo lectura\r\n for linea in f:#Extracion de la información\r\n wordlist=linea.split()\r\n matriz.append(wordlist)\r\n f.close()#cerramos el archivo\r\n return matriz#devolvemos la matriz para poder usarla en el resto del código\r\n\r\ndef datos_vacunados(matriz,nombre):#Función que nos dice la información si estas vacunado o no, y de ser así nos dice\r\n #las fechas de vacunación\r\n for i in range(40):\r\n for j in range(1):#Usamos uno porque esa es la columna del nombre de usuario\r\n if nombre==matriz[i][0]:#checamos en que posición esta\r\n print(f'El usuario llamado {nombre}')#Esto nos indica su posición\r\n opcion=matriz[i][1]#Este nos indica que si esta vacunado o no\r\n opcion=opcion.lower()\r\n #Usamos lower porque no sabemos cómo vienen los si o no en la función y así aseguramos\r\n if opcion=='si':#si esta vacunado le decimos de que marca y dependiendo de la vacuna sus fechas de\r\n #vacunacion\r\n print('Si esta vacunado')\r\n print('Su marca de vacuna es',matriz[i][2])\r\n marca=matriz[i][2]\r\n if marca=='Johnson&johnson':\r\n print('Al ser la marca ',marca,'por eso solo tuvo una dosis el día',matriz[i][3])\r\n else:\r\n print('Al ser la marca',marca,'por eso solo tuvo dos dosis los días',matriz[i][3],'y',\r\n matriz[i][4])\r\n else:#Sabemos que no está vacunado\r\n print('No esta vacunado')\r\n break #Rompemos el código porque la demás información no es interesante\r\n \r\ndef grafico_vacunados(matriz,nombre):\r\n '''Función que nos dice el comparativo de la vacunación de los usuarios por marca y de vacunados y no vacunados'''\r\n #------------------------Acumuladores que van a guardar cuantas veces sale el tipo de marca--------\r\n pfizer=0\r\n astra=0\r\n moderna=0\r\n johnson=0\r\n sputnik=0\r\n sindatos=0\r\n #Con estos for vemos cuantas veces sale la marca en la matriz y lo guardamos para usarlo en la grafica\r\n for i in range(40):\r\n for j in range(2,3):\r\n if matriz[i][j]=='Pfizer':\r\n pfizer+=1\r\n elif matriz[i][j]=='Astra_Seneca':\r\n astra+=1\r\n elif matriz[i][j]=='Moderna':\r\n moderna+=1\r\n elif matriz[i][j]=='Johnson&johnson':\r\n johnson+=1\r\n elif matriz[i][j]=='Sputnik':\r\n sputnik+=1\r\n else:\r\n sindatos+=1\r\n #Creación de la grafica de pastel con la librería matplotlib.pyplot dado etiquetas, valores y colores\r\n etiquetas = ['Pfizer','Astra Seneca','Moderna','Johnson & Johnson','Sputnik','No sean vacunado']\r\n valores = [pfizer,astra,moderna,johnson,sputnik,sindatos]\r\n colores = ['#2b31b2','#d3d85d','#ba38cf','#dda173','#872121','#36b22b']\r\n plt.pie(x=valores, labels=etiquetas, colors = colores, autopct=\"%0.1f %%\", shadow=True)\r\n plt.title('Comparativo de vacunación de marcas o no vacunados por todos los usuarios')\r\n plt.show()\r\n total=pfizer+astra+moderna+johnson+sputnik#Suma de las marcas para saber el porcentaje de vacunados\r\n #Creación de la grafica de pastel con la librería matplotlib.pyplot dado etiquetas, valores y colores\r\n etiquetas = ['Gente vacunada','Gente sin vacunar',]\r\n valores = [total,sindatos]\r\n colores = ['#2b31b2','#d3d85d']\r\n plt.pie(x=valores, 
labels=etiquetas, colors = colores, autopct=\"%0.1f %%\", shadow=True)\r\n plt.title('Comparativo de usuarios vacunados o no')\r\n plt.show()\r\n #Impresion de las conclusiones del grafico anterior\r\n print('Como podemos ver hubo un mayor porcentaje de vacunados en nuestros usuarios con',round(total/40*100,2),'%')\r\n print('Sin embargo hay un alto porcentaje de no vacunados con',round(sindatos/40*100,2),'%')\r\n'''función principal de la parte 2 que llama a las otras funciones, guarda a la matriz\r\n y pide el nombre que pedimos en el main principal porque se va a usar'''\r\n\r\ndef info_vacunados(nombre):\r\n matriz=sacar_datosdevacunados()\r\n #llamamos a esa función para poder usar la matriz en esta función y en las otras creadas\r\n #llamada a las funciones dado la matriz y nombre del usuario\r\n nombre=datos_vacunados(matriz,nombre)\r\n grafico_vacunados(matriz,nombre)\r\n","repo_name":"Bdelas777/AppCovid","sub_path":"Proyecto/parte2.py","file_name":"parte2.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"3931791237","text":"def texte(mots:list)->str:\n \"\"\"\n Params :\n - mots : list\n \n returns \n - phrase : str\n \n retoune une chaîne de caractere formé de la concaténation des éléments du tableau mots séparé par des espaces\"\"\"\n for element in mots:\n assert type(element) == str, \"Au moins 1 des éléments de mots n'est pas une chaine de caractères\"\n\n return \" \".join(mots)\n\ndef enum(mots:list)->str:\n \"\"\"\n Params :\n - mots : list\n \n Returns\n - phrase : str\n \n Retourne une chapine de caractère formé de la concaténation des éléments d'un tableau séparé par un espace sauf pour les 2 derniers élements séparé par un \"et\" \"\"\"\n phrase = \"\"\n for element in mots:\n assert type(element) == str, \"Au moins 1 des éléments n'est pas une chaîne de caractères\"\n phrase = phrase + str(element) + \" \"\n return phrase [:len(phrase -2)]\n\n\n","repo_name":"TGV2107/Exercices","sub_path":"Modularité et gestion des bugs/Sylvain/Ex10p66/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"6391656437","text":"class Solution(object):\n def nthUglyNumber(self, n):\n primes = [2, 3, 5]\n k = len(primes)\n idx = [0 for _ in range(k)]\n nums = [1]\n while len(nums) < n:\n minArr = []\n for i in range(k):\n minArr.append(primes[i] * nums[idx[i]])\n minVal = min(minArr)\n for i in range(k):\n if minArr[i] == minVal:\n idx[i] += 1\n # print(nums, idx)\n nums.append(minVal)\n # print(nums)\n return nums[-1]\n\n\nsol = Solution()\nn = 11\nprint(sol.nthUglyNumber(n))\n\n\n\n\n\n\n","repo_name":"mengyx-work/CS_algorithm_scripts","sub_path":"leetcode/LC_264. Ugly Number II.py","file_name":"LC_264. Ugly Number II.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"47"}
+{"seq_id":"28576500614","text":"# combine one or many entries in victim.dat to guess password\n\nimport itertools\nentries = [line.rstrip() for line in open('victim.dat', 'r')]\nprint(entries)\ncombinations = itertools.combinations(entries, 2)\ncombinations = list(combinations)\nimport pdb\npdb.set_trace()\n# print(combinations)","repo_name":"OhMyBuggg/ComputerSecurity","sub_path":"project3/task1/test_itertools.py","file_name":"test_itertools.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"38084803347","text":"\n\"\"\"\n 根据返回结果,获取返回信息\n\n\"\"\"\nimport json\n\nimport jsonpath as jsonpath\n\ndef get_text(res,key):\n if res is not None:\n try:\n\n text = json.loads(res.text)\n value = jsonpath.jsonpath(text,'$..{0}'.format(key))\n\n if value:\n if len(value) == 1:\n return value[0]\n else:\n return value\n\n except Exception as e:\n return e\n else:\n return None","repo_name":"oilbean/pytestDemo","sub_path":"common/getResultText.py","file_name":"getResultText.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"47"}
+{"seq_id":"38743455075","text":"from app import app\nfrom flask import render_template, flash, redirect, url_for\nfrom app.forms import LoginForm\n\n\n@app.route('/')\n@app.route('/user/')\ndef index(name=None):\n user = {'username': name}\n if name == None:\n user = {'username': 'Jack'}\n posts = [\n {\n 'author': {'username': 'John'},\n 'body': 'Jack is great'\n },\n {\n 'author': {'username': 'Susan'},\n 'body': 'John is right'\n }\n ]\n return render_template('index.html', title='Home', user=user, posts=posts)\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n flash('Login requested for user {}, remember_me={}'.format(\n form.username.data, form.remember_me.data))\n return redirect(url_for('index'))\n return render_template('login.html', title='Sign In', form=form)\n","repo_name":"jdauphar/using_flask","sub_path":"homework2/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"10225347513","text":"# coding: utf-8\nimport ipdb\nfrom unittest import TestCase\nimport os.path\n\nfrom mongoengine import GridFSProxy\nfrom gmail import models\n\nclass DeleteTestCase(TestCase):\n def setUp(self):\n fpath = os.path.join(os.path.dirname(__file__), 'fixtures/pic.eml')\n # Add one\n with open(fpath) as fp:\n self.e = models.Email.from_string(fp.read())\n self.e.save()\n\n def test_delete_resources(self):\n self.assertRegexpMatches(str(self.e.id), r'^\\w{24}$')\n self.assertIsInstance(models.Email.objects(id=self.e.id).first(),\n models.Email)\n\n resources = []\n resources.extend(self.e.resources or [])\n self.assertNotEqual(resources, [])\n resources.extend(self.e.attachments or [])\n resources.append(self.e.source)\n # All exist\n for resc in resources:\n self.assertIsNotNone(GridFSProxy().get(resc.grid_id))\n\n self.e.delete()\n # None exsits\n self.assertIsNone(models.Email.objects(id=self.e.id).first())\n for resc in resources:\n self.assertIsNone(GridFSProxy().get(resc.grid_id))\n\n def tearDown(self):\n self.e.delete()\n\nclass EmptyBodyTestCase(TestCase):\n\n def test_empty_body(self):\n e = models.Email.from_string('To: ph0tinia@163.com')\n try:\n # msg.body will be None, in which case we cannot pass it\n # to HTMLParser, who only accepts string\n e.clean()\n except TypeError as e:\n self.fail('%s, may email.body is None' % e)\n","repo_name":"michaelhuaqing/Gmail_new","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"23318639998","text":"import os, traceback, configparser\n \nfrom libmesact import checkconfig\nfrom libmesact import buildini\nfrom libmesact import buildhal\nfrom libmesact import buildio\nfrom libmesact import buildmisc\nfrom libmesact import buildss\nfrom libmesact import utilities\n\ndef build(parent):\n\n\tif not checkconfig.checkit(parent):\n\t\tparent.machinePTE.appendPlainText('Build Failed')\n\t\treturn\n\tif parent.backupCB.isChecked():\n\t\tutilities.backupFiles(parent)\n\n\tconfig = configparser.ConfigParser()\n\tconfig.add_section('NAGS')\n\tif parent.checkMesaflashCB.isChecked():\n\t\tconfig['NAGS']['MESAFLASH'] = 'True'\n\telse:\n\t\tconfig['NAGS']['MESAFLASH'] = 'False'\n\tif parent.newUserCB.isChecked():\n\t\tconfig['NAGS']['NEWUSER'] = 'True'\n\telse:\n\t\tconfig['NAGS']['NEWUSER'] = 'False'\n\n\n\tconfig.add_section('STARTUP')\n\tif parent.loadConfigCB.isChecked():\n\t\tconfig['STARTUP']['CONFIG'] = parent.configName.text()\n\telse:\n\t\tconfig['STARTUP']['CONFIG'] = 'False'\n\twith open(os.path.expanduser('~/.config/measct/mesact.conf'), 'w') as configfile:\n\t\tconfig.write(configfile)\n\n\t\tif config.has_option('NAGS', 'MESAFLASH'):\n\t\t\tif config['NAGS']['MESAFLASH'] == 'True':\n\t\t\t\tparent.checkMesaflashCB.setChecked(True)\n\t\tif config.has_option('NAGS', 'MESAFLASH'):\n\t\t\tif config['NAGS']['NEWUSER'] == 'True':\n\t\t\t\tparent.newUserCB.setChecked(True)\n\n\t# check for linuxcnc paths\n\tif not os.path.exists(os.path.expanduser('~/linuxcnc')):\n\t\ttry:\n\t\t\tos.mkdir(os.path.expanduser('~/linuxcnc'))\n\t\texcept OSError:\n\t\t\tparent.machinePTE.appendPlainText(f'OS error\\n {traceback.print_exc()}')\n\n\tif not os.path.exists(os.path.expanduser('~/linuxcnc/configs')):\n\t\ttry:\n\t\t\tos.mkdir(os.path.expanduser('~/linuxcnc/configs'))\n\t\texcept OSError:\n\t\t\tparent.machinePTE.appendPlainText(f'OS error\\n {traceback.print_exc()}')\n\n\tif not os.path.exists(os.path.expanduser('~/linuxcnc/nc_files')):\n\t\ttry:\n\t\t\tos.mkdir(os.path.expanduser('~/linuxcnc/nc_files'))\n\t\texcept OSError:\n\t\t\tparent.machinePTE.appendPlainText(f'OS error\\n {traceback.print_exc()}')\n\n\tif not os.path.exists(os.path.expanduser('~/linuxcnc/subroutines')):\n\t\ttry:\n\t\t\tos.mkdir(os.path.expanduser('~/linuxcnc/subroutines'))\n\t\texcept OSError:\n\t\t\tparent.machinePTE.appendPlainText(f'OS error\\n {traceback.print_exc()}')\n\n\tbuildini.build(parent)\n\tbuildhal.build(parent)\n\tbuildio.build(parent)\n\tbuildmisc.build(parent)\n\tbuildss.build(parent)\n\n\n","repo_name":"cnc4less/mesact","sub_path":"mesact/src/libmesact/buildconfig.py","file_name":"buildconfig.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"12898863278","text":"with open('covdesc') as f: \n data = f.read().split('\\n')\nres = ''\nfix = '.CEL.gz'\noneline = data[0] + fix\nfor i in range(1, len(data)):\n term = data[i]\n if term.startswith('GSM'):\n term += fix\n if ' ' in term:\n term = term.replace(' ', '_')\n if oneline != '':\n oneline += '\\t'\n oneline += term\n try:\n if data[i + 1].startswith('GSM'):\n res += '\\n' + oneline\n oneline = ''\n except IndexError:\n res += '\\n' + oneline\nwith open('covdesc.txt', 'w') as f:\n f.write('\\tstate\\ttype\\ttitle')\n f.write(res)\n\n\n","repo_name":"brown-2/mis_localization","sub_path":"R/data/GSE9476_RAW/gene_covdesc.py","file_name":"gene_covdesc.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"31505700726","text":"import asyncio\nimport logging\nimport os\nimport sys\nimport threading\nimport time\n\nfrom pymaker import Address, web3_via_http\nfrom pymaker.deployment import DssDeployment\nfrom pymaker.gas import FixedGasPrice, GeometricGasPrice\nfrom pymaker.keys import register_keys\nfrom pymaker.numeric import Wad\n\nlogging.basicConfig(format='%(asctime)-15s %(levelname)-8s %(message)s', level=logging.DEBUG)\n# reduce logspew\nlogging.getLogger('urllib3').setLevel(logging.INFO)\nlogging.getLogger(\"web3\").setLevel(logging.INFO)\nlogging.getLogger(\"asyncio\").setLevel(logging.INFO)\nlogging.getLogger(\"requests\").setLevel(logging.INFO)\n\npool_size = int(sys.argv[3]) if len(sys.argv) > 3 else 10\nweb3 = web3_via_http(endpoint_uri=os.environ['ETH_RPC_URL'], http_pool_size=pool_size)\nweb3.eth.defaultAccount = sys.argv[1] # ex: 0x0000000000000000000000000000000aBcdef123\nregister_keys(web3, [sys.argv[2]]) # ex: key_file=~keys/default-account.json,pass_file=~keys/default-account.pass\n\nmcd = DssDeployment.from_node(web3)\nour_address = Address(web3.eth.defaultAccount)\nweth = DssDeployment.from_node(web3).collaterals['ETH-A'].gem\n\nGWEI = 1000000000\nslow_gas = GeometricGasPrice(initial_price=int(15 * GWEI), every_secs=42, max_price=200 * GWEI)\nfast_gas = GeometricGasPrice(initial_price=int(30 * GWEI), every_secs=42, max_price=200 * GWEI)\n\n\nclass TestApp:\n def main(self):\n self.test_replacement()\n self.test_simultaneous()\n self.shutdown()\n\n def test_replacement(self):\n first_tx = weth.deposit(Wad(4))\n logging.info(f\"Submitting first TX with gas price deliberately too low\")\n self._run_future(first_tx.transact_async(gas_price=slow_gas))\n time.sleep(0.5)\n\n second_tx = weth.deposit(Wad(6))\n logging.info(f\"Replacing first TX with legitimate gas price\")\n second_tx.transact(replace=first_tx, gas_price=fast_gas)\n\n assert first_tx.replaced\n\n def test_simultaneous(self):\n self._run_future(weth.deposit(Wad(1)).transact_async(gas_price=fast_gas))\n self._run_future(weth.deposit(Wad(3)).transact_async(gas_price=fast_gas))\n self._run_future(weth.deposit(Wad(5)).transact_async(gas_price=fast_gas))\n self._run_future(weth.deposit(Wad(7)).transact_async(gas_price=fast_gas))\n time.sleep(33)\n\n def shutdown(self):\n balance = weth.balance_of(our_address)\n if Wad(0) < balance < Wad(100): # this account's tiny WETH balance came from this test\n logging.info(f\"Unwrapping {balance} WETH\")\n assert weth.withdraw(balance).transact(gas_price=fast_gas)\n elif balance >= Wad(22): # user already had a balance, so unwrap what a successful test would have consumed\n logging.info(f\"Unwrapping 12 WETH\")\n assert weth.withdraw(Wad(22)).transact(gas_price=fast_gas)\n\n @staticmethod\n def _run_future(future):\n def worker():\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n try:\n asyncio.get_event_loop().run_until_complete(future)\n finally:\n loop.close()\n\n thread = threading.Thread(target=worker, daemon=True)\n thread.start()\n\n\nif __name__ == '__main__':\n TestApp().main()\n","repo_name":"makerdao/pymaker","sub_path":"tests/manual_test_async_tx.py","file_name":"manual_test_async_tx.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"47"}
+{"seq_id":"23123286883","text":"def binarySearch(lst, value, low, high):\n\n\tif low > high:\n\t\treturn -1\n\tmid = (low + high) // 2\n\tif lst[mid] == value:\n\t\treturn mid\n\telif lst[mid] < value:\n\t\treturn binarySearch(lst, value, mid+1, high)\n\telse:\n\t\treturn binarySearch(lst, value, low, mid)\n\ndef selectionSort(lst):\n\tfor i in range(len(lst)):\n\t\tsmall = i \n\t\tfor j in range(i+1, len(lst)):\n\t\t\tif lst[j] < lst[small]:\n\t\t\t\tsmall = j\n\n\t\tlst[i], lst[small] = lst[small], lst[i]\n\n\treturn lst\n\ndef insertionSort(lst):\n\tfor i in range(1, len(lst)):\n\t\tj = i-1\n\t\twhile(j >= 0 and lst[j] > lst[i]):\n\t\t\tlst[i], lst[j] = lst[j], lst[i]\n\t\t\tj-=1\n\n\treturn lst\n\n\n\ndef bubbleSort(lst):\n\tfor i in range(len(lst)):\n\t\tfor j in range(1, len(lst)):\n\t\t\tif lst[j-1] > lst[j]:\n\t\t\t\tlst[j-1], lst[j] = lst[j], lst[j-1]\n\treturn lst\n\n\n'''\nmerge() works for merging 2 list\nbut a heap data structure is able to merge k lists in O(k log n)\n\n'''\ndef mergeSort(lst):\n\tif len(lst) <= 1:\n\t\treturn lst\n\tmid = len(lst)//2\n\tleft = lst[:mid]\n\tright = lst[mid:]\n\n\tmergeSort(left)\n\tmergeSort(right)\n\n\ti, j, k = 0, 0, 0\n\twhile i < len(left) and j < len(right):\n\t\tif left[i] < right[j]:\n\t\t\tlst[k] = left[i]\n\t\t\ti+=1\n\t\telse:\n\t\t\tlst[k] = right[j]\n\t\t\tj+=1\n\t\tk+=1\n\n\twhile i < len(left):\n\t\tlst[k] = left[i]\n\t\ti+=1\n\t\tk+=1\n\n\twhile j < len(right):\n\t\tlst[k] = right[j]\n\t\tj+=1\n\t\tk+=1\n\n\treturn lst\n\ndef quickSort(lst):\n\tif len(lst) <= 1:\n\t\treturn lst\n\tleft = []\n\tequal = []\n\tright = []\n\tpivot = lst[0]\n\n\tfor val in lst:\n\t\tif val < pivot:\n\t\t\tleft.append(val)\n\t\telif val == pivot:\n\t\t\tequal.append(val)\n\t\telse:\n\t\t\tright.append(val)\n\treturn quickSort(left) + equal + quickSort(right)\n\ndef quicksort2(lst):\n\tif not lst:\n\t\treturn []\n\n\tpivots = [x for x in lst if x == lst[0]]\n\tleft = quicksort2([x for x in lst if x < lst[0]])\n\tright = quicksort2([x for x in lst if x > lst[0]])\n\n\treturn left+pivots+right\n\n\n'''\nGiven a nearly sorted list find k largest integers\n\nhttp://www.geeksforgeeks.org/nearly-sorted-algorithm/\n\nPut k elements in min heap. Then for rest of elements you traverse heap\nO(k) + O((n-k)log k)\n\n\n'''\nimport heapq\n\ndef k_largest(lst, k):\n\tprint(lst)\n\n\t# One line \n\t# print(heapq.nlargest(k, lst))\n\t\n\t# Same thing but multiple lines\n\t# heap = []\n\n\t# for i in lst:\n\t# \tif len(heap) < k:\n\t# \t\theapq.heappush(heap, i)\n\t# \telse:\n\t# \t\tif i > heap[0]:\n\t# \t\t\theapq.heappop(heap)\n\t# \t\t\theapq.heappush(heap, i)\n\n\t# arr = []\n\t# while len(heap) > 0:\n\t# \tarr.append(heapq.heappop(heap))\n\n\t# print(arr)\n\n\t# done using insertion sort\n\tarr = insertionSort(lst)\n\tprint(arr[len(arr)-k:])\n\n\n\nif __name__==\"__main__\":\n\t# lst = [5, 7, 3, 8, 4, 2]\n\t# print(insertionSort(lst))\n\t# print(bubbleSort(lst))\n\t# print(mergeSort(lst))\n\t# print(quickSort(lst))\n\t# print(quicksort2(lst))\n\n\tlst = [1, 3, 5, 8, 2, 10, 4, 12, 16]\n\tk_largest(lst, 3)\n\n\n","repo_name":"dkoh12/gamma","sub_path":"Python/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"47"}
+{"seq_id":"42496061569","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 4 2018\n\nFile to pull fluids out of raw mimic data cut for use in downstream RL modeling.\n\nTakes data from the raw mimic csv's in raw-data:\n\tpath on odyssey: /n/dtak/mimic-iii-v1-4/raw-data \n \nMost of what we need is in INPUTEVENTS_CV & INPUTEVENTS_MV\n\n\n@author: josephfutoma\n\"\"\"\n\nimport sys\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport pickle \nnp.set_printoptions(threshold=1000)\npd.set_option(\"display.max_columns\",101)\n\nPATH_TO_REPO = \"XXX\"\n\n#####\n##### MV input events\n#####\n\ninputdat_mv = pd.read_csv(PATH_TO_REPO+\"raw-data/INPUTEVENTS_MV.csv\",\n\tusecols=['ICUSTAY_ID','STARTTIME','ENDTIME','ITEMID','AMOUNT',\n\t'AMOUNTUOM','RATE','RATEUOM','STATUSDESCRIPTION'])\n\ninputdat_mv = inputdat_mv.dropna(axis=0,how='any',\n\tsubset=['ICUSTAY_ID','STARTTIME','ITEMID','AMOUNT'])\n\ninputdat_mv = inputdat_mv.astype(\n\t{'ICUSTAY_ID':int,\n\t\t'STARTTIME':str,\n\t\t'ENDTIME':str,\t\n\t\t'ITEMID':int,\n\t\t'AMOUNT':float,\n\t\t'AMOUNTUOM':str,\n\t\t'STATUSDESCRIPTION':str,\n\t\t'RATE':float,\n\t\t'RATEUOM':str\n\t})\n\ninputdat_mv['STARTTIME'] = pd.to_datetime(inputdat_mv['STARTTIME'])\ninputdat_mv['ENDTIME'] = pd.to_datetime(inputdat_mv['ENDTIME'])\n\n#filters\ninputdat_mv = inputdat_mv[inputdat_mv['AMOUNT']>0]\ninputdat_mv = inputdat_mv[inputdat_mv['STATUSDESCRIPTION']!='Rewritten'] #admin error, ignore these\n\n###\n### NOTE: per Leo's suggestion, just using most common crystalloids along with blood products for bleeds\n###\n\n##### CRYSTALLOIDS\nmv_crystal_items = {\n225158: 'NaCl 0.9%',\n225828: 'LR',\n# 225944: 'Sterile Water',\n# 225797: 'Free Water',\n# 225159: 'NaCl 0.45%',\n# 225161: 'NaCl 3% (Hypertonic Saline)',\n# 225823: 'D5 1/2NS',\n# 225825: 'D5NS',\n# 225827: 'D5LR',\n# 225941: 'D5 1/4NS',\n# 226089: 'Piggyback'\n}\n\n##### COLLOIDS\nmv_coll_items = {\n # 220864: 'Albumin 5%',\n # 220862: 'Albumin 25%',\n # 225174: 'Hetastarch (Hespan) 6%',\n # 225795: 'Dextran 40',\n # 225796: 'Dextran 70',\n # # -- below ITEMIDs not in use\n # # -- 220861 | Albumin (Human) 20%\n # # -- 220863 | Albumin (Human) 4%\n \n # 220949: 'Dextrose 5%',\n # 220950: 'Dextrose 10%',\n # 220952: 'Dextrose 50%'\n}\n\n\n############ BLOOD PRODUCTS\n\nmv_rbc_items = {\n 225168: 'Packed Red Blood Cells',\n 226368: 'PACU Packed RBC Intake',\n 226370: 'OR Packed RBC Intake',\n 227070: 'OR Autologous Blood Intake'\n}\n\nmv_ffp_items = {\n 220970: 'PACU FFP Intake',\n 226367: 'Fresh Frozen Plasma',\n 227072: 'OR FFP Intake'\n}\n\nmv_platelet_items = {\n225170: 'Platelets',\n226369: 'OR Platelet Intake'\n}\n\n#####\n\nmv_item_ids = list(mv_crystal_items.keys())\nmv_item_ids.extend(list(mv_coll_items.keys()))\nmv_item_ids.extend(list(mv_rbc_items.keys()))\nmv_item_ids.extend(list(mv_ffp_items.keys()))\nmv_item_ids.extend(list(mv_platelet_items.keys()))\n\nmv_items = list(mv_crystal_items.values())\nmv_items.extend(list(mv_coll_items.values()))\nmv_items.extend(list(mv_rbc_items.values()))\nmv_items.extend(list(mv_ffp_items.values()))\nmv_items.extend(list(mv_platelet_items.values()))\n\n\ninputdat_mv = inputdat_mv[inputdat_mv['ITEMID'].isin(mv_item_ids)]\ninputdat_mv = inputdat_mv.sort_values(by=[\"ICUSTAY_ID\",\"STARTTIME\"])\ninputdat_mv.loc[inputdat_mv['AMOUNTUOM']=='L','AMOUNT'] *= 1000\ninputdat_mv.loc[inputdat_mv['AMOUNTUOM']=='L','AMOUNTUOM'] = 'ml'\n\n#how long admin was, in mins\ninputdat_mv['EVENT_TIME'] = 
(inputdat_mv['ENDTIME']-inputdat_mv['STARTTIME']).astype('timedelta64[m]').astype(int)\n\n#NOTE: may want to play around with this, as a very large fraction are 1, so more of an \"instant\" fluids\n\nBOLUS_TIME_THRESH = 60 #time window over which fluid was administered for us to include\n\n### filter out to get final fluids dataset, for now\n\ninputdat_mv = inputdat_mv[inputdat_mv['EVENT_TIME'] <= BOLUS_TIME_THRESH]\n\n\ninputdat_mv.to_csv(PATH_TO_REPO+\"query-data/allfluids_and_bloodproducts_mv.csv\",index=False)\n\n#NOTE: we intentionally do NOT filter on volume in this script, so very small fluid amounts are kept\n# it is expected that this will be handled in a downstream data cleaning script that will aggregate\n# fluid amounts and filter out periods where very small amounts (eg < 250ml) are given\n","repo_name":"dtak/POPCORN-POMDP","sub_path":"src/mimic_preproc/extract-scripts/get_fluids.py","file_name":"get_fluids.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"47"}
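The closing NOTE in the record above defers volume filtering to a downstream cleaning script. A minimal sketch of what that aggregation could look like, reading the CSV written above; the hourly binning and the 250 ml cutoff (taken from the note's example figure) are illustrative choices, not something this script prescribes.

# Hypothetical downstream step: aggregate bolus volumes per ICU stay per
# hour, then drop hours whose total volume falls below an example 250 ml cutoff.
import pandas as pd

fluids = pd.read_csv("query-data/allfluids_and_bloodproducts_mv.csv",
                     parse_dates=["STARTTIME"])
fluids["HOUR"] = fluids["STARTTIME"].dt.floor("H")
hourly = fluids.groupby(["ICUSTAY_ID", "HOUR"], as_index=False)["AMOUNT"].sum()
hourly = hourly[hourly["AMOUNT"] >= 250]  # example threshold from the NOTE above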